{
"source": "joshpearce/knausj_talon",
"score": 2
} |
#### File: knausj_talon/code/microphone_selection.py
```python
from talon import Module, actions, app, imgui
from talon.lib import cubeb

ctx = cubeb.Context()
mod = Module()

microphone_device_list = []


# by convention, None and System Default are listed first
# to match the Talon context menu.
def update_microphone_list():
    global microphone_device_list
    microphone_device_list = ["None", "System Default"]

    # On Windows, it's presently necessary to check the state, or
    # we will get any and every microphone that was ever connected.
    devices = [
        dev.name for dev in ctx.inputs() if dev.state == cubeb.DeviceState.ENABLED
    ]
    devices.sort()
    microphone_device_list += devices


def devices_changed(device_type):
    update_microphone_list()


@imgui.open()
def gui(gui: imgui.GUI):
    gui.text("Select a Microphone")
    gui.line()
    for index, item in enumerate(microphone_device_list, 1):
        if gui.button("{}. {}".format(index, item)):
            actions.user.microphone_select(index)


@mod.action_class
class Actions:
    def microphone_selection_toggle():
        """Toggles the microphone selection GUI"""
        if gui.showing:
            gui.hide()
        else:
            update_microphone_list()
            gui.show()

    def microphone_select(index: int):
        """Selects a microphone from the list by its 1-based index"""
        if 1 <= index <= len(microphone_device_list):
            actions.speech.set_microphone(microphone_device_list[index - 1])
            app.notify(
                "Activating microphone: {}".format(microphone_device_list[index - 1])
            )
            gui.hide()


def on_ready():
    ctx.register("devices_changed", devices_changed)
    update_microphone_list()


app.register("ready", on_ready)
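# Illustrative (hypothetical) voice bindings, not part of this file: a .talon
# file could wire these actions to spoken commands, for example:
#
#     toggle microphone picker: user.microphone_selection_toggle()
#     pick microphone <number>: user.microphone_select(number)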
```
#### File: lang/batch/batch.py
```python
from talon import Context, actions

ctx = Context()
ctx.matches = r"""
mode: user.batch
mode: user.auto_lang
and code.language: batch
"""


@ctx.action_class("user")
class UserActions:
    # tag(): user.code_generic
    def code_comment():
        actions.auto_insert("REM ")
``` |
{
"source": "josh-perry/venture",
"score": 2
} |
#### File: venture/game/models.py
```python
from __future__ import unicode_literals

from django.db import models
from django.contrib.auth.models import User


class Room(models.Model):
    name = models.CharField(max_length=255)
    description = models.TextField()
    exits = models.ManyToManyField("Exit", blank=True)  # null has no effect on ManyToManyField

    def __str__(self):
        return self.name


class Exit(models.Model):
    name = models.CharField(max_length=255)
    # CASCADE matches the implicit default of Django versions that allowed omitting on_delete
    to_room = models.OneToOneField(Room, blank=True, null=True, on_delete=models.CASCADE)

    def __str__(self):
        if self.to_room:
            name = self.to_room.name
        else:
            name = None
        return self.name + " to " + (name or "Nowhere")


class Player(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    current_room = models.ForeignKey(Room, on_delete=models.CASCADE)

    def __str__(self):
        return self.user.username
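# Illustrative ORM usage (a sketch, assuming migrations have been applied; the
# object names below are made up):
#
#     hall = Room.objects.create(name="Hall", description="A long hallway")
#     door = Exit.objects.create(name="Oak door", to_room=hall)
#     hall.exits.add(door)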
``` |
{
"source": "joshpeterson/SundayMassReadings",
"score": 3
} |
#### File: SundayMassReadings/utilities/ExtractSundayReadingData.py
```python
from bs4 import BeautifulSoup
import glob
import re
import sys


def BuildSundayMassReadingJson(html_string, url_string):
    soup = BeautifulSoup(html_string, "html.parser")
    date = soup.find_all("h1")[0].text
    title = soup.find_all("h3")[2].text
    title = title[0:title.find("Lectionary")]
    readings = soup.find_all("a", {"class": "book"})
    if len(readings) == 0:
        readings = soup.find_all("div", {"class": "bibleReadingsWrapper"})
        sub_soup = BeautifulSoup(str(readings), "html.parser")
        sub_readings = sub_soup.find_all("a", {"href": re.compile(".*bible.*")})
        if len(sub_readings) == 0:
            print("Problem parsing " + url_string)
            return
        readings = sub_readings
    for i in range(0, len(readings)):
        readingName = readings[i].parent.text
        if readingName.startswith("Reading 1"):
            firstReadingIndex = i
        elif readingName.startswith("Responsorial Psalm"):
            psalmIndex = i
        elif readingName.startswith("Reading 2"):
            secondReadingIndex = i
        elif readingName.startswith("Gospel"):
            gospelIndex = i
    firstReading = readings[firstReadingIndex].text.replace("\\n", "").replace("\u2014", "-")
    psalm = readings[psalmIndex].text.replace("\\n", "").replace("\u2014", "-")
    secondReading = readings[secondReadingIndex].text.replace("\\n", "").replace("\u2014", "-")
    gospelReading = readings[gospelIndex].text.replace("\\n", "").replace("\u2014", "-")
    json_entry = "\"" + date + "\"" + ":{title:\"" + title + "\",url:\"" + url_string + "\",first:\"" + firstReading + "\",psalm:\"" + psalm + "\",second:\"" + secondReading + "\",gospel:\"" + gospelReading + "\"},"
    print(json_entry)


for file_name in sorted(glob.glob("*.cfm")):
    f = open(file_name)
    try:
        BuildSundayMassReadingJson(f.read(), file_name[:6])
    except KeyboardInterrupt:
        raise
    except:
        print("Exception parsing " + file_name + "\n" + str(sys.exc_info()[0]))
``` |
{
"source": "joshpetit/biblehub",
"score": 3
} |
#### File: biblehub/biblehub/scrape_passages.py
```python
import requests
from bs4 import BeautifulSoup

from biblehub.scrape_utils import parse_str


# TODO: Optimize with obj parameter
def find_passage(reference: str, version='niv') -> dict:
    """
    Find multiple verses in a single chapter, or an entire chapter
    :param reference: The reference to find on biblehub
    :param version: The version to return
    :return: A dictionary with a nested dictionary of the verses
    """
    version = version.lower()
    response = {'verses': {}, 'reference': reference.title()}
    reference = parse_str(reference)
    response['bnc'] = reference['book'].title() + ' ' + str(reference['chapter'])
    url = 'https://biblehub.com/%s/%s/%d.htm' % (version, reference['book'].replace(" ", "_"), reference['chapter'])
    request = requests.get(url)
    page = BeautifulSoup(request.content, "html.parser")
    chap = page.find("div", {"id": "leftbox"})
    verses = chap.find_all("span", {"class": "reftext"})
    if reference['end_verse'] is not None:
        verses = verses[reference['start_verse'] - 1:]
        verses = verses[0: reference['end_verse'] - reference['start_verse'] + 1]
    for verse in verses:
        num = int(verse.get_text())
        verse_text = verse.next_sibling.strip()
        verse = verse.parent.next_sibling
        while verse is not None and verse.name is not None:
            verse_text += '\n' + verse.get_text()
            verse = verse.parent.next_sibling
        response['verses'][num] = verse_text
    return response
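# Illustrative call (a sketch; assumes parse_str understands references of the
# form "book chapter:start-end"):
#
#     passage = find_passage("Genesis 1:1-3", version="niv")
#     passage["bnc"]     # -> "Genesis 1"
#     passage["verses"]  # -> {1: "...", 2: "...", 3: "..."}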
```
#### File: biblehub/biblehub/styling.py
```python
class COLORS:
    header = "\033[4m"
    red = "\033[31m"
    green = "\033[32m"
    blue = "\033[34m"
    normal = "\033[0m"


def format_verse(reference, text) -> str:
    return "{blue}{reference}{normal} \n{verse}\n".format(
        blue=COLORS.blue, reference=reference,
        normal=COLORS.normal, verse=text
    )


def format_reference(reference) -> str:
    return "{blue}{reference}{normal}".format(blue=COLORS.blue,
                                              reference=reference, normal=COLORS.normal)


def format_header(text) -> str:
    return "\n{header}{text}{normal}\n".format(header=COLORS.header, text=text, normal=COLORS.normal)
``` |
{
"source": "joshpoll/lynx",
"score": 3
} |
#### File: lynx/obsidian/fields.py
```python
from dataclasses import field
from typing import Dict, Any
from pysmt.shortcuts import FreshSymbol
from pysmt.typing import REAL
def SMTField(): return field(default_factory=lambda: FreshSymbol(REAL)) # yo dawg i heard you like factories...
def StyleField(): return field(default_factory=dict)
STYLE = Dict[str, Any]
```
#### File: lynx/obsidian/shape.py
```python
from dataclasses import dataclass, fields
from numbers import Real as ABCReal

from obsidian.fields import SMTField
from pysmt.shortcuts import Real
from pysmt.typing import REAL


@dataclass
class Bounds:
    left_edge: REAL = SMTField()
    right_edge: REAL = SMTField()
    top_edge: REAL = SMTField()
    bottom_edge: REAL = SMTField()

    def __post_init__(self):
        self.width = self.right_edge - self.left_edge
        self.height = self.bottom_edge - self.top_edge


class Shape:
    """For subclassing by shape dataclasses.

    Provides a __post_init__ method which replaces any real-annotated fields'
    values with their pysmt.Real equivalents, so that native Python numbers can
    be passed into shape dataclasses without issue.
    """

    def __post_init__(self):
        for field in fields(self):
            if field.type is not REAL:
                continue
            attr = getattr(self, field.name)
            if isinstance(attr, ABCReal):
                setattr(self, field.name, Real(attr))

    @classmethod
    def factory(cls, **kwargs):
        """
        Returns a variable-argument factory function for the shape. Any keyword
        args passed to this function will be passed on to all new shape
        instances. Args passed to the factory function itself will take
        precedence over args passed to this function.

        This resembles currying, and makes it easier for user code to set up
        factories for use with e.g. obsidian.shapes.ShapeGrid in a readable way
        (i.e. without having to expose the reader to uninteresting details like
        lambdas or ** notation).
        """
        # merge so that caller-supplied keyword args override the factory defaults
        return lambda *args, **kw: cls(*args, **{**kwargs, **kw})

    @property
    def bounds(self):
        raise NotImplementedError

    @property
    def center(self):
        from obsidian.geometry import Point
        bounds = self.bounds
        center_x = (bounds.left_edge + bounds.right_edge) / 2
        center_y = (bounds.top_edge + bounds.bottom_edge) / 2
        return Point(center_x, center_y)
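# Illustrative use of Shape.factory (a sketch; Circle and its `radius` field
# are assumptions, not defined in this file):
#
#     make_circle = Circle.factory(radius=5)
#     c1 = make_circle()            # gets radius=5 from the factory defaults
#     c2 = make_circle(radius=10)   # args passed here take precedence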
``` |
{
"source": "joshpoll/tvm",
"score": 3
} |
#### File: tvm/contrib/util.py
```python
from __future__ import absolute_import as _abs
import os
import tempfile
import shutil
try:
import fcntl
except ImportError:
fcntl = None
class TempDirectory(object):
"""Helper object to manage temp directory during testing.
Automatically removes the directory when it goes out of scope.
"""
def __init__(self):
self.temp_dir = tempfile.mkdtemp()
self._rmtree = shutil.rmtree
def remove(self):
"""Remote the tmp dir"""
if self.temp_dir:
self._rmtree(self.temp_dir, ignore_errors=True)
self.temp_dir = None
def __del__(self):
self.remove()
def relpath(self, name):
"""Relative path in temp dir
Parameters
----------
name : str
The name of the file.
Returns
-------
path : str
The concatenated path.
"""
return os.path.join(self.temp_dir, name)
def listdir(self):
"""List contents in the dir.
Returns
-------
names : list
The content of directory
"""
return os.listdir(self.temp_dir)
def tempdir():
"""Create temp dir which deletes the contents when exit.
Returns
-------
temp : TempDirectory
The temp directory object
"""
return TempDirectory()
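# Illustrative usage (a sketch using only the helpers defined above):
#
#     temp = tempdir()
#     obj_path = temp.relpath("model.o")  # path of a file inside the temp dir
#     temp.listdir()                      # contents of the temp dir
#     temp.remove()                       # also runs automatically on __del__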
class FileLock(object):
"""File lock object
Parameters
----------
path : str
The path to the lock
"""
def __init__(self, path):
self.lock_file = open(path, "w")
if fcntl:
fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
def release(self):
"""Release the lock"""
if self.lock_file:
if fcntl:
fcntl.lockf(self.lock_file, fcntl.LOCK_UN)
self.lock_file.close()
self.lock_file = None
def filelock(path):
"""Create a file lock which locks on path
Parameters
----------
path : str
The path to the lock
Returns
-------
lock : File lock object
"""
return FileLock(path)
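# Illustrative usage (a sketch; locking is a no-op where fcntl is unavailable):
#
#     lock = filelock("/tmp/build.lock")
#     try:
#         pass  # critical section
#     finally:
#         lock.release()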
def is_source_path(path):
"""Check if path is source code path.
Parameters
----------
path : str
A possible path
Returns
-------
valid : bool
Whether path is a possible source path
"""
if os.path.exists(path):
return True
if path.find("\n") != -1:
return False
spath = path.rsplit(".", 1)
return len(spath) == 2 and spath[1].strip() == spath[1]
def which(exec_name):
"""Try to find full path of exec_name
Parameters
----------
exec_name : str
The executable name
Returns
-------
path : str
The full path of executable if found, otherwise returns None
"""
base_list = ["", "/bin"] + os.environ.get("PATH", "").split(os.pathsep)
for path in base_list:
full_path = os.path.join(path, exec_name)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
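# Example: which("gcc") returns a path like "/usr/bin/gcc" if gcc is on PATH, else None.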
``` |
{
"source": "JoshPrim/ECG-Pipeline",
"score": 2
} |
#### File: ECG-Pipeline/extractors/extractor_cardiosoft.py
```python
import pandas as pd
import PyPDF2
import numpy as np
import math
import os
from extractors.abstract_extractor import AbstractExractor
from utils.extract_utils.extract_utils import rotate_origin_only, move_along_the_axis, scale_values_based_on_eich_peak, \
create_measurement_points, adjust_leads_baseline, preprocess_page_content, extract_graphics_string
from utils.misc.datastructure import perform_shape_switch
from utils.data.visualisation import visualiseIndividualfromDF, visualiseIndividualinMPL
from tqdm import tqdm
import logging
class CardiosoftExtractor(AbstractExractor):
def __init__(self, params):
super().__init__(params)
if 'ecg_path_source' not in params:
raise ValueError('ecg_path_source is not set in params')
else:
self.path_source = params['ecg_path_source']
if 'ecg_path_sink' not in params:
raise ValueError('ecg_path_sink is not set in params')
else:
self.path_sink = params['ecg_path_sink']
# reference value for the calibration jag
self.eich_ref = 1000
# extracted height for the calibration jag in PDF
self.eichzacke = 236.99999999999997
if 'number_of_points' not in params:
raise ValueError('number_of_points is not set in params')
else:
# number of measuring points in XML
self.number_of_points = params['number_of_points']
if 'show_visualisation' in params:
self.show_visualisation = params['show_visualisation']
else:
self.show_visualisation = False
if 'vis_scale' in params:
self.vis_scale = params['vis_scale']
else:
self.vis_scale = 1
if 'vis_MPL' in params:
self.vis_MPL = params['vis_MPL']
else:
self.vis_MPL = False
self.gamma = self.eich_ref / self.eichzacke
if 'version' not in params:
self.version = '6.5'
else:
self.version = params['version']
def extract(self):
for file_name in tqdm(os.listdir(self.path_source)):
logging.info('Converting "{}"'.format(file_name))
try:
# Extract leads from PDF
lead_list, lead_ids, record_id = self.extract_leads_from_pdf(file_name)
if lead_list is not None:
new_lead_list = []
for lead in lead_list:
tmp_lead = []
# Preprocess extracted vectors
for t in lead:
x, y = rotate_origin_only(float(t[0]), float(t[1]), math.radians(90))
tmp_lead.append([x, y])
new_lead = move_along_the_axis(tmp_lead)
# Scale values based on eich peak
new_lead = scale_values_based_on_eich_peak(new_lead, self.gamma)
# Create (e.g. 5000) measurement points based on the unevenly distributed points
measurement_points = create_measurement_points(new_lead, self.number_of_points)
# Collect converted leads
new_lead_list.append(measurement_points)
# Convert lead list to dataframe
df_leads = pd.DataFrame(perform_shape_switch(new_lead_list), columns=lead_ids)
# Adjust baseline position of each lead
df_leads = adjust_leads_baseline(df_leads)
# Plot leads of ECG if config is set to do so
if self.show_visualisation:
if not self.vis_MPL:
visualiseIndividualfromDF(df_leads, self.vis_scale)
else:
visualiseIndividualinMPL(df_leads)
df_leads.to_csv(('{}{}.csv'.format(self.path_sink, file_name.replace(".pdf", ""))),
index=False)
else:
logging.error('Lead list is none')
except Exception:
logging.warning(('Failed to extract ' + str(file_name)))
def extract_leads_from_pdf(self, filename):
reader = PyPDF2.PdfFileReader(open(self.path_source + filename, 'rb'))
try:
leads = []
lead_ids = []
record_id = None
for p in range(reader.getNumPages()):
if len(leads) == 12:
break
page = reader.getPage(p)
text = page.extractText()
is_cover_page = text.startswith('Page') or text.startswith('Seite')
if not is_cover_page:
self.get_version(text)
page_content_raw = reader.getPage(p).getContents()._data
page_content = preprocess_page_content(page_content_raw)
graphics_string = extract_graphics_string(page_content)
leads += self.extract_leads_from_page_content(graphics_string)
lead_ids += self.extract_lead_ids(text)
record_id = self.extract_record_id(text)
else:
logging.info('Skipping cover page (page {})'.format(p))
if len(leads) != 12:
raise Exception('Invalid ECG with {} leads'.format(len(leads)))
except Exception as e:
logging.error('Could not convert "{}": {}'.format(filename, e))
leads = None
lead_ids = None
record_id = None
return leads, lead_ids, record_id
def extract_lead_ids(self, pagetext):
lines = pagetext.split('\n')
lead_ids = lines[-7: -1]
if lead_ids[1] == 'III':
lead_ids[0] = 'I'
lead_ids[1] = 'II'
return lead_ids
def get_version(self, pagetext):
lines = pagetext.split('\n')
version = []
for element in lines:
if 'GE CardioSoft' in element:
version.append(element)
elif 'GE CASE' in element:
version.append(element)
if 'V6.0' in version[0]:
self.version = '6.0'
else:
self.version = '6.5'
def extract_record_id(self, pagetext):
lines = pagetext.split('\n')
record_id = None
for i in range(len(lines)):
line = lines[i]
if line.startswith('Patient'):
parts = line.split(':')
number = parts[1].replace(' ', '')
date = lines[i + 2].replace('.', '-')
time = lines[i + 4].replace(':', '-')
record_id = '{}_{}_{}'.format(number, date, time)
break
return record_id
def extract_leads_from_page_content(self, graphics_string):
leads = []
if float(self.version) < 6.5:
cutting_range = [7, 13]
else:
cutting_range = [8, 14]
for i in range(cutting_range[0], cutting_range[1]):
points = graphics_string[i].split('S')[0].split('\n')
lead = []
for p in points:
coordinates = p.split(' ')
if len(coordinates) == 2:
lead.append(coordinates)
lead = np.array(lead)
leads.append(lead)
return leads
if __name__ == '__main__':
path_source = '../data/pdf_data/pdf_cardiosoft/original_ecgs/'
path_sink = '../data/pdf_data/pdf_cardiosoft/extracted_ecgs/'
params = {
'ecg_path_source': path_source,
'ecg_path_sink': path_sink,
'number_of_points': 5000,
'show_visualisation': True,
}
tmp = CardiosoftExtractor(params)
tmp.extract()
```
#### File: utils/data/data.py
```python
import json
import logging
import os
import numpy as np
# noinspection PyUnresolvedReferences
from xml.dom import minidom
from utils.data.validation import validate_and_clean_float, validate_and_clean_char
from utils.file.file import load_string_from_file, load_dict_from_json
from utils.misc.datastructure import perform_shape_switch
def parse_ecg_xml(xmlcode, leads_to_use=None):
xmlparsed = minidom.parseString(xmlcode)
itemlist = xmlparsed.getElementsByTagName('sequence')
leads = {}
uom = ''
length = 0
for i in range(0, 12):
cur_sequence = itemlist[i + 1]
lead = list(np.fromstring(cur_sequence.getElementsByTagName('digits')[0].childNodes[0].nodeValue,
dtype=int,
sep=' '))
length = len(lead)
uom = cur_sequence.getElementsByTagName('scale')[0].getAttribute('unit')
lead_id = cur_sequence.getElementsByTagName('code')[0].getAttribute('code').replace('MDC_ECG_LEAD_', '')
if leads_to_use is None:
leads[lead_id] = lead
elif lead_id in leads_to_use:
leads[lead_id] = lead
# TODO: Add active filters, etc.
metadata = {'sampling_rate_sec': 500,
'unitofmeasurement': uom,
'length_sec': 10,
'length_timesteps': length}
return leads, metadata
def load_ecg_xml(path, leads_to_use=None):
xmlcode = load_string_from_file(path)
leads, metadata = parse_ecg_xml(xmlcode, leads_to_use)
return leads, metadata
def load_ecgs_from_redcap_snapshot(leads_to_use, record_ids_excluded,
ecg_path='../../data/xml_data/ecg/'):
ecgfiles = os.listdir(ecg_path)
ecgs = {}
for filename in ecgfiles:
exclude = False
record_id = filename.replace('.xml', '')
if record_ids_excluded is not None:
if record_id in record_ids_excluded:
exclude = True
logging.info('Excluded record "{}" from dataloading (ECG)'.format(record_id))
if exclude is False:
leads, metadata = load_ecg_xml(ecg_path + filename, leads_to_use)
ecgs[record_id] = {'leads': leads, 'metadata': metadata}
return ecgs
def load_clinical_parameters_json(path, params_input):
"""
Loads the requested clinical input parameters from a JSON file
:param path: the path of the file to load
:param params_input: list of clinical parameters to load
:return: clinical parameters from a single file
"""
allparams = load_dict_from_json(path)
inputs = {}
outputs = {}
if params_input is not None:
for param in params_input:
try:
inputs[param] = allparams[param]
except KeyError:
raise Exception('Unknown clinical input parameter "{}". Aborting.'.format(param))
assert (len(inputs)) > 0
return inputs
def load_metadata(metadata_id, metadata_directory='./../data/metadata/'):
"""
Loads a metadata file
:param metadata_id: ID of the metadata file(stored in config)
:param metadata_directory: Directory where the metadata file is stored
:return: a json metadata file
"""
path = metadata_directory + metadata_id + '.json'
try:
with open(path, 'r') as f:
metadata = json.load(f)
return metadata
except FileNotFoundError:
raise Exception('Metadata file at "{}" does not exist. Aborting.'.format(path))
except json.decoder.JSONDecodeError as e:
raise Exception(
'Metadata file at "{}" contains errors. JSON could not be parsed. Aborting. Error message: {}'.format(path,
str(
e)))
def one_hot_encode_clinical_parameters(clinical_parameters, metadata):
"""
Uses one hot encoding to encode the parameters according to the supplied metadata
:param clinical_parameters: Clinical parameters to be encoded
:param metadata: metadata that describes the encoding rules
:return: encoded parameters
"""
encoded = {}
for param in clinical_parameters:
value = clinical_parameters[param]
try:
encoded[param] = np.array(metadata[param]['values_one_hot'][value])
except KeyError:
raise Exception(
'One hot encoding failed because of missing rule for clinical parameter "{}" and value "{}". Check value or implement rule!'.format(
param, value))
return encoded
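# Illustrative metadata shape expected by the encoder above (the parameter name
# and categories are assumptions; only the 'values_one_hot' key comes from the code):
#
#     metadata = {"sex": {"values_one_hot": {"m": [1, 0], "f": [0, 1]}}}
#     one_hot_encode_clinical_parameters({"sex": "m"}, metadata)
#     # -> {"sex": array([1, 0])}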
def scale_ecg(ecg, factor):
"""
Scales an ECG by a scaling factor and adjusts the unit of measurement accordingly
:param ecg: ECG containing multiple leads
:param factor : a scaling value
:return: an ECG scaled by the factor
"""
for lead_id in ecg['leads']:
lead = np.array(ecg['leads'][lead_id])
ecg['leads'][lead_id] = lead * factor
if factor == 1 / 1000 and ecg['metadata']['unitofmeasurement'] == 'uV':
ecg['metadata']['unitofmeasurement'] = 'mV'
else:
ecg['metadata']['unitofmeasurement'] = ecg['metadata']['unitofmeasurement'] + '*' + str(factor)
return ecg
def scale_ecgs(ecgs, factor):
"""
Scales a list of ECGs by a scaling factor
:param ecgs: list of ECGs
:param factor : a scaling value
:return: a list of ECGs scaled by the factor
"""
scaled_ecgs = {}
for record_id in ecgs:
scaled_ecgs[record_id] = scale_ecg(ecgs[record_id], factor)
return scaled_ecgs
def derive_ecg_variants_multi(ecgs, variants):
"""
Derives the requested representations (e.g. raw and/or delta values) for each ECG in a list
:param ecgs: List of ECGs
:param variants: requested representations ('ecg_raw', 'ecg_delta')
:return: a list of ECGs containing the requested representations
"""
derived_ecgs = {}
for record_id in ecgs:
derived_ecgs[record_id] = derive_ecg_variants(ecgs[record_id], variants)
return derived_ecgs
def calculate_delta_for_lead(lead):
"""
Converts a lead of absolute voltage values into a lead of delta (sample-to-sample difference) values
:param lead: a lead with absolute voltage values
:return: a lead with delta voltage values
"""
delta_list = []
for index in range(0, len(lead) - 1):
delta_list.append(lead[index + 1] - lead[index])
delta_list = np.round(np.array(delta_list), 6)
return delta_list
def calculate_delta_for_leads(leads):
"""
Converts leads of absolute voltage values into leads of delta (sample-to-sample difference) values
:param leads: leads with absolute voltage values
:return: leads with delta voltage values
"""
delta_leads = {}
for lead_id in leads:
delta_leads[lead_id] = calculate_delta_for_lead(leads[lead_id])
return delta_leads
def derive_ecg_variants(ecg, variants):
"""
Derives the requested representations (e.g. raw and/or delta values) for an ECG
:param ecg: an ECG
:param variants: requested representations ('ecg_raw', 'ecg_delta')
:return: an ECG containing the requested representations
"""
derived_ecg = {}
for variant in variants:
if variant == 'ecg_raw':
derived_ecg[variant] = ecg['leads']
elif variant == 'ecg_delta':
derived_ecg[variant] = calculate_delta_for_leads(ecg['leads'])
derived_ecg['metadata'] = ecg['metadata']
return derived_ecg
def update_length_in_metadata(metadata, start, end):
secs_old = metadata['length_sec']
timesteps_old = metadata['length_timesteps']
timesteps_new = end - start
secs_new = round(timesteps_new * secs_old / timesteps_old, 1)
metadata['length_sec'] = secs_new
metadata['length_timesteps'] = timesteps_new
def extract_subsample_from_ecg_matrix_based(ecg, start, end):
return ecg[start:end]
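# Sketch of the windowing performed by subsample_ecgs below: for a 5000-step
# record with window_size=2000 and subsampling_factor=3, stride is
# int((5000 - 2000) / 3) = 1000, so the extracted windows are
# [0, 2000), [1000, 3000) and [2000, 4000).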
def subsample_ecgs(ecgs, subsampling_factor, window_size, ecg_variant='ecg_raw'):
collected_subsamples = []
collected_clinical_parameters = []
collected_metadata = []
collected_record_ids = []
for record_id in ecgs:
start = 0
record = ecgs[record_id]
metadata = record['metadata']
length = metadata['length_timesteps']
ecg = convert_lead_dict_to_matrix(record[ecg_variant])
clinical_parameters = concatenate_one_hot_encoded_parameters(record['clinical_parameters_inputs'])
if not length > window_size:
raise Exception(
'Record "{}" is shorter ({}) than the configured subsampling window size of {} timesteps. Aborting.'.format(
record_id, length, window_size))
stride = int((length - window_size) / subsampling_factor)
for i in range(subsampling_factor):
end = start + window_size
if end > length:
break
subsample = extract_subsample_from_ecg_matrix_based(ecg, start, end)
record_id_new = '{}_{}'.format(record_id, i)
metadata_new = dict(metadata)
update_length_in_metadata(metadata_new, start, end)
metadata_new['subsample_start'] = start
metadata_new['subsample_end'] = end
metadata_new['original_record_id'] = record_id
metadata_new['record_id'] = record_id_new
collected_subsamples.append(subsample)
collected_clinical_parameters.append(clinical_parameters)
collected_metadata.append(metadata_new)
collected_record_ids.append(record_id_new)
start = start + stride
return collected_record_ids, collected_metadata, collected_clinical_parameters, collected_subsamples
def load_clinical_parameters_from_redcap_snapshot(clinical_parameters_inputs,
record_ids_excluded,
clinical_parameters_directory):
"""
Fetches the clinical parameters corresponding to the ECGs
:param clinical_parameters_inputs: list of parameters to load from the files
:param record_ids_excluded: List of records to be ignored
:param clinical_parameters_directory: the folder path where clinical parameter files are stored
:return: loaded clinical parameters
"""
parameterfiles = os.listdir(clinical_parameters_directory)
clinicalparameters = {}
for filename in parameterfiles:
exclude = False
record_id = filename.replace('.json', '')
if record_ids_excluded is not None:
if record_id in record_ids_excluded:
exclude = True
logging.info('Excluded record "{}" from dataloading (clinical parameters)'.format(record_id))
if exclude is False:
inputs = load_clinical_parameters_json(clinical_parameters_directory + filename, clinical_parameters_inputs)
clinicalparameters[record_id] = {'clinical_parameters_inputs': inputs}
return clinicalparameters
def validate_and_clean_clinical_parameters_for_records(records, metadata):
"""
Validates that the clinical parameters are within their accepted value ranges, cleans differing parameter values
:param records: records containing clinical parameters to be validated
:param metadata: metadata corresponding to the files
:return: validated and cleaned records
"""
validated_and_cleaned = {}
for recid in records:
try:
inputs = validate_and_clean_clinical_parameters(records[recid]['clinical_parameters_inputs'], metadata)
except Exception as e: # In case of other exceptions, raise new exception with record-id information added
raise Exception('Record-ID {}: {}'.format(recid, e))
validated_and_cleaned[recid] = {'clinical_parameters_inputs': inputs}
return validated_and_cleaned
def validate_and_clean_clinical_parameters(clinical_parameters, metadata):
"""
Validates that the clinical parameters are within their accepted value ranges, cleans differing parameter values
:param clinical_parameters: Clinical parameters to be validated
:param metadata: metadata corresponding to the files
:return: validated and cleaned clinical parameters
"""
validated_and_cleaned = {}
for param in clinical_parameters:
value = clinical_parameters[param]
if metadata[param]['type'] == 'char':
value_vc = validate_and_clean_char(param, str(value),
metadata[param]['values_allowed'],
metadata[param]['values_replace'])
elif metadata[param]['type'] == 'float':
value_vc = validate_and_clean_float(param, value,
metadata[param]['valmin'],
metadata[param]['valmax'])
else:
raise Exception('Unknown parameter: "{}". Please implement validation and cleansing rule!'.format(param))
validated_and_cleaned[param] = value_vc
return validated_and_cleaned
def categorize_clinical_parameters(clinical_parameters, metadata):
"""
Categorizes real valued data into value bands
:param clinical_parameters: Clinical parameters to be categorised
:param metadata: metadata corresponding to the files
:return: categorized parameters
"""
for param in clinical_parameters:
if "categorization_rules" in metadata[param]:
categorylist = metadata[param]['categorization_rules']
for category in categorylist:
if category['end'] in ['Infinity', 'INF', 'NaN']:
clinical_parameters[param] = category['name']
break
elif category['start'] <= clinical_parameters[param] < category['end']:
clinical_parameters[param] = category['name']
break
return clinical_parameters
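# Illustrative 'categorization_rules' metadata consumed above (the parameter
# name and band values are assumptions; the 'start'/'end'/'name' keys come from the code):
#
#     {"age": {"categorization_rules": [
#         {"start": 0, "end": 50, "name": "under_50"},
#         {"start": 50, "end": "Infinity", "name": "50_plus"}]}}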
def categorize_clinical_parameters_for_records(records, metadata):
"""
Categorizes real valued data within the records into value bands
:param records: Records containing clinical parameters
:param metadata: metadata corresponding to the files
:return: Records containing categorized clinical data
"""
categorized = {}
for recid in records:
inputs = categorize_clinical_parameters(records[recid]['clinical_parameters_inputs'], metadata)
categorized[recid] = {'clinical_parameters_inputs': inputs}
return categorized
def one_hot_encode_clinical_parameters_for_records(records, metadata):
"""
Uses one hot encoding to encode the parameters within the records according to the supplied metadata
:param records: records, containing clinical parameters to be encoded
:param metadata: metadata that describes the encoding rules
:return: encoded records
"""
onehot_encoded = {}
for recid in records:
inputs = one_hot_encode_clinical_parameters(records[recid]['clinical_parameters_inputs'], metadata)
onehot_encoded[recid] = {'clinical_parameters_inputs': inputs}
return onehot_encoded
def combine_ecgs_and_clinical_parameters(ecgs, clinical_parameters):
"""
Combines ECGs and their corresponding clinical parameters
:param ecgs: List of ECGs
:param clinical_parameters: Corresponding clinical parameters
:return: Medical data for each patient including ECGs and the patients clinical parameters
"""
combined = {}
for record_id in ecgs:
ecg = ecgs[record_id]
try:
cp = clinical_parameters[record_id]
except KeyError:
logging.warning(
'No clinical parameters available in datapipeline for record "{}". Skipping record.'.format(record_id))
continue
combined[record_id] = dict(ecg)
combined[record_id].update(cp)
return combined
def concatenate_one_hot_encoded_parameters(dct):
collected = []
for p in dct:
collected += list(dct[p])
return np.array(collected)
def convert_lead_dict_to_matrix(leads, shape_switch=True):
collected = []
for lead_id in leads:
collected.append(leads[lead_id])
collected = np.asarray(collected)
if shape_switch:
collected = perform_shape_switch(collected)
return collected
``` |
{
"source": "JoshPrim/EVA-Projekt",
"score": 2
} |
#### File: dashboard/projekt/dashboard_server.py
```python
import sys
import dash
import dash_auth
import dash_core_components
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import flask
import pandas as pd
import plotly.graph_objs as go
import pymongo
import threading
from dash.dependencies import Input, Output
import os
import collections
from pprint import pprint
from pymongo.command_cursor import CommandCursor
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from types import *
import numpy as np
from pandas import DataFrame
sys.path.append('./Clients')
import folium
from geopy.geocoders import Nominatim
#from sqlalchemy import create_engine
import psycopg2
########################################################################## #############################################################################################################################################
########################################################################## Web Application #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Konstanten
MONGO_URL = os.environ.get('MONGO_URI')
POSTGRESS_URL = os.environ.get('POSTGRES_URL')
HOST_ID = '0.0.0.0'
PORT = '37002'
print('Fasta Server initialisiert!')
def createGraphDataForEscalatorPage(numberOfLastEntries: int):
ergDF = pd.DataFrame(columns=['Datum', 'Anzahl_Ausfälle'])
facilities_collection = facilities.find({})
pandas_facilities = pd.DataFrame(list(facilities_collection))
pandas_facilities = pandas_facilities[['equipmentnumber', 'datetime', 'state']]
facilities_distinct = pandas_facilities
facilities_distinct.columns = ['ID', 'Datum', 'Status']
facilities_distinct['Datum'] = pd.to_datetime(facilities_distinct['Datum'], format="%Y-%m-%d_%H-%M-%S")
facilities_distinct['Datum'] = facilities_distinct['Datum'].dt.strftime('%Y-%m-%d')
facilities_distinct_inactive = facilities_distinct[facilities_distinct['Status'] == 'INACTIVE']
dfOnlyDatetime = pd.DataFrame(facilities_distinct_inactive['Datum'], columns=['Datum']).drop_duplicates()
facilities_distinct_inactive_latestDate = facilities_distinct_inactive.groupby('ID')['Datum'].max()
counter = 0
for index, row in dfOnlyDatetime.iterrows():
counter = 0
for key, value in facilities_distinct_inactive_latestDate.items():
if value == row['Datum']:
counter += 1
ergDF.loc[index] = row['Datum'], counter
ergDF = ergDF.reset_index().drop(['index'], axis=1)
ergDF = ergDF.iloc[-numberOfLastEntries:]
return ergDF
def getDesiredState(listWithStates, state):
stateCounter = 0
for i in listWithStates:
if state == i['state']:
stateCounter += 1
return stateCounter
def getDesiredStateExplanation(listWithStates, state, stateExplanation):
stateExpressionCounter = 0
for i in listWithStates:
if state == i['state'] and stateExplanation == i['stateExplanation']:
stateExpressionCounter += 1
return stateExpressionCounter
def createOverview(givenType: str):
resultOverview = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
}}
])
listWithStates = []
for i in resultOverview:
listWithStates.append(i)
stateCountACTIVE = getDesiredState(listWithStates, 'ACTIVE')
stateCountINACTIVE = getDesiredState(listWithStates, 'INACTIVE')
stateCountUNKNOWN = getDesiredState(listWithStates, 'UNKNOWN')
return stateCountACTIVE, stateCountINACTIVE, stateCountUNKNOWN
def createReasonsForInactivity(givenType: str):
uniqueList = facilities.distinct("stateExplanation")
resultGruendeFuerInaktivitaet = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
'stateExplanation': {'$last': '$stateExplanation'}
}}
])
listWithStateExplanations = []
for i in resultGruendeFuerInaktivitaet:
listWithStateExplanations.append(i)
dictStateExplanationReason = {}
for i in uniqueList:
count = getDesiredStateExplanation(listWithStateExplanations, 'INACTIVE', str(i))
if count != 0:
dictStateExplanationReason[str(i)] = count
key_array = []
value_array = []
for key, value in dictStateExplanationReason.items():
key_array.append(key)
value_array.append(value)
return key_array, value_array
def createInitialData():
client = pymongo.MongoClient(MONGO_URL, maxPoolSize=50)
dbeva = client.eva_dev
facilities = dbeva['facilities']
# Load elevator master data from Postgres
conn = psycopg2.connect(host='station-db', user='postgres', password='<PASSWORD>', dbname='eva_dev', port=5432)
cur = conn.cursor()
query = 'select * from "elevator"'
cur.execute(query)
stammdaten_liste = cur.fetchall()
aufzüge = pd.DataFrame(stammdaten_liste)
columns = ['ID','Standort Equipment', 'TechnPlatzBezeichng', 'Equipment', 'Equipmentname', 'Ort', 'Wirtschaftseinheit',
'Hersteller',
'Baujahr', 'ANTRIEBSART', 'ANZAHL_HALTESTELLEN', 'ANZAHL_TUEREN_KABINE', 'ANZAHL_TUEREN_SCHACHT',
'FOERDERGESCHWINDIGKEIT',
'FOERDERHOEHE', 'LAGE', 'TRAGKRAFT', 'ERWEITERTE_ORTSANGABE', 'MIN_TUERBREITE', 'KABINENTIEFE',
'KABINENBREITE',
'KABINENHOEHE', 'TUERHOHE', 'FABRIKNUMMER', 'TUERART', 'GEOKOORDINATERECHTSWERT',
'GEOKOORDINATEHOCHWERT', 'AUSFTEXTLICHEBESCHREIBUNG']
aufzüge.columns = columns
aufzüge = aufzüge.drop(0)
aufzüge['Equipment'] = aufzüge['Equipment'].astype(str).astype('int64')
aufzüge = aufzüge.drop_duplicates(['Equipment'])
aufzüge = aufzüge.drop(columns=['ID'])
aufzüge = aufzüge.fillna(value=np.nan)
aufzüge['Baujahr'] = pd.to_numeric(aufzüge['Baujahr'], errors='coerce')
print('Anzahl Aufzüge: ', len(aufzüge))
return facilities, aufzüge
def createMap(givenType: str):
resultCommandCursor = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'description': {'$last': '$description'},
'geocoordX': {'$last': '$geocoordX'},
'geocoordY': {'$last': '$geocoordY'},
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
}}
])
resultCommandCursor = pd.DataFrame(list(resultCommandCursor))
resultCommandCursor.columns = ['equipmentnumber', 'description', 'geocoordX', 'geocoordY', 'lastStateChangeDate', 'state']
inactive = resultCommandCursor[resultCommandCursor['state'] == 'INACTIVE']
active = resultCommandCursor[resultCommandCursor['state'] == 'ACTIVE']
# Zoom am ausgewählten Ort
geolocator = Nominatim(user_agent="Eva_Dashboard")
return inactive, active, geolocator
#####################################################################
################ Start of Code (create initial data) ################
#####################################################################
facilities, aufzüge = createInitialData()
############################################################
################# Die Aufzüge im Überblick #################
############################################################
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
############################################################
############### Die Rolltreppen im Überblick ###############
############################################################
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
####################################################
###### Gründe für Inaktivität von Fahrstühlen ######
####################################################
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
####################################################
###### Gründe für Inaktivität von Rolltreppen ######
####################################################
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
####################################################
###### Routine zum Aktualisieren der Daten ######
####################################################
def updateValues():
global facilities, aufzüge, elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN
global escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN
global elevator_key_array, elevator_value_array
global escalator_key_array, escalator_value_array
facilities, aufzüge = createInitialData()
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
# Data is refreshed every 5 minutes
scheduler = BlockingScheduler()
scheduler.add_job(updateValues, 'interval', minutes=5)
class UpdateValue(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
scheduler.start()
print('Thread zum Updaten der Werte gestartet!')
thread = UpdateValue()
thread.start()
####################################
###### Wusstest du schon? ######
####################################
# Ältester Aufzug
aeltesteAufzug_datensatz = aufzüge[aufzüge['Baujahr'] == int(aufzüge['Baujahr'].min())]
aeltesteAufzug_ort = aeltesteAufzug_datensatz['Ort'].values[0]
aeltesteAufzug_jahr = int(aeltesteAufzug_datensatz['Baujahr'].values[0])
# Station mit den meisten Aufzügen
uniquelist_orte = aufzüge['Ort'].unique()
df_anzahlProStation = pd.DataFrame(columns=['Ort', 'Anzahl_Aufzüge'])
for i in uniquelist_orte:
tmp = len(aufzüge[aufzüge['Ort'] == i])
df_anzahlProStation.loc[i] = i,tmp
df_anzahlProStation = df_anzahlProStation.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
####################################
###### Aggregierte Werte ######
####################################
# Anzahl Antriebsart
anzahl_seilAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'SEIL'])
anzahl_hydraulischAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'HYDRAULISCH'])
# Top Hersteller
uniquelist_hersteller = aufzüge['Hersteller'].unique()
df_anzahlAufzüge = pd.DataFrame(columns=['Hersteller', 'Anzahl_Aufzüge'])
for i in uniquelist_hersteller:
tmp = len(aufzüge[aufzüge['Hersteller'] == i])
df_anzahlAufzüge.loc[i] = i,tmp
df_anzahlAufzüge = df_anzahlAufzüge.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
# Total number of outages per elevator
df_anzahlAusfälle = pd.DataFrame(columns=['Aufzug_ID', 'Anzahl_Ausfälle'])
temp_count = facilities.aggregate( [
{ '$match': { 'state': 'INACTIVE' } },
{
'$group': {
'_id': "$equipmentnumber",
'count': { '$sum': 1 }
}
}
] )
for i in temp_count:
df_anzahlAusfälle.loc[i['_id']] = i['_id'], i['count']
df_anzahlAusfälle = df_anzahlAusfälle.sort_values(by=['Anzahl_Ausfälle'], ascending=False)
aufzug_aggregiert, anzahl_aggregiert = df_anzahlAusfälle['Aufzug_ID'].iloc[0], df_anzahlAusfälle['Anzahl_Ausfälle'].iloc[0]
###############################
###### Karte für Aufzüge ######
###############################
inactive, active, geolocator = createMap('ELEVATOR')
###################################
###### Karte für Rolltreppen ######
###################################
escalator_inactive, escalator_active, escalator_geolocator = createMap('ESCALATOR')
###################################
##### Daten für Rolltreppen ######
###################################
graphDataEscalator = createGraphDataForEscalatorPage(14)
####################################
###### APP ######
####################################
# Passwords should really be kept out of the source-code repository and stored in a file or a database instead.
VALID_USERNAME_PASSWORD_PAIRS = [
['Josh', '<PASSWORD>'],
['Sophie', '<PASSWORD>'],
['Phil', '<PASSWORD>'],
['Bart', '<PASSWORD>']
]
server = flask.Flask('EVA Dashboard')
app = dash.Dash('EVA Dashboard', server=server)
app.title = 'EVA Dashboard'
auth = dash_auth.BasicAuth(
app,
VALID_USERNAME_PASSWORD_PAIRS
)
# Erklärung:
# Since we're adding callbacks to elements that don't exist in the app.layout, Dash will raise an exception to warn us
# that we might be doing something wrong. In this case, we're adding the elements through a callback, so we can ignore the exception.
app.config.suppress_callback_exceptions = True
###########################################################################################################
###########################################################################################################
####################################### #######################################
####################################### 2. Seite für Rolltreppen #######################################
####################################### #######################################
###########################################################################################################
###########################################################################################################
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content'),
html.Div(dt.DataTable(rows=[{}]), style={'display': 'none'})
])
page_rolltreppen = html.Div(children=[
# Überschrift
html.Div([
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
children='EVA Dashboard'),
]),
# Unterüberschrift
html.Div([
html.Hr(),
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em',
'color': '#000099'}, children='Der Rolltreppenwärter'),
dcc.Markdown('''
**Informationen rund um Rolltreppen in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
]),
html.Div([
dcc.Link('Go to Page Aufzüge', href='/page_aufzuege')
], style={'text-align': 'left'}),
# Hauptteil
html.Div([
# Diagramme
html.Div([dcc.Graph(
id='diagramm_status',
figure={
'data': [
{'x': ['aktiv', 'inaktiv', 'keine Information'],
'y': [escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN],
'type': 'bar', 'name': 'Rolltreppen',
'marker': dict(color=['green', 'red', 'orange'])
},
],
'layout': {
'title': 'Die Rolltreppen im Überblick',
'width': '35%',
'align': 'left'
}
}
)], style={'width': '35%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}),
html.Div([dcc.Graph(
id='diagramm_inaktive',
figure={
'data': [
{'values': escalator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': escalator_key_array
},
],
'layout': {
'title': 'Gründe für Inaktivität',
'width': '35%',
'align': 'right'
}
}
)],
style={'width': '40%', 'text-align': 'right', 'display': 'inline-block', 'padding-left': 10,
'padding-bottom': 10}),
html.Hr(),
html.Div([dcc.Graph(
figure=go.Figure(
data=[
go.Bar(
x=graphDataEscalator['Datum'],
y=graphDataEscalator['Anzahl_Ausfälle'],
name='Anzahl Ausfälle',
marker=go.Marker(
color='rgb(55, 83, 109)'
)
)
],
layout=go.Layout(
title='Anzahl der Ausfälle von Rolltreppen auf Tagesebene',
showlegend=True,
legend=go.Legend(
x=0,
y=1.0
),
margin=go.Margin(l=40, r=0, t=40, b=30)
)
),
style={'height': 300, 'width': 800},
id='escalator_mid_graph'
)], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}),
html.Hr(),
# unteres Drittel
html.Div([
# Titel
html.Div([
html.H3(style={'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'},
children='Funktionieren die Rolltreppen an deiner Haltestelle? - Finde es heraus!'),
], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}), ## neu vorher gar nichts
# linker Teil ########################################## geändert alle ids + escalator
html.Div([
html.Div(['Stadt: '],
style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='escalator_stadt_input', value='Frankfurt', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Div(['Bundesland: '],
style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='escalator_bundesland_input', value='Hessen', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
dcc.RadioItems(
id='escalator_radio_button',
options=[
{'label': 'Aktive Rolltreppen', 'value': 'aktiv'},
{'label': 'Inaktive Rolltreppen', 'value': 'inaktiv'},
{'label': ' Alle Rolltreppen', 'value': 'beide'}
],
value='inaktiv', style={'margin-left': 10}
),
html.Iframe(id='escalator_karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
style={'width': '90%', 'height': '30em'})
], style={'width': '49%', 'display': 'inline-block'}),
#style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
# 'padding-left': 140, 'padding-bottom': 10}),
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
# rechter Teil
html.Div([
html.Br(), html.Br(),
html.Div(['Rolltreppen-ID: '],
style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='rolltreppe_id_input', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Hr(),
# Tabelle
html.Div([
dt.DataTable(
rows=[{}],
columns=['Datum_Uhrzeit', 'Status', 'Erklärung des Status'],
editable=False,
row_selectable=False,
filterable=False,
sortable=False,
id='datatable-status-escalator',
selected_row_indices=[],
min_height=250
),
html.Br(),
])
], style={'width': '49%', 'display': 'inline-block', 'vertical-align': 'top'})
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
], style={'margin-left': '20'}),
], style={'background-color': '#E6E6FA'}),
# Fußzeile
html.Div([], style={'height': 70}),
html.Hr(),
html.Div([
dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}}),
dcc.Markdown('''
**<NAME>, <NAME>, <NAME>, <NAME>**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
], style={'height': 70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
page_aufzuege = html.Div(children=[
# Überschrift
html.Div([
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
children='EVA Dashboard'),
]),
# Unterüberschrift
html.Div([
html.Hr(),
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '10em',
'color': '#000099'}, children='Der Aufzugwächter'),
dcc.Markdown('''
**Informationen rund um Aufzüge in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
]),
html.Div([
dcc.Link('Go to Page Rolltreppen', href='/page-rolltreppen')
], style={'text-align':'right'}),
# Hauptteil
html.Div([
#Diagramme
html.Div([], style={'width':'10%', 'display': 'inline-block', 'vertical-align':'top'}),
html.Div([
html.Div([ dcc.Graph(
id='diagramm_status',
figure={
'data': [
{'x': ['aktiv', 'inaktiv', 'keine Information'], 'y': [elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN], 'type': 'bar', 'name': 'Aufzüge',
'marker': dict(color=['green', 'red', 'orange'])
},
],
'layout': {
'title': 'Die Aufzüge im Überblick',
'width': '35%',
'align': 'left'
}
}
)], style={'width': '40%', 'display': 'inline-block', 'padding-top': 10, 'padding-bottom': 10}),
html.Div([ dcc.Graph(
id='diagramm_inaktive',
figure={
'data': [
{'values': elevator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': elevator_key_array
},
],
'layout': {
'title': 'Gründe für Inaktivität',
'width': '35%',
'align': 'right'
}
}
)],
style={'width': '40%', 'display': 'inline-block', 'padding-left': 10, 'padding-bottom': 10}),
], style={'width':'90%', 'margin':'auto', 'display': 'inline-block', 'vertical-align':'top'}),
html.Hr(),
#mittleres Drittel: "Wusstest du schon?", aggregierte Werte etc.
html.Div([]),
html.Div([
html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'right',
'color': '#000099'}, children='Wusstest du schon?'),
html.Br(),
html.Div('Der älteste Aufzug ist aus dem Jahr {} und steht in: {}'.format(aeltesteAufzug_jahr, aeltesteAufzug_ort)),
html.Div(id='aeltester_aufzug', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Div('Die Station mit den meisten Aufzügen ist: {} mit {} Aufzügen'.format(df_anzahlProStation['Ort'].iloc[0], df_anzahlProStation['Anzahl_Aufzüge'].iloc[0])),
#count wie oft eine 'stationnumber' vorkommt, kann dann die mit den meisten dann einer Stadt zugeordnet werden?
html.Div(id='meisten_aufzüge', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Div('Der Aufzug mit den meisten Ausfällen ist {} mit {} Ausfällen'.format(aufzug_aggregiert, anzahl_aggregiert)),
#count wie oft 'inactive' im Status vorkommt
html.Div(id='meiste_ausfälle', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
], style={'display': 'inline-block', 'text-align': 'right', 'width': '45%', 'margin-right':20, 'vertical-align':'top'}),
html.Hr(style={'width': 1, 'height': 200, 'display': 'inline-block'}),
html.Div([
html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'}, children='Aggregierte Werte'),
html.Div([
html.Div('Antriebsart:'),
html.Br(), html.Br(), html.Br(), html.Br(),
html.Div('Top Hersteller:'),
html.Br(),
], style={'display':'inline-block', 'width': '20%' }),
html.Div([
html.Div('HYDRAULISCH: {} Aufzüge'.format(anzahl_hydraulischAufzüge)),
html.Div('SEIL: {} Aufzüge'.format(anzahl_seilAufzüge)),
html.Br(), html.Br(), html.Br(),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[0], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[0])),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[1], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[1])),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[2], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[2]))
], style={'display':'inline-block', 'width': '80%', 'vertical-align':'top'})
], style={'display': 'inline-block', 'text-align': 'left', 'width': '50%', 'margin-left':20, 'vertical-align':'top'}),
html.Hr(),
#unteres Drittel
html.Div([
#Titel
html.Div([
html.H3(style={'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'}, children='Funktionieren die Aufzüge an deiner Haltestelle? - Finde es heraus!'),
]),
#linker Teil
html.Div([
html.Div(['Stadt: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='stadt_input', value='Frankfurt', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Div(['Bundesland: '], style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='bundesland_input', value='Hessen', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
dcc.RadioItems(
id='radio_button',
options=[
{'label': 'Aktive Aufzüge', 'value': 'aktiv'},
{'label': 'Inaktive Aufzüge', 'value': 'inaktiv'},
{'label': ' Alle Aufzüge', 'value': 'beide'}
],
value='inaktiv', style={'margin-left':10}
),
html.Iframe(id='karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
style={'width': '90%', 'height': '30em'})
], style={'width': '49%', 'display': 'inline-block'}),
        # Right part
html.Div([
html.Br(), html.Br(),
html.Div(['Aufzug-ID: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='aufzug_id_input', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Hr(),
html.Div([
html.Div(['Stationsname: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Beschreibung: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Hersteller: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Antriebsart: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Baujahr: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
], style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Div(id='stationsname', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='beschreibung', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='hersteller',style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='antrieb', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='baujahr', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
], style={'width': '80%', 'display': 'inline-block'}),
            # Table
html.Div([
dt.DataTable(
rows=[{}],
columns=['Datum_Uhrzeit', 'Status' , 'Erklärung des Status'],
editable=False,
row_selectable=False,
filterable=False,
sortable=False,
id='datatable-status-elevator',
selected_row_indices=[],
min_height=250
),
html.Br(),
])
], style={'width': '49%','display': 'inline-block', 'vertical-align':'top'})
], style={'margin-left':'20'}),
], style = {'background-color': '#E6E6FA'}),
    # Footer
html.Div([ ], style={'height':70}),
html.Hr(),
html.Div([
dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}}),
dcc.Markdown('''
**<NAME>, <NAME>, <NAME>, <NAME>**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style':{'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
], style={'height':70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
########################################################################## #############################################################################################################################################
########################################################################## CALLBACKS #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Callback: update the map for elevators
@app.callback(
Output(component_id='karte', component_property='srcDoc'),
[Input(component_id='stadt_input', component_property='value'),
Input(component_id='bundesland_input', component_property='value'),
Input(component_id='radio_button', component_property='value')]
)
def karte_aktualisieren(input_stadt, input_bland, radio_button):
if radio_button == 'aktiv':
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude,location.longitude], zoom_start=10)
            # TODO: add timing measurement!
for i, row in active.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: '+ str(row['equipmentnumber'])+ ' Beschreibung: '+ str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup = tmp,
icon=folium.Icon(color='green', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_active_elevators.html')
return open('./projekt/Maps/map_active_elevators.html', 'r').read()
except:
return open('./projekt/Maps/map_active_elevators_FFM.html', 'r').read()
elif radio_button == 'inaktiv':
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude,location.longitude], zoom_start=10)
for i, row in inactive.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: '+ str(row['equipmentnumber'])+ ' Beschreibung: '+ str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup = tmp,
icon=folium.Icon(color='red', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_inactive_elevators.html')
return open('./projekt/Maps/map_inactive_elevators.html', 'r').read()
except:
return open('./projekt/Maps/map_inactive_elevators_FFM.html', 'r').read()
else:
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude,location.longitude], zoom_start=10)
for i, row in active.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: '+ str(row['equipmentnumber'])+ ' Beschreibung: '+ str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup = tmp,
icon=folium.Icon(color='green', icon='info-sign')).add_to(m)
for i, row in inactive.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: '+ str(row['equipmentnumber'])+ ' Beschreibung: '+ str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup = tmp,
icon=folium.Icon(color='red', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_both_elevators.html')
return open('./projekt/Maps/map_both_elevators.html', 'r').read()
except:
return open('./projekt/Maps/map_inactive_elevators_FFM.html', 'r').read()
######################################################################################################
# Callback: update the map for escalators
@app.callback(
Output(component_id='escalator_karte', component_property='srcDoc'),
[Input(component_id='escalator_stadt_input', component_property='value'),
Input(component_id='escalator_bundesland_input', component_property='value'),
Input(component_id='escalator_radio_button', component_property='value')]
)
def escalator_karte_aktualisieren(input_stadt, input_bland, radio_button):
if radio_button == 'aktiv':
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = escalator_geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
for i, row in escalator_active.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup=tmp,
icon=folium.Icon(color='green', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_active_escalators.html')
return open('./projekt/Maps/map_active_escalators.html', 'r').read()
except:
return open('./projekt/Maps/map_active_escalators_FFM.html', 'r').read()
elif radio_button == 'inaktiv':
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = escalator_geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
for i, row in escalator_inactive.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup=tmp,
icon=folium.Icon(color='red', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_inactive_escalators.html')
return open('./projekt/Maps/map_inactive_escalators.html', 'r').read()
except:
return open('./projekt/Maps/map_inactive_escalators_FFM.html', 'r').read()
else:
try:
input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
location = escalator_geolocator.geocode(input_user)
m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
for i, row in escalator_active.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup=tmp,
icon=folium.Icon(color='green', icon='info-sign')).add_to(m)
for i, row in escalator_inactive.iterrows():
if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
pass
else:
tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
folium.Marker([row['geocoordY'], row['geocoordX']],
popup=tmp,
icon=folium.Icon(color='red', icon='info-sign')).add_to(m)
m.save('./projekt/Maps/map_both_escalators.html')
return open('./projekt/Maps/map_both_escalators.html', 'r').read()
except:
return open('./projekt/Maps/map_inactive_escalators_FFM.html', 'r').read()
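# Refactoring sketch (not wired into the callbacks above): the elevator and escalator map
# callbacks repeat the same folium marker-building loop in all three radio-button branches.
# A shared helper along these lines could replace the duplication. The function name, its
# parameters and the (dataframe, colour) pairing are illustrative assumptions, not original code.
def build_facility_map(center, frames_with_colors, out_path):
    """Render one marker per facility row and save the map to out_path.

    center: [lat, lon] list used as the map centre.
    frames_with_colors: iterable of (DataFrame, marker_colour) pairs,
    e.g. [(active, 'green'), (inactive, 'red')].
    """
    m = folium.Map(location=center, zoom_start=10)
    for df, color in frames_with_colors:
        for _, row in df.iterrows():
            # Skip rows without usable coordinates
            if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
                continue
            popup = 'ID: {} Beschreibung: {}'.format(row['equipmentnumber'], row['description'])
            folium.Marker([row['geocoordY'], row['geocoordX']],
                          popup=popup,
                          icon=folium.Icon(color=color, icon='info-sign')).add_to(m)
    m.save(out_path)
    return open(out_path, 'r').read()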
######################################################################################################
# Callback: update the station name
@app.callback(
Output(component_id='stationsname', component_property='children'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def stationsname_aktualisieren(input_value):
try:
aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
attribute = aufzug['Ort'].values
return attribute[0]
except:
return str('Aufzug existiert nicht!')
# Callback: update the manufacturer
@app.callback(
Output(component_id='hersteller', component_property='children'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def hersteller_aktualisieren(input_value):
try:
aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
attribute = aufzug['Hersteller'].values
return attribute[0]
except:
return ''
# Callback: update the description
@app.callback(
Output(component_id='beschreibung', component_property='children'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def beschreibung_aktualisieren(input_value):
try:
tmp3 = aufzüge[aufzüge['Equipment'] == int(input_value)]
attribute = tmp3['Standort Equipment'].values
return attribute[0]
except:
return ''
# Callback: update the drive type
@app.callback(
Output(component_id='antrieb', component_property='children'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def anstriebsart_aktualisieren(input_value):
try:
aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
attribute = aufzug['ANTRIEBSART'].values
return attribute[0]
except:
return ''
# Callback: update the year of construction
@app.callback(
Output(component_id='baujahr', component_property='children'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def baujahr_aktualisieren(input_value):
try:
aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
attribute = aufzug['Baujahr'].values
return attribute[0]
except:
return ''
# Callback: update the status table
@app.callback(
Output(component_id='datatable-status-elevator', component_property='rows'),
[Input(component_id='aufzug_id_input', component_property='value')]
)
def elevator_tabelle_aktualisieren(input_value):
try:
tabellen_input = facilities.find({"type": "ELEVATOR", "equipmentnumber": int(input_value)})
tabellen_input = pd.DataFrame(list(tabellen_input))
tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
status_tabelle = tabellen_input[::-1]
status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
return status_tabelle.to_dict('records')
except:
return [{}]
@app.callback(
Output(component_id='datatable-status-escalator', component_property='rows'),
[Input(component_id='rolltreppe_id_input', component_property='value')]
)
def escalator_tabelle_aktualisieren(input_value):
try:
tabellen_input = facilities.find({"type": "ESCALATOR", "equipmentnumber": int(input_value)})
tabellen_input = pd.DataFrame(list(tabellen_input))
tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
status_tabelle = tabellen_input[::-1]
status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
return status_tabelle.to_dict('records')
except:
return [{}]
# Update the page content when switching between elevators and escalators
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/page-aufzuege':
return page_aufzuege
elif pathname == '/page-rolltreppen':
return page_rolltreppen
else:
return page_aufzuege
if sys.version_info < (3, 0):
sys.exit("Dieses Programm erfordert Python 3.0 und höher")
app.run_server(debug=False, host=HOST_ID, port=PORT)
```
#### File: services/db-api/manage.py
```python
from flask.cli import FlaskGroup, with_appcontext
from project import app, db, mongo
from project.api_models.station import Station
import os
cli = FlaskGroup(app)
@cli.command()
def recreate_db():
print('Running cli_recreate_db')
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def init_tables():
print('Running through cli_init_tables')
""" Load station master data from csv"""
with open(os.environ.get('MASTER_STATION'), 'r') as f:
conn = db.engine.connect().connection
cursor = conn.cursor()
cmd = 'COPY station(bundesland,rb,bm,bfnr,station,bfdsabk,katvst,strasse,plz,ort,aufgabenvergeber) FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ";", ENCODING "UTF-8")'
cursor.copy_expert(cmd, f)
conn.commit()
with open(os.environ.get('MASTER_ELEVATOR'), 'r') as f:
conn = db.engine.connect().connection
cursor = conn.cursor()
cmd = 'COPY elevator(standort_equipment,technplatzbezeichng,equipment,equipmentname,ort,wirtschaftseinheit,hersteller,baujahr,antriebsart,anzahl_haltestellen,anzahl_tueren_kabine,anzahl_tueren_schacht,foerdergeschwindigkeit,foerderhoehe,lage,tragkraft,erweiterte_ortsangabe,min_tuerbreite,kabinentiefe,kabinenbreite,kabinenhoehe,tuerhohe,fabriknummer,tuerart,geokoordinaterechtswert,geokoordinatehochwert,ausftextlichebeschreibung) FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ";", ENCODING "UTF-8")'
cursor.copy_expert(cmd, f)
conn.commit()
#mongo cli commands
@cli.command()
def mongo_init():
print('run trough docker with: docker-compose exec mongo-db mongorestore -u bart -p "downy37)tory" -h mongo-db --port 27017 -d eva_dev ./data/db/dump/eva')
#mongo.cx.admin.command('ismaster')
if __name__ == '__main__':
print('Running through main')
cli()
``` |
{
"source": "joshpsawyer/pyhere",
"score": 2
} |
#### File: pyhere/tests/test_here.py
```python
from os import chdir
import sys
import pytest
from pyhere import __version__
from pyhere import here
from pyhere import set_here
from pyhere import find_root
from pyhere import root_indicators
# this test is to ensure I don't mess up versioning
def test_version():
assert __version__ == "1.0.2"
def test_here(tmp_path):
# create dummy project
f1 = tmp_path / "proj" / "src"
f1.mkdir(parents=True, exist_ok=True)
f2 = tmp_path / "proj" / "data"
f2.mkdir(parents=True, exist_ok=True)
# create a .here file at the project root
tmp_path.joinpath("proj").joinpath(".here").touch()
# create a fake data file
f2.joinpath("data.csv").touch()
# set working dir to the src directory
if sys.version_info[0] == 2:
chdir(str(f1))
else:
chdir(f1)
herepath = here("data", "data.csv")
assert herepath.resolve() == f2.joinpath("data.csv").resolve()
def test_set_here(tmp_path):
# create dummy project
f1 = tmp_path / "proj" / "proj_dir"
f1.mkdir(parents=True, exist_ok=True)
    # set_here() with no argument creates .here in the current working directory
set_here()
cwd_path = tmp_path.cwd()
assert cwd_path.joinpath(".here").exists()
# create .here in /proj/proj_dir
set_here(f1)
assert f1.joinpath(".here").exists()
# create .here in /proj/
set_here(str(tmp_path / "proj"))
assert tmp_path.joinpath("proj").joinpath(".here").exists()
@pytest.mark.parametrize(
"r_indicator",
root_indicators,
)
def test_find_root_from_indicator(tmp_path, r_indicator):
# create /proj/r_indicator
f1 = tmp_path / "proj" / r_indicator
f1.mkdir(parents=True, exist_ok=True)
# create a different directory, /proj/src/test.txt
f2 = f1 / "src" / "test.txt"
f2.mkdir(parents=True, exist_ok=True)
# verify they point to the same project root
assert find_root(f1).resolve() == find_root(f2).resolve()
def test_find_sys_root(tmp_path):
# create some nested directories
f1 = tmp_path / "proj" / "another_path"
# verify that they all recurse to the system root
assert find_root(tmp_path).resolve() == find_root(f1).resolve()
``` |
{
"source": "joshr17/autogluon",
"score": 2
} |
#### File: tab_transformer/hyperparameters/parameters.py
```python
from ....constants import BINARY, MULTICLASS, REGRESSION
def get_fixed_params():
""" Parameters that currently cannot be searched during HPO
TODO: HPO NOT CURRENTLY IMPLEMENTED FOR TABTRANSFORMER
Will need to figure out what (in future PR) is "fixed" and what is searchable. """
fixed_params = {'batch_size': 512,
'tab_kwargs': {'n_cont_embeddings': 0,
'n_layers': 1,
'n_heads': 8,
'hidden_dim': 128,
'norm_class_name': 'LayerNorm',
'tab_readout': 'none',
'column_embedding': True,
'shared_embedding': False,
#'n_shared_embs': 8, #8, #careful
'p_dropout': 0.1,
'orig_emb_resid': False,
'one_hot_embeddings': False,
'drop_whole_embeddings': False,
'max_emb_dim': 8,
'lr': 1e-3,
'weight_decay': 1e-6,
'base_exp_decay': 0.95},
'encoders': {'CATEGORICAL': 'CategoricalOrdinalEnc',
'DATETIME' : 'DatetimeOrdinalEnc',
'LATLONG' : 'LatLongQuantileOrdinalEnc',
'SCALAR' : 'ScalarQuantileOrdinalEnc',
'TEXT' : 'TextSummaryScalarEnc'},
'augmentation': {'mask_prob': 0.4,
'num_augs' : 1},
'pretext': 'BERT_pretext',
'n_cont_features': 8,
'fix_attention': False,
'freq': 1,
'pretrain_freq': 100,
'feature_dim': 64,
'epochs': 100,
'pretrain_epochs': 200,
'epochs_wo_improve': 10}
return fixed_params
def get_default_param(problem_type, nunique=None):
params = get_fixed_params()
params['problem_type'] = problem_type
if problem_type==REGRESSION:
params['n_classes'] = 1
elif problem_type==BINARY:
params['n_classes'] = 2
elif problem_type==MULTICLASS:
params['n_classes'] = nunique
return params
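# Illustrative call (a sketch, not taken from the AutoGluon docs): get_default_param(MULTICLASS, nunique=10)
# returns the fixed-parameter dict above extended with problem_type set to MULTICLASS and n_classes=10,
# while REGRESSION and BINARY map to n_classes of 1 and 2 respectively.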
``` |
{
"source": "joshr17/markov-aggregation",
"score": 2
} |
#### File: joshr17/markov-aggregation/elimination_test.py
```python
import logging
import time
import numpy as np
import scipy as sp
import elimination as elim
def test_LUDecomp():
"""
doctest:
>>> test_LUDecomp() # doctest: +NORMALIZE_WHITESPACE
LYX =
(0, 1) 0.5000000000000002
(1, 0) 15.999999999999986
(1, 1) 7.999999999999996
new_PYY =
(0, 0) 0.9450000000000001
(1, 0) 0.7699999999999996
(0, 1) 0.05500000000000001
(1, 1) 0.22999999999999998
new_TY =
(0, 0) 1.5000000000000002
(1, 0) 24.999999999999982
LU =
[[ True True True True True]
[ True True True True True]
[ True True True True True]
[ True True True True True]]
"""
P = sp.sparse.csr_matrix([[.95, .05, 0., 0.],\
[0., 0.9, 0.09, 0.01],\
[0., 0.05, 0.9, 0.05],\
[0.8, 0., 0.05, 0.15]])
#These are the mean waiting times
T = sp.sparse.csr_matrix([[1], [1], [1], [1]])
M = elim.augment_matrix(P, T)
n = 2
#form object
decomp = elim.LUdecomp(M, n)
#calculating quantities, want to check these are right.
LYX = decomp.LYX()
new_PYY = decomp.new_PYY(LYX)
new_TY = decomp.new_TY(LYX)
L = decomp.L(LYX)
U = decomp.U(new_PYY, new_TY)
LU = L*U
print("LYX = ")
print(LYX)
print("new_PYY = ")
print(new_PYY)
print("new_TY = ")
print(new_TY)
print("LU = ")
tol = 10e-5
print(abs((LU - M).toarray()) < tol)
def test_calc_TAB():
"""
doctest:
>>> test_calc_TAB() # doctest: +NORMALIZE_WHITESPACE
(0, 0) 2.569060773480663
(1, 0) 3.2044198895027627
(2, 0) 1.9613259668508287
"""
P = sp.sparse.csr_matrix([[0.0, 0.0, 0.8, 0.2], [0.4, 0.0, 0.6, 0.0],
[0.0, 0.3, 0.0, 0.7], [0.0, 0.0, 0.0, 1.0]])
T = sp.sparse.csr_matrix([[1.0], [1.0], [1.0], [1.0]])
val = elim.calc_TAB(P, T, 1)
print(val)
def test_general_elimination_pi():
"""
doctest:
>>> test_general_elimination_pi()
[[0.25]
[0.5 ]
[0.25]]
[[0.25]
[0.5 ]
[0.25]]
"""
P = sp.sparse.csr_matrix([[0.5, 0.5, 0], [0.25, 0.5, 0.25], [0, 0.5, 0.5]])
T = sp.sparse.csr_matrix([[1.0], [1.0], [1.0]])
order = [1, 1, 1]
stat_dist_elim2 = elim.general_elimination_pi(P, T, order)
print(stat_dist_elim2)
stat_dist = elim.calc_stationary_dist(P, T)
print(stat_dist)
def test_general_elimination_pi_stochastic():
"""
>>> test_general_elimination_pi_stochastic()
True
"""
P = elim.rand_stoch_matrix(900, 0.01)
T = elim.rand_trans_times(900).tocsr()
order = [450]*2
val1 = elim.general_elimination_pi(P, T, order)
val2 = elim.calc_stationary_dist(P, T)
equal = np.allclose(val2, val1, rtol=1e-05, atol=1e-04)
print(equal)
def test_calc_stationary_dist_stochastic():
"""
doctest not possible on stochastic matrix
"""
P = elim.rand_stoch_matrix(500, 0.01)
T = elim.rand_trans_times(500)
start_time = time.time()
elim.calc_stationary_dist(P, T)
print("--- %s seconds ---" % (time.time() - start_time))
def test_elimination_pi_stochastic():
"""Tests of elimination_pi gives the right answer (i.e. the same answer as
calc_stationary_dist).
doctest:
>>> test_elimination_pi_stochastic()
True
"""
P = elim.rand_stoch_matrix(100, 0.1)
T = elim.rand_trans_times(100)
statDistElim = elim.elimination_pi(P, T)
statDistManual = elim.calc_stationary_dist(P, T)
tol = 1e-4
    print(np.all(np.abs(statDistElim - statDistManual) < tol))
if __name__ == "__main__":
LOGGER = logging.getLogger('markagg')
LOGGER.setLevel(logging.DEBUG)
print("Running module tests")
test_LUDecomp()
test_calc_TAB()
test_general_elimination_pi()
test_general_elimination_pi_stochastic()
test_calc_stationary_dist_stochastic()
test_elimination_pi_stochastic()
print("Running doctest")
import doctest
doctest.testmod()
``` |
{
"source": "joshr2020/Code-Reuse-Estimation",
"score": 3
} |
#### File: Code-Reuse-Estimation/src/test_binary_compare.py
```python
import binary_compare
def test_getTotalSize():
dict1 = {'hi': '5', 'bye': '15'}
    assert binary_compare.getTotalSize(dict1) == 26  # values are hex strings: 0x5 + 0x15 = 5 + 21 = 26
def test_jaccard():
dict1 = {'hi': '5', 'bye': '15', 'shalom': '10', 'pi': '25'}
dict2 = {'hi': '6', 'bye': '15', 'shalom2': '10', 'pi': '25'}
assert binary_compare.jaccard(dict1, dict2) == float(58/101)
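    # Sanity check of the expected value, assuming jaccard() weights keys by their hex-parsed sizes:
    # the shared, equal entries are 'bye' (0x15 = 21) and 'pi' (0x25 = 37), so the intersection is 58;
    # the totals are 79 and 80, giving a union of 79 + 80 - 58 = 101, hence 58/101.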
``` |
{
"source": "joshraker/docker-mongodb",
"score": 3
} |
#### File: docker-mongodb/bin/parse_mongo_url.py
```python
import sys
import urlparse
from pipes import quote # pipes.quote is deprecated in 2.7, if upgrading to 3.x, use shlex.quote
DEFAULT_MONGO_PORT = 27017
SSL_CA_FILE = "/etc/ssl/certs/ca-certificates.crt"
def qs_uses_ssl(qs):
"""
By default, we don't use SSL. If ?ssl=true is found, we do.
"""
for ssl_value in qs.get('ssl', []):
if ssl_value == "true":
return True
return False
def qs_checks_ssl(qs):
"""
By default, we check SSL certificate validity. If ?x-sslVerify=false is found, we don't.
We prepend x- to the option because it's non-standard in MongoDB connection strings.
"""
for check_ssl_value in qs.get("x-sslVerify", []):
if check_ssl_value == "false":
return False
return True
def prepare_options(u):
qs = urlparse.parse_qs(u.query)
use_ssl = qs_uses_ssl(qs)
check_ssl = qs_checks_ssl(qs)
# Prepare our Mongo options
options = [
"--host", u.hostname,
"--port", str(u.port or DEFAULT_MONGO_PORT),
]
for opt, val in zip(["username", "password"], [u.username, u.password]):
if val:
options.extend(["--{0}".format(opt), val])
if use_ssl:
options.extend(["--ssl", "--sslCAFile", SSL_CA_FILE])
if not check_ssl:
options.append("--sslAllowInvalidCertificates")
return {
"host": u.hostname,
"port": u.port,
"username": u.username,
"password": <PASSWORD>,
"database": u.path.lstrip('/'),
"mongo_options": options
}
def sanity_check(u):
if u.hostname is None:
print >> sys.stderr, "URL must include hostname"
sys.exit(1)
def main(mongo_url):
u = urlparse.urlparse(mongo_url)
sanity_check(u)
# And now provide this to the shell
for k, v in prepare_options(u).items():
if isinstance(v, list):
array = "({0})".format(" ".join([quote(o) for o in v]))
print "{0}={1}".format(k, array)
else:
print "{0}={1}".format(k, quote(str(v)))
if __name__ == "__main__":
main(sys.argv[1])
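# Hedged usage sketch (hypothetical URL and values, not taken from the original repo):
# under Python 2, a call such as
#   python parse_mongo_url.py "mongodb://user:secret@db.example.com:27017/mydb?ssl=true"
# prints shell-evaluable assignments roughly like (dict ordering may vary):
#   host=db.example.com
#   port=27017
#   username=user
#   password=secret
#   database=mydb
#   mongo_options=(--host db.example.com --port 27017 --username user --password secret --ssl --sslCAFile /etc/ssl/certs/ca-certificates.crt)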
``` |
{
"source": "joshreeder/viewports",
"score": 2
} |
#### File: viewports/handlers/Packing.py
```python
from __future__ import unicode_literals
import datetime
import frappe
import frappe.defaults
import pprint
from Page import Page
pp = pprint.PrettyPrinter(indent=4)
class Packing(Page):
def get_header(self):
orders = self.get_sales_orders()
packed = sum([item['total_packed'] for item in orders]) * 5
total = sum([item['total_quantity'] for item in orders]) * 5
percent_complete = self.percent_complete(packed,total)
now = datetime.datetime.now()
header = {
"daily_average": self.get_average("packaged_actual"),
"weekday":now.strftime("%A"),
"date":now.strftime("%b. %d"),
"time": now.strftime("%H:%M"),
"pounds_today": {
"packed": packed,
"total":total
},
"percent_complete":percent_complete,
"speed":"3,800"
}
return header
def get_page(self):
order_list = self.get_sales_orders()
return {"orders":order_list}
def get_sales_orders(self):
sales_orders = frappe.get_all('Sales Order', fields=["*"])
sales_orders = [item for item in sales_orders if item['delivery_date'] == self.tdate]
orders = {}
for so in sales_orders:
so_doc = frappe.get_doc('Sales Order',so['name'])
order = {
"company":so_doc.customer_name,
"items":{},
"total_quantity": 0,
"total_packed": 0
}
for item in so_doc.items:
item = item.__dict__
item_code = item['item_code']
order_item = {
"packed": 0,
"quantity": int(item['qty']),
"item_code": item_code,
"percent_complete": 0
}
order["total_quantity"] += int(item['qty'])
order["items"][item_code] = order_item
orders[so_doc.customer_name] = order
delivery_notes = frappe.get_all('Delivery Note', fields=["*"])
delivery_notes = [item for item in delivery_notes if item['posting_date'] == self.tdate]
for dn in delivery_notes:
dn = frappe.get_doc('Delivery Note',dn['name'])
dn = dn.__dict__
if orders.get(dn['customer_name']) != None:
for item in dn["items"]:
item_code = item.item_code
if orders[dn['customer_name']]["items"].get(item_code) != None:
orders[dn['customer_name']]["items"][item_code]["packed"] += item.qty
orders[dn['customer_name']]["total_packed"] += item.qty
order_list = []
for key in orders:
order = orders[key]
item_list = []
for key2 in order['items']:
item = order['items'][key2]
item["percent_complete"] = self.percent_complete(item["packed"],item["quantity"])
item_list.append(item)
order['items'] = item_list
order_list.append(order)
return order_list
```
#### File: viewports/www/packaging.py
```python
from __future__ import unicode_literals
import frappe
import datetime
import pprint
pp = pprint.PrettyPrinter(indent=4)
def get_header_info():
now = datetime.datetime.now()
header = {
"daily_average": "24,000",
"weekday":now.strftime("%A"),
"date":now.strftime("%b. %d"),
"time": now.strftime("%H:%M"),
"pounds_today": {
"packed": "18,482",
"total":"27,500"
},
"percent_complete":"67%",
"speed":"3,800"
}
return header
def get_context(context):
stock_entries = frappe.get_all('Stock Entry',filters={},fields=['name','creation','purpose','posting_date','posting_time','title'])
for idx,item in enumerate(stock_entries):
entry = frappe.get_doc('Stock Entry',item.name)
if entry.items:
stock_entries[idx]["items"] = entry.items
print stock_entries
print"done"
print "get doc test"
stock_entry = frappe.get_doc('Stock Entry','STE-00001')
pp.pprint( stock_entry.__dict__ )
print "stock Entry"
pp.pprint( stock_entry.items[0].__dict__)
header = get_header_info()
product = {
"weight":"6-8",
"name":"fre rrt fil",
"completed":15,
"total":20
}
products = [product for item in range(6)]
page = {
"products":products
}
context["header"] = header
context["page"] = page
context["name"] = "packaging"
context.data = "test data 2"
return context
``` |
{
"source": "joshreini1/fantasy_intellectuals",
"score": 3
} |
#### File: data/src/load_tweets.py
```python
import pandas as pd
import os
from dotenv import load_dotenv
import tweepy as tw
from tweepy import OAuthHandler
#get twitter api keys from dot env
load_dotenv()
consumerKey = os.getenv('api_key')
consumerSecret = os.getenv('api_key_secret')
bearer_token = os.getenv('bearer_token')
accessToken = os.getenv('access_token')
accessSecret = os.getenv('access_token_secret')
auth = OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessSecret)
api = tw.API(auth, wait_on_rate_limit=True)
def stream_tweets(search_term):
data = [] # empty list to which tweet_details obj will be added
counter = 0 # counter to keep track of each iteration
print(counter)
for tweet in tw.Cursor(api.search_tweets, q='\"{}\" -filter:retweets'.format(search_term), count=100, lang='en', tweet_mode='extended').items():
tweet_details = {}
tweet_details['name'] = tweet.user.screen_name
tweet_details['tweet'] = tweet.full_text
tweet_details['retweets'] = tweet.retweet_count
tweet_details['location'] = tweet.user.location
tweet_details['created'] = tweet.created_at.strftime("%d-%b-%Y")
tweet_details['followers'] = tweet.user.followers_count
tweet_details['is_user_verified'] = tweet.user.verified
print(tweet_details)
data.append(tweet_details)
counter += 1
if counter == 1000:
break
df = pd.DataFrame(data)
df.to_csv('..\\tweets\\' + search_term + '.csv', index=False)
print('done!')
search_terms = ['climate change','covid']
if __name__ == "__main__":
print('Starting to stream...')
for search_term in search_terms:
stream_tweets(search_term)
``` |
{
"source": "Josh-repository/Dashboard-CityManager-",
"score": 3
} |
#### File: main_project/Bus_API/process_bus_delays.py
```python
import requests
import json
from ..Config.config_handler import read_config
class ProcessBusDelays:
def __init__(self):
self.config_vals = read_config("Bus_API")
    # Get the live bus data (arrival time, departure time, delay) from the API and return it.
def get_data_from_bus_api(self):
url = self.config_vals["api_url"]
headers = {self.config_vals["api_key_name"]:self.config_vals["api_key_value"]}
response = requests.get(url, headers=headers)
bus_data = json.loads(response.text)
bus_trip_delays = bus_data["entity"]
return bus_trip_delays
    # Structure the live data (delays, arrival time, departure time) into the required format to send the most recent stop details to the frontend.
def get_delay_for_trip_live(self):
bus_trip_delays=self.get_data_from_bus_api()
result_response={}
for trip in bus_trip_delays:
temp = trip["trip_update"]
if temp["trip"]["schedule_relationship"]!="CANCELED":
delay_details = temp["stop_time_update"][-1]
if "departure" not in delay_details:
temp_delay = delay_details["arrival"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
temp_delay = delay_details["departure"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
result_response[trip["id"]] = {"STATUS":"CANCELED"}
return result_response
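# Illustrative shape of the value returned by get_delay_for_trip_live()
# (the ids, stop ids and delay values below are made up, not real API output):
# {
#     "1234": {"STOP_ID": "8220DB000002", "STOP_SEQUENCE": 30, "DELAY": 120},
#     "5678": {"STATUS": "CANCELED"}
# }
# DELAY falls back to the string "Not Available" when the feed carries no delay field.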
```
#### File: Bus_API/views_bus_api/show_bus_delays.py
```python
import os
import uuid
from django.http import JsonResponse
from django.http import HttpResponse
from rest_framework.views import APIView
import time as processTiming
from datetime import time
from rest_framework.decorators import api_view
from ..process_bus_delays import ProcessBusDelays
class BusTripDelays(APIView):
    # API to fetch bus delays used by the frontend. The result consists of the arrival time, departure time and any delays.
@classmethod
def get(self, request, bus_trip_delays = ProcessBusDelays()):
startTime = processTiming.time()
call_uuid = uuid.uuid4()
ID = "BUS_TRIP_DELAYS"
result = bus_trip_delays.get_delay_for_trip_live()
return JsonResponse(
{
"API_ID": ID,
"CALL_UUID": call_uuid,
"DATA": {
"RESULT": result
},
"TIMESTAMP": "{} seconds".format(float(round(processTiming.time() - startTime, 2)))}
)
```
#### File: main_project/Emergency_Service_API/store_emergency_service_data_in_database.py
```python
import sys
from datetime import datetime, timedelta
from mongoengine import *
import requests
import json
import pytz
import csv
import time as time
import pandas as pd
from main_project.Logs.service_logs import emergency_service_log
# Structure of collection storing Firestations details
class FireStations(Document):
station_name = StringField(max_length=200, unique=True)
station_address = StringField(max_length=200)
station_phone = StringField(max_length=200)
station_email = StringField(max_length=200)
service_type = StringField(max_length=200)
station_lat = DecimalField(precision=3, rounding='ROUND_HALF_UP')
station_lon = DecimalField(precision=3, rounding='ROUND_HALF_UP')
meta = {'collection': 'Fire_Service'
}
# Structure of collection storing Health Centers details
class HealthCenters(Document):
center_name = StringField(max_length=200, unique=True)
center_address = StringField(max_length=200)
center_email = StringField(max_length=200)
center_phone = StringField(max_length=200)
center_lat = DecimalField(precision=3, rounding='ROUND_HALF_UP')
center_lon = DecimalField(precision=3, rounding='ROUND_HALF_UP')
meta = {'collection': 'Health_Centers'
}
# Structure of collection storing Garda Stations details
class GardaStations(Document):
station = StringField(max_length=200)
station_address = StringField(max_length=200)
station_phone = StringField(max_length=200)
station_division = StringField(max_length=200)
station_divisional_hq = StringField(max_length=200)
station_lat = DecimalField(precision=3, rounding='ROUND_HALF_UP')
station_lon = DecimalField(precision=3, rounding='ROUND_HALF_UP')
meta = {'collection': 'Garda_Station'
}
# Structure of collection storing Hospitals details
class Hospitals(Document):
center_name = StringField(max_length=200, unique=True)
center_address = StringField(max_length=200)
center_lat = DecimalField(precision=3, rounding='ROUND_HALF_UP')
center_lon = DecimalField(precision=3, rounding='ROUND_HALF_UP')
meta = {'collection': 'Hospitals'
}
class StoreServiceData:
def __init__(self):
self.logger = emergency_service_log()
self.pd = pd
# Method reads the csv file containing the information of fire stations and return the list of details of fire stations
def read_fire_stations(self):
readfile = []
self.logger.info("Reading fire stations file")
with open("../sustainableCityManagement/main_project/Emergency_Service_API/resources/fcc_fire_stations_dublin.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant fire stations information in Database
def store_fire_stations(self):
readfile = self.read_fire_stations()
self.logger.info("Storing fire stations Data in DB")
for i in range(1, len(readfile)):
firestations = FireStations(station_name=readfile[i][0],
station_address=readfile[i][1],
station_phone=readfile[i][2],
station_email=readfile[i][3],
service_type=readfile[i][5],
station_lat=readfile[i][6],
station_lon=readfile[i][7])
try:
firestations.save()
except:
pass
# Method fetches the fire stations information from Database and returns list of fire stations details
def fetch_fire_station_informations(self, locationName="all"):
q_set = FireStations.objects() # Fetch Data from DB
# Converts the Fire stations Data from DB into JSON format
json_data = q_set.to_json()
fire_stations = json.loads(json_data)
if fire_stations is None:
self.logger.error('Fire stations data not retrieved from DB')
else:
self.logger.info("Retrieved fire stations from DB")
return fire_stations
    # Method reads the csv file containing the information of health centers and returns the list of details of health centers
def read_health_centers(self):
readfile = []
self.logger.info("Reading health centers file")
with open("../sustainableCityManagement/main_project/Emergency_Service_API/resources/fcc_health_centers_dublin.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant health centers information in Database
def store_health_centers(self):
readfile = self.read_health_centers()
self.logger.info("Storing health centers Data in DB")
for i in range(1, len(readfile)):
healthcenters = HealthCenters(center_name=readfile[i][0],
center_address=readfile[i][1] + readfile[i][2] +
readfile[i][3] + readfile[i][4],
center_phone=readfile[i][5],
center_lat=readfile[i][8],
center_lon=readfile[i][9])
try:
healthcenters.save()
except:
pass
# Method fetches the health centers information from Database and returns list of health centers details
def fetch_health_center_informations(self, locationName="all"):
q_set = HealthCenters.objects() # Fetch Data from DB
# Converts the Health Centers Data from DB into JSON format
json_data = q_set.to_json()
health_centers = json.loads(json_data)
if health_centers is None:
self.logger.error('Health centers data is not retrieved from DB')
else:
self.logger.info("Retrieved health centers from DB")
return health_centers
    # Method reads the csv file containing the information of garda stations and returns the list of details of garda stations
def read_garda_stations(self):
readfile = []
self.logger.info("Reading Garda Stations file")
with open("../sustainableCityManagement/main_project/Emergency_Service_API/resources/garda_stations_dublin.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant garda stations information in Database
def store_garda_stations(self):
readfile = self.read_garda_stations()
self.logger.info("Storing Garda stations Data in DB")
for i in range(1, len(readfile)):
garda_stations = GardaStations(station=readfile[i][0], station_address=readfile[i][1] + readfile[i][2] + readfile[i][3], station_phone=readfile[i]
[4], station_division=readfile[i][6], station_divisional_hq=readfile[i][7], station_lat=readfile[i][13], station_lon=readfile[i][14])
garda_stations.save()
return garda_stations
# Method fetches the garda stations information from Database and returns list of garda stations details
def fetch_garda_station_informations(self):
q_set = GardaStations.objects() # Fetch Data from DB
        # Converts the Garda stations data from DB into JSON format
json_data = q_set.to_json()
garda_stations = json.loads(json_data)
if garda_stations is None:
            self.logger.error('Garda stations data is not retrieved from DB')
else:
self.logger.info("Retrieved Bus Trips from DB")
return garda_stations
# Method reads the csv file containing the information of hospitals and return the list of details of hospitals
def read_hospitals(self):
readfile = []
self.logger.info("Reading Hospitals file")
with open("../sustainableCityManagement/main_project/Emergency_Service_API/resources/list_of_hospitals_in_ireland.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant hospitals information in Database
def store_hospitals(self):
readfile = self.read_hospitals()
self.logger.info("Storing Hospitals Data in DB")
for i in range(1, len(readfile)):
if "Dublin" in readfile[i][1]:
hospitals_data = Hospitals(center_name=readfile[i][0],
center_address=readfile[i][1],
center_lat=readfile[i][3],
center_lon=readfile[i][4])
try:
hospitals_data.save()
except:
pass
    # Method fetches the hospitals information from Database and returns the list of hospital details
def fetch_hospital_informations(self):
q_set = Hospitals.objects() # Fetch Data from DB
# Converts the Processed Hospitals Data from DB into JSON format
json_data = q_set.to_json()
hospitals_data = json.loads(json_data)
if hospitals_data is None:
self.logger.error('Hospitals data is not retrieved from DB')
else:
self.logger.info("Retrieved Hospitals Data from DB")
return hospitals_data
```
#### File: main_project/Footfall_API/fetch_footfallapi.py
```python
import sys
# from ..Logs.service_logs import bus_log
from .store_footfall_data_in_database import StoreFootfallData
from ..Config.config_handler import read_config
from datetime import datetime, timedelta
import copy
from ..ML_models import footfall_prediction as predictor
# from collections import Counter
# import collections
import json
config_vals = read_config("Footfall_API")
class FootfallApi:
def __init__(self):
self.FootfallObj = StoreFootfallData()
    # Structure the footfall data into the required format to send to the frontend
def footfall_datebased_graphvalues_predictions(self, required_location, days_interval=config_vals["days_interval_size"]):
result_response = {}
footfall_count_arr = []
footfall_dateBased, last_date = self.FootfallObj.fetch_data_from_db_with_prediction(
days_interval, required_location)
prediction_date = datetime.strftime(
last_date + timedelta(days=1), "%Y-%m-%d")
for item in footfall_dateBased:
location = required_location
result_response[location] = {}
for data in item["footfall_data"]:
date = datetime.strftime(data["data_date"], "%Y-%m-%d")
result_response[location][date] = data["count"]
footfall_count_arr.append(data["count"])
predicted_val = predictor.predict_footfall(footfall_count_arr)
result_response[required_location][prediction_date] = predicted_val
return result_response
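    # Illustrative shape of the mapping returned above (location, dates and counts are made up):
    # {"GRAFTON STREET": {"2021-03-09": 11023, "2021-03-10": 10876, "2021-03-11": 11204}}
    # where the final date is the appended ML prediction for the day after the last stored value.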
    # Structure the overall footfall data into the required format to send to the frontend
def footfall_overall(self):
result_response = {}
footfall_overall = self.FootfallObj.fetch_footfall_overall()
counter = 0
with open(config_vals["footfall_locations_file"], "r") as f:
loaded_locations = json.load(f)
for item in footfall_overall:
location = item["location"]
result_response[location] = {}
result_response[location]["Footfall"] = item["count"]
result_response[location]["Lat"] = loaded_locations[location]["lat"]
result_response[location]["Lon"] = loaded_locations[location]["lon"]
return result_response
```
#### File: main_project/Parkings_API/store_parkingsdata_to_database.py
```python
import sys
from mongoengine import *
import requests
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import pytz
from ..Logs.service_logs import bike_log
from ..Config.config_handler import read_config
from ..Parkings_API.parkings_collections_db import ParkingsAvailability, ParkingAvailability
import json
import logging
import statistics
# Read config values for the Parkings API.
config_vals = read_config("Parkings_API")
# Module-level logger used by the exception handler below (the logging module is imported above).
logger = logging.getLogger(__name__)
class StoreParkingsData:
    # Method gets the live parking data from the parking spaces API (default timespan: ~5 minutes)
def get_parkings_spaces_availability_live(self):
url = config_vals["api_url"]
response = requests.request("GET", url)
parkingSpaces = ET.fromstring(response.text)
return parkingSpaces
    # This method stores the relevant parking data in the DB
def store_parking_spaces_availability_live(self):
try:
parkingSpaces = self.get_parkings_spaces_availability_live()
timestamp = parkingSpaces[-1].text
# If data already present for that timestamp, return from db
q_set = ParkingsAvailability.objects(updateTimestamp=timestamp)
if q_set:
return q_set
# Else parse, store and return new data
else:
parkings = []
for area in parkingSpaces:
areaName = area.tag.upper()
if areaName != "TIMESTAMP":
for parking in area:
name = parking.attrib["name"].upper()
try:
spaces = int(parking.attrib["spaces"])
except:
spaces = None
parkings.append(ParkingAvailability(
area=areaName, name=name, availableSpaces=spaces))
parkingsAvailability = ParkingsAvailability(
updateTimestamp=timestamp, parkings=parkings)
parkingsAvailability.save()
return ParkingsAvailability.objects(updateTimestamp=timestamp)
except:
logger.exception('Not able to fetch data from API')
raise
# Fetch parkings availaility from db for a particular date
def fetch_data_from_db_for_day(self, dateForData):
start_date_str = dateForData.strftime("%Y-%m-%dT00:00:00Z")
end_date_str = dateForData.strftime("%Y-%m-%dT23:59:59Z")
start_date = datetime.strptime(start_date_str, "%Y-%m-%dT%H:%M:%SZ")
end_date = datetime.strptime(end_date_str, "%Y-%m-%dT%H:%M:%SZ")
return ParkingsAvailability.objects(updateTimestamp__gte=start_date, updateTimestamp__lte=end_date)
# Fetch historical data of parkings availaility from db
def fetch_data_from_db_historical(self, dateFrom, dateTo):
# For each day between dateFrom and dateTo, fetch "fetch_data_from_db_for_day"
res = []
for dayDate in self.daterange(dateFrom, dateTo):
q_set = self.fetch_data_from_db_for_day(dayDate)
if not q_set:
continue
dayAvgSpaces = {}
for parkingsAvailability in q_set:
for parkingAvailability in parkingsAvailability["parkings"]:
if not parkingAvailability["name"] in dayAvgSpaces:
dayAvgSpaces[parkingAvailability["name"]] = []
# If available space is not None (i.e. missing data)
if parkingAvailability["availableSpaces"]:
dayAvgSpaces[parkingAvailability["name"]].append(
parkingAvailability["availableSpaces"])
# Average day's availability values for each parking
for parkingName in dayAvgSpaces:
if dayAvgSpaces[parkingName]:
dayAvgSpaces[parkingName] = int(
statistics.mean(dayAvgSpaces[parkingName]))
else:
# If no available data to compute average
dayAvgSpaces[parkingName] = None
res.append({
"_id": {"$oid": None},
"updateTimestamp": {
"$date": dayDate
},
"parkings": dayAvgSpaces
})
return res
    # Yield every date from start_date to end_date, inclusive
def daterange(self, start_date, end_date):
for n in range(int((end_date - start_date).days) + 1):
yield start_date + timedelta(n)
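# Example: daterange(datetime(2021, 3, 1), datetime(2021, 3, 3)) yields the 1st, 2nd and 3rd of
# March 2021, i.e. both endpoints are included because of the "+ 1" in the range above.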
```
#### File: Parkings_API/views_parkings_api/show_parkings_locations.py
```python
import os
import random
import tempfile
import uuid
import json
from django.http import JsonResponse
from django.http import HttpResponse
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
import time as processTiming
from datetime import timedelta, datetime, time, date
from rest_framework.decorators import api_view
from django.shortcuts import render
from ..fetch_parkingsapi import FetchParkingsApi
# API to fetch parking locations used by the frontend. The result consists of the parking name, latitude and longitude.
class ParkingsLocations(APIView):
@classmethod
def get(self, request, parkings = FetchParkingsApi()):
startTime = processTiming.time()
call_uuid = uuid.uuid4()
ID = "PARKINGS_LOCATIONS"
result = parkings.parkings_locations()
        # Build and return the standard JSON response envelope.
return JsonResponse(
{
"API_ID": ID,
"CALL_UUID": call_uuid,
"DATA": {
"RESULT": result
},
"TIMESTAMP": "{} seconds".format(float(round(processTiming.time() - startTime, 2)))}
)
```
#### File: main_project/Parkings_Recreational_Places_API/store_recreational_locations_in_db.py
```python
import sys
from mongoengine import *
import csv
import json
import math
from ..Logs.service_logs import parkings_log
from ..Parkings_Recreational_Places_API.recreational_places_parkings_collections_db import Parks, Cinemas, PlayingPitches, Beaches
class StoreRecreationalPlacesParkingsData:
def __init__(self):
self.logger = parkings_log()
    # Method reads the csv file containing the information of beaches and returns the list of details of beaches
def read_beaches_locations(self):
readfile = []
self.logger.info("Reading Beaches file")
with open("../sustainableCityManagement/main_project/Parkings_Recreational_Places_API/resources/Beaches.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant beaches information in Database
def store_beaches_locations(self):
readfile = self.read_beaches_locations()
self.logger.info("Storing Beaches Data in DB")
for i in range(1, len(readfile)):
parkings=self.get_parkings(float(readfile[i][2]),float(readfile[i][3]))
beaches = Beaches(beach_id=readfile[i][0],
beach_name=readfile[i][1],
beach_lat=readfile[i][2],
beach_lon=readfile[i][3],
beach_parkings=parkings)
try:
beaches.save()
except:
pass
# Method fetches the beaches information from Database and returns it
def fetch_beaches_location(self, locationName="all"):
q_set = Beaches.objects() # Fetch Data from DB
# Converts the Beach Data from DB into JSON format
json_data = q_set.to_json()
beaches = json.loads(json_data)
if beaches is None:
self.logger.error('Beach data not retrieved from DB')
else:
self.logger.info("Retrieved Beaches from DB")
for b in beaches:
del b["_id"]
return beaches
    # Method reads the csv file containing the information of playing pitches and returns the list of details of playing pitches
def read_playing_pitches_locations(self):
readfile = []
self.logger.info("Reading Playing Pitches file")
with open("../sustainableCityManagement/main_project/Parkings_Recreational_Places_API/resources/PlayingPitches.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant playing pitches information in Database
def store_playing_pitches_locations(self):
readfile = self.read_playing_pitches_locations()
self.logger.info("Storing Playing Pitches Data in DB")
for i in range(1, len(readfile)):
parkings=self.get_parkings(float(readfile[i][3]),float(readfile[i][4]))
playing_pitches = PlayingPitches(facility_type=readfile[i][0],
facility_name=readfile[i][1],
facility_location=readfile[i][2],
facility_lat=readfile[i][3],
facility_lon=readfile[i][4],
facility_parkings=parkings)
try:
playing_pitches.save()
except:
pass
# Method fetches the playing pitches information from Database and returns it
def fetch_playing_pitches_location(self, locationName="all"):
q_set = PlayingPitches.objects() # Fetch Data from DB
json_data = q_set.to_json()
playing_pitches = json.loads(json_data)
if playing_pitches is None:
self.logger.error('Playing Pitch data not retrieved from DB')
else:
self.logger.info("Retrieved Playing Pitches from DB")
for p in playing_pitches:
del p["_id"]
return playing_pitches
    # Method reads the csv file containing the information of parks and returns the list of details of parks
def read_parks_locations(self):
readfile = []
self.logger.info("Reading Parks file")
with open("../sustainableCityManagement/main_project/Parkings_Recreational_Places_API/resources/Parks.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant parks information in Database
def store_parks_locations(self):
readfile = self.read_parks_locations()
self.logger.info("Storing Parks Data in DB")
for i in range(1, len(readfile)):
parkings=self.get_parkings(float(readfile[i][3]),float(readfile[i][4]))
parks = Parks(park_name=readfile[i][0],
park_address=readfile[i][1],
park_area=readfile[i][2],
park_lat=readfile[i][3],
park_lon=readfile[i][4],
park_parkings=parkings)
try:
parks.save()
except:
pass
# Method fetches the parks information from Database and returns it
def fetch_parks_location(self, locationName="all"):
q_set = Parks.objects() # Fetch Data from DB
# Converts the Parks Data from DB into JSON format
json_data = q_set.to_json()
parks = json.loads(json_data)
if parks is None:
self.logger.error('Parks data not retrieved from DB')
else:
self.logger.info("Retrieved Parks from DB")
for p in parks:
del p["_id"]
return parks
    # Method reads the csv file containing the information of cinemas and returns the list of details of cinemas
def read_cinemas_locations(self):
readfile = []
self.logger.info("Reading Cinemas file")
with open("../sustainableCityManagement/main_project/Parkings_Recreational_Places_API/resources/Cinemas.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
return readfile
# Method stores the relevant cinemas information in Database
def store_cinemas_locations(self):
readfile = self.read_cinemas_locations()
self.logger.info("Storing Cinemas Data in DB")
for i in range(1, len(readfile)):
parkings=self.get_parkings(float(readfile[i][2]),float(readfile[i][3]))
cinemas = Cinemas(cinema_name=readfile[i][0],
cinema_address=readfile[i][1],
cinema_lat=readfile[i][2],
cinema_lon=readfile[i][3],
cinema_parkings=parkings)
try:
cinemas.save()
except:
pass
# Method fetches the cinemas information from Database and returns it
def fetch_cinemas_location(self, locationName="all"):
q_set = Cinemas.objects() # Fetch Data from DB
# Converts the Cinemas Data from DB into JSON format
json_data = q_set.to_json()
cinemas = json.loads(json_data)
if cinemas is None:
self.logger.error('Cinemas data not retrieved from DB')
else:
self.logger.info("Retrieved Cinemas from DB")
for c in cinemas:
del c["_id"]
return cinemas
    # Method to get up to five of the nearest disabled-parking spots within 2 km of a given location.
def get_parkings(self,lat,lon):
with open("../sustainableCityManagement/main_project/Parkings_Recreational_Places_API/resources/disabledparkings.csv", "r", encoding="utf8") as f:
readfile = list(csv.reader(f))
loc_parkings=[]
for i in range(1, len(readfile)):
lat_parkings = float(readfile[i][2])
lon_parkings = float(readfile[i][3])
R = 6371e3; # metres
phi1 = lat_parkings * math.pi/180
phi2 = lat * math.pi/180
delta_phi = (lat-lat_parkings) * math.pi/180
delta_lambda = (lon-lon_parkings) * math.pi/180
a = math.sin(delta_phi/2) * math.sin(delta_phi/2) + math.cos(phi1) * math.cos(phi2) * math.sin(delta_lambda/2) * math.sin(delta_lambda/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
if d < 2000.0:
loc_parkings.append({
"road": readfile[i][1],
"lat": readfile[i][2],
"lng": readfile[i][3],
"distance": d
})
if len(loc_parkings)>5:
loc_parkings=sorted(loc_parkings, key=lambda d: d['distance'], reverse=False)
return loc_parkings[:5]
else:
return loc_parkings
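# The distance d computed above is the haversine great-circle distance:
#   a = sin^2(dphi/2) + cos(phi1) * cos(phi2) * sin^2(dlambda/2)
#   d = 2 * R * atan2(sqrt(a), sqrt(1 - a)), with R = 6371 km
# so, for example, two points about 0.018 degrees of latitude apart come out at roughly 2 km,
# just inside the 2000 m cut-off used when collecting nearby parking spots.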
```
#### File: main_project/Population_API/views_population.py
```python
from django.http import JsonResponse
from django.http import HttpResponse
from rest_framework.views import APIView
from .store_population import StorePopulation
import time as processTiming
import uuid
# API to fetch the Ireland population used by the frontend. The result consists of the population estimate and year.
class IrelandPopulationView(APIView):
@classmethod
def get(self, request, fetch_population=StorePopulation()):
startTime = processTiming.time()
call_uuid = uuid.uuid4()
ID = "IRELAND_POPULATION_INFO"
result = fetch_population.fetch_irish_population()
        # Build and return the standard JSON response envelope.
return JsonResponse(
{
"API_ID": ID,
"CALL_UUID": call_uuid,
"DATA": {
"RESULT": result
},
"TIMESTAMP": "{} seconds".format(float(round(processTiming.time() - startTime, 2)))}
)
# API to fetch the Dublin population used by the frontend. The result consists of the population estimate and year.
class DublinPopulationView(APIView):
@classmethod
def get(self, request, fetch_population=StorePopulation()):
startTime = processTiming.time()
call_uuid = uuid.uuid4()
ID = "DUBLIN_POPULATION_INFO"
result = fetch_population.fetch_dublin_population()
        # Build and return the standard JSON response envelope.
return JsonResponse(
{
"API_ID": ID,
"CALL_UUID": call_uuid,
"DATA": {
"RESULT": result
},
"TIMESTAMP": "{} seconds".format(float(round(processTiming.time() - startTime, 2)))}
)
```
#### File: tests/Bike_API/test_fetch_bikeapi.py
```python
from main_project.Bike_API import fetch_bikeapi
from main_project.Bike_API.store_bikedata_to_database import StoreBikeDataToDatabase
from django.test import TestCase
from unittest.mock import MagicMock
from mock import patch
import json
import datetime
from freezegun import freeze_time
@freeze_time("2021-03-11 17")
class TestFetchBikeApi(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_bikeapi_locations_false_one_value_per_location_multiple_locations(self):
fetch_bike_api_class = fetch_bikeapi.FetchBikeApi()
store_bike_data_to_database = StoreBikeDataToDatabase()
mocked_result = [
{'historical': [
{
'bike_stands': 40,
'available_bike_stands': 31,
'time': datetime.datetime(2021, 3, 11, 16, 40, 3)
}
], 'name': 'MOUNT STREET LOWER'
},
{'historical': [
{
'bike_stands': 30,
'available_bike_stands': 11,
'time': datetime.datetime(2021, 3, 11, 16, 40, 3)
}
], 'name': 'SOUTH DOCK ROAD'
}
]
store_bike_data_to_database.fetch_data_from_db_for_minutes = MagicMock(return_value=mocked_result)
expected_result = {
'MOUNT STREET LOWER': {'TOTAL_STANDS': 40, 'IN_USE': 31, 'TIME': '2021-03-11 17:00'},
'SOUTH DOCK ROAD': {'TOTAL_STANDS': 30, 'IN_USE': 11, 'TIME': '2021-03-11 17:00'}
}
result = fetch_bike_api_class.bikeapi(locations=False, store_bike_data_to_database=store_bike_data_to_database)
self.assertDictEqual(result, expected_result)
def test_bikeapi_locations_false_more_than_one_value_per_location(self):
fetch_bike_api_class = fetch_bikeapi.FetchBikeApi()
store_bike_data_to_database = StoreBikeDataToDatabase()
mocked_result = [
{'historical': [
{
'bike_stands': 40,
'available_bike_stands': 30,
'time': datetime.datetime(2021, 3, 11, 16, 45, 3)
},
{
'bike_stands': 40,
'available_bike_stands': 20,
'time': datetime.datetime(2021, 3, 11, 16, 40, 3)
}
], 'name': 'MOUNT STREET LOWER'
},
{'historical': [
{
'bike_stands': 30,
'available_bike_stands': 20,
'time': datetime.datetime(2021, 3, 11, 16, 45, 3)
},
{
'bike_stands': 30,
'available_bike_stands': 10,
'time': datetime.datetime(2021, 3, 11, 16, 40, 3)
}
], 'name': 'SOUTH DOCK ROAD'
}
]
store_bike_data_to_database.fetch_data_from_db_for_minutes = MagicMock(return_value=mocked_result)
expected_result = {
'MOUNT STREET LOWER': {'TOTAL_STANDS': 40, 'IN_USE': 25, 'TIME': '2021-03-11 17:00'},
'SOUTH DOCK ROAD': {'TOTAL_STANDS': 30, 'IN_USE': 15, 'TIME': '2021-03-11 17:00'}
}
result = fetch_bike_api_class.bikeapi(locations=False, store_bike_data_to_database=store_bike_data_to_database)
self.assertDictEqual(result, expected_result)
def test_bikeapi_locations_true(self):
fetch_bike_api_class = fetch_bikeapi.FetchBikeApi()
store_bike_data_to_database = StoreBikeDataToDatabase()
mocked_result = [
{
"name": "test_name_1",
"latitude": 1,
"longitude": 2
},
{
"name": "test_name_2",
"latitude": 3,
"longitude": 4
}
]
store_bike_data_to_database.fetch_bike_stands_location = MagicMock(return_value=mocked_result)
expected_result = {
"test_name_1": {
"LATITUDE": 1,
"LONGITUDE": 2
},
"test_name_2": {
"LATITUDE": 3,
"LONGITUDE": 4
}
}
result = fetch_bike_api_class.bikeapi(locations=True, store_bike_data_to_database=store_bike_data_to_database)
self.assertDictEqual(result, expected_result)
```
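The two `locations=False` tests above pin down the aggregation contract: for each station, `TOTAL_STANDS` is taken from the records, `IN_USE` is the mean of `available_bike_stands` over the returned window, and `TIME` is the (frozen) current time formatted to the minute. A minimal sketch of that aggregation follows; the helper name is hypothetical and this is not the real `FetchBikeApi` implementation.

```python
from datetime import datetime

def summarise_station_history(records):
    """Reduce per-station history rows to the dict shape the tests assert (sketch only)."""
    summary = {}
    for station in records:
        history = station["historical"]
        available = [row["available_bike_stands"] for row in history]
        summary[station["name"]] = {
            "TOTAL_STANDS": history[0]["bike_stands"],
            # Mean of available_bike_stands over the window: [31] -> 31, [30, 20] -> 25.
            "IN_USE": round(sum(available) / len(available)),
            # Under freeze_time("2021-03-11 17") this yields "2021-03-11 17:00".
            "TIME": datetime.now().strftime("%Y-%m-%d %H:%M"),
        }
    return summary
```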
#### File: tests/Bus_API/test_store_bus_routes_in_db.py
```python
import pandas as pd
from datetime import datetime, timedelta, date
from django.test import TestCase
from unittest.mock import MagicMock
from main_project.Bus_API.store_bus_routes_data_in_database import StoreBusRoutesData
from main_project.Bus_API.bus_collections_db import BusStops, BusTimings, BusRoutes, BusTrips, BusPath
from mongoengine import *
import mongomock as mm
from decimal import Decimal
class TestStoreBusRoutesData(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_read_bus_stops(self):
read_bus_stops = StoreBusRoutesData()
assert read_bus_stops.read_bus_stops()[0] == [
'\ufeffstop_id', 'stop_name', 'stop_lat', 'stop_lon']
assert read_bus_stops.read_bus_stops()[1][1] == "Killeen Bridge"
def test_store_bus_stops(self):
store_bus_stops_loc = StoreBusRoutesData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["35", "Dublin Bus Stop2", 0.78656, -0.1563]]
store_bus_stops_loc.read_bus_stops = MagicMock(
return_value=expectedresult)
store_bus_stops_loc.store_bus_stops()
fetch_bus_stops = BusStops.objects(
stop_name="Dublin Bus Stop2").first()
assert fetch_bus_stops["stop_name"] == "Dublin Bus Stop2"
assert fetch_bus_stops["stop_id"] == "35"
self.assertAlmostEqual(
fetch_bus_stops["stop_lat"], Decimal(0.786), None, None, 0.001)
self.assertAlmostEqual(
fetch_bus_stops["stop_lon"], Decimal(-0.156), None, None, 0.001)
def test_fetch_busstops_location(self):
fetch_bus_stops_loc = StoreBusRoutesData()
expectedresult = [[], ["35", "Dublin Bus Stop2", 0.78656, -0.1563]]
fetch_bus_stops_loc.read_bus_stops = MagicMock(
return_value=expectedresult)
fetch_bus_stops_loc.store_bus_stops()
bus_stops_loc = fetch_bus_stops_loc.fetch_busstops_location()
assert bus_stops_loc[0]["stop_name"] == "Dublin Bus Stop2"
assert bus_stops_loc[0]["stop_id"] == "35"
self.assertAlmostEqual(
bus_stops_loc[0]["stop_lat"], 0.786, None, None, 0.002)
self.assertAlmostEqual(
bus_stops_loc[0]["stop_lon"], -0.156, None, None, 0.002)
def test_read_bus_routes(self):
read_bus_routes = StoreBusRoutesData()
assert read_bus_routes.read_bus_routes()[0] == [
'\ufeffroute_id', 'agency_id', 'route_short_name', 'route_long_name', 'route_type']
assert read_bus_routes.read_bus_routes()[1][0] == "10-100-e19-1"
def test_store_bus_routes(self):
store_bus_stops_routes = StoreBusRoutesData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["566-45-e41", 78, 'BR1', "Bus Route 1"]]
store_bus_stops_routes.read_bus_routes = MagicMock(
return_value=expectedresult)
store_bus_stops_routes.store_bus_routes()
fetch_bus_routes = BusRoutes.objects().first()
assert fetch_bus_routes["route_name"] == "Bus Route 1"
assert fetch_bus_routes["route_id"] == "566-45-e41"
def test_fetch_busroutes(self):
fetch_bus_routes = StoreBusRoutesData()
expectedresult = [[], ["566-45-e41", 78, 'BR1', "Bus Route 1"]]
fetch_bus_routes.read_bus_routes = MagicMock(
return_value=expectedresult)
fetch_bus_routes.store_bus_routes()
bus_routes = fetch_bus_routes.fetch_busroutes()
assert bus_routes[0]["route_name"] == "Bus Route 1"
assert bus_routes[0]["route_id"] == "566-45-e41"
def test_read_bus_trips(self):
read_bus_trip = StoreBusRoutesData()
assert read_bus_trip.read_bus_trips()[0] == [
'route_id', 'service_id', 'trip_id', 'shape_id', 'trip_headsign', 'direction_id']
assert read_bus_trip.read_bus_trips()[1][2] == "1381339.3.10-101-e19-1.261.I"
def test_store_bus_trips(self):
store_bus_trips = StoreBusRoutesData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["17-e19-34", "tyy",
"345.3.I", "1345.3.I", "Bus Station 1", "1"]]
store_bus_trips.read_bus_trips = MagicMock(return_value=expectedresult)
store_bus_trips.store_bus_trips()
fetch_bus_trips = BusTrips.objects().first()
assert fetch_bus_trips["trip_id"] == "345.3.I"
assert fetch_bus_trips["route_id"] == "17-e19-34"
def test_store_bus_times(self):
store_bus_trips = StoreBusRoutesData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["17-e19-34", "tyy",
"345.3.I", "1345.3.I", "Bus Station 1", "1"]]
store_bus_trips.read_bus_trips = MagicMock(return_value=expectedresult)
store_bus_trips.store_bus_trips()
expectedresult_timings_df = pd.DataFrame({'trip_id': ["345.3.I", "345.3.I"], 'arrival_time': ["06:20:00", "06:25:00"], 'departure_time': [
"06:20:00", "06:25:00"], 'stop_id': ["7866R56", "7866RT7"], 'stop_sequence': [1, 2]})
store_bus_trips.pd.read_csv = MagicMock(
return_value=expectedresult_timings_df)
store_bus_trips.store_bus_times()
fetch_bus_trips = BusTrips.objects().first()
assert fetch_bus_trips["trip_id"] == "345.3.I"
assert fetch_bus_trips["stops"][0]["stop_id"] == "7866R56"
assert fetch_bus_trips["stops"][0]["stop_arrival_time"] == "06:20:00"
assert fetch_bus_trips["stops"][0]["stop_departure_time"] == "06:20:00"
assert fetch_bus_trips["stops"][0]["stop_sequence"] == 1
def test_fetch_bustrips(self):
fetch_bus_trips = StoreBusRoutesData()
expectedresult = [[], ["17-e19-34", "tyy",
"345.3.I", "1345.3.I", "Bus Station 1", "1"]]
fetch_bus_trips.read_bus_trips = MagicMock(return_value=expectedresult)
fetch_bus_trips.store_bus_trips()
expectedresult_timings_df = pd.DataFrame({'trip_id': ["345.3.I", "345.3.I"], 'arrival_time': ["06:20:00", "06:25:00"], 'departure_time': [
"06:20:00", "06:25:00"], 'stop_id': ["7866R56", "7866RT7"], 'stop_sequence': [1, 2]})
fetch_bus_trips.pd.read_csv = MagicMock(
return_value=expectedresult_timings_df)
fetch_bus_trips.store_bus_times()
bus_trips = fetch_bus_trips.fetch_bustrips()
assert bus_trips[0]["trip_id"] == "345.3.I"
assert bus_trips[0]["stops"][0]["stop_id"] == "7866R56"
assert bus_trips[0]["stops"][0]["stop_arrival_time"] == "06:20:00"
assert bus_trips[0]["stops"][0]["stop_departure_time"] == "06:20:00"
assert bus_trips[0]["stops"][0]["stop_sequence"] == 1
def test_read_bus_paths(self):
read_bus_stops = StoreBusRoutesData()
first_datapoint = read_bus_stops.read_bus_paths()['paths'][0]
assert first_datapoint['start'] == '8300B1359501'
assert first_datapoint['end'] == '8300B1006201'
assert first_datapoint['coordinates'][0] == [53.711935, -6.352762]
def test_store_bus_paths(self):
store_bus_stops_loc = StoreBusRoutesData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = {
"paths": [
{
"start": "8300B1359501",
"end": "8300B1006201",
"coordinates": [
[53.711935, -6.352762]
]
}
]
}
store_bus_stops_loc.read_bus_paths = MagicMock(
return_value=expectedresult)
store_bus_stops_loc.store_bus_paths()
bus_path= BusPath.objects(
_id='8300B13595018300B1006201').first()
assert bus_path["start_stop_id"] == "8300B1359501"
assert bus_path["end_stop_id"] == "8300B1006201"
self.assertAlmostEqual(
bus_path.coordinates[0]['lat'], Decimal(53.711935), None, None, 0.001)
self.assertAlmostEqual(
bus_path.coordinates[0]['lon'], Decimal(-6.352762), None, None, 0.001)
def test_fetch_bus_paths(self):
fetch_bus_stops_loc = StoreBusRoutesData()
expectedresult = {
"paths": [
{
"start": "8300B1359501",
"end": "8300B1006201",
"coordinates": [
[53.711935, -6.352762]
]
}
]
}
fetch_bus_stops_loc.read_bus_paths = MagicMock(
return_value=expectedresult)
fetch_bus_stops_loc.store_bus_paths()
bus_paths = fetch_bus_stops_loc.fetch_bus_paths()
assert bus_paths == [
{
'_id': '8300B13595018300B1006201',
'start_stop_id': '8300B1359501',
'end_stop_id': '8300B1006201',
'coordinates': [
{
'lat': 53.711935,
'lon': -6.352762
}
]
}
]
```
#### File: Bus_API/views_bus_api/test_show_bus_data.py
```python
from main_project.Bus_API.views_bus_api.show_bus_info import BusStopsLocations, BusTripsTimings
from django.test import TestCase
from unittest.mock import MagicMock
from rest_framework.request import Request
from django.http import HttpRequest
from main_project.Bus_API.fetch_busapi import FetchBusApi
import json
class TestBusStopsLocations(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_show_bus_location_data(self):
show_bus_data = BusStopsLocations()
request = HttpRequest()
request.method = 'GET'
request_wrapper = Request(request)
fetch_bus_api = FetchBusApi()
expected_result = {"test": "test_value"}
fetch_bus_api.bus_stand_locations = MagicMock(
return_value=expected_result)
response = show_bus_data.get(request_wrapper, fetch_bus_api)
fetch_bus_api.bus_stand_locations.assert_called_with()
assert response.status_code == 200
content = json.loads(response.content)
assert 'DATA' in content
data = content['DATA']
assert 'RESULT' in data
assert data['RESULT'] == expected_result
def test_show_bus_trip_timings(self):
show_trip_timing = BusTripsTimings()
request = HttpRequest()
request.method = 'GET'
request_wrapper = Request(request)
fetch_bus_api = FetchBusApi()
expected_result = {"test": "test_value"}
fetch_bus_api.bus_trips_timings = MagicMock(
return_value=expected_result)
response = show_trip_timing.get(request_wrapper, fetch_bus_api)
fetch_bus_api.bus_trips_timings.assert_called_with()
assert response.status_code == 200
content = json.loads(response.content)
assert 'DATA' in content
data = content['DATA']
assert 'RESULT' in data
assert data['RESULT'] == expected_result
```
#### File: tests/Emergency_Service_API/test_store_emergency_service_data_in_database.py
```python
import pandas as pd
from datetime import datetime, timedelta, date
from django.test import TestCase
from unittest.mock import MagicMock
from main_project.Emergency_Service_API.store_emergency_service_data_in_database import StoreServiceData
from main_project.Emergency_Service_API.store_emergency_service_data_in_database import FireStations
from main_project.Emergency_Service_API.store_emergency_service_data_in_database import HealthCenters
from main_project.Emergency_Service_API.store_emergency_service_data_in_database import GardaStations
from main_project.Emergency_Service_API.store_emergency_service_data_in_database import Hospitals
from mongoengine import *
import mongomock as mm
from decimal import Decimal
import json
class TestStoreServiceData(TestCase):
@classmethod
def setUpTestData(cls):
pass
# Tests for the fire station functions against the database.
def test_read_fire_stations(self):
read_services = StoreServiceData()
assert read_services.read_fire_stations()[0] == [
'Name', 'Address', 'Phone', 'Email', 'Website', 'Fire_Service', 'LAT', 'LONG']
assert read_services.read_fire_stations(
)[1][0] == "Balbriggan Fire Station"
def test_store_fire_stations(self):
store_service = StoreServiceData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Balbriggan Fire Station",
"Balbriggan Enterprise and Trade Centre Harry Reynolds Road Balbriggan Co. Dublin", "+353 1 6734000", "<EMAIL>", "x.<EMAIL>", "Dublin Fire Brigade - Foxtrot District", 0.78656, -0.1563]]
store_service.read_fire_stations = MagicMock(
return_value=expectedresult)
store_service.store_fire_stations()
fetch_fire_service = FireStations.objects(
station_name="Balbriggan Fire Station").first()
assert fetch_fire_service["station_address"] == "Balbriggan Enterprise and Trade Centre Harry Reynolds Road Balbriggan Co. Dublin"
assert fetch_fire_service["station_phone"] == "+353 1 6734000"
assert fetch_fire_service["station_email"] == "<EMAIL>"
assert fetch_fire_service["service_type"] == "Dublin Fire Brigade - Foxtrot District"
self.assertAlmostEqual(
fetch_fire_service["station_lat"], Decimal(0.786), None, None, 0.001)
self.assertAlmostEqual(
fetch_fire_service["station_lon"], Decimal(-0.156), None, None, 0.001)
def test_fetch_fire_station_informations(self):
fetch_service_fire = StoreServiceData()
expectedresult = [[], ["Balbriggan Fire Station",
"Balbriggan Enterprise and Trade Centre Harry Reynolds Road Balbriggan Co. Dublin", "+353 1 6734000", "<EMAIL>", "x.com", "Dublin Fire Brigade - Foxtrot District", 0.78656, -0.1563]]
fetch_service_fire.read_fire_stations = MagicMock(
return_value=expectedresult)
fetch_service_fire.store_fire_stations()
fire_service_loc = fetch_service_fire.fetch_fire_station_informations()
assert fire_service_loc[0]["station_name"] == "Balbriggan Fire Station"
assert fire_service_loc[0]["station_address"] == "Balbriggan Enterprise and Trade Centre Harry Reynolds Road Balbriggan Co. Dublin"
assert fire_service_loc[0]["station_phone"] == "+353 1 6734000"
assert fire_service_loc[0]["station_email"] == "<EMAIL>"
assert fire_service_loc[0]["service_type"] == "Dublin Fire Brigade - Foxtrot District"
self.assertAlmostEqual(
fire_service_loc[0]["station_lat"], 0.786, None, None, 0.002)
self.assertAlmostEqual(
fire_service_loc[0]["station_lon"], -0.156, None, None, 0.002)
# Tests for the health center functions against the database.
def test_read_health_centers(self):
read_services = StoreServiceData()
assert read_services.read_health_centers()[0] == [
'Name', 'Address1', 'Address2', 'Address3', 'Address4', 'Phone', 'Email', 'Website', 'LAT', 'LONG']
assert read_services.read_health_centers(
)[1][0] == "Balbriggan Health Centre"
def test_store_health_center_informations(self):
store_service = StoreServiceData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ['Balbriggan Health Centre', 'Hampton Street,', 'Balbriggan,',
'Co. Dublin', 'end', '+353 1 8834906', '<EMAIL>', 'y.com', 0.78656, -0.1563]]
store_service.read_health_centers = MagicMock(
return_value=expectedresult)
store_service.store_health_centers()
fetch_health_service = HealthCenters.objects(
center_name="Balbriggan Health Centre").first()
assert fetch_health_service["center_address"] == "Hampton Street,Balbriggan,Co. Dublinend"
assert fetch_health_service["center_phone"] == "+353 1 8834906"
self.assertAlmostEqual(
fetch_health_service["center_lat"], Decimal(0.786), None, None, 0.001)
self.assertAlmostEqual(
fetch_health_service["center_lon"], Decimal(-0.156), None, None, 0.001)
def test_fetch_health_center_informations(self):
fetch_service = StoreServiceData()
expectedresult = [[], ['Balbriggan Health Centre', 'Hampton Street,', 'Balbriggan,',
'Co. Dublin', 'end', '+353 1 8834906', '<EMAIL>', 'y.com', 0.78656, -0.1563]]
fetch_service.read_health_centers = MagicMock(
return_value=expectedresult)
fetch_service.store_health_centers()
health_service_loc = fetch_service.fetch_health_center_informations()
assert health_service_loc[0]["center_name"] == "Balbriggan Health Centre"
assert health_service_loc[0]["center_address"] == "Hampton Street,Balbriggan,Co. Dublinend"
assert health_service_loc[0]["center_phone"] == "+353 1 8834906"
self.assertAlmostEqual(
health_service_loc[0]["center_lat"], 0.786, None, None, 0.002)
self.assertAlmostEqual(
health_service_loc[0]["center_lon"], -0.156, None, None, 0.002)
# Tests for the garda station functions against the database.
def test_read_garda_stations(self):
read_services = StoreServiceData()
assert read_services.read_garda_stations()[0] == ['Name', 'Address1', 'Address2', 'Address3', 'Phone', 'Website', 'Division',
'Divisional_HQ', 'Divisional_HQ_Phone', 'District', 'District_HQ', 'District_HQ_Phone', 'Opening_Hours', 'LAT', 'LONG']
assert read_services.read_garda_stations(
)[1][0] == "Balbriggan Garda Station"
def test_store_garda_stations(self):
store_service = StoreServiceData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ['Balbriggan Garda Station', 'Drogheda Road,', 'Balbriggan,', 'Co. Dublin,', '+353 1 8020510', ' http://www.garda.ie/Stations/Default.aspx',
'Dublin Metropolitan Region Northern Division', 'Ballymun', '+353 1 6664493', 'Balbriggan', 'Balbriggan', '+353 1 8020510', 'Open 24hrs', 0.78656, -0.1563]]
store_service.read_garda_stations = MagicMock(
return_value=expectedresult)
store_service.store_garda_stations()
fetch_garda_station = GardaStations.objects(
station="Balbriggan Garda Station").first()
assert fetch_garda_station["station_address"] == "Drogheda Road,Balbriggan,Co. Dublin,"
assert fetch_garda_station["station_division"] == "Dublin Metropolitan Region Northern Division"
assert fetch_garda_station["station_divisional_hq"] == "Ballymun"
assert fetch_garda_station["station_phone"] == "+353 1 8020510"
self.assertAlmostEqual(
fetch_garda_station["station_lat"], Decimal(0.786), None, None, 0.001)
self.assertAlmostEqual(
fetch_garda_station["station_lon"], Decimal(-0.156), None, None, 0.001)
def test_fetch_garda_station_informations(self):
fetch_service_garda = StoreServiceData()
expectedresult = [[], ['Balbriggan Garda Station', 'Drogheda Road,', 'Balbriggan,', 'Co. Dublin,', '+353 1 8020510', ' http://www.garda.ie/Stations/Default.aspx',
'Dublin Metropolitan Region Northern Division', 'Ballymun', '+353 1 6664493', 'Balbriggan', 'Balbriggan', '+353 1 8020510', 'Open 24hrs', 0.78656, -0.1563]]
fetch_service_garda.read_garda_stations = MagicMock(
return_value=expectedresult)
fetch_service_garda.store_garda_stations()
garda_service_loc = fetch_service_garda.fetch_garda_station_informations()
assert garda_service_loc[0]["station_address"] == "Drogheda Road,Balbriggan,Co. Dublin,"
assert garda_service_loc[0]["station_division"] == "Dublin Metropolitan Region Northern Division"
assert garda_service_loc[0]["station_divisional_hq"] == "Ballymun"
assert garda_service_loc[0]["station_phone"] == "+353 1 8020510"
self.assertAlmostEqual(
garda_service_loc[0]["station_lat"], 0.786, None, None, 0.002)
self.assertAlmostEqual(
garda_service_loc[0]["station_lon"], -0.156, None, None, 0.002)
# Tests for the hospital functions against the database.
def test_read_hospitals(self):
read_services = StoreServiceData()
assert read_services.read_hospitals()[0] == [
'name', 'address', 'eircode', 'x', 'y']
assert read_services.read_hospitals(
)[1][0] == "Midland Regional Hospital Portlaoise"
def test_store_hospitals(self):
store_service = StoreServiceData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Midland Regional Hospital Portlaoise",
"Dublin Road, Portlaoise, Co. Laois, ", "yyy", 0.78656, -0.1563]]
store_service.read_hospitals = MagicMock(
return_value=expectedresult)
store_service.store_hospitals()
fetch_hospital_service = Hospitals.objects(
center_name="Midland Regional Hospital Portlaoise").first()
assert fetch_hospital_service["center_address"] == "Dublin Road, Portlaoise, Co. Laois, "
self.assertAlmostEqual(
fetch_hospital_service["center_lat"], Decimal(0.786), None, None, 0.001)
self.assertAlmostEqual(
fetch_hospital_service["center_lon"], Decimal(-0.156), None, None, 0.001)
def test_fetch_hospital_informations(self):
fetch_service = StoreServiceData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Midland Regional Hospital Portlaoise",
"Dublin Road, Portlaoise, Co. Laois, ", "yyy", 0.78656, -0.1563]]
fetch_service.read_hospitals = MagicMock(
return_value=expectedresult)
fetch_service.store_hospitals()
hospital_service_loc = fetch_service.fetch_hospital_informations()
assert hospital_service_loc[0]["center_name"] == "Midland Regional Hospital Portlaoise"
assert hospital_service_loc[0]["center_address"] == "Dublin Road, Portlaoise, Co. Laois, "
self.assertAlmostEqual(
hospital_service_loc[0]["center_lat"], 0.786, None, None, 0.002)
self.assertAlmostEqual(
hospital_service_loc[0]["center_lon"], -0.156, None, None, 0.002)
```
#### File: Emergency_Service_API/views_emergency_service_api/test_show_emergency_service_data.py
```python
from main_project.Emergency_Service_API.views_emergency_service_api.show_emergency_service_data import FireStations, HealthCenters, GardaStations, Hospitals
from django.test import TestCase
from unittest.mock import MagicMock
from rest_framework.request import Request
from django.http import HttpRequest
from main_project.Emergency_Service_API.fetch_emergency_service import FetchEmergencyServiceApi
import json
class TestEmergencyService(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_show_fire_service_data(self):
show_fire_service_data = FireStations()
request = HttpRequest()
request.method = 'GET'
request_wrapper = Request(request)
fetch_fire_service_api = FetchEmergencyServiceApi()
expected_result = {"test": "test_value"}
fetch_fire_service_api.fire_stations_data=MagicMock(return_value=expected_result)
response=show_fire_service_data.get(
request_wrapper, fetch_fire_service_api)
fetch_fire_service_api.fire_stations_data.assert_called_with()
assert response.status_code == 200
content=json.loads(response.content)
assert 'DATA' in content
data=content['DATA']
assert 'RESULT' in data
assert data['RESULT'] == expected_result
def test_show_garda_stations(self):
show_garda_stations=GardaStations()
request=HttpRequest()
request.method='GET'
request_wrapper=Request(request)
fetch_garda_stations_api=FetchEmergencyServiceApi()
expected_result={"test": "test_value"}
fetch_garda_stations_api.garda_stations_data=MagicMock(
return_value=expected_result)
response=show_garda_stations.get(
request_wrapper, fetch_garda_stations_api)
fetch_garda_stations_api.garda_stations_data.assert_called_with()
assert response.status_code == 200
content=json.loads(response.content)
assert 'DATA' in content
data=content['DATA']
assert 'RESULT' in data
assert data['RESULT'] == expected_result
def test_show_hospital_centers(self):
show_hospitals=Hospitals()
request=HttpRequest()
request.method='GET'
request_wrapper=Request(request)
fetch_hospital_api=FetchEmergencyServiceApi()
expected_result={"test": "test_value"}
fetch_hospital_api.hospitals_data=MagicMock(
return_value=expected_result)
response=show_hospitals.get(
request_wrapper, fetch_hospital_api)
fetch_hospital_api.hospitals_data.assert_called_with()
assert response.status_code == 200
content=json.loads(response.content)
assert 'DATA' in content
data=content['DATA']
assert 'RESULT' in data
assert data['RESULT'] == expected_result
```
#### File: tests/Parkings_API/test_fetch_parkingsapi.py
```python
from main_project.Parkings_API.fetch_parkingsapi import FetchParkingsApi
from main_project.Parkings_API.store_parkingsdata_to_database import StoreParkingsData
from main_project.Parkings_API.parkings_collections_db import ParkingAvailability, ParkingsAvailability
from django.test import TestCase
from unittest.mock import MagicMock
from mock import patch
import json
from datetime import datetime
import mongomock as mm
from mongoengine import get_connection
from freezegun import freeze_time
from freezegun.api import FakeDatetime
@freeze_time("2021-03-11 17")
class TestFetchParkingsApi(TestCase):
@classmethod
def setUpTestData(cls):
pass
def tearDown(self):
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
ParkingsAvailability.objects().delete()
def test_parkings_availability(self):
fetch_parkings_availability = FetchParkingsApi()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
ParkingsAvailability(updateTimestamp='2021-03-14 09:34:13').save()
ParkingsAvailability(updateTimestamp='2021-03-13 08:34:13').save()
Parkings_1 = ParkingsAvailability.objects(
updateTimestamp='2021-03-14 09:34:13').first()
Parkings_2 = ParkingsAvailability.objects(
updateTimestamp='2021-03-13 08:34:13').first()
Parkings_1.parkings.append(
ParkingAvailability(
area="NORTHWEST",
name="PARNELL",
availableSpaces=50)
)
Parkings_1.save()
Parkings_2.parkings.append(
ParkingAvailability(
area="NORTHWEST",
name="PARNELL",
availableSpaces=60)
)
Parkings_2.save()
startdate = "2021-03-12"
enddate = "2021-03-15"
mocked_result = [{'_id': {'$oid': None}, 'updateTimestamp': {'$date': FakeDatetime(2021, 3, 13, 0, 0)}, 'parkings': {'PARNELL': 60}}, {
'_id': {'$oid': None}, 'updateTimestamp': {'$date': FakeDatetime(2021, 3, 14, 0, 0)}, 'parkings': {'PARNELL': 50}}]
result = fetch_parkings_availability.parkings_availability(
startdate, enddate)
assert result == mocked_result
```
#### File: tests/Parkings_Recreational_Places_API/test_store_recreational_locations_in_db.py
```python
import pandas as pd
from datetime import datetime, timedelta, date
from django.test import TestCase
from unittest.mock import MagicMock
from main_project.Parkings_Recreational_Places_API.store_recreational_locations_in_db import StoreRecreationalPlacesParkingsData
from main_project.Parkings_Recreational_Places_API.recreational_places_parkings_collections_db import Parks, Beaches, PlayingPitches, Cinemas
from mongoengine import *
import mongomock as mm
from decimal import Decimal
class TestStoreRecreationalPlacesData(TestCase):
@classmethod
def setUpTestData(cls):
pass
def test_read_beaches_locations(self):
read_beaches_locations = StoreRecreationalPlacesParkingsData()
assert read_beaches_locations.read_beaches_locations()[0] == [
'ID', 'NAME', 'LAT', 'LONG']
assert read_beaches_locations.read_beaches_locations()[1][1] == "Loughshinny"
def test_store_beaches_locations(self):
store_beaches_loc = StoreRecreationalPlacesParkingsData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], [35, "Dublin Beach", 0.23656, -0.1963]]
store_beaches_loc.read_beaches_locations = MagicMock(
return_value=expectedresult)
store_beaches_loc.store_beaches_locations()
fetch_beaches_locations = Beaches.objects(
beach_name="Dublin Beach").first()
assert fetch_beaches_locations["beach_name"] == "Dublin Beach"
assert fetch_beaches_locations["beach_id"] == 35
self.assertAlmostEqual(
fetch_beaches_locations["beach_lat"], Decimal(0.236), None, None, 0.005)
self.assertAlmostEqual(
fetch_beaches_locations["beach_lon"], Decimal(-0.196), None, None, 0.005)
def test_fetch_beaches_location(self):
fetch_beaches_loc = StoreRecreationalPlacesParkingsData()
expectedresult = [[], [35, "Dublin Beach", 0.23656, -0.1963]]
fetch_beaches_loc.read_beaches_locations = MagicMock(
return_value=expectedresult)
fetch_beaches_loc.store_beaches_locations()
beaches_loc = fetch_beaches_loc.fetch_beaches_location()
assert beaches_loc[0]["beach_name"] == "<NAME>"
assert beaches_loc[0]["beach_id"] == 35
self.assertAlmostEqual(
beaches_loc[0]["beach_lat"], 0.236, None, None, 0.005)
self.assertAlmostEqual(
beaches_loc[0]["beach_lon"], -0.196, None, None, 0.005)
def test_read_playing_pitches_locations(self):
read_playing_pitches = StoreRecreationalPlacesParkingsData()
assert read_playing_pitches.read_playing_pitches_locations()[0] == [
'FACILITY_TYPE', 'FACILITY_NAME','LOCATION', 'LAT', 'LONG']
assert read_playing_pitches.read_playing_pitches_locations()[1][1] == "Balbriggan Town Park"
def test_store_playing_pitches(self):
store_playing_pitches_loc = StoreRecreationalPlacesParkingsData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Basketball Court", "Facility_BB","Dublin",0.23656, -0.1963]]
store_playing_pitches_loc.read_playing_pitches_locations = MagicMock(
return_value=expectedresult)
store_playing_pitches_loc.store_playing_pitches_locations()
fetch_playing_pitches_locations = PlayingPitches.objects(
facility_name="Facility_BB").first()
assert fetch_playing_pitches_locations["facility_name"] == "Facility_BB"
assert fetch_playing_pitches_locations["facility_type"] == "Basketball Court"
assert fetch_playing_pitches_locations["facility_location"] == "Dublin"
self.assertAlmostEqual(
fetch_playing_pitches_locations["facility_lat"], Decimal(0.236), None, None, 0.005)
self.assertAlmostEqual(
fetch_playing_pitches_locations["facility_lon"], Decimal(-0.196), None, None, 0.005)
def test_fetch_playing_pitches_location(self):
fetch_playing_pitches_loc = StoreRecreationalPlacesParkingsData()
expectedresult = [[], ["Basketball Court", "Facility_BB","Dublin",0.23656, -0.1963]]
fetch_playing_pitches_loc.read_playing_pitches_locations = MagicMock(
return_value=expectedresult)
fetch_playing_pitches_loc.store_playing_pitches_locations()
playing_pitches_loc = fetch_playing_pitches_loc.fetch_playing_pitches_location()
assert playing_pitches_loc[0]["facility_name"] == "Facility_BB"
assert playing_pitches_loc[0]["facility_type"] == "Basketball Court"
assert playing_pitches_loc[0]["facility_location"] == "Dublin"
self.assertAlmostEqual(
playing_pitches_loc[0]["facility_lat"], 0.236, None, None, 0.005)
self.assertAlmostEqual(
playing_pitches_loc[0]["facility_lon"], -0.196, None, None, 0.005)
def test_read_parks_locations(self):
read_parks = StoreRecreationalPlacesParkingsData()
assert read_parks.read_parks_locations()[0] == [
'Name', 'Address','Area', 'LAT', 'LONG']
assert read_parks.read_parks_locations()[1][0] == "<NAME>"
def test_store_parks(self):
store_parks_loc = StoreRecreationalPlacesParkingsData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Dublin Park", "Co. Dublin",770262.7877,0.23656, -0.1963]]
store_parks_loc.read_parks_locations = MagicMock(return_value=expectedresult)
store_parks_loc.store_parks_locations()
fetch_parks_locations = Parks.objects(park_name="Dublin Park").first()
assert fetch_parks_locations["park_name"] == "Dublin Park"
assert fetch_parks_locations["park_address"] == "Co. Dublin"
self.assertAlmostEqual(
fetch_parks_locations["park_area"], Decimal(770262.787), None, None, 0.005)
self.assertAlmostEqual(
fetch_parks_locations["park_lat"], Decimal(0.236), None, None, 0.005)
self.assertAlmostEqual(
fetch_parks_locations["park_lon"], Decimal(-0.196), None, None, 0.005)
def test_fetch_parks_location(self):
fetch_parks_loc = StoreRecreationalPlacesParkingsData()
expectedresult = [[], ["Dublin Park", "Co. Dublin",770262.7877,0.23656, -0.1963]]
fetch_parks_loc.read_parks_locations = MagicMock(
return_value=expectedresult)
fetch_parks_loc.store_parks_locations()
parks_loc = fetch_parks_loc.fetch_parks_location()
assert parks_loc[0]["park_name"] == "Dublin Park"
assert parks_loc[0]["park_address"] == "Co. Dublin"
self.assertAlmostEqual(
parks_loc[0]["park_area"], 770262.787, None, None, 0.005)
self.assertAlmostEqual(
parks_loc[0]["park_lat"], 0.236, None, None, 0.005)
self.assertAlmostEqual(
parks_loc[0]["park_lon"], -0.196, None, None, 0.005)
def test_read_cinemas_locations(self):
read_cinemas_locations = StoreRecreationalPlacesParkingsData()
assert read_cinemas_locations.read_cinemas_locations()[0] == [
'Name', 'Address', 'LAT', 'LONG']
assert read_cinemas_locations.read_cinemas_locations()[1][0] == "Quayside Cinema"
def test_store_cinemas_locations(self):
store_cinemas_loc = StoreRecreationalPlacesParkingsData()
conn = get_connection()
self.assertTrue(isinstance(conn, mm.MongoClient))
expectedresult = [[], ["Dublin Cinema", "Dublin", 0.23656, -0.1963]]
store_cinemas_loc.read_cinemas_locations = MagicMock(
return_value=expectedresult)
store_cinemas_loc.store_cinemas_locations()
fetch_cinemas_locations = Cinemas.objects(
cinema_name="<NAME>").first()
assert fetch_cinemas_locations["cinema_name"] == "<NAME>"
assert fetch_cinemas_locations["cinema_address"] == "Dublin"
self.assertAlmostEqual(
fetch_cinemas_locations["cinema_lat"], Decimal(0.236), None, None, 0.005)
self.assertAlmostEqual(
fetch_cinemas_locations["cinema_lon"], Decimal(-0.196), None, None, 0.005)
def test_fetch_cinemas_location(self):
fetch_cinemas_loc = StoreRecreationalPlacesParkingsData()
expectedresult = [[], ["Dublin Cinema", "Dublin", 0.23656, -0.1963]]
fetch_cinemas_loc.read_cinemas_locations = MagicMock(
return_value=expectedresult)
fetch_cinemas_loc.store_cinemas_locations()
cinemas_loc = fetch_cinemas_loc.fetch_cinemas_location()
assert cinemas_loc[0]["cinema_name"] == "<NAME>"
assert cinemas_loc[0]["cinema_address"] == "Dublin"
self.assertAlmostEqual(
cinemas_loc[0]["cinema_lat"], 0.236, None, None, 0.005)
self.assertAlmostEqual(
cinemas_loc[0]["cinema_lon"], -0.196, None, None, 0.005)
``` |
{
"source": "joshriess/InfraBot",
"score": 2
} |
#### File: InfraBot/app/LabManager.py
```python
import InfraBot
from InfraModule import InfraModule
import Database
from datetime import datetime, timedelta
class LabManager(InfraModule):
def __init__ (self):
self.workspaces = {}
#default menu options
menu_options = {
"options": [
{
"text": "Chess",
"value": "chess"
},
{
"text": "Global Thermonuclear War",
"value": "war"
}
]
}
super().__init__("lab", menu_options)
queries = Database.Status.query.all()
for workspace in queries:
dbWorkspace = Database.Workspaces.query.filter_by(id = workspace.workspace).first()
if dbWorkspace is None:
print("Workspace does not exist in database")
else:
self.workspaces[dbWorkspace.team_id] = workspace.workspace
''' Module entry point for initial commands
Input:
message: Command for the module (minus the '!lab')
channel: Channel the command originated in
user: UserID that issued the command
team_id: TeamID the command is from
Output:
String to be logged by the InfraBot core
'''
def api_entry(self, message, channel, user, team_id):
if message is "":
# Start menu to select hint to give
if not InfraBot.checkPermission(user, "user", team_id):
InfraBot.sendEphemeral("Permission Denied", channel, user,team_id)
return "!lab - Permission Denied: User " + user
message_attachments = [
{
"text": "Lab Menu",
"fallback": "If you could read this message, you'd be choosing something fun to do right now.",
"color": "#3AA3E3",
"attachment_type": "default",
"callback_id": "lab",
"actions": [
{
"name": "initial_menu",
"text": "Select an option...",
"type": "select",
"options": [
{
"text": "List",
"value": "list"
},
{
"text": "Hint",
"value": "hint"
},
{
"text": "Submit",
"value": "submit"
}
]
}
]
}
]
if InfraBot.checkDM(channel, team_id):
InfraBot.sendMessage("", channel, team_id, attachments_send=message_attachments)
else:
InfraBot.sendEphemeral("", channel, user, team_id, attachments_send=message_attachments)
return "Initial Lab"
elif message.startswith("hint reset "):
# Handle command to reset the hint lockout for a user
if not InfraBot.checkPermission(user, "admin", team_id):
InfraBot.sendEphemeral("Permission Denied", channel, user,team_id)
return "!lab hint reset - Permission Denied: User " + user
remainder = message[len("hint reset "):]
curUser = Database.Users.query.filter_by(user_id=remainder[2:-1]).first()
curUser.last_hint = None
Database.db.session.commit()
InfraBot.sendEphemeral("Reset hint timer for " + InfraBot.getUserName(remainder[2:-1],team_id), channel, user, team_id)
return "Reset hint timer for " + InfraBot.getUserName(remainder[2:-1],team_id)
elif message.startswith("set timeout "):
# Set the workspace hint lockout
if not InfraBot.checkPermission(user, "owner", team_id):
InfraBot.sendEphemeral("Permission Denied", channel, user,team_id)
return "!lab hint reset - Permission Denied: User " + user
# Retrieve the number of minutes for the hint timeout from the command
remainder = message[len("set timeout "):]
try:
newTimeout = int(remainder)
except:
self.send_error("<number> must be an integer!", channel, user, team_id)
return "!lab set timeout - Number not integer"
curWorkspace = Database.Workspaces.query.filter_by(team_id = team_id).first()
if curWorkspace is None:
print("Workspace not found")
return "Workspace not found"
# Database stores hint_timeout in seconds, command input is in minutes
curWorkspace.hint_timeout = newTimeout*60
Database.db.session.commit()
InfraBot.sendEphemeral("Set timeout to " + str(newTimeout) + " minutes", channel, user, team_id)
return "Set workspace timeout for workspace " + team_id + " to " + str(newTimeout) + "minutes"
else:
self.send_error("Invalid Command", channel, user, team_id)
return "Command not found"
''' Module entry point for interactive action responses
Input:
form_data: Payload of the message sent as a result of
the interactive message
Output: N/A
'''
def action_entry(self, form_data):
channel = form_data['channel']['id']
user = form_data['user']['id']
team = form_data['team']['id']
if not team in self.workspaces:
if not self.add_workspace_id(team):
print("Workspace does not exist")
return "Workspace " + team + " does not exist"
for action in form_data["actions"]:
# Separate true action name from previously selected option
splitArr = action['name'].split(":")
name = splitArr[0]
if len(splitArr) > 1:
data = splitArr[1]
else:
data = None
# Determine if user wants list, hint, or to submit
if name == "initial_menu":
if action['selected_options'][0]['value'] == "list":
message_text,attachments = self.labs_list(user, channel, team, form_data)
elif action['selected_options'][0]['value'] == "hint":
# Check if the user is allowed to get a hint
curUser = Database.Users.query.filter_by(user_id=user).first()
curWorkspace = Database.Workspaces.query.filter_by(team_id = team).first()
curTime = datetime.now()
lastHint = curUser.last_hint
if not lastHint is None:
if curWorkspace is None:
print("Workspace is None")
InfraBot.deleteMessage(form_data['message_ts'], channel, team)
return""
timeFrame = timedelta(seconds=curWorkspace.hint_timeout)
if curTime < (lastHint + timeFrame):
response = "You must wait "
response += str((lastHint+timeFrame)-curTime)
response += " until your next hint"
InfraBot.deleteMessage(form_data['message_ts'], channel, team)
InfraBot.sendEphemeral(response, channel, user, team)
return ""
#Set time that user last got hint
curUser.last_hint = datetime.now()
Database.db.session.commit()
message_text,attachments = self.labs_hints_list(user, channel, team, form_data)
elif action['selected_options'][0]['value'] == "submit":
message_text,attachments = self.labs_submit(user, channel, team, form_data)
elif name == "list":
message_text,attachments = self.labs_hints_categories(user, channel, team, form_data)
elif name == "categories":
message_text,attachments = self.labs_hint_selection(user,channel,team, form_data)
elif name == "hints":
message_text,attachments = self.labs_hint_dispense(user,channel,team,form_data)
else:
message_text = "Other"
attachments = None
if InfraBot.checkDM(channel, team):
InfraBot.sendMessage(message_text, channel, team, attachments_send=attachments)
else:
InfraBot.sendEphemeral(message_text, channel, user, team, attachments_send=attachments)
''' Module entry point for dynamic options for interactive actions
Input:
form_data: Payload of the message sent as a result of
the interactive message
Output:
Object that when json.dumped contains the properly formatted
options for the specified interactive messages
'''
def option_entry(self, form_data):
# Separate true action name from previously selected option
splitArr = form_data['name'].split(":")
name = splitArr[0]
if len(splitArr) > 1:
data = splitArr[1]
else:
data = None
team = form_data['team']['id']
if name == "list":
# List possible labs as options
first = True
newOptions = {}
newOptions['options'] = []
results = Database.Labs.query.filter_by(workspace_id = self.workspaces[team]).all()
if results is None:
print("Error: No Labs Found")
return
else:
for result in results:
newOption = {}
newOption['text'] = result.name
newOption['value'] = result.id
newOptions['options'].append(newOption)
return newOptions
elif name == "categories":
# List possible categories from the selected lab
categories = Database.HintCategories.query.filter_by(lab_id=data).all()
newOptions = {}
newOptions['options'] = []
if categories is None:
print("Error: No Labs Found")
return
else:
for category in categories:
newOption = {}
newOption['text'] = category.name
newOption['value'] = category.id
newOptions['options'].append(newOption)
return newOptions
elif name == "hints":
# List hint numbers from the selected category
hints = Database.Hints.query.filter_by(category=data).all()
newOptions = {}
newOptions['options'] = []
if hints is None:
print("Error: No Hints Found")
return
else:
for hint in hints:
newOption = {}
newOption['text'] = "Hint #" + str(hint.seq_num)
newOption['value'] = hint.id
newOptions['options'].append(newOption)
return newOptions
return self.options
''' Function that generates a list of labs and their URLs
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and any attachments to send.
The message will be a newline separated list of labs
and the attachments will be None
'''
def labs_list(self, user, channel, team, form):
resultString = ""
results = Database.Labs.query.filter_by(workspace_id = self.workspaces[team]).all()
if results is None:
resultString = "Error: No Labs Found"
else:
for result in results:
resultString += result.name + " - " + result.url + "\n"
InfraBot.deleteMessage(form['message_ts'], channel, team)
return resultString,None
''' Function that creates the needed attachment to ask the user
which lab they would like a hint for
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and attachments to send. The
message will be empty string and the attachments will
be the json formatted attachment with the proper action
name ("list") to continue the workflow in the action_entry
function.
'''
def labs_hints_list(self, user, channel, team, form):
InfraBot.deleteMessage(form['message_ts'], channel, team)
message_attachments = [
{
"text": "For which lab would you like a hint?",
"fallback": "If you could read this message, you'd be choosing something fun to do right now.",
"color": "#3AA3E3",
"attachment_type": "default",
"callback_id": "lab",
"actions": [
{
"name": "list",
"text": "Select a Lab",
"type": "select",
"data_source": "external",
}
]
}
]
return "",message_attachments
''' Function that creates the needed attachment to ask the user
which category of hint they want from their selected lab
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and attachments to send. The
message will be empty string and the attachments will
be the json formatted attachment with the proper action
name ("categories:<value>") to continue the workflow in
the action_entry function. The <value> indicates the lab
the user selected in the previous dialog.
'''
def labs_hints_categories(self, user, channel, team, form):
InfraBot.deleteMessage(form['message_ts'], channel, team)
tempVal = form['actions'][0]['selected_options'][0]['value']
message_attachments = [
{
"text": "Select a Category from Lab",
"fallback": "If you could read this message, you'd be choosing something fun to do right now.",
"color": "#3AA3E3",
"attachment_type": "default",
"callback_id": "lab",
"actions": [
{
"name": "categories:"+tempVal,
"text": "Select Category",
"type": "select",
"data_source": "external",
}
],
"value":"1"
}
]
return "",message_attachments
''' Function that creates the needed attachment to ask the user
which hint number they want from their selected lab and
category.
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and attachments to send. The
message will be empty string and the attachments will
be the json formatted attachment with the proper action
name ("hints:<value>") to continue the workflow in
the action_entry function. The <value> indicates the
category the user selected in the previous dialog.
'''
def labs_hint_selection(self, user, channel, team, form):
InfraBot.deleteMessage(form['message_ts'], channel, team)
tempVal = form['actions'][0]['selected_options'][0]['value']
message_attachments = [
{
"text": "Select a hint number",
"fallback": "If you could read this message, you'd be choosing something fun to do right now.",
"color": "#3AA3E3",
"attachment_type": "default",
"callback_id": "lab",
"actions": [
{
"name": "hints:"+tempVal,
"text": "Select Hint Number",
"type": "select",
"data_source": "external",
}
],
"value":"1"
}
]
return "",message_attachments
''' Function that fetches the requested hint for the lab and
category selected by the user in the previous messages.
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and attachments to send. The
message will contain the text of the hint selected by the
user throughout the previous workflow. The attachments will
be None.
'''
def labs_hint_dispense(self, user, channel, team, form):
InfraBot.deleteMessage(form['message_ts'], channel, team)
tempVal = form['actions'][0]['selected_options'][0]['value']
hint = Database.Hints.query.filter_by(id=tempVal).first()
lab = Database.Labs.query.filter_by(id=hint.lab_id).first()
category = Database.HintCategories.query.filter_by(id=hint.category).first()
message = lab.name+" "
message += category.name+" "
message += "#" + str(hint.seq_num) + " - " + hint.hint
return message,None
''' Placeholder function for the start of the lab submission workflow,
to be implemented at a later date.
Input:
user: User that requested the lab listing
channel: Channel to post the listing in
team: Team whose labs to query
form: The entirety of the form data from the request
Output:
A tuple with the message and attachments to send. The
message will contain a prompt indicating that submissions
are not yet supported and the attachments will be None.
'''
def labs_submit(self, user, channel, team, form):
InfraBot.deleteMessage(form['message_ts'], channel, team)
return "Lab submissions not yet implemented",None
''' Function that sends an error/help prompt to the user
Input:
message: Error message to send to the user, omitted if None
channel: Destination channel of the error message
user: Recipient of the error message
team_id: Team in which to send the message
Output:
N/A
'''
def send_error(self, message, channel, user, team_id):
messageString = ""
if not message is None:
messageString += message +"\n\n"
messageString += "Lab Help:\n"
messageString += "\t!lab - Open the interactive lab menu\n"
messageString += "\t!lab hint reset <user> - Reset the hint timer for the given user (requires admin privileges)\n"
messageString += "\t!lab set timeout <number> - Sets the workspace hint timeout to <number> minutes"
messageString += "\t!lab help - Prints this help prompt\n"
InfraBot.sendEphemeral(messageString, channel, user, team_id)
```
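Several of the interactive-message builders above smuggle the previously selected value through the Slack action name using a `name:value` convention (e.g. `"categories:"+tempVal`), which `action_entry` and `option_entry` later split apart. A standalone sketch of that round trip, with hypothetical helper names:

```python
# Minimal sketch of the "<action>:<carried value>" naming convention used above.
def pack_action(name, carried_value=None):
    return name if carried_value is None else "{}:{}".format(name, carried_value)

def unpack_action(action_name):
    parts = action_name.split(":", 1)
    return parts[0], (parts[1] if len(parts) > 1 else None)

assert pack_action("categories", 42) == "categories:42"
assert unpack_action("categories:42") == ("categories", "42")
assert unpack_action("initial_menu") == ("initial_menu", None)
```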
#### File: InfraBot/app/Updater.py
```python
import threading
from time import sleep
import InfraBot
from InfraModule import InfraModule
import Database
db = InfraBot.db
class Updater(InfraModule):
def __init__(self):
super().__init__("update", None)
def api_entry(self, message, channel, user, team_id):
if message.startswith("in "):
remainder = message[len("in "):]
splitNum = remainder.split(' ', 1)
number = int(splitNum[0])
self.__inReminder = Updater_InThread(splitNum[1], number, channel, team_id)
self.__inReminder.start()
#Schedule reminder in X minutes
InfraBot.sendEphemeral("Update Scheduled", channel, user, team_id)
return "Reminder Scheduled"
elif message.startswith("every "):
#Schedule reminder every X minutes
InfraBot.sendEphemeral("Command Not Found: 'every'", channel, user, team_id)
return "Command not yet found"
elif message.startswith("for "):
#Schedule reminder for N time every day
InfraBot.sendEphemeral("Command Not Found: 'for'", channel, user, team_id)
return "Command not yet found"
elif message == "list":
#List all recurring tasks
InfraBot.sendEphemeral("Command Not Found: 'list'", channel, user, team_id)
return "Command not yet found"
elif message.startswith("stop "):
#Stop reminder with the given ID
InfraBot.sendEphemeral("Command Not Found: 'list'", channel, user, team_id)
return "Command not yet found"
else:
InfraBot.sendEphemeral("Command Not Found", channel, user, team_id)
return "Command not found"
''' Updater to handle a single update in the future '''
class Updater_InThread(threading.Thread):
''' Function to initialize the thread with the time to wait and message information '''
def __init__(self, message, time, channel, workspace):
threading.Thread.__init__(self)
self.waitTime = time
self.updateMessage = message
self.updateChannel = channel
self.updateWorkspace = workspace
''' Function that waits for the configured amount of time and then sends the configured
message on the given channel in the given workspace '''
def run(self):
sleep(self.waitTime * 60)
InfraBot.sendMessage(self.updateMessage, self.updateChannel, self.updateWorkspace)
``` |
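For reference, a hypothetical direct use of `Updater_InThread`, mirroring what `api_entry` does for a `!update in <minutes> <message>` command; the import path and the channel/workspace IDs below are assumptions.

```python
from Updater import Updater_InThread  # module path assumed from the app/ layout above

# Schedules a one-off reminder: sleeps 5 * 60 seconds, then posts via InfraBot.sendMessage.
reminder = Updater_InThread("Reminder text goes here", 5, "C0123ABCD", "T0456WXYZ")
reminder.start()
```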
{
"source": "joshringer/netflixgenres",
"score": 3
} |
#### File: joshringer/netflixgenres/recover.py
```python
import argparse
import logging
import re
import shelve
from genrescrape import LOG_FMT, Scraper
GENRE_REGEX = re.compile(r'\* (.*) \(\[#(\d*)\]\((.*)\)\)')
log = logging.getLogger(__name__)
def recover_cache(from_file):
"""Search from_file for genre entries, place back into genre cache."""
log.info('Recovering cache from %s', from_file)
count = 0
with shelve.open(Scraper.genre_cache_fn) as cache:
for line in from_file:
match = GENRE_REGEX.match(line)
if match:
log.debug('Found entry %r', match.groups())
title = match.group(1)
number = match.group(2)
url = match.group(3)
cache[number] = (title, url)
count += 1
return count
def main():
"""Entrypoint to recovercache script."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('filename')
arg_parser.add_argument('-v', action='count', default=0)
ns = arg_parser.parse_args()
log_level = logging.WARNING - 10 * ns.v
logging.basicConfig(level=log_level, **LOG_FMT)
with open(ns.filename) as infile:
recoveries = recover_cache(infile)
print('Recovered {} entries'.format(recoveries))
if __name__ == '__main__':
main()
``` |
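`GENRE_REGEX` targets the markdown bullet format the scraper writes out, roughly `* <title> ([#<number>](<url>))`. A quick demonstration with a made-up genre line:

```python
import re

GENRE_REGEX = re.compile(r'\* (.*) \(\[#(\d*)\]\((.*)\)\)')

# Illustrative line only; the title, number and URL are made up.
line = '* Action Movies ([#1365](https://www.netflix.com/browse/genre/1365))'
match = GENRE_REGEX.match(line)
print(match.groups())
# ('Action Movies', '1365', 'https://www.netflix.com/browse/genre/1365')
```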
{
"source": "joshringer/python-vendor",
"score": 2
} |
#### File: joshringer/python-vendor/setup.py
```python
import os.path
from setuptools import find_packages, setup
def read_package_meta():
"""Read metadata from __about__ file."""
filepath = os.path.join(os.path.dirname(__file__), 'vendor', '__about__.py')
data = {}
with open(filepath) as fp:
exec(fp.read(), {}, data)
return data
def read_readme():
"""Read readme file."""
filepath = os.path.join(os.path.dirname(__file__), 'README.rst')
with open(filepath) as fp:
return fp.read()
if __name__ == '__main__':
meta = read_package_meta()
setup(
name='vendor',
version=meta['__version__'],
description=__doc__,
long_description=read_readme(),
long_description_content_type='text/x-rst; charset=utf8',
url=meta['__url__'],
author=meta['__author__'],
author_email=meta['__author_email__'],
license=meta['__license__'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
],
keywords='aws lambda vendor build binary package wheel',
packages=find_packages(),
install_requires=[
'boto3>=1.4.4',
'packaging',
'requests',
'six',
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
package_data={
'vendor': ['aws/*.yml', 'aws/vendor/*.py', 'aws/vendor/build.sh'],
},
zip_safe=False,
)
``` |
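`read_package_meta` simply `exec()`s `vendor/__about__.py` and pulls the dunder names referenced in the `setup()` call, so that file only needs a handful of assignments. An illustrative skeleton (placeholder values, not the project's real metadata):
```python
# Illustrative vendor/__about__.py -- placeholder values only
__version__ = '0.0.0'
__url__ = 'https://example.invalid/python-vendor'
__author__ = 'Example Author'
__author_email__ = 'author@example.invalid'
__license__ = 'Public Domain'
```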
{
"source": "joshrl-clearme/mdl-ref-apps",
"score": 3
} |
#### File: mdl-ref-apps/mdl-ref-server/test.py
```python
import cbor
import hashlib
import tornado.testing
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
import server
import util
class coseTest(unittest.TestCase):
def test_cose_key(self):
private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
public_key = private_key.public_key()
cose_key = util.to_cose_key(public_key)
extracted_public_key = util.from_cose_key(cose_key)
# Can't just compare the objects, we have to get the raw numbers/bytes
# and compare them
public_key_bytes = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo)
extracted_public_key_bytes = extracted_public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo)
self.assertEqual(public_key_bytes, extracted_public_key_bytes)
def test_cose_sign(self):
private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
public_key = private_key.public_key()
data = b"some bytes to sign"
sig = util.cose_sign1_sign(private_key, data)
self.assertEqual(data, util.cose_sign1_get_data(sig))
self.assertTrue(util.cose_sign1_verify(public_key, sig, data))
class certTest(unittest.TestCase):
def test_credential_key_cert(self):
credential_key_private = ec.generate_private_key(ec.SECP256R1(), default_backend())
credential_key_public = credential_key_private.public_key()
cert_chain = util.generate_x509_cert_for_credential_key(credential_key_private)
# Can't just compare the objects, we have to get the raw numbers/bytes
# and compare them
extracted_public_key = util.cert_chain_get_public_key(cert_chain)
credential_key_public_bytes = credential_key_public.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo)
extracted_public_key_bytes = extracted_public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo)
self.assertEqual(credential_key_public_bytes, extracted_public_key_bytes)
# TODO: check Android Attestation Extension stuff
def test_auth_key_cert(self):
credential_key_private = ec.generate_private_key(ec.SECP256R1(), default_backend())
credential_key_public = credential_key_private.public_key()
auth_key_private = ec.generate_private_key(ec.SECP256R1(), default_backend())
auth_key_public = auth_key_private.public_key()
pop_sha256 = b"123"
cert = util.generate_x509_cert_for_auth_key(auth_key_public, credential_key_private,
pop_sha256)
# TODO: check ProofOfBinding at OID
# TODO: check signature
class mdlServerTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
self.s = server.Server(":memory:")
util.setup_test_data(self.s.database)
return self.s.get_app()
def test_happy_path(self):
path = "/mdlServer"
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "StartProvisioning",
"provisioningCode": "1001",
# TODO: MCD
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"], "ReadyToProvisionMessage")
session_id = cbor_response["eSessionId"]
self.assertTrue(len(session_id) > 0)
# TODO: insert AdditionalInformationRequired steps here
response = self.fetch(method="POST", path=path, body=cbor.dumps({
"messageType": "com.android.identity_credential.StartProvisioning",
"eSessionId": session_id,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.ProvisioningResponse")
self.assertEqual(session_id, cbor_response["eSessionId"])
challenge = cbor_response["challenge"]
self.assertTrue(isinstance(challenge, bytes) and len(challenge) > 0)
doc_type = cbor_response["docType"]
self.assertEqual(doc_type, "org.iso.18013.5.1.mDL")
credential_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
# TODO: pass challenge and other Android-specific things to
# be included in the Android Attestation Extension
#
cert_chain = util.generate_x509_cert_for_credential_key(credential_key)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.SetCertificateChain",
"eSessionId": session_id,
"credentialKeyCertificateChain": cert_chain,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.DataToProvisionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
access_control_profiles = cbor_response["accessControlProfiles"]
self.assertTrue(access_control_profiles and isinstance(access_control_profiles, list))
name_spaces = cbor_response["nameSpaces"]
self.assertTrue(name_spaces and isinstance(name_spaces, dict))
# Build ProofOfProvisioning
#
proof_of_provisioning = cbor.dumps(["ProofOfProvisioning",
doc_type,
access_control_profiles,
name_spaces,
False])
pop_sha256 = hashlib.sha256(proof_of_provisioning).digest()
pop_signature = util.cose_sign1_sign(credential_key, proof_of_provisioning)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.SetProofOfProvisioning",
"eSessionId": session_id,
"proofOfProvisioningSignature": pop_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"], "EndSessionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
self.assertEqual("Success", cbor_response["reason"])
# --------------------------------------------------------------------------------
# Now get some auth keys. This is a new flow.
#
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.CertifyAuthKeys",
"credentialKey": util.to_cose_key(credential_key.public_key()),
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.CertifyAuthKeysProveOwnership")
session_id = cbor_response["eSessionId"]
self.assertTrue(len(session_id) > 0)
challenge = cbor_response["challenge"]
self.assertTrue(len(challenge) > 0)
# Identify ourselves
#
proof_of_ownership = cbor.dumps(["ProofOfOwnership",
doc_type,
challenge,
False])
poo_signature = util.cose_sign1_sign(credential_key, proof_of_ownership)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.CertifyAuthKeysProveOwnershipResponse",
"eSessionId": session_id,
"proofOfOwnershipSignature": poo_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.CertifyAuthKeysReady")
self.assertEqual(session_id, cbor_response["eSessionId"])
# Create some auth keys and send them
#
auth_key_certs = []
for n in range(3):
auth_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
cert = util.generate_x509_cert_for_auth_key(auth_key.public_key(), credential_key,
pop_sha256)
auth_key_certs.append(cert)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.CertifyAuthKeysSendCerts",
"eSessionId": session_id,
"authKeyCerts": auth_key_certs,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.CertifyAuthKeysResponse")
self.assertEqual(session_id, cbor_response["eSessionId"])
static_auth_datas = cbor_response["staticAuthDatas"]
# TODO: inspect |staticAuthDatas|
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "RequestEndSession",
"eSessionId": session_id,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"], "EndSessionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
self.assertEqual("Success", cbor_response["reason"])
# --------------------------------------------------------------------------------
# Check update. This is a new flow.
#
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredential",
"credentialKey": util.to_cose_key(credential_key.public_key()),
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.UpdateCredentialProveOwnership")
session_id = cbor_response["eSessionId"]
self.assertTrue(len(session_id) > 0)
challenge = cbor_response["challenge"]
self.assertTrue(len(challenge) > 0)
# Identify ourselves; no update is expected
#
proof_of_ownership = cbor.dumps(["ProofOfOwnership",
doc_type,
challenge,
False])
poo_signature = util.cose_sign1_sign(credential_key, proof_of_ownership)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredentialProveOwnershipResponse",
"eSessionId": session_id,
"proofOfOwnershipSignature": poo_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.UpdateCredentialResponse")
self.assertEqual(session_id, cbor_response["eSessionId"])
# self.assertEqual("no_update", cbor_response["updateCredentialResult"])
# --------------------------------------------------------------------------------
# Change the document data in the database.
# document_id 11 - Erika
util.update_document_test_data(self.s.database, 11)
# Check update. This is a new flow.
#
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredential",
"credentialKey": util.to_cose_key(credential_key.public_key()),
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.UpdateCredentialProveOwnership")
session_id = cbor_response["eSessionId"]
self.assertTrue(len(session_id) > 0)
challenge = cbor_response["challenge"]
self.assertTrue(len(challenge) > 0)
# Identify ourselves; an update is expected
#
proof_of_ownership = cbor.dumps(["ProofOfOwnership",
doc_type,
challenge,
False])
poo_signature = util.cose_sign1_sign(credential_key, proof_of_ownership)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredentialProveOwnershipResponse",
"eSessionId": session_id,
"proofOfOwnershipSignature": poo_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.UpdateCredentialResponse")
self.assertEqual(session_id, cbor_response["eSessionId"])
self.assertEqual("update", cbor_response["updateCredentialResult"])
# Get data to update (new provisioning)
#
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredentialGetDataToUpdate",
"eSessionId": session_id,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.UpdateCredentialDataToProvisionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
access_control_profiles = cbor_response["accessControlProfiles"]
self.assertTrue(access_control_profiles and isinstance(access_control_profiles, list))
name_spaces = cbor_response["nameSpaces"]
self.assertTrue(name_spaces and isinstance(name_spaces, dict))
# Build ProofOfProvisioning for updated data
#
proof_of_provisioning = cbor.dumps(["ProofOfProvisioning",
doc_type,
access_control_profiles,
name_spaces,
False])
pop_sha256 = hashlib.sha256(proof_of_provisioning).digest()
pop_signature = util.cose_sign1_sign(credential_key, proof_of_provisioning)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.UpdateCredentialSetProofOfProvisioning",
"eSessionId": session_id,
"proofOfProvisioningSignature": pop_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"], "EndSessionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
self.assertEqual("Success", cbor_response["reason"])
# --------------------------------------------------------------------------------
# Delete credential. This is a new flow.
#
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.DeleteCredential",
"credentialKey": util.to_cose_key(credential_key.public_key()),
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.DeleteCredentialProveOwnership")
session_id = cbor_response["eSessionId"]
self.assertTrue(len(session_id) > 0)
challenge = cbor_response["challenge"]
self.assertTrue(len(challenge) > 0)
# Identify ourselves before deletion
#
proof_of_ownership = cbor.dumps(["ProofOfOwnership",
doc_type,
challenge,
False])
poo_signature = util.cose_sign1_sign(credential_key, proof_of_ownership)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.DeleteCredentialProveOwnershipResponse",
"eSessionId": session_id,
"proofOfOwnershipSignature": poo_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"],
"com.android.identity_credential.DeleteCredentialReadyForDeletion")
self.assertEqual(session_id, cbor_response["eSessionId"])
challenge = cbor_response["challenge"]
self.assertTrue(len(challenge) > 0)
# Build ProofOfDeletion and send it
#
proof_of_deletion = cbor.dumps(["ProofOfDeletion",
doc_type,
challenge,
False])
pod_signature = util.cose_sign1_sign(credential_key, proof_of_deletion)
response = self.fetch(method="POST", path=path,
body=cbor.dumps({
"messageType": "com.android.identity_credential.DeleteCredentialDeleted",
"eSessionId": session_id,
"proofOfDeletionSignature": pod_signature,
}))
self.assertEqual(response.code, 200)
cbor_response = cbor.loads(response.body)
self.assertEqual(cbor_response["messageType"], "EndSessionMessage")
self.assertEqual(session_id, cbor_response["eSessionId"])
self.assertEqual("Success", cbor_response["reason"])
if __name__ == '__main__':
unittest.main()
``` |
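Every request and response in the test above is a CBOR-encoded map keyed by `messageType`. A stripped-down round trip with the same `cbor` package (made-up payload) looks like this:
```python
import cbor

# Encode a request the way the test client does, then decode it the way the
# server side would.
body = cbor.dumps({"messageType": "StartProvisioning", "provisioningCode": "1001"})
message = cbor.loads(body)
assert message["messageType"] == "StartProvisioning"
assert message["provisioningCode"] == "1001"
```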
{
"source": "joshromoff/tdprop",
"score": 3
} |
#### File: tdprop/sarsa/our_q_model.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import init
from backpack import backpack, extend
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Policy(nn.Module):
def __init__(self, obs_shape, num_actions, base_kwargs=None, extra_kwargs=None):
super(Policy, self).__init__()
self.use_backpack = extra_kwargs['use_backpack']
self.recurrent_hidden_state_size = 1
num_outputs = num_actions
hidden_size = 512
conv_init_ = lambda m: init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0), nn.init.calculate_gain('relu'))
lin_init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
self.model = nn.Sequential(conv_init_(nn.Conv2d(obs_shape[0], 32, 8, stride=4)), nn.ReLU(),
conv_init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
conv_init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
conv_init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU(),
lin_init_(nn.Linear(hidden_size, num_outputs)))
if self.use_backpack:
extend(self.model)
self.model.train()
def forward(self, inputs):
qs = self.model(inputs / 255.)
return qs
``` |
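A hypothetical instantiation, assuming the usual 84x84 stacked-frame Atari observations this conv stack is sized for (`use_backpack` is the only key the class reads from `extra_kwargs`):
```python
import torch

# 4 stacked 84x84 frames, 6 discrete actions; BackPACK extension disabled
policy = Policy(obs_shape=(4, 84, 84), num_actions=6,
                extra_kwargs={'use_backpack': False})
obs = torch.zeros(1, 4, 84, 84)   # dummy observation batch in the 0-255 range
q_values = policy(obs)
print(q_values.shape)             # torch.Size([1, 6])
```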
{
"source": "joshron/iauditor-TASNET",
"score": 2
} |
#### File: joshron/iauditor-TASNET/exporter.py
```python
import time
import sys
try:
from modules.exporters import export_audit_pdf_word, export_audit_json, export_audit_pandas, export_audit_csv, \
export_actions
from modules.global_variables import *
from modules.last_successful import get_last_successful, update_sync_marker_file
from modules.logger import configure_logger
from modules.media import check_if_media_sync_offset_satisfied, export_audit_media
from modules.other import show_preferences_and_exit
from modules.settings import parse_export_filename, parse_command_line_arguments, configure
from modules.sql import sql_setup
from modules.web_report_links import export_audit_web_report_link
except ImportError as e:
print(e)
print(
'The ModuleNotFoundError indicates that some packages required by the script have not been installed. \n The '
'error above will give details of whichever package was found to be missing first.\n Sometimes you need to '
'close and reopen your command window after install, so try that first.\n If you still get the error, '
'ensure you have run: pip install -r requirements.txt \n'
'If pip is not found, try pip3 install -r requirements.txt instead. \n'
'If you continue to see this error, please review this page of the documentation: '
'https://safetyculture.github.io/iauditor-exporter/script-setup/installing-packages/')
sys.exit()
def sync_exports(logger, settings, sc_client):
"""
Perform sync, exporting documents modified since last execution
:param logger: the logger
:param settings: Settings from command line and configuration file
:param sc_client: Instance of SDK object
"""
get_started = None
if settings[EXPORT_ARCHIVED] is not None:
archived_setting = settings[EXPORT_ARCHIVED]
else:
archived_setting = False
if settings[EXPORT_COMPLETED] is not None:
completed_setting = settings[EXPORT_COMPLETED]
else:
completed_setting = True
if 'actions-sql' in settings[EXPORT_FORMATS]:
get_started = sql_setup(logger, settings, 'actions')
export_actions(logger, settings, sc_client, get_started)
if 'actions' in settings[EXPORT_FORMATS]:
get_started = None
export_actions(logger, settings, sc_client, get_started)
if not bool(
set(settings[EXPORT_FORMATS]) & {'pdf', 'docx', 'csv', 'media', 'web-report-link', 'json', 'sql', 'pickle',
'doc_creation'}):
return
last_successful = get_last_successful(logger, settings[CONFIG_NAME])
if settings[TEMPLATE_IDS] is not None:
if settings[TEMPLATE_IDS].endswith('.txt'):
file = settings[TEMPLATE_IDS].strip()
f = open(file, "r")
ids_to_search = []
for id in f:
ids_to_search.append(id.strip())
elif len(settings[TEMPLATE_IDS]) != 1:
ids_to_search = settings[TEMPLATE_IDS].split(",")
else:
ids_to_search = [settings[TEMPLATE_IDS][0]]
list_of_audits = sc_client.discover_audits(modified_after=last_successful, template_id=ids_to_search,
completed=completed_setting, archived=archived_setting)
else:
list_of_audits = sc_client.discover_audits(modified_after=last_successful, completed=completed_setting,
archived=archived_setting)
if list_of_audits is not None:
logger.info(str(list_of_audits['total']) + ' audits discovered')
export_count = 1
export_total = list_of_audits['total']
get_started = 'ignored'
for export_format in settings[EXPORT_FORMATS]:
if export_format == 'sql':
get_started = sql_setup(logger, settings, 'audit')
elif export_format in ['pickle']:
get_started = ['complete', 'complete']
# if export_format == 'pickle' and os.path.isfile('{}.pkl'.format(settings[SQL_TABLE])):
# logger.error(
# 'The Pickle file already exists. Appending to Pickles isn\'t currently possible, please '
# 'remove {}.pkl and try again.'.format(
# settings[SQL_TABLE]))
# sys.exit(0)
for audit in list_of_audits['audits']:
logger.info('Processing audit (' + str(export_count) + '/' + str(export_total) + ')')
process_audit(logger, settings, sc_client, audit, get_started)
export_count += 1
def process_audit(logger, settings, sc_client, audit, get_started):
"""
Export audit in the format specified in settings. Formats include PDF, JSON, CSV, MS Word (docx), media, or
web report link.
:param logger: The logger
:param settings: Settings from command line and configuration file
:param sc_client: instance of safetypy.SafetyCulture class
:param audit: Audit JSON to be exported
"""
if not check_if_media_sync_offset_satisfied(logger, settings, audit):
return
audit_id = audit['audit_id']
logger.info('downloading ' + audit_id)
audit_json = sc_client.get_audit(audit_id)
template_id = audit_json['template_id']
preference_id = None
if settings[PREFERENCES] is not None and template_id in settings[PREFERENCES].keys():
preference_id = settings[PREFERENCES][template_id]
export_filename = parse_export_filename(audit_json, settings[FILENAME_ITEM_ID]) or audit_id
for export_format in settings[EXPORT_FORMATS]:
if export_format in ['pdf', 'docx']:
export_audit_pdf_word(logger, sc_client, settings, audit_id, preference_id, export_format, export_filename)
elif export_format == 'json':
export_audit_json(logger, settings, audit_json, export_filename)
elif export_format == 'csv':
export_audit_csv(settings, audit_json)
elif export_format in ['sql', 'pickle']:
if get_started[0] == 'complete':
export_audit_pandas(logger, settings, audit_json, get_started)
elif get_started[0] != 'complete':
logger.error('Something went wrong connecting to the database, please check your settings.')
sys.exit(1)
elif export_format == 'media':
export_audit_media(logger, sc_client, settings, audit_json, audit_id, export_filename)
elif export_format == 'web-report-link':
export_audit_web_report_link(logger, settings, sc_client, audit_json, audit_id, template_id)
logger.debug('setting last modified to ' + audit['modified_at'])
update_sync_marker_file(audit['modified_at'], settings[CONFIG_NAME])
def loop(logger, sc_client, settings):
"""
Loop sync until interrupted by user
:param logger: the logger
:param sc_client: instance of SafetyCulture SDK object
:param settings: dictionary containing config settings values
"""
sync_delay_in_seconds = settings[SYNC_DELAY_IN_SECONDS]
while True:
sync_exports(logger, settings, sc_client)
logger.info('Next check will be in ' + str(sync_delay_in_seconds) + ' seconds. Waiting...')
time.sleep(sync_delay_in_seconds)
def main():
try:
logger = configure_logger()
path_to_config_file, export_formats, preferences_to_list, loop_enabled, docker_enabled = parse_command_line_arguments(
logger)
sc_client, settings = configure(logger, path_to_config_file, export_formats, docker_enabled)
if preferences_to_list is not None:
show_preferences_and_exit(preferences_to_list, sc_client)
if loop_enabled:
loop(logger, sc_client, settings)
else:
sync_exports(logger, settings, sc_client)
logger.info('Completed sync process, exiting')
except KeyboardInterrupt:
print("Interrupted by user, exiting.")
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: iauditor-TASNET/modules/settings.py
```python
import argparse
import re
import yaml
from yaml.scanner import ScannerError
from modules.global_variables import *
from modules.logger import log_critical_error, create_directory_if_not_exists
from safetypy import safetypy as sp
def load_setting_api_access_token(logger, config_settings):
"""
Attempt to parse API token from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: API token if valid, else None
"""
try:
api_token = config_settings['API']['token']
token_is_valid = re.match('^[a-f0-9]{64}$', api_token)
if token_is_valid:
logger.debug('API token matched expected pattern')
return api_token
else:
logger.error('API token failed to match expected pattern')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing API token from config.yaml')
return None
def docker_load_setting_api_access_token(logger, api_token):
"""
Attempt to parse an API token supplied via environment variable (docker mode)
:param logger: the logger
:param api_token: API token string taken from the environment
:return: API token if valid, else None
"""
try:
token_is_valid = re.match('^[a-f0-9]{64}$', api_token)
if token_is_valid:
logger.debug('API token matched expected pattern')
return api_token
else:
logger.error('API token failed to match expected pattern')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing API token from config.yaml')
return None
def load_export_inactive_items_to_csv(logger, config_settings):
"""
Attempt to parse export_inactive_items from config settings. Value of true or false is expected.
True means the CSV exporter will include inactive items. False means the CSV exporter will exclude inactive items.
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: value of export_inactive_items_to_csv if valid, else DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
"""
try:
if config_settings['export_options']['merge_rows'] is True:
logger.info('Merge rows is enabled, turning on the export of inactive items.')
export_inactive_items_to_csv = True
else:
export_inactive_items_to_csv = config_settings['export_options']['export_inactive_items']
if not isinstance(export_inactive_items_to_csv, bool):
logger.info('Invalid export_inactive_items value from configuration file, defaulting to true')
export_inactive_items_to_csv = DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
return export_inactive_items_to_csv
except Exception as ex:
log_critical_error(logger, ex,
'Exception parsing export_inactive_items from the configuration file, defaulting to {0}'.
format(str(DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV)))
return DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
def load_setting_sync_delay(logger, config_settings):
"""
Attempt to parse delay between sync loops from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: extracted sync delay if valid, else DEFAULT_SYNC_DELAY_IN_SECONDS
"""
try:
sync_delay = config_settings['export_options']['sync_delay_in_seconds']
sync_delay_is_valid = re.match('^[0-9]+$', str(sync_delay))
if sync_delay_is_valid and sync_delay >= 0:
if sync_delay < DEFAULT_SYNC_DELAY_IN_SECONDS:
logger.info('Sync delay is less than the minimum recommended value of {0} seconds'.format(DEFAULT_SYNC_DELAY_IN_SECONDS))
return sync_delay
else:
logger.info('Invalid sync_delay_in_seconds from the configuration file, defaulting to {0}'.format(str(
DEFAULT_SYNC_DELAY_IN_SECONDS)))
return DEFAULT_SYNC_DELAY_IN_SECONDS
except Exception as ex:
log_critical_error(logger, ex,
'Exception parsing sync_delay from the configuration file, defaulting to {0}'.format(str(
DEFAULT_SYNC_DELAY_IN_SECONDS)))
return DEFAULT_SYNC_DELAY_IN_SECONDS
def load_setting_preference_mapping(logger, config_settings):
"""
Attempt to parse preference settings from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: export preference mapping if valid, else None
"""
try:
preference_mapping = {}
preference_settings = config_settings['export_options']['preferences']
if preference_settings is not None:
preference_lines = preference_settings.split(' ')
for preference in preference_lines:
template_id = preference[:preference.index(':')]
if template_id not in preference_mapping.keys():
preference_mapping[template_id] = preference
return preference_mapping
except KeyError:
logger.debug('No preference key in the configuration file')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception getting preferences from the configuration file')
return None
def load_setting_export_path(logger, config_settings):
"""
Attempt to extract export path from config settings
:param config_settings: config settings loaded from config file
:param logger: the logger
:return: export path, None if path is invalid or missing
"""
try:
export_path = config_settings['export_options']['export_path']
if export_path is not None:
return export_path
else:
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception getting export path from the configuration file')
return None
def load_setting_media_sync_offset(logger, config_settings):
"""
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: media sync offset parsed from file, else default media sync offset
defined as global constant
"""
try:
media_sync_offset = config_settings['export_options']['media_sync_offset_in_seconds']
if media_sync_offset is None or media_sync_offset < 0 or not isinstance(media_sync_offset, int):
media_sync_offset = DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS
return media_sync_offset
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing media sync offset from config file')
return DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS
def parse_export_filename(audit_json, filename_item_id):
"""
Get 'response' value of specified header item to use for export file name
:param audit_json: audit JSON containing the header_items array
:param filename_item_id: item_id from config settings
:return: 'response' value of specified item from audit JSON
"""
if filename_item_id is None:
return None
# Not all Audits will actually contain an Audit Title item. For examples, when Audit Title rules are set,
# the Audit Title item is not going to be included by default.
# When this item ID is specified in the custom export filename configuration, the audit_data.name property
# will be used to populate the data as it covers all cases.
if filename_item_id == AUDIT_TITLE_ITEM_ID and 'audit_data' in audit_json.keys() \
and 'name' in audit_json['audit_data'].keys():
return audit_json['audit_data']['name'].replace('/', '_')
for item in audit_json['header_items']:
if item['item_id'] == filename_item_id:
if 'responses' in item.keys():
if 'text' in item['responses'].keys() and item['responses']['text'].strip() != '':
return item['responses']['text']
return None
def get_filename_item_id(logger, config_settings):
"""
Attempt to parse item_id for file naming from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: item_id extracted from config_settings if valid, else None
"""
try:
filename_item_id = config_settings['export_options']['filename']
if filename_item_id is not None:
if len(filename_item_id) > 36:
logger.critical('You can only specify one value for the filename. Please remove any additional item '
'IDs and try again. For more complex title rules, consider setting the title rules '
'within iAuditor. Defaulting to Audit ID.')
if filename_item_id == 'f3245d42-ea77-11e1-aff1-0800200c9a66':
logger.critical('Date fields are not compatible with the title rule feature. Defaulting to Audit ID')
else:
return filename_item_id
else:
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception retrieving setting "filename" from the configuration file')
return None
def set_env_defaults(name, env_var, logger):
# if env_var is None or '':
if not env_var:
if name == 'CONFIG_NAME':
logger.error('You must set the CONFIG_NAME')
sys.exit()
elif name == 'DB_SCHEMA':
env_var = 'dbo'
elif name.startswith('DB_') or name in ('SQL_TABLE', 'TEMPLATE_IDS'):
env_var = None
else:
env_var = 'false'
print(name, ' set to ', env_var)
return env_var
def load_setting_ssl_cert(logger, config_settings):
cert_location = None
if 'ssl_cert' in config_settings['API']:
if config_settings['API']['ssl_cert']:
cert_location = config_settings['API']['ssl_cert']
return cert_location
def load_setting_ssl_verify(logger, config_settings):
verify_cert = None
if 'ssl_verify' in config_settings['API']:
if config_settings['API']['ssl_verify']:
verify_cert = config_settings['API']['ssl_verify']
return verify_cert
def load_setting_proxy(logger, config_settings, http_or_https):
proxy = None
if http_or_https == 'https':
if 'proxy_https' in config_settings['API']:
if config_settings['API']['proxy_https']:
proxy = config_settings['API']['proxy_https']
elif http_or_https == 'http':
if 'proxy_http' in config_settings['API']:
if config_settings['API']['proxy_http']:
proxy = config_settings['API']['proxy_http']
else:
proxy = None
return proxy
def load_actions_table(actions_table_name):
if actions_table_name is None:
actions_table_name = 'iauditor'
return actions_table_name
else:
return actions_table_name
def load_config_settings(logger, path_to_config_file, docker_enabled):
"""
Load config settings from config file
:param logger: the logger
:param path_to_config_file: location of config file
:return: settings dictionary containing values for:
api_token, export_path, preferences,
filename_item_id, sync_delay_in_seconds loaded from
config file, media_sync_offset_in_seconds
"""
if docker_enabled is True:
settings = {
API_TOKEN: docker_load_setting_api_access_token(logger, os.environ['API_TOKEN']),
EXPORT_PATH: None,
# PREFERENCES: load_setting_preference_mapping(logger, config_settings),
# FILENAME_ITEM_ID: get_filename_item_id(logger, config_settings),
SYNC_DELAY_IN_SECONDS: int(os.environ['SYNC_DELAY_IN_SECONDS']),
# EXPORT_INACTIVE_ITEMS_TO_CSV: load_export_inactive_items_to_csv(logger, config_settings),
MEDIA_SYNC_OFFSET_IN_SECONDS: int(os.environ['MEDIA_SYNC_OFFSET_IN_SECONDS']),
TEMPLATE_IDS: set_env_defaults('TEMPLATE_IDS', os.environ['TEMPLATE_IDS'], logger),
SQL_TABLE: set_env_defaults('SQL_TABLE', os.environ['SQL_TABLE'], logger),
DB_TYPE: set_env_defaults('DB_TYPE', os.environ['DB_TYPE'], logger),
DB_USER: set_env_defaults('DB_USER', os.environ['DB_USER'], logger),
DB_PWD: set_env_defaults('DB_PWD', os.environ['DB_PWD'], logger),
DB_SERVER: set_env_defaults('DB_SERVER', os.environ['DB_SERVER'], logger),
DB_PORT: set_env_defaults('DB_PORT', os.environ['DB_PORT'], logger),
DB_NAME: set_env_defaults('DB_NAME', os.environ['DB_NAME'], logger),
DB_SCHEMA: set_env_defaults('DB_SCHEMA', os.environ['DB_SCHEMA'], logger),
USE_REAL_TEMPLATE_NAME: set_env_defaults('USE_REAL_TEMPLATE_NAME', os.environ['USE_REAL_TEMPLATE_NAME'],
logger),
CONFIG_NAME: set_env_defaults('CONFIG_NAME', os.environ['CONFIG_NAME'], logger),
EXPORT_ARCHIVED: set_env_defaults('EXPORT_ARCHIVED', os.environ['EXPORT_ARCHIVED'], logger),
EXPORT_COMPLETED: set_env_defaults('EXPORT_COMPLETED', os.environ['EXPORT_COMPLETED'], logger),
MERGE_ROWS: set_env_defaults('MERGE_ROWS', os.environ['MERGE_ROWS'], logger),
ALLOW_TABLE_CREATION: set_env_defaults('ALLOW_TABLE_CREATION', os.environ['ALLOW_TABLE_CREATION'], logger),
ACTIONS_TABLE: 'iauditor_actions_data',
ACTIONS_MERGE_ROWS: set_env_defaults('ACTIONS_MERGE_ROWS', os.environ['ACTIONS_MERGE_ROWS'], logger),
PREFERENCES: None,
FILENAME_ITEM_ID: None,
EXPORT_INACTIVE_ITEMS_TO_CSV: None
}
else:
try:
config_settings = yaml.safe_load(open(path_to_config_file))
except ScannerError as e:
logger.error(e)
logger.critical('There is a problem with your config file. The most likely reason is not leaving spaces '
'after the colons. Open your config.yaml file and ensure that after every : you have left '
'a space. For example, config_name:iauditor would create this error, it should be '
'config_name: iauditor ')
logger.critical('Please refer to '
'https://safetyculture.github.io/iauditor-exporter/script-setup/config/ for more '
'information.')
sys.exit()
if config_settings['config_name'] is None:
logger.info('The Config Name has been left blank, defaulting to iauditor.')
config_name = 'iauditor'
elif ' ' in config_settings['config_name']:
config_name = config_settings['config_name'].replace(' ', '_')
else:
config_name = config_settings['config_name']
if re.match("^[A-Za-z0-9_-]*$", config_name):
config_name = config_name
else:
logger.critical('Config name can only contain letters, numbers, hyphens or underscores.')
sys.exit()
if 'allow_table_creation' in config_settings['export_options']:
table_creation = config_settings['export_options']['allow_table_creation']
else:
table_creation = False
if load_setting_export_path(logger, config_settings) is None:
export_path = os.path.join('exports', config_name)
else:
export_path = os.path.join(load_setting_export_path(logger, config_settings), config_name)
settings = {
API_TOKEN: load_setting_api_access_token(logger, config_settings),
SSL_CERT: load_setting_ssl_cert(logger, config_settings),
SSL_VERIFY: load_setting_ssl_verify(logger, config_settings),
PROXY_HTTP: load_setting_proxy(logger, config_settings, 'http'),
PROXY_HTTPS: load_setting_proxy(logger, config_settings, 'https'),
EXPORT_PATH: export_path,
PREFERENCES: load_setting_preference_mapping(logger, config_settings),
FILENAME_ITEM_ID: get_filename_item_id(logger, config_settings),
SYNC_DELAY_IN_SECONDS: load_setting_sync_delay(logger, config_settings),
EXPORT_INACTIVE_ITEMS_TO_CSV: load_export_inactive_items_to_csv(logger, config_settings),
MEDIA_SYNC_OFFSET_IN_SECONDS: load_setting_media_sync_offset(logger, config_settings),
TEMPLATE_IDS: config_settings['export_options']['template_ids'],
SQL_TABLE: config_settings['export_options']['sql_table'],
DB_TYPE: config_settings['export_options']['database_type'],
DB_USER: config_settings['export_options']['database_user'],
DB_PWD: config_settings['export_options']['database_pwd'],
DB_SERVER: config_settings['export_options']['database_server'],
DB_PORT: config_settings['export_options']['database_port'],
DB_NAME: config_settings['export_options']['database_name'],
DB_SCHEMA: config_settings['export_options']['database_schema'],
USE_REAL_TEMPLATE_NAME: config_settings['export_options']['use_real_template_name'],
CONFIG_NAME: config_name,
EXPORT_ARCHIVED: config_settings['export_options']['export_archived'],
EXPORT_COMPLETED: config_settings['export_options']['export_completed'],
MERGE_ROWS: config_settings['export_options']['merge_rows'],
ALLOW_TABLE_CREATION: table_creation,
ACTIONS_TABLE: load_actions_table(config_settings['export_options']['sql_table']) + '_actions',
ACTIONS_MERGE_ROWS: config_settings['export_options']['actions_merge_rows']
}
return settings
def configure(logger, path_to_config_file, export_formats, docker_enabled):
"""
instantiate and configure logger, load config settings from file, instantiate SafetyCulture SDK
:param logger: the logger
:param path_to_config_file: path to config file
:param export_formats: desired export formats
:return: instance of SafetyCulture SDK object, config settings
"""
config_settings = load_config_settings(logger, path_to_config_file, docker_enabled)
config_settings[EXPORT_FORMATS] = export_formats
if config_settings[PROXY_HTTP] is not None and config_settings[PROXY_HTTPS] is not None:
proxy_settings = {
"http": config_settings[PROXY_HTTP],
"https": config_settings[PROXY_HTTPS]
}
else:
proxy_settings = None
sc_client = sp.SafetyCulture(config_settings[API_TOKEN],
proxy_settings=proxy_settings,
certificate_settings=config_settings[SSL_CERT],
ssl_verify=config_settings[SSL_VERIFY])
if config_settings[EXPORT_PATH] is not None:
if config_settings[CONFIG_NAME] is not None:
create_directory_if_not_exists(logger, os.path.join(config_settings[EXPORT_PATH]))
else:
logger.error("You must set the config_name in your config file before continuing.")
sys.exit()
else:
logger.info('No export path was found in ' + path_to_config_file + ', defaulting to /exports')
config_settings[EXPORT_PATH] = os.path.join(os.getcwd(), 'exports')
if config_settings[CONFIG_NAME] is not None:
create_directory_if_not_exists(logger, os.path.join(config_settings[EXPORT_PATH]))
else:
logger.error("You must set the config_name in your config file before continuing.")
sys.exit()
return sc_client, config_settings
def rename_config_sample(logger):
if not os.path.isfile('configs/config.yaml'):
if os.path.isfile('configs/config.yaml.sample'):
file_size = os.stat('configs/config.yaml.sample')
file_size = file_size.st_size
if file_size <= 666:
logger.info('It looks like the config file has not been filled out. Open the folder named "configs" '
'and edit the file named "config.yaml.sample" before continuing')
sys.exit()
if file_size >= 667:
logger.info('It looks like you have not renamed "config.yaml.sample" to "config.yaml". Would you like '
'the '
'script to do it for you (recommended!)? If you say no, you will need to manually remove '
'.sample from the file name. ')
question = input('Please type either y (yes) or n (no) and press enter to continue. ')
if question.startswith('y'):
os.rename(r'configs/config.yaml.sample', r'configs/config.yaml')
else:
sys.exit()
else:
logger.info('No config file found. Please either name it config.yaml or specify it with --config.')
sys.exit()
else:
logger.info('No config file found. Please either name it config.yaml or specify it with --config.')
sys.exit()
def parse_command_line_arguments(logger):
"""
Parse command line arguments received, if any
Print example if invalid arguments are passed
:param logger: the logger
:return: config_filename passed as argument if any, else DEFAULT_CONFIG_FILENAME
export_formats passed as argument if any, else 'pdf'
list_preferences if passed as argument, else None
do_loop False if passed as argument, else True
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='config file to use, defaults to ' + DEFAULT_CONFIG_FILENAME)
parser.add_argument('--docker', nargs='*', help='Switches settings to ENV variables for use with docker.')
parser.add_argument('--format', nargs='*', help='formats to download, valid options are pdf, '
'json, docx, csv, media, web-report-link, actions, pickle, sql')
parser.add_argument('--list_preferences', nargs='*', help='display all preferences, or restrict to specific'
' template_id if supplied as additional argument')
parser.add_argument('--loop', nargs='*', help='execute continuously until interrupted')
parser.add_argument('--setup', action='store_true', help='Automatically create new directory containing the '
'necessary config file.'
'Directory will be named iAuditor Audit Exports, and will '
'be placed in your current directory')
args = parser.parse_args()
if args.config is None:
rename_config_sample(logger)
if args.config is not None:
config_filename = os.path.join('configs', args.config)
if os.path.isfile(config_filename):
config_filename = os.path.join('configs', args.config)
logger.debug(config_filename + ' passed as config argument')
else:
logger.error(config_filename + ' is either missing or corrupt.')
sys.exit(1)
else:
config_filename = os.path.join('configs', DEFAULT_CONFIG_FILENAME)
export_formats = ['pdf']
if args.format is not None and len(args.format) > 0:
valid_export_formats = ['json', 'docx', 'pdf', 'csv', 'media', 'web-report-link', 'actions', 'actions-sql',
'sql', 'pickle', 'doc_creation']
export_formats = []
for option in args.format:
if option not in valid_export_formats:
print('{0} is not a valid export format. Valid options are pdf, json, docx, csv, web-report-link, '
'media, actions, actions-sql, sql, pickle, or doc_creation'.format(option))
logger.info('invalid export format argument: {0}'.format(option))
else:
export_formats.append(option)
loop_enabled = True if args.loop is not None else False
docker_enabled = True if args.docker is not None else False
return config_filename, export_formats, args.list_preferences, loop_enabled, docker_enabled
``` |
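A partial sketch of the `config.yaml` shape that `load_config_settings` reads in non-docker mode. The keys mirror the lookups above; all values are placeholders, the token must be a 64-character hex string, and the optional `API.ssl_cert`, `ssl_verify`, proxy keys and `export_options.allow_table_creation` are omitted:
```python
import re
import yaml

sample_config = yaml.safe_load("""
API:
  token: "0000000000000000000000000000000000000000000000000000000000000000"
config_name: iauditor
export_options:
  export_path:
  sync_delay_in_seconds: 900
  media_sync_offset_in_seconds: 0
  template_ids:
  sql_table:
  database_type:
  database_user:
  database_pwd:
  database_server:
  database_port:
  database_name:
  database_schema:
  use_real_template_name: false
  export_archived: false
  export_completed: true
  merge_rows: false
  actions_merge_rows: false
  export_inactive_items: false
  preferences:
  filename:
""")

# The same validation load_setting_api_access_token applies to the token
assert re.match('^[a-f0-9]{64}$', sample_config['API']['token'])
```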
{
"source": "joshRookout/deployment-examples",
"score": 2
} |
#### File: deployment-examples/python-fork/fork_rookout.py
```python
import time
from flask import Flask
from random import randint
import os
from datetime import datetime
import rook
app = Flask(__name__)
@app.route("/")
def home():
time.sleep(0.01 * randint(10, 200) + 0.1)
return 'Index Main Page'
@app.route('/hello')
def hello():
time.sleep(0.01 * randint(10, 200) + 0.1)
return 'Hello, World' # Set bp here
def debug_here(i):
j = i * i
return j # Set bp here
def child_routine():
for i in range(10):
debug_here(i)
time.sleep(5)
if __name__ == "__main__":
rook.start(throw_errors=True, fork=True)
if 0 == os.fork():
app.run(host="0.0.0.0", port=5000, threaded=True)
else:
child_routine()
``` |
{
"source": "joshRooz/NordApi",
"score": 3
} |
#### File: NordApi/nordapi/EdgeRouterX.py
```python
import OpenVpnUdp
import sys
import getopt
import fileinput
def main(argv):
country = None
try:
opts, args = getopt.getopt(argv, "h:c:", ["country="])
except getopt.GetoptError:
print('EdgeRouterX.py -c <country>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('EdgeRouterX.py -c <country>')
sys.exit()
elif opt in ("-c", "--country"):
country = arg
assert type(country) == str, 'Invalid Country: EdgeRouterX.py -h'
# 1. Download the recommended config file from NordVPN
ovpn = OpenVpnUdp.OpenVpnUdp()
ovpnfile = ovpn.get_file(country)
# 2. Replace the inline username and password fields with a file path
# Disable ability for OpenVPN provider to change routes in the firewall
with fileinput.FileInput(ovpnfile, inplace=True) as file:
for line in file:
line = line.replace(
'auth-user-pass',
'auth-user-pass /config/user-data/openvpn/nordvpnauth.txt'
)
line = line.replace(
'pull',
'pull\nroute-nopull'
)
print(line, end='')
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: NordApi/nordapi/NordApi.py
```python
import requests
from requests import get
# from iso3166 import countries_by_alpha2
class NordApi(object):
"""nordapi - Fetch VPN Endpoints
Interact with the undocumented, but publicly exposed API from NordVPN
"""
def __init__(self, limit=10, cap=30):
"""[initialization]
Args:
cap (int, optional): Maximum server load. 0-100. Defaults to 30.
limit (int, optional): Number of results. Defaults to 10.
"""
assert type(cap) == int and type(limit) == int
self.url = "https://api.nordvpn.com"
self.cap = cap
self.limit = limit
self.country = None
self.cid = None
def get_request(self, endpoint, payload=None):
assert type(endpoint) == str
if payload is None:
payload = {}
else:
assert type(payload) is dict
try:
response = get(self.url + endpoint, params=payload)
response.raise_for_status()
except requests.exceptions.RequestException as e:
raise Exception(e)
print(str(response.status_code) + ' : ' + response.url)
return response
def bisect_search(self, L, keyinput, keyoutput, match):
if len(L) == 0:
raise Exception('empty list. no value found.')
elif len(L) == 1:
# Check the last remaining candidate instead of failing outright
if match.lower() == L[0][keyinput].lower():
return L[0][keyoutput]
raise Exception(str(match) + ' not found.')
else:
middle = len(L) // 2
if match.lower() == L[middle][keyinput].lower():
return L[middle][keyoutput]
elif match.lower() > L[middle][keyinput].lower():
return self.bisect_search(
L[middle:], keyinput, keyoutput, match
)
else:
return self.bisect_search(
L[:middle], keyinput, keyoutput, match
)
def get_country_id(self, country):
assert type(country) == str
self.country = country
endpoint = "/v1/servers/countries"
key_input = "name"
key_output = "id"
# response is sorted A-Z on 'name' and id's are ascending
response = self.get_request(endpoint)
self.cid = (self.bisect_search(
response.json(), key_input, key_output, country))
return self.cid
def get_recommended(self):
assert type(
self.cid) == int, "Country ID must be defined and an integer"
# ex: /v1/servers/recommendations?filters[country_id]=227&limit=3
endpoint = "/v1/servers/recommendations"
payload = {'limit': self.limit, 'filters[country_id]': self.cid}
return self.get_request(endpoint, payload).json()
def set_limit(self, newlimit):
assert type(newlimit) == int, "Limit must be an integer"
self.limit = newlimit
def set_capacity(self, newcapacity):
assert type(newcapacity) == int, "Capacity must be an integer"
self.cap = newcapacity
```
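A hypothetical use of the client above (requires network access to api.nordvpn.com; `hostname` and `load` are the same response keys `OpenVpnUdp.get_file` reads in the next file):
```python
from NordApi import NordApi

# Resolve a country to its NordVPN id, then list the recommended servers.
api = NordApi(limit=3)
country_id = api.get_country_id('Switzerland')
print('country id:', country_id)
for server in api.get_recommended():
    print(server['hostname'], 'load:', server['load'])
```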
#### File: NordApi/nordapi/OpenVpnUdp.py
```python
from NordApi import NordApi
class OpenVpnUdp(NordApi):
def get_file(self, country, fname=None):
assert type(country) == str
NordApi.get_country_id(self, country)
NordApi.set_limit(self, 1)
json = NordApi.get_recommended(self)
self.servername = json[0]['hostname']
self.load = json[0]['load']
self.url = "https://downloads.nordcdn.com"
endpoint = (
'/configs/files/ovpn_udp/servers/'
+ self.servername
+ '.udp.ovpn'
)
response = NordApi.get_request(self, endpoint)
filename = self.servername + '.udp.ovpn'
writebytes = open(filename, 'wb')
try:
writebytes.write(response.content)
finally:
writebytes.close()
return filename
```
#### File: NordApi/tests/test_nordapi_meths.py
```python
import unittest
from nordapi.NordApi import NordApi
class TestCountryExists(unittest.TestCase):
def test_valid_country(self):
valid = NordApi()
assert type(valid.get_country_id('australia')) == int
def test_invalid_country(self):
invalid = NordApi()
self.assertRaises(Exception, invalid.get_country_id, 'erie')
class TestGetRecommended(unittest.TestCase):
def test_valid_getrecom(self):
valid = NordApi()
valid.get_country_id('croatia')
assert type(valid.get_recommended()) == list  # get_recommended() already returns the parsed JSON (a list of servers)
``` |
{
"source": "joshrose/audacity",
"score": 2
} |
#### File: waflib/extras/objcopy.py
```python
from waflib.Utils import def_attrs
from waflib import Task
from waflib.TaskGen import feature, after_method
class objcopy(Task.Task):
run_str = '${OBJCOPY} -O ${TARGET_BFDNAME} ${OBJCOPYFLAGS} ${SRC} ${TGT}'
color = 'CYAN'
@feature('objcopy')
@after_method('apply_link')
def map_objcopy(self):
def_attrs(self,
objcopy_bfdname = 'ihex',
objcopy_target = None,
objcopy_install_path = "${PREFIX}/firmware",
objcopy_flags = '')
link_output = self.link_task.outputs[0]
if not self.objcopy_target:
self.objcopy_target = link_output.change_ext('.' + self.objcopy_bfdname).name
task = self.create_task('objcopy', src=link_output, tgt=self.path.find_or_declare(self.objcopy_target))
task.env.append_unique('TARGET_BFDNAME', self.objcopy_bfdname)
try:
task.env.append_unique('OBJCOPYFLAGS', getattr(self, 'objcopy_flags'))
except AttributeError:
pass
if self.objcopy_install_path:
self.add_install_files(install_to=self.objcopy_install_path, install_from=task.outputs[0])
def configure(ctx):
ctx.find_program('objcopy', var='OBJCOPY', mandatory=True)
```
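A sketch of a wscript that uses this tool to turn a linked program into an Intel-HEX image (file names are illustrative, and the tool is assumed to be reachable on waf's tool path):
```python
# wscript (sketch) -- build firmware.elf, then objcopy it to ihex
def configure(conf):
    conf.load('gcc')
    conf.load('objcopy')

def build(bld):
    bld.program(source='main.c',
                target='firmware',
                features='objcopy',
                objcopy_bfdname='ihex',
                objcopy_install_path=None)   # skip the default ${PREFIX}/firmware install
```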
#### File: waflib/Tools/winres.py
```python
"Process *.rc* files for C/C++: X{.rc -> [.res|.rc.o]}"
import re
from waflib import Task
from waflib.TaskGen import extension
from waflib.Tools import c_preproc
@extension('.rc')
def rc_file(self, node):
"""
Binds the .rc extension to a winrc task
"""
obj_ext = '.rc.o'
if self.env.WINRC_TGT_F == '/fo':
obj_ext = '.res'
rctask = self.create_task('winrc', node, node.change_ext(obj_ext))
try:
self.compiled_tasks.append(rctask)
except AttributeError:
self.compiled_tasks = [rctask]
re_lines = re.compile(
'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\
'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)',
re.IGNORECASE | re.MULTILINE)
class rc_parser(c_preproc.c_parser):
"""
Calculates dependencies in .rc files
"""
def filter_comments(self, node):
"""
Overrides :py:meth:`waflib.Tools.c_preproc.c_parser.filter_comments`
"""
code = node.read()
if c_preproc.use_trigraphs:
for (a, b) in c_preproc.trig_def:
code = code.split(a).join(b)
code = c_preproc.re_nl.sub('', code)
code = c_preproc.re_cpp.sub(c_preproc.repl, code)
ret = []
for m in re.finditer(re_lines, code):
if m.group(2):
ret.append((m.group(2), m.group(3)))
else:
ret.append(('include', m.group(5)))
return ret
class winrc(Task.Task):
"""
Compiles resource files
"""
run_str = '${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'
color = 'BLUE'
def scan(self):
tmp = rc_parser(self.generator.includes_nodes)
tmp.start(self.inputs[0], self.env)
return (tmp.nodes, tmp.names)
def configure(conf):
"""
Detects the programs RC or windres, depending on the C/C++ compiler in use
"""
v = conf.env
if not v.WINRC:
if v.CC_NAME == 'msvc':
conf.find_program('RC', var='WINRC', path_list=v.PATH)
v.WINRC_TGT_F = '/fo'
v.WINRC_SRC_F = ''
else:
conf.find_program('windres', var='WINRC', path_list=v.PATH)
v.WINRC_TGT_F = '-o'
v.WINRC_SRC_F = '-i'
```
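With this tool loaded on a Windows toolchain, a `.rc` file can simply be listed next to the C sources and the extension hook above creates the `winrc` task that feeds the `.res`/`.rc.o` object to the link (sketch; file names are made up):
```python
# wscript (sketch) -- the .rc source is picked up by the '.rc' extension hook
def configure(conf):
    conf.load('compiler_c')
    conf.load('winres')

def build(bld):
    bld.program(source='main.c app.rc', target='app')
```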
#### File: waflib/extras/clang_cross_common.py
```python
from waflib.Configure import conf
import waflib.Context
def normalize_target_triple(target_triple):
target_triple = target_triple[:-1]
normalized_triple = target_triple.replace('--', '-unknown-')
if normalized_triple.startswith('-'):
normalized_triple = 'unknown' + normalized_triple
if normalized_triple.endswith('-'):
normalized_triple += 'unknown'
# Normalize MinGW builds to *arch*-w64-mingw32
if normalized_triple.endswith('windows-gnu'):
normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-w64-mingw32'
# Strip the vendor when doing msvc builds, since it's unused anyway.
if normalized_triple.endswith('windows-msvc'):
normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-windows-msvc'
return normalized_triple.replace('-', '_')
@conf
def clang_modifier_msvc(conf):
import os
"""
Really basic setup to use clang in msvc mode.
We actually don't really want to do a lot, even though clang is msvc compatible
in this mode, that doesn't mean we're actually using msvc.
It's probably the best to leave it to the user, we can assume msvc mode if the user
uses the clang-cl frontend, but this module only concerns itself with the gcc-like frontend.
"""
v = conf.env
v.cprogram_PATTERN = '%s.exe'
v.cshlib_PATTERN = '%s.dll'
v.implib_PATTERN = '%s.lib'
v.IMPLIB_ST = '-Wl,-IMPLIB:%s'
v.SHLIB_MARKER = []
v.CFLAGS_cshlib = []
v.LINKFLAGS_cshlib = ['-Wl,-DLL']
v.cstlib_PATTERN = '%s.lib'
v.STLIB_MARKER = []
del(v.AR)
conf.find_program(['llvm-lib', 'lib'], var='AR')
v.ARFLAGS = ['-nologo']
v.AR_TGT_F = ['-out:']
# Default to the linker supplied with llvm instead of link.exe or ld
v.LINK_CC = v.CC + ['-fuse-ld=lld', '-nostdlib']
v.CCLNK_TGT_F = ['-o']
v.def_PATTERN = '-Wl,-def:%s'
v.LINKFLAGS = []
v.LIB_ST = '-l%s'
v.LIBPATH_ST = '-Wl,-LIBPATH:%s'
v.STLIB_ST = '-l%s'
v.STLIBPATH_ST = '-Wl,-LIBPATH:%s'
CFLAGS_CRT_COMMON = [
'-Xclang', '--dependent-lib=oldnames',
'-Xclang', '-fno-rtti-data',
'-D_MT'
]
v.CFLAGS_CRT_MULTITHREADED = CFLAGS_CRT_COMMON + [
'-Xclang', '-flto-visibility-public-std',
'-Xclang', '--dependent-lib=libcmt',
]
v.CXXFLAGS_CRT_MULTITHREADED = v.CFLAGS_CRT_MULTITHREADED
v.CFLAGS_CRT_MULTITHREADED_DBG = CFLAGS_CRT_COMMON + [
'-D_DEBUG',
'-Xclang', '-flto-visibility-public-std',
'-Xclang', '--dependent-lib=libcmtd',
]
v.CXXFLAGS_CRT_MULTITHREADED_DBG = v.CFLAGS_CRT_MULTITHREADED_DBG
v.CFLAGS_CRT_MULTITHREADED_DLL = CFLAGS_CRT_COMMON + [
'-D_DLL',
'-Xclang', '--dependent-lib=msvcrt'
]
v.CXXFLAGS_CRT_MULTITHREADED_DLL = v.CFLAGS_CRT_MULTITHREADED_DLL
v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = CFLAGS_CRT_COMMON + [
'-D_DLL',
'-D_DEBUG',
'-Xclang', '--dependent-lib=msvcrtd',
]
v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CFLAGS_CRT_MULTITHREADED_DLL_DBG
@conf
def clang_modifier_target_triple(conf, cpp=False):
compiler = conf.env.CXX if cpp else conf.env.CC
output = conf.cmd_and_log(compiler + ['-dumpmachine'], output=waflib.Context.STDOUT)
modifier = ('clangxx' if cpp else 'clang') + '_modifier_'
clang_modifier_func = getattr(conf, modifier + normalize_target_triple(output), None)
if clang_modifier_func:
clang_modifier_func()
```
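The slice at the top of `normalize_target_triple` strips the trailing newline from `clang -dumpmachine` output. A couple of worked examples of the normalization, using the function defined above (inputs chosen for illustration):
```python
# Both inputs end with the newline that -dumpmachine emits.
print(normalize_target_triple('x86_64-pc-windows-gnu\n'))
# -> x86_64_w64_mingw32        (MinGW triples collapse to *arch*-w64-mingw32)
print(normalize_target_triple('x86_64--linux-gnu\n'))
# -> x86_64_unknown_linux_gnu  (the empty vendor becomes "unknown")
```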
#### File: waflib/extras/pep8.py
```python
import threading
from waflib import Task, Options
pep8 = __import__('pep8')
class Pep8(Task.Task):
color = 'PINK'
lock = threading.Lock()
def check_options(self):
if pep8.options:
return
pep8.options = Options.options
pep8.options.prog = 'pep8'
excl = pep8.options.exclude.split(',')
pep8.options.exclude = [s.rstrip('/') for s in excl]
if pep8.options.filename:
pep8.options.filename = pep8.options.filename.split(',')
if pep8.options.select:
pep8.options.select = pep8.options.select.split(',')
else:
pep8.options.select = []
if pep8.options.ignore:
pep8.options.ignore = pep8.options.ignore.split(',')
elif pep8.options.select:
# Ignore all checks which are not explicitly selected
pep8.options.ignore = ['']
elif pep8.options.testsuite or pep8.options.doctest:
# For doctest and testsuite, all checks are required
pep8.options.ignore = []
else:
# The default choice: ignore controversial checks
pep8.options.ignore = pep8.DEFAULT_IGNORE.split(',')
pep8.options.physical_checks = pep8.find_checks('physical_line')
pep8.options.logical_checks = pep8.find_checks('logical_line')
pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0)
pep8.options.messages = {}
def run(self):
with Pep8.lock:
self.check_options()
pep8.input_file(self.inputs[0].abspath())
return 0 if not pep8.get_count() else -1
def options(opt):
opt.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
opt.add_option('-r', '--repeat', action='store_true',
help="show all occurrences of the same error")
opt.add_option('--exclude', metavar='patterns',
default=pep8.DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" %
pep8.DEFAULT_EXCLUDE,
dest='exclude')
opt.add_option('--filename', metavar='patterns', default='*.py',
help="when parsing directories, only check filenames "
"matching these comma separated patterns (default: "
"*.py)")
opt.add_option('--select', metavar='errors', default='',
help="select errors and warnings (e.g. E,W6)")
opt.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W)")
opt.add_option('--show-source', action='store_true',
help="show source code for each error")
opt.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error")
opt.add_option('--statistics', action='store_true',
help="count errors and warnings")
opt.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
opt.add_option('--benchmark', action='store_true',
help="measure processing speed")
opt.add_option('--testsuite', metavar='dir',
help="run regression tests from dir")
opt.add_option('--doctest', action='store_true',
help="run doctest on myself")
```
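The extra above only defines the `Pep8` task class and its command-line options; it does not attach the task to any sources. One possible wiring, sketched as a hypothetical wscript fragment (the `pep8` feature name, the TaskGen method and the `pep8_source` attribute are illustrative additions, not part of the tool):

```python
# Hypothetical wscript fragment; assumes the extra is on waf's tool path.
from waflib import TaskGen

def options(opt):
    opt.load('pep8')    # imports the extra and registers its options

@TaskGen.feature('pep8')
def add_pep8_tasks(self):
    # One Pep8 task per Python file; a task fails (returns -1) when
    # pep8.get_count() reports any violation for that file.
    for node in self.to_nodes(getattr(self, 'pep8_source', [])):
        self.create_task('Pep8', node)

def configure(cnf):
    pass

def build(bld):
    bld(features='pep8', pep8_source=bld.path.ant_glob('**/*.py'))
```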
#### File: waflib/extras/softlink_libs.py
```python
from waflib.TaskGen import feature, after_method
from waflib.Task import Task, always_run
from os.path import basename, isabs
from tempfile import TemporaryFile as tmpfile
from os import linesep
def options(opt):
grp = opt.add_option_group('Softlink Libraries Options')
grp.add_option('--exclude', default='/usr/lib,/lib', help='No symbolic links are created for libs within [%default]')
def configure(cnf):
cnf.find_program('ldd')
if not cnf.env.SOFTLINK_EXCLUDE:
cnf.env.SOFTLINK_EXCLUDE = cnf.options.exclude.split(',')
@feature('softlink_libs')
@after_method('process_rule')
def add_finder(self):
tgt = self.path.find_or_declare(self.target)
self.create_task('sll_finder', tgt=tgt)
self.create_task('sll_installer', tgt=tgt)
always_run(sll_installer)
class sll_finder(Task):
ext_out = 'softlink_libs'
def run(self):
bld = self.generator.bld
linked=[]
target_paths = []
for g in bld.groups:
for tgen in g:
# FIXME it might be better to check if there is a link_task (getattr?)
target_paths += [tgen.path.get_bld().bldpath()]
linked += [t.outputs[0].bldpath()
for t in getattr(tgen, 'tasks', [])
if t.__class__.__name__ in
['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']]
lib_list = []
if len(linked):
cmd = [self.env.LDD] + linked
# FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32
ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)}
			# TemporaryFile supports the context-manager protocol on Python 2.6+ and 3
			with tmpfile(mode='w+') as result:
self.exec_command(cmd, env=ldd_env, stdout=result)
result.seek(0)
for line in result.readlines():
words = line.split()
if len(words) < 3 or words[1] != '=>':
continue
lib = words[2]
if lib == 'not':
continue
if any([lib.startswith(p) for p in
[bld.bldnode.abspath(), '('] +
self.env.SOFTLINK_EXCLUDE]):
continue
if not isabs(lib):
continue
lib_list.append(lib)
lib_list = sorted(set(lib_list))
self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS))
return 0
class sll_installer(Task):
ext_in = 'softlink_libs'
def run(self):
tgt = self.outputs[0]
self.generator.bld.install_files('${LIBDIR}', tgt, postpone=False)
lib_list=tgt.read().split()
for lib in lib_list:
self.generator.bld.symlink_as('${LIBDIR}/'+basename(lib), lib, postpone=False)
return 0
```
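A hypothetical wscript fragment for the `softlink_libs` feature above (Linux-oriented, since it relies on `ldd`); the file name `needed_libs.txt` and the `main.c` target are illustrative.

```python
# Hypothetical wscript fragment; names are illustrative, ldd must be available.
def options(opt):
    opt.load('compiler_c')
    opt.load('softlink_libs')      # adds the --exclude option defined above

def configure(cnf):
    cnf.load('compiler_c')
    cnf.load('softlink_libs')      # finds ldd and seeds SOFTLINK_EXCLUDE

def build(bld):
    bld.program(source='main.c', target='app')   # ordinary target to be inspected
    # sll_finder runs ldd over the freshly built binaries and writes the library
    # list into needed_libs.txt; sll_installer then symlinks each entry into ${LIBDIR}.
    bld(features='softlink_libs', target='needed_libs.txt')
```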
#### File: waflib/extras/sphinx.py
```python
from waflib.Node import Node
from waflib import Utils
from waflib.Task import Task
from waflib.TaskGen import feature, after_method
def configure(cnf):
"""Check if sphinx-build program is available and loads gnu_dirs tool."""
cnf.find_program('sphinx-build', var='SPHINX_BUILD', mandatory=False)
cnf.load('gnu_dirs')
@feature('sphinx')
def build_sphinx(self):
"""Builds sphinx sources.
"""
if not self.env.SPHINX_BUILD:
self.bld.fatal('Program SPHINX_BUILD not defined.')
if not getattr(self, 'sphinx_source', None):
self.bld.fatal('Attribute sphinx_source not defined.')
if not isinstance(self.sphinx_source, Node):
self.sphinx_source = self.path.find_node(self.sphinx_source)
if not self.sphinx_source:
self.bld.fatal('Can\'t find sphinx_source: %r' % self.sphinx_source)
Utils.def_attrs(self, sphinx_output_format='html')
self.env.SPHINX_OUTPUT_FORMAT = self.sphinx_output_format
self.env.SPHINX_OPTIONS = getattr(self, 'sphinx_options', [])
for source_file in self.sphinx_source.ant_glob('**/*'):
self.bld.add_manual_dependency(self.sphinx_source, source_file)
sphinx_build_task = self.create_task('SphinxBuildingTask')
sphinx_build_task.set_inputs(self.sphinx_source)
sphinx_build_task.set_outputs(self.path.get_bld())
# the sphinx-build results are in <build + output_format> directory
sphinx_output_directory = self.path.get_bld().make_node(self.env.SPHINX_OUTPUT_FORMAT)
sphinx_output_directory.mkdir()
Utils.def_attrs(self, install_path=get_install_path(self))
self.add_install_files(install_to=self.install_path,
install_from=sphinx_output_directory.ant_glob('**/*'),
cwd=sphinx_output_directory,
relative_trick=True)
def get_install_path(tg):
if tg.env.SPHINX_OUTPUT_FORMAT == 'man':
return tg.env.MANDIR
elif tg.env.SPHINX_OUTPUT_FORMAT == 'info':
return tg.env.INFODIR
else:
return tg.env.DOCDIR
class SphinxBuildingTask(Task):
color = 'BOLD'
run_str = '${SPHINX_BUILD} -M ${SPHINX_OUTPUT_FORMAT} ${SRC} ${TGT} ${SPHINX_OPTIONS}'
def keyword(self):
return 'Compiling (%s)' % self.env.SPHINX_OUTPUT_FORMAT
```
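A hypothetical wscript fragment for the `sphinx` feature above; `doc` is assumed to be the directory holding the Sphinx `conf.py`, and the tool is assumed to be on waf's tool path.

```python
# Hypothetical wscript fragment; 'doc' is assumed to contain the Sphinx conf.py.
def configure(cnf):
    cnf.load('sphinx')                 # checks for sphinx-build and loads gnu_dirs

def build(bld):
    bld(features='sphinx',
        sphinx_source='doc',           # required attribute, becomes ${SRC}
        sphinx_output_format='html',   # 'html' (default), 'man' or 'info'
        sphinx_options=['-q'])         # extra ${SPHINX_OPTIONS} for sphinx-build
```

The output format also decides the install location: `man` pages go to `${MANDIR}`, `info` to `${INFODIR}`, everything else to `${DOCDIR}`.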
#### File: waflib/extras/valadoc.py
```python
from waflib import Task, Utils, Errors, Logs
from waflib.TaskGen import feature
VALADOC_STR = '${VALADOC}'
class valadoc(Task.Task):
vars = ['VALADOC', 'VALADOCFLAGS']
color = 'BLUE'
after = ['cprogram', 'cstlib', 'cshlib', 'cxxprogram', 'cxxstlib', 'cxxshlib']
quiet = True # no outputs .. this is weird
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.output_dir = ''
self.doclet = ''
self.package_name = ''
self.package_version = ''
self.files = []
self.vapi_dirs = []
self.protected = True
self.private = False
self.inherit = False
self.deps = False
self.vala_defines = []
self.vala_target_glib = None
self.enable_non_null_experimental = False
self.force = False
def run(self):
if not self.env['VALADOCFLAGS']:
self.env['VALADOCFLAGS'] = ''
cmd = [Utils.subst_vars(VALADOC_STR, self.env)]
cmd.append ('-o %s' % self.output_dir)
if getattr(self, 'doclet', None):
cmd.append ('--doclet %s' % self.doclet)
cmd.append ('--package-name %s' % self.package_name)
if getattr(self, 'package_version', None):
cmd.append ('--package-version %s' % self.package_version)
if getattr(self, 'packages', None):
for package in self.packages:
cmd.append ('--pkg %s' % package)
if getattr(self, 'vapi_dirs', None):
for vapi_dir in self.vapi_dirs:
cmd.append ('--vapidir %s' % vapi_dir)
if not getattr(self, 'protected', None):
cmd.append ('--no-protected')
if getattr(self, 'private', None):
cmd.append ('--private')
if getattr(self, 'inherit', None):
cmd.append ('--inherit')
if getattr(self, 'deps', None):
cmd.append ('--deps')
if getattr(self, 'vala_defines', None):
for define in self.vala_defines:
cmd.append ('--define %s' % define)
if getattr(self, 'vala_target_glib', None):
cmd.append ('--target-glib=%s' % self.vala_target_glib)
if getattr(self, 'enable_non_null_experimental', None):
cmd.append ('--enable-non-null-experimental')
if getattr(self, 'force', None):
cmd.append ('--force')
cmd.append (' '.join ([x.abspath() for x in self.files]))
return self.generator.bld.exec_command(' '.join(cmd))
@feature('valadoc')
def process_valadoc(self):
"""
Generate API documentation from Vala source code with valadoc
doc = bld(
features = 'valadoc',
output_dir = '../doc/html',
package_name = 'vala-gtk-example',
package_version = '1.0.0',
packages = 'gtk+-2.0',
vapi_dirs = '../vapi',
force = True
)
path = bld.path.find_dir ('../src')
doc.files = path.ant_glob (incl='**/*.vala')
"""
task = self.create_task('valadoc')
if getattr(self, 'output_dir', None):
task.output_dir = self.path.find_or_declare(self.output_dir).abspath()
else:
		raise Errors.WafError('no output directory')
if getattr(self, 'doclet', None):
task.doclet = self.doclet
else:
		raise Errors.WafError('no doclet directory')
if getattr(self, 'package_name', None):
task.package_name = self.package_name
else:
		raise Errors.WafError('no package name')
if getattr(self, 'package_version', None):
task.package_version = self.package_version
if getattr(self, 'packages', None):
task.packages = Utils.to_list(self.packages)
if getattr(self, 'vapi_dirs', None):
vapi_dirs = Utils.to_list(self.vapi_dirs)
for vapi_dir in vapi_dirs:
try:
task.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
except AttributeError:
Logs.warn('Unable to locate Vala API directory: %r', vapi_dir)
if getattr(self, 'files', None):
task.files = self.files
else:
		raise Errors.WafError('no input file')
if getattr(self, 'protected', None):
task.protected = self.protected
if getattr(self, 'private', None):
task.private = self.private
if getattr(self, 'inherit', None):
task.inherit = self.inherit
if getattr(self, 'deps', None):
task.deps = self.deps
if getattr(self, 'vala_defines', None):
task.vala_defines = Utils.to_list(self.vala_defines)
if getattr(self, 'vala_target_glib', None):
task.vala_target_glib = self.vala_target_glib
if getattr(self, 'enable_non_null_experimental', None):
task.enable_non_null_experimental = self.enable_non_null_experimental
if getattr(self, 'force', None):
task.force = self.force
def configure(conf):
	conf.find_program('valadoc', errmsg='You must install valadoc <http://live.gnome.org/Valadoc> to generate the API documentation')
```
#### File: waflib/extras/wix.py
```python
import os, copy
from waflib import TaskGen
from waflib import Task
from waflib.Utils import winreg
class candle(Task.Task):
run_str = '${CANDLE} -nologo ${CANDLEFLAGS} -out ${TGT} ${SRC[0].abspath()}',
class light(Task.Task):
run_str = "${LIGHT} -nologo -b ${SRC[0].parent.abspath()} ${LIGHTFLAGS} -out ${TGT} ${SRC[0].abspath()}"
@TaskGen.feature('wix')
@TaskGen.before_method('process_source')
def wix(self):
#X.wxs -> ${SRC} for CANDLE
#X.wxobj -> ${SRC} for LIGHT
#X.dll -> -ext X in ${LIGHTFLAGS}
#X.wxl -> wixui.wixlib -loc X.wxl in ${LIGHTFLAGS}
wxobj = []
wxs = []
exts = []
wxl = []
rest = []
for x in self.source:
if x.endswith('.wxobj'):
wxobj.append(x)
elif x.endswith('.wxs'):
wxobj.append(self.path.find_or_declare(x[:-4]+'.wxobj'))
wxs.append(x)
elif x.endswith('.dll'):
exts.append(x[:-4])
elif '.' not in x:
exts.append(x)
elif x.endswith('.wxl'):
wxl.append(x)
else:
rest.append(x)
self.source = self.to_nodes(rest) #.wxs
cndl = self.create_task('candle', self.to_nodes(wxs), self.to_nodes(wxobj))
lght = self.create_task('light', self.to_nodes(wxobj), self.path.find_or_declare(self.gen))
cndl.env.CANDLEFLAGS = copy.copy(getattr(self,'candleflags',[]))
lght.env.LIGHTFLAGS = copy.copy(getattr(self,'lightflags',[]))
for x in wxl:
lght.env.append_value('LIGHTFLAGS','wixui.wixlib')
lght.env.append_value('LIGHTFLAGS','-loc')
lght.env.append_value('LIGHTFLAGS',x)
for x in exts:
cndl.env.append_value('CANDLEFLAGS','-ext')
cndl.env.append_value('CANDLEFLAGS',x)
lght.env.append_value('LIGHTFLAGS','-ext')
lght.env.append_value('LIGHTFLAGS',x)
#wix_bin_path()
def wix_bin_path():
basekey = r"SOFTWARE\Microsoft\.NETFramework\AssemblyFolders"
query = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, basekey)
cnt=winreg.QueryInfoKey(query)[0]
thiskey = r'C:\Program Files (x86)\WiX Toolset v3.10\SDK'
for i in range(cnt-1,-1,-1):
thiskey = winreg.EnumKey(query,i)
if 'WiX' in thiskey:
break
winreg.CloseKey(query)
return os.path.normpath(winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, basekey+r'\\'+thiskey)+'..\\bin')
def configure(ctx):
path_list=[wix_bin_path()]
ctx.find_program('candle', var='CANDLE', mandatory=True, path_list = path_list)
ctx.find_program('light', var='LIGHT', mandatory=True, path_list = path_list)
```
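A hypothetical wscript fragment for the `wix` feature above (Windows only); file names and flags are illustrative. Per the parsing in `wix()`, `.wxs` sources are compiled by candle, `.wxl` files are passed to light via `-loc`, and bare names or `.dll` entries become `-ext` extensions for both tools.

```python
# Hypothetical wscript fragment; file names and flags are illustrative only.
def configure(ctx):
    ctx.load('wix')                          # locates candle.exe and light.exe

def build(bld):
    bld(features='wix',
        source=['installer.wxs',             # candle compiles this to installer.wxobj
                'strings.wxl',               # handed to light via -loc
                'WixUIExtension'],           # handed to candle and light via -ext
        gen='installer.msi',                 # final output produced by light
        candleflags=['-arch', 'x64'],
        lightflags=['-sval'])
```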
#### File: waflib/Tools/flex.py
```python
import os, re
from waflib import Task, TaskGen
from waflib.Tools import ccroot
def decide_ext(self, node):
if 'cxx' in self.features:
return ['.lex.cc']
return ['.lex.c']
def flexfun(tsk):
env = tsk.env
bld = tsk.generator.bld
wd = bld.variant_dir
def to_list(xx):
if isinstance(xx, str):
return [xx]
return xx
tsk.last_cmd = lst = []
lst.extend(to_list(env.FLEX))
lst.extend(to_list(env.FLEXFLAGS))
inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs]
if env.FLEX_MSYS:
inputs = [x.replace(os.sep, '/') for x in inputs]
lst.extend(inputs)
lst = [x for x in lst if x]
txt = bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0)
tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207
TaskGen.declare_chain(
name = 'flex',
rule = flexfun, # issue #854
ext_in = '.l',
decider = decide_ext,
)
# To support the following:
# bld(features='c', flexflags='-P/foo')
Task.classes['flex'].vars = ['FLEXFLAGS', 'FLEX']
ccroot.USELIB_VARS['c'].add('FLEXFLAGS')
ccroot.USELIB_VARS['cxx'].add('FLEXFLAGS')
def configure(conf):
"""
Detect the *flex* program
"""
conf.find_program('flex', var='FLEX')
conf.env.FLEXFLAGS = ['-t']
if re.search (r"\\msys\\[0-9.]+\\bin\\flex.exe$", conf.env.FLEX[0]):
# this is the flex shipped with MSYS
conf.env.FLEX_MSYS = True
```
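A hypothetical wscript fragment showing the per-target `flexflags` mentioned in the comment above; the file names and scanner prefix are illustrative.

```python
# Hypothetical wscript fragment; file names and the -P prefix are illustrative.
def options(opt):
    opt.load('compiler_c')

def configure(conf):
    conf.load('compiler_c flex')     # flex's configure also sets FLEXFLAGS = ['-t']

def build(bld):
    bld(features='c cprogram',
        source='scanner.l main.c',   # .l files go through the declared flex chain
        target='app',
        flexflags='-Pmy_scanner')    # per-target flags, as noted in the tool
```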
#### File: waflib/Tools/icc.py
```python
import sys
from waflib.Tools import ccroot, ar, gcc
from waflib.Configure import conf
@conf
def find_icc(conf):
"""
Finds the program icc and execute it to ensure it really is icc
"""
cc = conf.find_program(['icc', 'ICL'], var='CC')
conf.get_cc_version(cc, icc=True)
conf.env.CC_NAME = 'icc'
def configure(conf):
conf.find_icc()
conf.find_ar()
conf.gcc_common_flags()
conf.gcc_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
```
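A minimal, hypothetical wscript fragment configuring icc directly rather than through `compiler_c` auto-detection (assumes the Intel compiler is on PATH):

```python
# Hypothetical wscript fragment; assumes icc (or ICL) is on PATH.
def configure(conf):
    conf.load('icc')     # runs find_icc, find_ar and the gcc-style flag setup above

def build(bld):
    bld(features='c cprogram', source='main.c', target='app')
```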
#### File: waflib/Tools/irixcc.py
```python
from waflib import Errors
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_irixcc(conf):
v = conf.env
cc = None
if v.CC:
cc = v.CC
elif 'CC' in conf.environ:
cc = conf.environ['CC']
if not cc:
cc = conf.find_program('cc', var='CC')
if not cc:
conf.fatal('irixcc was not found')
try:
conf.cmd_and_log(cc + ['-version'])
except Errors.WafError:
conf.fatal('%r -version could not be executed' % cc)
v.CC = cc
v.CC_NAME = 'irix'
@conf
def irixcc_common_flags(conf):
v = conf.env
v.CC_SRC_F = ''
v.CC_TGT_F = ['-c', '-o']
v.CPPPATH_ST = '-I%s'
v.DEFINES_ST = '-D%s'
if not v.LINK_CC:
v.LINK_CC = v.CC
v.CCLNK_SRC_F = ''
v.CCLNK_TGT_F = ['-o']
v.LIB_ST = '-l%s' # template for adding libs
v.LIBPATH_ST = '-L%s' # template for adding libpaths
v.STLIB_ST = '-l%s'
v.STLIBPATH_ST = '-L%s'
v.cprogram_PATTERN = '%s'
v.cshlib_PATTERN = 'lib%s.so'
v.cstlib_PATTERN = 'lib%s.a'
def configure(conf):
conf.find_irixcc()
conf.find_cpp()
conf.find_ar()
conf.irixcc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
```
#### File: waflib/Tools/md5_tstamp.py
```python
import os, stat
from waflib import Utils, Build, Node
STRONGEST = True
Build.SAVED_ATTRS.append('hashes_md5_tstamp')
def h_file(self):
filename = self.abspath()
st = os.stat(filename)
cache = self.ctx.hashes_md5_tstamp
if filename in cache and cache[filename][0] == st.st_mtime:
return cache[filename][1]
if STRONGEST:
ret = Utils.h_file(filename)
else:
if stat.S_ISDIR(st[stat.ST_MODE]):
raise IOError('Not a file')
ret = Utils.md5(str((st.st_mtime, st.st_size)).encode()).digest()
cache[filename] = (st.st_mtime, ret)
return ret
h_file.__doc__ = Node.Node.h_file.__doc__
Node.Node.h_file = h_file
```
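A small standalone sketch (not waf code) contrasting the two signature strategies used above: hashing the whole file content, roughly what `Utils.h_file` does when `STRONGEST` is true, versus the cheaper `(mtime, size)` digest used otherwise.

```python
# Standalone sketch, not part of waf; mirrors the two branches of h_file above.
import hashlib
import os

def weak_signature(path):
    # Non-STRONGEST branch: hash only the (mtime, size) metadata.
    st = os.stat(path)
    return hashlib.md5(str((st.st_mtime, st.st_size)).encode()).digest()

def strong_signature(path):
    # STRONGEST branch (roughly Utils.h_file): hash the full file content.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.digest()

if __name__ == '__main__':
    print('weak  :', weak_signature(__file__).hex())
    print('strong:', strong_signature(__file__).hex())
```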
#### File: scripts/mw2html_audacity/htmldata.py
```python
__version__ = '1.1.2'
__all__ = ['examples', 'tagextract', 'tagjoin', 'urlextract',
'urljoin', 'URLMatch']
# -------------------------------------------------------------------
# Globals
# -------------------------------------------------------------------
import re
import shlex
import string
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import types
# Translate text between these strings as plain text (not HTML).
_IGNORE_TAGS = [('script', '/script'),
('style', '/style')]
# Special tags where we have to look for _END_X as part of the
# HTML/XHTML parsing rules.
_BEGIN_COMMENT = '<!--'
_END_COMMENT = '-->'
_BEGIN_CDATA = '<![CDATA['
_END_CDATA = ']]>'
# Mime types that can be parsed as HTML or HTML-like.
_HTML_MIMETYPES = ['text/html', 'application/xhtml',
'application/xhtml+xml', 'text/xml',
'application/xml']
# Mime types that can be parsed as CSS.
_CSS_MIMETYPES = ['text/css']
# -------------------------------------------------------------------
# HTML <-> Data structure
# -------------------------------------------------------------------
def tagextract(doc):
"""
Convert HTML to data structure.
Returns a list. HTML tags become C{(name, keyword_dict)} tuples
within the list, while plain text becomes strings within the
list. All tag names are lowercased and stripped of whitespace.
Tags which end with forward slashes have a single forward slash
placed at the end of their name, to indicate that they are XML
unclosed tags.
Example:
>>> tagextract('<img src=hi.gif alt="hi">foo<br><br/></body>')
[('img', {'src': 'hi.gif', 'alt': 'hi'}), 'foo',
('br', {}), ('br/', {}), ('/body', {})]
Text between C{'<script>'} and C{'<style>'} is rendered directly to
plain text. This prevents rogue C{'<'} or C{'>'} characters from
interfering with parsing.
>>> tagextract('<script type="a"><blah>var x; </script>')
[('script', {'type': 'a'}), '<blah>var x; ', ('/script', {})]
Comment strings and XML directives are rendered as a single long
tag with no attributes. The case of the tag "name" is not changed:
>>> tagextract('<!-- blah -->')
[('!-- blah --', {})]
>>> tagextract('<?xml version="1.0" encoding="utf-8" ?>')
[('?xml version="1.0" encoding="utf-8" ?', {})]
>>> tagextract('<!DOCTYPE html PUBLIC etc...>')
[('!DOCTYPE html PUBLIC etc...', {})]
Greater-than and less-than characters occurring inside comments or
CDATA blocks are correctly kept as part of the block:
>>> tagextract('<!-- <><><><>>..> -->')
[('!-- <><><><>>..> --', {})]
>>> tagextract('<!CDATA[[><>><>]<> ]]>')
[('!CDATA[[><>><>]<> ]]', {})]
Note that if one modifies these tags, it is important to retain the
C{"--"} (for comments) or C{"]]"} (for C{CDATA}) at the end of the
tag name, so that output from L{tagjoin} will be correct HTML/XHTML.
"""
L = _full_tag_extract(doc)
for i in range(len(L)):
if isinstance(L[i], _TextTag):
# _TextTag object.
L[i] = L[i].text
else:
# _HTMLTag object.
L[i] = (L[i].name, L[i].attrs)
return L
def _is_str(s):
"""
True iff s is a string (checks via duck typing).
"""
return hasattr(s, 'capitalize')
def tagjoin(L):
"""
Convert data structure back to HTML.
This reverses the L{tagextract} function.
More precisely, if an HTML string is turned into a data structure,
then back into HTML, the resulting string will be functionally
equivalent to the original HTML.
>>> tagjoin(tagextract(s))
(string that is functionally equivalent to s)
Three changes are made to the HTML by L{tagjoin}: tags are
lowercased, C{key=value} pairs are sorted, and values are placed in
double-quotes.
"""
if _is_str(L):
raise ValueError('got string arg, expected non-string iterable')
ans = []
for item in L:
# Check for string using duck typing.
if _is_str(item):
# Handle plain text.
ans.append(item)
elif item[0] == '--':
# Handle closing comment.
ans.append('-->')
elif item[0] == '!--':
# Handle opening comment.
ans.append('<!--')
else:
# Handle regular HTML tag.
(name, d) = item
if name[-1:] == '/':
rslash = ' /'
name = name[:-1]
else:
rslash = ''
tag_items = []
items = list(d.items())
items.sort()
for (key, value) in items:
if value != None:
if '"' in value and "'" in value:
raise ValueError('attribute value contains both single' +
' and double quotes')
elif '"' in value:
tag_items.append(key + "='" + value + "'")
else:
tag_items.append(key + '="' + value + '"')
else:
tag_items.append(key)
tag_items = ' '.join(tag_items)
if tag_items != '':
tag_items = ' ' + tag_items
ans.append('<' + name + tag_items + rslash + '>')
return ''.join(ans)
def _enumerate(L):
"""
Like C{enumerate}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator.
"""
return list(zip(list(range(len(L))), L))
def _ignore_tag_index(s, i):
"""
Helper routine: Find index within C{_IGNORE_TAGS}, or C{-1}.
If C{s[i:]} begins with an opening tag from C{_IGNORE_TAGS}, return
the index. Otherwise, return C{-1}.
"""
for (j, (a, b)) in _enumerate(_IGNORE_TAGS):
if s[i:i + len(a) + 1].lower() == '<' + a:
return j
return - 1
def _html_split(s):
"""
Helper routine: Split string into a list of tags and non-tags.
>>> html_split(' blah <tag text> more </tag stuff> ')
[' blah ', '<tag text>', ' more ', '</tag stuff>', ' ']
Tags begin with C{'<'} and end with C{'>'}.
The identity C{''.join(L) == s} is always satisfied.
Exceptions to the normal parsing of HTML tags:
C{'<script>'}, C{'<style>'}, and HTML comment tags ignore all HTML
until the closing pair, and are added as three elements:
>>> html_split(' blah<style><<<><></style><!-- hi -->' +
... ' <script language="Javascript"></>a</script>end')
[' blah', '<style>', '<<<><>', '</style>', '<!--', ' hi ', '-->',
' ', '<script language="Javascript">', '</>a', '</script>', 'end']
"""
s_lower = s.lower()
L = []
i = 0 # Index of char being processed
while i < len(s):
c = s[i]
if c == '<':
# Left bracket, handle various cases.
if s[i:i + len(_BEGIN_COMMENT)].startswith(_BEGIN_COMMENT):
# HTML begin comment tag, '<!--'. Scan for '-->'.
i2 = s.find(_END_COMMENT, i)
if i2 < 0:
# No '-->'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the comment.
L.append(s[i:i2 + len(_END_COMMENT)])
i = i2 + len(_END_COMMENT)
elif s[i:i + len(_BEGIN_CDATA)].startswith(_BEGIN_CDATA):
# XHTML begin CDATA tag. Scan for ']]>'.
i2 = s.find(_END_CDATA, i)
if i2 < 0:
# No ']]>'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the CDATA.
L.append(s[i:i2 + len(_END_CDATA)])
i = i2 + len(_END_CDATA)
else:
# Regular HTML tag. Scan for '>'.
orig_i = i
found = False
in_quot1 = False
in_quot2 = False
for i2 in range(i + 1, len(s)):
c2 = s[i2]
if c2 == '"' and not in_quot1:
in_quot2 = not in_quot2
# Only turn on double quote if it's in a realistic place.
if in_quot2 and not in_quot1:
if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
in_quot2 = False
elif c2 == "'" and not in_quot2:
in_quot1 = not in_quot1
# Only turn on single quote if it's in a realistic place.
if in_quot1 and not in_quot2:
if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
in_quot1 = False
elif c2 == '>' and (not in_quot2 and not in_quot1):
found = True
break
if not found:
# No end '>'. Append the rest as text.
L.append(s[i:])
break
else:
# Append the tag.
L.append(s[i:i2 + 1])
i = i2 + 1
# Check whether we found a special ignore tag, eg '<script>'
tagi = _ignore_tag_index(s, orig_i)
if tagi >= 0:
# It's an ignore tag. Scan for the end tag.
i2 = s_lower.find('<' + _IGNORE_TAGS[tagi][1], i)
if i2 < 0:
# No end tag. Append the rest as text.
L.append(s[i2:])
break
else:
# Append the text sandwiched between the tags.
L.append(s[i:i2])
# Catch the closing tag with the next loop iteration.
i = i2
else:
# Not a left bracket, append text up to next left bracket.
i2 = s.find('<', i)
if i2 < 0:
# No left brackets, append the rest as text.
L.append(s[i:])
break
else:
L.append(s[i:i2])
i = i2
return L
def _shlex_split(s):
"""
Like C{shlex.split}, but reversible, and for HTML.
Splits a string into a list C{L} of strings. List elements
contain either an HTML tag C{name=value} pair, an HTML name
singleton (eg C{"checked"}), or whitespace.
The identity C{''.join(L) == s} is always satisfied.
>>> _shlex_split('a=5 b="15" name="<NAME>"')
['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']
>>> _shlex_split('a = a5 b=#b19 name="foo bar" q="hi"')
['a = a5', ' ', 'b=#b19', ' ', 'name="foo bar"', ' ', 'q="hi"']
>>> _shlex_split('a="9"b="15"')
['a="9"', 'b="15"']
"""
ans = []
i = 0
while i < len(s):
c = s[i]
if c in string.whitespace:
# Whitespace. Add whitespace while found.
for i2 in range(i, len(s)):
if s[i2] not in string.whitespace:
break
# Include the entire string if the last char is whitespace.
if s[i2] in string.whitespace:
i2 += 1
ans.append(s[i:i2])
i = i2
else:
# Match 'name = "value"'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*"[^"]*"')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match "name = 'value'"
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*\'[^\']*\'')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name = value'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*[^ \t\n\r\f\v"\']*')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name'
c = re.compile(r'[^ \t\n\r\f\v"\']+')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Couldn't match anything so far, so it's likely that the page
# has malformed quotes inside a tag. Add leading quotes
# and spaces to the previous field until we see something.
subadd = []
while i < len(s) and s[i] in ['"', "'", ' ', '\t']:
subadd.append(s[i])
i += 1
# Add whatever we could salvage from the situation and move on.
if len(subadd) > 0:
ans.append(''.join(subadd))
else:
# We totally failed at matching this character, so add it
# as a separate item and move on.
ans.append(s[i])
return ans
def _test_shlex_split():
"""
Unit test for L{_shlex_split}.
"""
assert _shlex_split('') == []
assert _shlex_split(' ') == [' ']
assert _shlex_split('a=5 b="15" name="<NAME>"') == \
['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']
assert _shlex_split('a=cvn b=32vsd c= 234jk\te d \t="hi"') == \
['a=cvn', ' ', 'b=32vsd', ' ', 'c= 234jk', '\t', 'e', ' ',
'd \t="hi"']
assert _shlex_split(' a b c d=e f g h i="jk" l mno = p ' + \
'qr = "st"') == \
[' ', 'a', ' ', 'b', ' ', 'c', ' ', 'd=e', ' ', 'f', ' ', \
'g', ' ', 'h', ' ', 'i="jk"', ' ', 'l', ' ', 'mno = p', \
' ', 'qr = "st"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25"') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25" e=4') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"', ' ', \
'e=4']
assert _shlex_split('a=5 b=\'9\'c=\'15 dfkdfkj \'d=\'25\' e=4') == \
['a=5', ' ', 'b=\'9\'', 'c=\'15 dfkdfkj \'', 'd=\'25\'', \
' ', 'e=4']
def _tag_dict(s):
"""
Helper routine: Extracts a dict from an HTML tag string.
>>> _tag_dict('bgcolor=#ffffff text="#000000" blink')
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0,7), 'text':(16,20), 'blink':(31,36)},
{'bgcolor':(8,15), 'text':(22,29), 'blink':(36,36)})
Returns a 3-tuple. First element is a dict of
C{(key, value)} pairs from the HTML tag. Second element
is a dict mapping keys to C{(start, end)} indices of the
key in the text. Third element maps keys to C{(start, end)}
indices of the value in the text.
Names are lowercased.
Raises C{ValueError} for unmatched quotes and other errors.
"""
d = _shlex_split(s)
attrs = {}
key_pos = {}
value_pos = {}
start = 0
for item in d:
end = start + len(item)
equals = item.find('=')
if equals >= 0:
# Contains an equals sign.
(k1, k2) = (start, start + equals)
(v1, v2) = (start + equals + 1, start + len(item))
# Strip spaces.
while k1 < k2 and s[k1] in string.whitespace: k1 += 1
while k1 < k2 and s[k2 - 1] in string.whitespace: k2 -= 1
while v1 < v2 and s[v1] in string.whitespace: v1 += 1
while v1 < v2 and s[v2 - 1] in string.whitespace: v2 -= 1
# Strip one pair of double quotes around value.
if v1 < v2 - 1 and s[v1] == '"' and s[v2 - 1] == '"':
v1 += 1
v2 -= 1
# Strip one pair of single quotes around value.
if v1 < v2 - 1 and s[v1] == "'" and s[v2 - 1] == "'":
v1 += 1
v2 -= 1
(key, value) = (s[k1:k2].lower(), s[v1:v2])
# Drop bad keys and values.
if '"' in key or "'" in key:
continue
if '"' in value and "'" in value:
continue
attrs[key] = value
key_pos[key] = (k1, k2)
value_pos[key] = (v1, v2)
elif item.split() == []:
# Whitespace. Ignore it.
pass
else:
# A single token, like 'blink'.
key = item.lower()
# Drop bad keys.
if '"' in key or "'" in key:
continue
attrs[key] = None
key_pos[key] = (start, end)
value_pos[key] = (end, end)
start = end
return (attrs, key_pos, value_pos)
def _test_tag_dict():
"""
Unit test for L{_tag_dict}.
"""
assert _tag_dict('') == ({}, {}, {})
assert _tag_dict(' \t\r \n\n \r\n ') == ({}, {}, {})
assert _tag_dict('bgcolor=#ffffff text="#000000" blink') == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0, 7), 'text':(16, 20), 'blink':(31, 36)},
{'bgcolor':(8, 15), 'text':(22, 29), 'blink':(36, 36)})
assert _tag_dict("bgcolor='#ffffff'text='#000000' blink") == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0, 7), 'text':(17, 21), 'blink':(32, 37)},
{'bgcolor':(9, 16), 'text':(23, 30), 'blink':(37, 37)})
s = ' \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n'
(a, b, c) = _tag_dict(s)
assert a == {'text': 'hi you', 'bg': 'val', 'e': '5', 'name': None}
for key in list(a.keys()):
assert s[b[key][0]:b[key][1]] == key
if a[key] != None:
assert s[c[key][0]:c[key][1]] == a[key]
def _full_tag_extract(s):
"""
Like L{tagextract}, but different return format.
Returns a list of L{_HTMLTag} and L{_TextTag} instances.
The return format is very inconvenient for manipulating HTML, and
only will be useful if you want to find the exact locations where
tags occur in the original HTML document.
"""
L = _html_split(s)
# Starting position of each L[i] in s.
Lstart = [0] * len(L)
for i in range(1, len(L)):
Lstart[i] = Lstart[i - 1] + len(L[i - 1])
class NotTagError(Exception): pass
for (i, text) in _enumerate(L):
try:
# Is it an HTML tag?
is_tag = False
if len(text) >= 2 and text[0] == '<' and text[-1] == '>':
# Turn HTML tag text into (name, keyword_dict) tuple.
is_tag = True
is_special = False
if len(text) >= 2 and (text[1] == '!' or text[1] == '?'):
is_special = True
if is_special:
# A special tag such as XML directive or <!-- comment -->
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, text[1:-1].strip(), {}, {}, {})
elif is_tag:
# If an HTML tag, strip brackets and handle what's left.
# Strip off '<>' and update offset.
orig_offset = 0
if len(text) >= 1 and text[0] == '<':
text = text[1:]
orig_offset = 1
if len(text) >= 1 and text[-1] == '>':
text = text[:-1]
if len(text) > 0 and text[-1] == '/':
rslash = True
text = text[:-1]
else:
rslash = False
m = re.search(r'\s', text)
first_space = -1
if m:
first_space = m.start()
if first_space < 0:
(name, dtext) = (text, '')
else:
name = text[:first_space]
dtext = text[first_space + 1:len(text)]
# Position of dtext relative to original text.
dtext_offset = len(name) + 1 + orig_offset # +1 for space.
# Lowercase everything except XML directives and comments.
if not name.startswith('!') and not name.startswith('?'):
name = name.strip().lower()
if rslash:
name += '/'
# Strip off spaces, and update dtext_offset as appropriate.
orig_dtext = dtext
dtext = dtext.strip()
dtext_offset += orig_dtext.index(dtext)
(attrs, key_pos, value_pos) = _tag_dict(dtext)
# Correct offsets in key_pos and value_pos.
for key in list(attrs.keys()):
key_pos[key] = (key_pos[key][0] + Lstart[i] + dtext_offset,
key_pos[key][1] + Lstart[i] + dtext_offset)
value_pos[key] = (value_pos[key][0] + Lstart[i] + dtext_offset,
value_pos[key][1] + Lstart[i] + dtext_offset)
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, name, attrs, key_pos, value_pos)
else:
# Not an HTML tag.
raise NotTagError
except NotTagError:
# Wrap non-HTML strings inside a _TextTag object.
pos = (Lstart[i], Lstart[i] + len(L[i]))
L[i] = _TextTag(pos, L[i])
return L
class _HTMLTag:
"""
HTML tag extracted by L{_full_tag_extract}.
@ivar pos: C{(start, end)} indices of the entire tag in the
HTML document.
@ivar name: Name of tag. For example, C{'img'}.
@ivar attrs: Dictionary mapping tag attributes to corresponding
tag values.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.attrs
{'href': 'd.com'}
Surrounding quotes are stripped from the values.
@ivar key_pos: Key position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the key string in the C{"key=value"}
HTML pair. Indices are absolute, where 0 is the
start of the HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.key_pos['href']
(3, 7)
>>> '<a href="d.com">'[3:7]
'href'
@ivar value_pos: Value position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the value in the HTML document string.
Surrounding quotes are excluded from this range.
Indices are absolute, where 0 is the start of the
HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.value_pos['href']
(9, 14)
>>> '<a href="d.com">'[9:14]
'd.com'
"""
def __init__(self, pos, name, attrs, key_pos, value_pos):
"""
Create an _HTMLTag object.
"""
self.pos = pos
self.name = name
self.attrs = attrs
self.key_pos = key_pos
self.value_pos = value_pos
class _TextTag:
"""
Text extracted from an HTML document by L{_full_tag_extract}.
@ivar text: Extracted text.
@ivar pos: C{(start, end)} indices of the text.
"""
def __init__(self, pos, text):
"""
Create a _TextTag object.
"""
self.pos = pos
self.text = text
# -------------------------------------------------------------------
# URL Editing
# -------------------------------------------------------------------
# Tags within which URLs may be found.
_URL_TAGS = ['a href', 'applet archive', 'applet code',
'applet codebase', 'area href', 'base href',
'blockquote cite', 'body background', 'del cite',
'form action', 'frame longdesc', 'frame src',
'head profile', 'iframe src', 'iframe longdesc',
'img src', 'img ismap', 'img longdesc', 'img usemap',
'input src', 'ins cite', 'link href', 'object archive',
'object codebase', 'object data', 'object usemap',
'script src', 'table background', 'tbody background',
'td background', 'tfoot background', 'th background',
'thead background', 'tr background']
_URL_TAGS = [tuple(s.split()) for s in _URL_TAGS]
def _finditer(pattern, string):
"""
Like C{re.finditer}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator. Otherwise the return format
is identical to C{re.finditer} (except possibly in the details of
empty matches).
"""
compiled = re.compile(pattern)
ans = []
start = 0
while True:
m = compiled.search(string, start)
if m:
ans.append(m)
else:
return ans
m_start = m.start(m.lastindex)
m_end = m.end(m.lastindex)
if m_end > m_start:
start = m_end
else:
start += 1
def _remove_comments(doc):
"""
Replaces commented out characters with spaces in a CSS document.
"""
ans = []
i = 0
while True:
i2 = doc.find('/*', i)
if i2 < 0:
ans += [doc[i:]]
break
ans += [doc[i:i2]]
i3 = doc.find('*/', i2 + 1)
if i3 < 0:
i3 = len(doc) - 2
ans += [' ' * (i3 - i2 + 2)]
i = i3 + 2
return ''.join(ans)
def _test_remove_comments():
"""
Unit test for L{_remove_comments}.
"""
s = '/*d s kjlsdf */*//*/*//**/**/*//**/a' * 50
assert len(_remove_comments(s)) == len(s)
s = '/**/' * 50 + '/*5845*/*/*//*/**/dfd' + '/*//**//'
assert len(_remove_comments(s)) == len(s)
s = 'a/**/' * 50 + '/**//**/////***/****/*//**//*/' * 5
assert len(_remove_comments(s)) == len(s)
s = 'hi /* foo */ hello /* bar!!!!! \n\n */ there!'
assert _remove_comments(s) == \
'hi hello there!'
def urlextract(doc, siteurl=None, mimetype='text/html'):
"""
Extract URLs from HTML or stylesheet.
Extracts only URLs that are linked to or embedded in the document.
Ignores plain text URLs that occur in the non-HTML part of the
document.
Returns a list of L{URLMatch} objects.
>>> L = urlextract('<img src="a.gif"><a href="www.google.com">')
>>> L[0].url
'a.gif'
>>> L[1].url
'www.google.com'
If C{siteurl} is specified, all URLs are made into absolute URLs
by assuming that C{doc} is located at the URL C{siteurl}.
>>> doc = '<img src="a.gif"><a href="/b.html">'
>>> L = urlextract(doc, 'http://www.python.org/~guido/')
>>> L[0].url
'http://www.python.org/~guido/a.gif'
>>> L[1].url
'http://www.python.org/b.html'
If C{mimetype} is C{"text/css"}, the document will be parsed
as a stylesheet.
If a stylesheet is embedded inside an HTML document, then
C{urlextract} will extract the URLs from both the HTML and the
stylesheet.
"""
mimetype = mimetype.lower()
if mimetype.split()[0] in _CSS_MIMETYPES:
doc = _remove_comments(doc)
# Match URLs within CSS stylesheet.
# Match url(blah) or url('blah') or url("blah").
L = _finditer(
r'''url\s*\(([^\r\n\("']*?)\)|''' +
r'''url\s*\(\s*"([^\r\n]*?)"\s*\)|''' +
r'''url\s*\(\s*'([^\r\n]*?)'\s*\)|''' +
r'''@import\s+([^ \t\r\n"';@\(\)]+)[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+'([^ \t\r\n"';@\(\)]+)'[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+"([^ \t\r\n"';\(\)']+)"[^\r\n;@\(\)]*[\r\n;]''',
doc + ';\n')
L = [(x.start(x.lastindex), x.end(x.lastindex)) for x in L]
ans = []
for (s, e) in L:
e = min(e, len(doc))
if e > s:
ans.append(URLMatch(doc, s, e, siteurl, False, True))
elif mimetype.split()[0] in _HTML_MIMETYPES:
# Match URLs within HTML document.
ans = []
L = _full_tag_extract(doc)
item = None
for i in range(len(L)):
prev_item = item
item = L[i]
# Handle string item (text) or tuple item (tag).
if isinstance(item, _TextTag):
# Current item is text.
if isinstance(prev_item, _HTMLTag) and prev_item.name == \
'style':
# And previous item is <style>. Process a stylesheet.
temp = urlextract(item.text, siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.pos[0]
temp[j].end += item.pos[0]
ans += temp
else:
# Regular text. Ignore.
pass
else:
# Current item is a tag.
if 'style' in item.attrs:
# Process a stylesheet embedded in the 'style' attribute.
temp = urlextract(item.attrs['style'], siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.value_pos['style'][0]
temp[j].end += item.value_pos['style'][0]
ans += temp
for (a, b) in _URL_TAGS:
if item.name.startswith(a) and b in list(item.attrs.keys()):
# Got one URL.
url = item.attrs[b]
# FIXME: Some HTML tag wants a URL list, look up which
# tag and make it a special case.
(start, end) = item.value_pos[b]
tag_name = a
tag_attr = b
tag_attrs = item.attrs
tag_index = i
tag = URLMatch(doc, start, end, siteurl, True, False, \
tag_attr, tag_attrs, tag_index, tag_name)
ans.append(tag)
# End of 'text/html' mimetype case.
else:
raise ValueError('unknown MIME type: ' + repr(mimetype))
# Filter the answer, removing duplicate matches.
start_end_map = {}
filtered_ans = []
for item in ans:
if (item.start, item.end) not in start_end_map:
start_end_map[(item.start, item.end)] = None
filtered_ans.append(item)
return filtered_ans
def _tuple_replace(s, Lindices, Lreplace):
"""
Replace slices of a string with new substrings.
Given a list of slice tuples in C{Lindices}, replace each slice
in C{s} with the corresponding replacement substring from
C{Lreplace}.
Example:
>>> _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def'])
'0123abc5def9'
"""
ans = []
Lindices = Lindices[:]
Lindices.sort()
if len(Lindices) != len(Lreplace):
raise ValueError('lists differ in length')
for i in range(len(Lindices) - 1):
if Lindices[i][1] > Lindices[i + 1][0]:
raise ValueError('tuples overlap')
if Lindices[i][1] < Lindices[i][0]:
raise ValueError('invalid tuple')
if min(Lindices[i][0], Lindices[i][1]) < 0 or \
max(Lindices[i][0], Lindices[i][1]) >= len(s):
raise ValueError('bad index')
j = 0
offset = 0
for i in range(len(Lindices)):
len1 = Lindices[i][1] - Lindices[i][0]
len2 = len(Lreplace[i])
ans.append(s[j:Lindices[i][0] + offset])
ans.append(Lreplace[i])
j = Lindices[i][1]
ans.append(s[j:])
return ''.join(ans)
def _test_tuple_replace():
"""
Unit test for L{_tuple_replace}.
"""
assert _tuple_replace('', [], []) == ''
assert _tuple_replace('0123456789', [], []) == '0123456789'
assert _tuple_replace('0123456789', [(4, 5), (6, 9)], ['abc', 'def']) == \
'0123abc5def9'
assert _tuple_replace('01234567890123456789', \
[(1, 9), (13, 14), (16, 18)], ['abcd', 'efg', 'hijk']) == \
'0abcd9012efg45hijk89'
def urljoin(s, L):
"""
Write back document with modified URLs (reverses L{urlextract}).
Given a list C{L} of L{URLMatch} objects obtained from
L{urlextract}, substitutes changed URLs into the original
document C{s}, and returns the modified document.
One should only modify the C{.url} attribute of the L{URLMatch}
objects. The ordering of the URLs in the list is not important.
>>> doc = '<img src="a.png"><a href="b.png">'
>>> L = urlextract(doc)
>>> L[0].url = 'foo'
>>> L[1].url = 'bar'
>>> urljoin(doc, L)
'<img src="foo"><a href="bar">'
"""
return _tuple_replace(s, [(x.start, x.end) for x in L], \
[x.url for x in L])
def examples():
"""
Examples of the C{htmldata} module.
Example 1:
Print all absolutized URLs from Google.
Here we use L{urlextract} to obtain all URLs in the document.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... print u.url
...
http://www.google.com/images/logo.gif
http://www.google.com/search
(More output)
Note that the second argument to L{urlextract} causes the
URLs to be made absolute with respect to that base URL.
Example 2:
Print all image URLs from Google in relative form.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents):
... if u.tag_name == 'img':
... print u.url
...
/images/logo.gif
Equivalently, one can use L{tagextract}, and look for occurrences
of C{<img>} tags. The L{urlextract} function is mostly a convenience
function for when one wants to extract and/or modify all URLs in a
document.
Example 3:
Replace all C{<a href>} links on Google with the Microsoft web page.
Here we use L{tagextract} to turn the HTML into a data structure,
and then loop over the in-order list of tags (items which are not
tuples are plain text, which is ignored).
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> L = htmldata.tagextract(contents)
>>> for item in L:
... if isinstance(item, tuple) and item[0] == 'a':
... # It's an HTML <a> tag! Give it an href=.
... item[1]['href'] = 'http://www.microsoft.com/'
...
>>> htmldata.tagjoin(L)
(Microsoftized version of Google)
Example 4:
Make all URLs on an HTML document be absolute.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.urljoin(htmldata.urlextract(contents, url))
(Google HTML page with absolute URLs)
Example 5:
Properly quote all HTML tag values for pedants.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.tagjoin(htmldata.tagextract(contents))
(Properly quoted version of the original HTML)
Example 6:
Modify all URLs in a document so that they are appended
to our proxy CGI script C{http://mysite.com/proxy.cgi}.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> proxy_url = 'http://mysite.com/proxy.cgi?url='
>>> L = htmldata.urlextract(contents)
>>> for u in L:
... u.url = proxy_url + u.url
...
>>> htmldata.urljoin(L)
(Document with all URLs wrapped in our proxy script)
Example 7:
Download all images from a website.
>>> import urllib, htmldata, time
>>> url = 'http://www.google.com/'
>>> contents = urllib.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... if u.tag_name == 'img':
... filename = urllib.quote_plus(u.url)
... urllib.urlretrieve(u.url, filename)
... time.sleep(0.5)
...
(Images are downloaded to the current directory)
Many sites will protect against bandwidth-draining robots by
checking the HTTP C{Referer} [sic] and C{User-Agent} fields.
To circumvent this, one can create a C{urllib2.Request} object
with a legitimate C{Referer} and a C{User-Agent} such as
C{"Mozilla/4.0 (compatible; MSIE 5.5)"}. Then use
C{urllib2.urlopen} to download the content. Be warned that some
website operators will respond to rapid robot requests by banning
the offending IP address.
"""
print(examples.__doc__)
class URLMatch:
"""
A matched URL inside an HTML document or stylesheet.
A list of C{URLMatch} objects is returned by L{urlextract}.
@ivar url: URL extracted.
@ivar start: Starting character index.
@ivar end: End character index.
@ivar in_html: C{True} if URL occurs within an HTML tag.
@ivar in_css: C{True} if URL occurs within a stylesheet.
@ivar tag_attr: Specific tag attribute in which URL occurs.
Example: C{'href'}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_attrs: Dictionary of all tag attributes and values.
Example: C{{'src':'http://X','alt':'Img'}}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_index: Index of the tag in the list that would be
generated by a call to L{tagextract}.
@ivar tag_name: HTML tag name in which URL occurs.
Example: C{'img'}.
C{None} if the URL does not occur within an HTML
tag.
"""
def __init__(self, doc, start, end, siteurl, in_html, in_css,
tag_attr=None, tag_attrs=None, tag_index=None,
tag_name=None):
"""
Create a URLMatch object.
"""
self.doc = doc
self.start = start
self.end = end
self.url = doc[start:end]
self.in_html = in_html
self.in_css = in_css
if siteurl != None:
self.url = urllib.parse.urljoin(siteurl, self.url)
self.tag_attr = tag_attr
self.tag_attrs = tag_attrs
self.tag_index = tag_index
self.tag_name = tag_name
def _cast_to_str(arg, str_class):
"""
Casts string components of several data structures to str_class.
Casts string, list of strings, or list of tuples (as returned by
L{tagextract}) such that all strings are made to type str_class.
"""
if _is_str(arg):
return str_class(arg)
elif isinstance(arg, list):
ans = []
for item in arg:
if _is_str(item):
ans.append(str_class(item))
elif isinstance(item, tuple) and len(item) == 2:
(a, b) = item
b_prime = {}
for (b_key, b_value) in list(b.items()):
if b_value is None:
b_prime[str_class(b_key)] = None
else:
b_prime[str_class(b_key)] = str_class(b_value)
ans.append((str_class(a), b_prime))
else:
raise ValueError('unknown argument type')
return ans
else:
raise ValueError('unknown argument type')
# -------------------------------------------------------------------
# Unit Tests: HTML <-> Data structure
# -------------------------------------------------------------------
def _test_tagextract(str_class=str):
"""
Unit tests for L{tagextract} and L{tagjoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
# Simple HTML document to test.
doc1 = f('\n\n<Html><BODY bgcolor=#ffffff>Hi<h1>Ho</h1><br>' +
'<br /><img SRc="text%5f.gif"><TAG NOshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
doc3 = f('\r\t< html >< tag> <!--comment--> <tag a = 5> ' +
'<foo \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n>')
doc4 = f('<?xml ??><foo><!-- <img> --><!DOCTYPE blah""/>' +
'<![CDATA[ more and weirder<bar> ] ][]]><![C[DATA[[>' +
'<abc key=value><![CDATA[to eof')
doc5 = f('<a href="foobar/ \t="base="10" x="15"><a x="9"t="20">')
# -----------------------------------------------------------------
# Test _html_split()
# -----------------------------------------------------------------
s = doc1
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['\n\n', '<Html>', '<BODY bgcolor=#ffffff>', 'Hi', '<h1>', 'Ho',
'</h1>', '<br>', '<br />', '<img SRc="text%5f.gif">',
'<TAG NOshow>', '<img test="5%ff" />', '</body>', '</html>',
'\nBye!\n'])
s = doc2
assert s == f('').join(_html_split(s))
# Test single quotes
s = doc2.replace(f('"'), f("'"))
assert s == f('').join(_html_split(s))
s = f('<!-- test weird comment <body> <html> --> <h1>Header' +
'</h1 value=10 a=11>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- test weird comment <body> <html> -->', ' ',
'<h1>', 'Header', '</h1 value=10 a=11>'])
s = f('<!-- <!-- nested messed up --> blah ok <now> what<style>hi' +
'<><>></style><script language="Java"><aL><>><>></script>a')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- <!-- nested messed up -->', ' blah ok ', '<now>',
' what', '<style>', 'hi<><>>', '</style>',
'<script language="Java">', '<aL><>><>>', '</script>', 'a'])
s = f('<!-- ><# -->!<!-!._-><!-- aa--> <style><tag//</style> <tag ' +
'<tag <! <! -> <!-- </who< <who> tag> <huh-->-</style>' +
'</style<style>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- ><# -->', '!', '<!-!._->', '<!-- aa-->',
' ', '<style>', '<tag//', '</style>', ' ', '<tag <tag <! <! ->',
' ', '<!-- </who< <who> tag> <huh-->', '-', '</style>',
'</style<style>'])
s = doc4
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<?xml ??>', '<foo>', '<!-- <img> -->', '<!DOCTYPE blah""/>',
'<![CDATA[ more and weirder<bar> ] ][]]>', '<![C[DATA[[>',
'<abc key=value>', '<![CDATA[to eof'])
# -----------------------------------------------------------------
# Test tagextract() and tagjoin()
# -----------------------------------------------------------------
# Test for whitespace handling in tags.
assert (tagextract('<a\n\t\t\t\v\rhref="a.png"\tsize=10>') ==
[('a', {'href': 'a.png', 'size': '10'})])
s = doc1
s2 = doc1.replace(f('"'), f("'")) # Test single quotes, too.
assert tagextract(f('')) == []
assert tagextract(s) == tagextract(s2) == \
f(['\n\n', ('html', {}), ('body', {'bgcolor': '#ffffff'}),
'Hi', ('h1', {}), 'Ho', ('/h1', {}), ('br', {}),
('br/', {}), ('img', {'src': 'text%5f.gif'}),
('tag', {'noshow': None}), ('img/', {'test': '5%ff'}),
('/body', {}), ('/html', {}), '\nBye!\n'])
s2 = f('\n\n<html><body bgcolor="#ffffff">Hi<h1>Ho</h1><br>' +
'<br /><img src="text%5f.gif"><tag noshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
assert tagjoin(tagextract(s)) == s2
doc2old = doc2
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
assert doc2old == doc2
s = doc2
assert tagextract(s) == f(
['\r', ('html', {}), ('!-- Comment<a href="blah"> --', {}),
('hiya', {}), ('foo', {}),
('test', {'content': '6', 'tag': '5'}),
('is', {'broken': 'False'}), ('yay', {}), ('style', {}), '<><>><',
('/style', {}), ('foo', {'bar': '5'}), 'end',
('!-- <!-- nested --', {}), ' ',
('script', {'language': 'JavaScript'}), ('>!><!_!_!-->!_-', {}),
('/script', {})])
assert tagjoin(tagextract(s)) == f(
'\r<html><!-- Comment<a href="blah"> --><hiya><foo><test ' +
'content="6" tag="5"><is broken="False"><yay><style><><>><' +
'</style><foo bar="5">end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
s = doc5
assert tagextract(s) == f(
[('a', {'href':'foobar/ \t=', 'base':'10', 'x':'15'}),
('a', {'x':'9', 't':'20'})])
assert tagjoin(tagextract(s)) == f(
'<a base="10" href="foobar/ \t=" x="15"><a t="20" x="9">')
# -----------------------------------------------------------------
# Test _full_tag_extract()
# -----------------------------------------------------------------
for s in [doc1, doc2, doc3,
doc1.replace(f('"'), f("'")), doc2.replace(f('"'), f("'")),
doc3.replace(f('"'), f("'"))]:
L = _full_tag_extract(s)
for (i, item) in _enumerate(L):
if isinstance(item, _HTMLTag):
for key in list(item.attrs.keys()):
assert s[item.key_pos[key][0]:item.key_pos[key][1]].lower()\
== key
if item.attrs[key] != None:
assert s[item.value_pos[key][0]:item.value_pos[key][1]] \
== item.attrs[key]
n = 1000
doc4 = f('<tag name = "5" value ="6afdjherknc4 cdk j" a="7" b=8/>')
doc4 *= n
L = tagextract(doc4)
assert len(L) == n
for i in range(n):
assert L[i] == f([('tag/', {'name':'5', 'value':'6afdjherknc4 cdk j',
'a':'7', 'b':'8'})])[0]
# -----------------------------------------------------------------
# Test tagextract() and tagjoin() with XML directives.
# -----------------------------------------------------------------
doc1 = f(
'a<?xml version="1.0"?>' +
'b<!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" >c' +
'<html a=b><!-- Comment <><> hi! -->' +
'z<![CDATA[ some content ]]>rx' +
'<![C[DATA[ more and weirder ] ][]]>tt')
doc1join = f(
'a<?xml version="1.0"?>b<!DOCTYPE htmlPUBLIC "-//W3C//DTD ' +
'XHTML 1.0 Transitional//EN""http://www.w3.org/TR/xhtml1/DTD/' +
'xhtml1-transitional.dtd">c<html a="b"><!-- Comment <><> hi! ' +
'-->z<![CDATA[ some content ]]>rx<![C[DATA[ more and weirder ]' +
' ][]]>tt')
ans1 = f(
['a', ('?xml version="1.0"?', {}), 'b',
('!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"', {}),
'c', ('html', {'a':'b'}), ('!-- Comment <><> hi! --', {}), 'z',
('![CDATA[ some content ]]', {}), 'rx',
('![C[DATA[ more and weirder ] ][]]', {}), 'tt'])
assert (tagextract(f('<?xml version="1.0" encoding="utf-8" ?>')) ==
f([('?xml version="1.0" encoding="utf-8" ?', {})]))
assert (tagextract(f('<!DOCTYPE html PUBLIC etc...>')) ==
f([('!DOCTYPE html PUBLIC etc...', {})]))
assert tagextract(doc1) == ans1
assert tagjoin(tagextract(doc1)) == doc1join
# -------------------------------------------------------------------
# Unit Tests: URL Parsing
# -------------------------------------------------------------------
def _test_urlextract(str_class=str):
"""
Unit tests for L{urlextract} and L{urljoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
doc1 = f('urlblah, url ( blah2, url( blah3) url(blah4) ' +
'url("blah5") hum("blah6") url)"blah7"( url ( " blah8 " );;')
doc2 = f('<html><img src="a.gif" alt="b"><a href = b.html name=' +
'"c"><td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="http://www.abc.edu/d.tga">http://www.ignore.us/' +
'\nhttp://www.nowhere.com <style>url(h.gif) ' +
'url(http://www.testdomain.com/) http://ignore.com/a' +
'</style><img alt="c" src = "a.gif"><img src=/i.png>')
doc3 = f('@import foo;\n@import bar\n@import url(\'foo2\');' +
'@import url(\'http://bar2\')\n@import\turl("foo!");' +
'@import \'foo3\'\n@import "bar3";\n@importfails;' +
'@import;@import\n;url(\'howdy!\')\n@import foo5 ;' +
'@import \'foo6\' \n@import "foo7";')
doc4 = f('@import foo handheld;\n@import \'bar\' handheld\n' +
'@import url(\'foo2\') handheld; @import url(bar2) ha\n' +
'@import url("foo3") handheld\n')
doc5 = f('<html><img src="a.gif" alt="b" style="url(\'foo\')">' +
'<a href = b.html name="c" style="@import \'bar.css\'">')
doc6 = doc2.replace(f('"'), f("'")) # Test single quotes, too.
# Test CSS.
s = doc1
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f([' blah3', 'blah4', 'blah5', ' blah8 '])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS more.
s = doc3
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'http://bar2', 'foo!',
'foo3', 'bar3', 'howdy!', 'foo5', 'foo6', 'foo7'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS even more.
s = doc4
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'bar2', 'foo3'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test HTML.
s = doc2
L = urlextract(s)
L2 = [x.url for x in L]
L3 = [x.url for x in urlextract(doc6)]
ans = f(['a.gif', 'b.html', './c.png',
'http://www.abc.edu/d.tga', 'h.gif',
'http://www.testdomain.com/', 'a.gif', '/i.png'])
assert L2 == L3 == ans
for i in range(len(L)):
assert s[L[i].start:L[i].end] == L[i].url
# Test HTML more.
n = 100
s2 = s * n
L3 = urlextract(s2)
L4 = [x.url for x in L3]
assert L4 == L2 * n
for i in range(len(L3)):
assert s2[L3[i].start:L3[i].end] == L3[i].url
# Test HTML w/ siteurl.
base = f('http://www.python.org/~guido/')
L = urlextract(s, base)
L2 = [x.url for x in L]
assert L2 == [urllib.parse.urljoin(base, x) for x in ans]
# Test urljoin().
assert urljoin(doc1, urlextract(doc1, mimetype='text/css')) == doc1
assert urljoin(doc2, urlextract(doc2)) == doc2
s = doc2
L = urlextract(s)
L[3].url = f('FOO')
L[5].url = f('BAR')
L[7].url = f('F00!')
assert urljoin(s, L) == f(
'<html><img src="a.gif" alt="b"><a href = b.html name="c">' +
'<td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="FOO">http://www.ignore.us/\nhttp://www.nowhere.com ' +
'<style>url(h.gif) url(BAR) http://ignore.com/a</style>' +
'<img alt="c" src = "a.gif"><img src=F00!>')
# Test HTML yet more.
s = doc5
L = urlextract(s)
L2 = [x.url for x in L]
assert L2 == f(['foo', 'a.gif', 'bar.css', 'b.html'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# -------------------------------------------------------------------
# Unit Test Main Routine
# -------------------------------------------------------------------
def _test():
"""
Unit test main routine.
"""
print('Unit tests:')
_test_remove_comments()
print(' _remove_comments: OK')
_test_shlex_split()
print(' _shlex_split: OK')
_test_tag_dict()
print(' _tag_dict: OK')
_test_tuple_replace()
print(' _tuple_replace: OK')
_test_tagextract()
print(' tagextract*: OK')
_test_tagextract(str)
print(' tagextract (unicode)*: OK')
_test_urlextract()
print(' urlextract*: OK')
_test_urlextract(str)
print(' urlextract (unicode)*: OK')
print()
print('* The corresponding join method has been tested as well.')
if __name__ == '__main__':
_test()
```
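For reference, a minimal usage sketch of the urlextract()/urljoin() rewrite pattern exercised by the tests above. It assumes the functions are importable from an `htmldata` module providing this API, which is an assumption of this sketch rather than something shown in the file.
```python
# Rewrite every URL found in an HTML snippet to an absolute URL.
# Assumes urlextract()/urljoin() come from an importable htmldata module.
import urllib.parse

from htmldata import urlextract, urljoin

html = '<img src="a.gif"><a href="b.html">link</a>'
links = urlextract(html)              # list of matches with .url/.start/.end
for link in links:
    link.url = urllib.parse.urljoin('http://example.com/', link.url)
print(urljoin(html, links))           # original markup with rewritten URLs
```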
#### File: scripts/piped-work/get_gui_structure.py
```python
import os
import sys
if( sys.platform == 'win32' ):
print( "get-gui-structure.py, running on windows" )
toname = '\\\\.\\pipe\\ToSrvPipe'
fromname = '\\\\.\\pipe\\FromSrvPipe'
EOL = '\r\n\0'
else:
print( "get-gui-structure.py, running on linux or mac" )
toname = '/tmp/audacity_script_pipe.to.' + str(os.getuid())
fromname = '/tmp/audacity_script_pipe.from.' + str(os.getuid())
EOL = '\n'
print( "Write to \"" + toname +"\"" )
if not os.path.exists( toname ) :
print( " ..does not exist. Ensure Audacity is running with mod-script-pipe." )
sys.exit()
print( "Read from \"" + fromname +"\"")
if not os.path.exists( fromname ) :
print( " ..does not exist. Ensure Audacity is running with mod-script-pipe." )
sys.exit()
print( "-- Both pipes exist. Good." )
tofile = open( toname, 'wt+' )
print( "-- File to write to has been opened" )
fromfile = open( fromname, 'rt')
print( "-- File to read from has now been opened too\r\n" )
def sendCommand( command ) :
print( "Send: >>> "+command )
tofile.write( command + EOL )
tofile.flush()
def getResponse() :
result = ''
line = ''
while line != '\n' :
result += line
line = fromfile.readline()
return result
def doCommand( command ) :
sendCommand( command )
response = getResponse()
print( "Rcvd: <<< " + response )
return response
def do( command ) :
doCommand( command )
def getStructure() :
#do( 'Help: CommandName=Help' )
#do( 'Help: CommandName=SetPreference' )
#do( 'SetPreference: PrefName=GUI/Theme PrefValue=light' )
#do( 'Screenshot: CaptureMode=menus' )
do( 'GetInfo: Type=Boxes' )
do( 'GetInfo: Type=Menus' )
getStructure()
``` |
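A small, hedged helper for working with the replies printed by doCommand() above. It assumes the usual mod-script-pipe reply shape of a JSON payload followed by a status line such as "BatchCommand finished: OK"; the helper name is hypothetical.
```python
import json

def parse_getinfo(response):
    """Split a GetInfo reply into (payload, status_line).

    Assumes the payload is JSON and the last non-empty line is the status.
    """
    lines = [ln for ln in response.splitlines() if ln.strip()]
    status = lines[-1]
    payload = json.loads("\n".join(lines[:-1]))
    return payload, status

# Example (with doCommand defined as in the script above):
# boxes, status = parse_getinfo(doCommand('GetInfo: Type=Boxes'))
```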
{
"source": "joshrose/Horizon",
"score": 2
} |
#### File: rl/preprocessing/feature_extractor.py
```python
import abc
import dataclasses
from collections import OrderedDict
from functools import partial
from typing import Dict, List, NamedTuple, Optional, Tuple, Type
import numpy as np
import torch
from caffe2.python import core, schema
from ml.rl import types as rlt
from ml.rl.caffe_utils import C2
from ml.rl.preprocessing.normalization import (
MISSING_VALUE,
NormalizationParameters,
get_feature_start_indices,
get_num_output_features,
sort_features_by_normalization,
)
from .preprocessor_net import PreprocessorNet
"""
These are a set of classes facilitating the interface between a Caffe2-style data
source and a PyTorch model
"""
class FeatureExtractorNet(NamedTuple):
"""
`init_net` will only be run once. The external outputs of `init_net` are
assumed to be parameters of `net` and will be saved in the predictor file.
`net` should not use any parameters not initialized by `init_net`.
"""
net: core.Net
init_net: core.Net
class FeatureExtractorBase(object, metaclass=abc.ABCMeta):
"""
This is not a transformer because Caffe2 has a weird API: we cannot call
functions directly, so it is easier to handle this separately.
"""
def __init__(self, model_feature_config: Optional[rlt.ModelFeatureConfig] = None):
super().__init__()
self._init_sequence_features(model_feature_config)
def _init_sequence_features(self, config: Optional[rlt.ModelFeatureConfig]) -> None:
self.id_mapping_configs = config.id_mapping_config if config else {}
self.sequence_features_type = config.sequence_features_type if config else None
sequence_features = (
dataclasses.fields(self.sequence_features_type)
if self.sequence_features_type
else []
)
self.has_sequence_features = bool(sequence_features)
self.sequence_features = OrderedDict(
(s.name, s.type) for s in sequence_features
)
def get_id_features(t):
fields = dataclasses.fields(t)
for f in fields:
# If the `id_features` remains an Optional, it won't be a type object
if f.name != "id_features" or not isinstance(f.type, type):
continue
return f.type.get_feature_config()
return {}
self.sequence_id_features = {
s.name: get_id_features(s.type) for s in sequence_features
}
def get_id_feature_type(t):
fields = dataclasses.fields(t)
for f in fields:
if f.name != "id_features":
continue
return f.type
return None
self.sequence_id_feature_types = {
k: get_id_feature_type(self.sequence_features[k])
for k, v in self.sequence_id_features.items()
if v
}
self.has_sequence_id_features = any(
bool(v) for v in self.sequence_id_features.values()
)
self.has_sequence_float_features = any(
bool(v.get_float_feature_infos()) for v in self.sequence_features.values()
)
def extract(self, ws, extract_record):
"""
If the extractor is to be run, e.g., by the reader, then the subclass should
implement this method.
Args:
extract_record (schema.Field): the output record of the net
"""
raise NotImplementedError
@abc.abstractmethod
def create_net(self) -> FeatureExtractorNet:
"""
Returns FeatureExtractorNet to perform feature extraction.
The returned net must have input & output record set so that it can be
bound to actual inputs/outputs
"""
pass
def create_const( # type: ignore
self, init_net, name, value, dtype=core.DataType.FLOAT # type: ignore
) -> core.BlobReference:
blob: core.BlobReference = init_net.NextScopedBlob(name)
if not isinstance(value, list):
shape: List[int] = []
value = [value]
else:
shape = [len(value)]
init_net.GivenTensorFill([], blob, shape=shape, values=value, dtype=dtype)
init_net.AddExternalOutput(blob)
return blob
def extract_float_features(
self, net, name, field, keys_to_extract, missing_scalar
) -> Tuple[str, str]:
"""
Helper function to extract matrix from stacked sparse tensors
"""
with core.NameScope(name):
float_features, presence_mask = net.SparseToDenseMask(
[field.keys(), field.values(), missing_scalar, field.lengths()],
[net.NextBlob("float_features"), net.NextBlob("presence_mask")],
mask=keys_to_extract,
return_presence_mask=True,
)
return (float_features, presence_mask)
def get_state_sequence_features_schema(
self,
sequence_id_features: Dict[str, Dict[str, core.BlobReference]],
sequence_float_features: Dict[str, core.BlobReference],
) -> schema.Struct:
"""
Lay out the record to match the SequenceFeatures type. This is necessary to make
ONNX exporting work.
"""
record_fields = []
for sequence_name, sequence_type in self.sequence_features.items():
sequence_record = schema.Struct()
if sequence_name in self.sequence_id_feature_types:
fields = dataclasses.fields(
self.sequence_id_feature_types[sequence_name]
)
sequence_record += schema.Struct(
(
"id_features",
schema.Struct(
*[
(f.name, sequence_id_features[sequence_name][f.name])
for f in fields
]
),
)
)
if sequence_type.get_float_feature_infos():
sequence_record += schema.Struct(
("float_features", sequence_float_features[sequence_name])
)
record_fields.append((sequence_name, sequence_record))
return schema.Struct(*record_fields)
def extract_sequence_id_features(
self,
net: core.Net,
name: str,
sequence_feature_types: Dict[str, Type[rlt.SequenceFeatureBase]],
sequence_id_features: Dict[str, Dict[str, rlt.IdFeatureConfig]],
field: schema.List,
empty_range: schema.BlobReference,
zero_int64: schema.BlobReference,
) -> Dict[str, Dict[str, core.BlobReference]]:
"""
Convert the CSR-like format of MAP<BIGINT, LIST<BIGINT>> to a dictionary from
sequence name to a dictionary from ID-list name to the blob containing the
fixed-length sequence of IDs. Each blob will be a 2-D tensor. The first dimension
is the batch size. The second dimension is each element in the list.
"""
feature_names: List[str] = []
feature_ids: List[int] = []
for sequence_name, id_features in sequence_id_features.items():
for id_feature_name, id_feature_config in id_features.items():
feature_names.append("{}_{}".format(sequence_name, id_feature_name))
feature_ids.append(id_feature_config.feature_id)
id_list_feature_ranges = self.extract_id_list_features_ranges(
net, name, field, feature_names, feature_ids, empty_range
)
with core.NameScope(name):
return {
sequence_name: {
id_feature_name: self.range_to_dense(
net,
"{}_{}".format(sequence_name, id_feature_name),
id_list_feature_ranges[
"{}_{}".format(sequence_name, id_feature_name)
]["ranges"],
id_list_feature_ranges[
"{}_{}".format(sequence_name, id_feature_name)
]["values"],
sequence_feature_types[sequence_name].get_max_length(),
zero_int64,
)
for id_feature_name, id_feature_config in id_features.items()
}
for sequence_name, id_features in sequence_id_features.items()
if id_features
}
def extract_sequence_float_features(
self,
net: core.Net,
name: str,
sequence_feature_types: Dict[str, Type[rlt.SequenceFeatureBase]],
field: schema.List,
empty_range: schema.BlobReference,
zero_float: schema.BlobReference,
) -> Dict[str, core.BlobReference]:
"""
Convert the CSR-like format of MAP<BIGINT, MAP<BIGINT, FLOAT>> to a dictionary from
sequence name to the blob containing the fixed-length sequence of float-feature
vectors, one vector per element. Each blob will be a 3-D tensor. The first
dimension is the batch size. The second dimension is each element in the list.
The third dimension is ordered as given by
`SequenceFeatureBase.get_float_feature_infos()`. These float features are not
normalized.
"""
feature_names: List[str] = []
feature_ids: List[int] = []
for sequence_name, sequence_type in sequence_feature_types.items():
for info in sequence_type.get_float_feature_infos():
feature_names.append("{}_{}".format(sequence_name, info.name))
feature_ids.append(info.feature_id)
id_score_list_feature_ranges = self.extract_id_score_list_features_ranges(
net, name, field, feature_names, feature_ids, empty_range
)
with core.NameScope(name):
return {
sequence_name: net.Concat(
[
self.range_to_dense(
net,
"{}_{}".format(sequence_name, info.name),
id_score_list_feature_ranges[
"{}_{}".format(sequence_name, info.name)
]["ranges"],
id_score_list_feature_ranges[
"{}_{}".format(sequence_name, info.name)
]["scores"],
sequence_type.get_max_length(),
zero_float,
)
for info in sequence_type.get_float_feature_infos()
],
[sequence_name, "{}_split_info".format(sequence_name)],
axis=2,
add_axis=1,
)[0]
for sequence_name, sequence_type in sequence_feature_types.items()
if sequence_type.get_float_feature_infos()
}
def create_empty_range(self, init_net: core.Net) -> core.BlobReference:
return self.create_const(
init_net, "empty_range", [0, 0], dtype=core.DataType.INT32 # type: ignore
)
def extract_id_list_features_ranges(
self,
net: core.Net,
name: str,
field: schema.List,
feature_names: List[str],
feature_ids: List[int],
empty_range: core.BlobReference,
) -> Dict[str, Dict[str, core.BlobReference]]:
"""
Convert the CSR-like format of ID-list to ranges and values.
See https://caffe2.ai/docs/operators-catalogue#gatherranges
The return value is keyed by ID-list name
"""
assert len(feature_names) == len(
feature_ids
), "feature_names and feature_ids must be parallel"
with core.NameScope("{}_id_list_ranges".format(name)):
id_list_ranges = net.LengthsToRanges(
field["values"]["values"]["lengths"](), ["input_ranges"]
)
densified_ranges = net.SparseToDenseMask(
[
field["values"]["keys"](),
id_list_ranges,
empty_range,
field["lengths"](),
],
["densified_ranges"],
mask=feature_ids,
)
result = {}
for idx, name in enumerate(feature_names):
starts = [0, idx, 0]
ends = [-1, idx + 1, -1]
result[name] = {
"ranges": net.Slice(
[densified_ranges],
"{}_ranges".format(name),
starts=starts,
ends=ends,
),
"values": field["values"]["values"]["values"](),
}
return result
def extract_id_score_list_features_ranges(
self,
net: core.Net,
name: str,
field: schema.List,
feature_names: List[str],
feature_ids: List[int],
empty_range: core.BlobReference,
) -> Dict[str, Dict[str, core.BlobReference]]:
"""
Convert the CSR-like format of ID-score-list to ranges and values.
See https://caffe2.ai/docs/operators-catalogue#gatherranges
The return value is keyed by ID-score-list name
"""
assert len(feature_names) == len(
feature_ids
), "feature_names and feature_ids must be parallel"
with core.NameScope("{}_id_score_list_ranges".format(name)):
id_score_list_ranges = net.LengthsToRanges(
field["values"]["values"]["lengths"](), ["input_ranges"]
)
densified_ranges = net.SparseToDenseMask(
[
field["values"]["keys"](),
id_score_list_ranges,
empty_range,
field["lengths"](),
],
["densified_ranges"],
mask=feature_ids,
)
result = {}
for idx, name in enumerate(feature_names):
starts = [0, idx, 0]
ends = [-1, idx + 1, -1]
result[name] = {
"ranges": net.Slice(
[densified_ranges],
"{}_ranges".format(name),
starts=starts,
ends=ends,
),
"ids": field["values"]["values"]["values"]["keys"](),
"scores": field["values"]["values"]["values"]["values"](),
}
return result
def range_to_dense(
self,
net: core.Net,
name: str,
ranges: core.BlobReference,
values: core.BlobReference,
max_length: int,
zero_val: core.BlobReference,
) -> core.BlobReference:
"""
Convert batch of variable-length lists (in range format) to fixed-length lists.
"""
with core.NameScope("range_to_dense_{}".format(name)):
# First slicing the offset and length
offset = net.Cast(
net.Slice(ranges, ["offset"], starts=[0, 0, 0], ends=[-1, -1, 1]),
["float_offset"],
to=core.DataType.FLOAT, # type: ignore
)
length = net.Cast(
net.Slice(ranges, ["length"], starts=[0, 0, 1], ends=[-1, -1, 2]),
["float_length"],
to=core.DataType.FLOAT, # type: ignore
)
zero_offset = net.ConstantFill(length, ["zero_offset"], value=0.0)
max_length_blob = net.ConstantFill(
length, ["max_length"], value=float(max_length)
)
# Calculate the new offset, which is
# offset + max(0, length - max_length)
new_offset = net.Cast(
net.Add(
[
offset,
net.Max(
[zero_offset, net.Sub([length, max_length_blob], ["sub"])],
["max"],
),
],
["float_new_offset"],
broadcast=1,
),
["new_offset"],
to=core.DataType.INT32, # type: ignore
)
new_length = net.Cast(
net.Min([length, max_length_blob], "float_new_length"),
["new_length"],
to=core.DataType.INT32, # type: ignore
)
# Stitch these back together
new_range, _ = net.Concat(
[new_offset, new_length], ["new_range", "split_info"], axis=2
)
# At this point, we have lists w/ length up to max_length
gathered_values, gathered_lengths = net.GatherRanges(
[values, new_range], ["gathered_values", "gathered_length"]
)
# This generate indices for each element
lengths_range_fill = net.LengthsRangeFill(
gathered_lengths, ["lengths_range_fill"]
)
# Finally, we make the dense output
keys_to_extract = list(range(max_length))
values_with_mask: Tuple[
core.BlobReference, core.BlobReference
] = net.SparseToDenseMask(
[lengths_range_fill, gathered_values, zero_val, gathered_lengths],
["dense_values", "presence_mask"],
mask=keys_to_extract,
return_presence_mask=True,
)
dense_values, _presence_mask = values_with_mask
return dense_values
def create_id_mapping(
self, init_net: core.Net, name: str, mapping: List[int]
) -> core.BlobReference:
"""
Given the ID list in the mapping, create an index from each ID to its (1-based) position.
"""
assert len(set(mapping)) == len(
mapping
), "mapping for {} must not contain duplicated IDs".format(name)
mapping_data = init_net.NextScopedBlob("mapping_data_{}".format(name))
init_net.GivenTensorFill(
[],
mapping_data,
shape=[len(mapping)],
values=mapping,
dtype=core.DataType.INT64, # type: ignore
)
handler: core.BlobReference = init_net.NextScopedBlob("mapping_{}".format(name))
init_net.LongIndexCreate([], handler, max_elements=len(mapping))
init_net.IndexLoad([handler, mapping_data], [handler])
init_net.IndexFreeze(handler, handler)
init_net.AddExternalOutput(handler)
return handler
def create_id_mappings(
self, init_net: core.Net, id_mapping_configs: Dict[str, rlt.IdMapping]
) -> Dict[str, core.BlobReference]:
return {
mapping_name: self.create_id_mapping(init_net, mapping_name, mapping.ids)
for mapping_name, mapping in id_mapping_configs.items()
}
def map_ids(
self,
net: core.Net,
name: str,
map_handler: core.BlobReference,
raw_ids: core.BlobReference,
) -> core.BlobReference:
"""
Map raw ID to index (into embedding lookup table, usually)
"""
with core.NameScope("mapping_{}".format(name)):
retval: core.BlobReference = net.IndexGet(
[map_handler, raw_ids], ["mapped_ids"]
)
assert type(retval) == core.BlobReference
return retval
def map_sequence_id_features(
self,
net: core.Net,
name: str,
map_handlers: Dict[str, core.BlobReference],
raw_sequence_id_features: Dict[str, Dict[str, core.BlobReference]],
sequence_id_feature_configs: Dict[str, Dict[str, rlt.IdFeatureConfig]],
) -> Dict[str, Dict[str, core.BlobReference]]:
"""
Map raw IDs of all sequences' ID features to indices (into embedding lookup tables)
"""
def _map_id_feature(sequence_name, id_feature, raw_id_feature):
with core.NameScope(sequence_name):
return self.map_ids(
net,
id_feature,
map_handlers[
sequence_id_feature_configs[sequence_name][
id_feature
].id_mapping_name
],
raw_id_feature,
)
with core.NameScope(name):
return {
sequence_name: {
id_feature: _map_id_feature(
sequence_name, id_feature, raw_id_feature
)
for id_feature, raw_id_feature in id_features.items()
}
for sequence_name, id_features in raw_sequence_id_features.items()
}
def fetch_state_sequence_features(
self, record: schema.Struct, fetch_func
) -> rlt.SequenceFeatures:
"""
Pull the values from Caffe2's blobs into PyTorch's tensors.
"""
state_sequence_features = {}
for seq_name, sequence_feature_type in self.sequence_features.items():
state_seq = sequence_feature_type(id_features=None, float_features=None)
if seq_name in self.sequence_id_feature_types:
state_seq.id_features = self.sequence_id_feature_types[seq_name](
**{
feature_name: fetch_func(
record[seq_name]["id_features"][feature_name]
)
for feature_name in self.sequence_id_features[seq_name]
}
)
if sequence_feature_type.get_float_feature_infos():
state_seq.float_features = fetch_func(
record[seq_name]["float_features"]
)
state_sequence_features[seq_name] = state_seq
return self.sequence_features_type(**state_sequence_features) # type: ignore
def read_actions_to_mask(
self, net, name, num_actions, action, action_size_plus_one
):
with core.NameScope(name):
action_blob_one_hot = net.OneHot(
[action(), action_size_plus_one], ["action_blob_one_hot"]
)
action_blob_one_hot_sliced = net.Slice(
[action_blob_one_hot],
["action_blob_one_hot_sliced"],
starts=[0, 0],
ends=[-1, num_actions],
)
return action_blob_one_hot_sliced
@staticmethod
def fetch(ws, b, to_torch=True):
data = ws.fetch_blob(str(b()))
if not isinstance(data, np.ndarray):
# Blob uninitialized, return None and handle downstream
assert False, "Missing blob: " + str(b)
return None
if to_torch:
return torch.tensor(data)
return data
def map_schema():
return schema.Map(schema.Scalar(), schema.Scalar())
def id_list_schema():
return schema.Map(schema.Scalar(), schema.List(schema.Scalar()))
def id_score_list_schema():
return schema.Map(schema.Scalar(), schema.Map(schema.Scalar(), schema.Scalar()))
class InputColumn(object):
STATE_FEATURES = "state_features"
STATE_FEATURES_PRESENCE = "state_features_presence"
STATE_ID_LIST_FEATURES = "state_id_list_features"
STATE_ID_SCORE_LIST_FEATURES = "state_id_score_list_features"
NEXT_STATE_FEATURES = "next_state_features"
NEXT_STATE_FEATURES_PRESENCE = "next_state_features_presence"
NEXT_STATE_ID_LIST_FEATURES = "next_state_id_list_features"
NEXT_STATE_ID_SCORE_LIST_FEATURES = "next_state_id_score_list_features"
ACTION = "action"
ACTION_PRESENCE = "action_presence"
NEXT_ACTION = "next_action"
NEXT_ACTION_PRESENCE = "next_action_presence"
POSSIBLE_ACTIONS = "possible_actions"
POSSIBLE_ACTIONS_PRESENCE = "possible_actions_presence"
POSSIBLE_ACTIONS_MASK = "possible_actions_mask"
POSSIBLE_NEXT_ACTIONS = "possible_next_actions"
POSSIBLE_NEXT_ACTIONS_PRESENCE = "possible_next_actions_presence"
POSSIBLE_NEXT_ACTIONS_MASK = "possible_next_actions_mask"
NOT_TERMINAL = "not_terminal"
STEP = "step"
TIME_DIFF = "time_diff"
TIME_SINCE_FIRST = "time_since_first"
MDP_ID = "mdp_id"
SEQUENCE_NUMBER = "sequence_number"
METRICS = "metrics"
REWARD = "reward"
ACTION_PROBABILITY = "action_probability"
class TrainingFeatureExtractor(FeatureExtractorBase):
"""
Extract:
- State
- Action
- Next state
- Possible next actions/Next actions
"""
def __init__(
self,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Optional[
Dict[int, NormalizationParameters]
] = None,
include_possible_actions: bool = True,
normalize: bool = True,
max_num_actions: int = None,
set_missing_value_to_zero: bool = None,
multi_steps: Optional[int] = None,
metrics_to_score: Optional[List[str]] = None,
model_feature_config: Optional[rlt.ModelFeatureConfig] = None,
use_time_since_first: Optional[bool] = None,
time_since_first_normalization_parameters: Optional[
NormalizationParameters
] = None,
) -> None:
super().__init__(model_feature_config=model_feature_config)
self.state_normalization_parameters = state_normalization_parameters
self.action_normalization_parameters = action_normalization_parameters
self.sorted_state_features, _ = sort_features_by_normalization(
state_normalization_parameters
)
if action_normalization_parameters:
self.sorted_action_features, _ = sort_features_by_normalization(
action_normalization_parameters
)
else:
self.sorted_action_features = None
self.include_possible_actions = include_possible_actions
if action_normalization_parameters is None:
self.include_possible_actions = True # TODO: Delete SARSA in a future diff
self.normalize = normalize
self.max_num_actions = max_num_actions
self.set_missing_value_to_zero = set_missing_value_to_zero
self.multi_steps = multi_steps
self.metrics_to_score = metrics_to_score
self.use_time_since_first = use_time_since_first or False
self.time_since_first_normalization_parameters = (
time_since_first_normalization_parameters
)
def extract(self, ws, extract_record):
fetch = partial(self.fetch, ws)
def fetch_possible_actions(
possible_actions_blob, possible_actions_presence_blob
):
return rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(possible_actions_blob),
presence=fetch(possible_actions_presence_blob),
)
)
state = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.state_features),
presence=fetch(extract_record.state_features_presence),
)
)
next_state = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.next_state_features),
presence=fetch(extract_record.next_state_features_presence),
)
)
if self.has_sequence_features:
state = state._replace(
sequence_features=self.fetch_state_sequence_features(
extract_record.state_sequence_features, fetch
)
)
next_state = next_state._replace(
sequence_features=self.fetch_state_sequence_features(
extract_record.next_state_sequence_features, fetch
)
)
if self.use_time_since_first:
state = state._replace(
time_since_first=fetch(extract_record.time_since_first)
)
next_state = next_state._replace(
time_since_first=fetch(extract_record.next_time_since_first)
)
if self.sorted_action_features is None:
# Action is a one-hot vector of discrete actions
action = fetch(extract_record.action)
next_action = fetch(extract_record.next_action)
else:
# Action is a set of continuous features
action = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.action),
presence=fetch(extract_record.action_presence),
)
)
next_action = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.next_action),
presence=fetch(extract_record.next_action_presence),
)
)
max_num_actions = None
step = None
if self.multi_steps is not None:
step = fetch(extract_record.step).reshape(-1, 1)
reward = fetch(extract_record.reward).reshape(-1, 1)
# is_terminal should be filled by preprocessor
not_terminal = fetch(extract_record.not_terminal).reshape(-1, 1)
time_diff = fetch(extract_record.time_diff).reshape(-1, 1)
if self.sorted_action_features is not None and self.max_num_actions is not None:
tiled_next_state = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=next_state.float_features.value.repeat(
1, self.max_num_actions
).reshape(-1, next_state.float_features.value.shape[1]),
presence=next_state.float_features.presence.repeat(
1, self.max_num_actions
).reshape(-1, next_state.float_features.value.shape[1]),
)
)
else:
tiled_next_state = None
if self.include_possible_actions:
# TODO: this will need to be more complicated to support sparse features
assert self.max_num_actions is not None, "Missing max_num_actions"
possible_actions_mask = (
fetch(extract_record.possible_actions_mask)
.reshape(-1, self.max_num_actions)
.type(torch.FloatTensor)
)
possible_next_actions_mask = (
fetch(extract_record.possible_next_actions_mask)
.reshape(-1, self.max_num_actions)
.type(torch.FloatTensor)
)
if self.sorted_action_features is not None:
possible_actions = fetch_possible_actions(
extract_record.possible_actions,
extract_record.possible_actions_presence,
)
possible_next_actions = fetch_possible_actions(
extract_record.possible_next_actions,
extract_record.possible_next_actions_presence,
)
max_num_actions = self.max_num_actions
training_input = rlt.RawParametricDqnInput(
state=state,
reward=reward,
time_diff=time_diff,
action=action,
next_action=next_action,
not_terminal=not_terminal,
next_state=next_state,
tiled_next_state=tiled_next_state,
possible_actions=possible_actions,
possible_actions_mask=possible_actions_mask,
possible_next_actions=possible_next_actions,
possible_next_actions_mask=possible_next_actions_mask,
step=step,
)
else:
training_input = rlt.RawDiscreteDqnInput(
state=state,
reward=reward,
time_diff=time_diff,
action=action,
next_action=next_action,
not_terminal=not_terminal,
next_state=next_state,
possible_actions_mask=possible_actions_mask,
possible_next_actions_mask=possible_next_actions_mask,
step=step,
)
else:
training_input = rlt.RawPolicyNetworkInput(
state=state,
reward=reward,
time_diff=time_diff,
action=action,
next_action=next_action,
not_terminal=not_terminal,
next_state=next_state,
step=step,
)
mdp_id = fetch(extract_record.mdp_id, to_torch=False)
sequence_number = fetch(extract_record.sequence_number)
metrics = fetch(extract_record.metrics) if self.metrics_to_score else None
# TODO: stuff other fields in here
extras = rlt.ExtraData(
action_probability=fetch(extract_record.action_probability).reshape(-1, 1),
sequence_number=sequence_number.reshape(-1, 1)
if sequence_number is not None
else None,
mdp_id=mdp_id.reshape(-1, 1) if mdp_id is not None else None,
max_num_actions=max_num_actions,
metrics=metrics,
)
return rlt.RawTrainingBatch(training_input=training_input, extras=extras)
def create_net(self):
net = core.Net("feature_extractor")
init_net = core.Net("feature_extractor_init")
missing_scalar = self.create_const(
init_net,
"MISSING_SCALAR",
0.0 if self.set_missing_value_to_zero else MISSING_VALUE,
)
action_schema = map_schema() if self.sorted_action_features else schema.Scalar()
pass_through_columns = [
(InputColumn.REWARD, schema.Scalar()),
(InputColumn.NOT_TERMINAL, schema.Scalar()),
(InputColumn.TIME_DIFF, schema.Scalar()),
(InputColumn.MDP_ID, schema.Scalar()),
(InputColumn.SEQUENCE_NUMBER, schema.Scalar()),
(InputColumn.ACTION_PROBABILITY, schema.Scalar()),
]
if self.multi_steps is not None:
pass_through_columns.append((InputColumn.STEP, schema.Scalar()))
input_schema = schema.Struct(
*(
[
(InputColumn.STATE_FEATURES, map_schema()),
(InputColumn.NEXT_STATE_FEATURES, map_schema()),
(InputColumn.ACTION, action_schema),
(InputColumn.NEXT_ACTION, action_schema),
]
+ pass_through_columns
)
)
if self.has_sequence_id_features:
input_schema += schema.Struct(
(InputColumn.STATE_ID_LIST_FEATURES, id_list_schema()),
(InputColumn.NEXT_STATE_ID_LIST_FEATURES, id_list_schema()),
)
if self.has_sequence_float_features:
input_schema += schema.Struct(
(InputColumn.STATE_ID_SCORE_LIST_FEATURES, id_score_list_schema()),
(InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, id_score_list_schema()),
)
if self.include_possible_actions:
input_schema += schema.Struct(
(InputColumn.POSSIBLE_ACTIONS_MASK, schema.List(schema.Scalar())),
(InputColumn.POSSIBLE_NEXT_ACTIONS_MASK, schema.List(schema.Scalar())),
)
if self.sorted_action_features is not None:
input_schema += schema.Struct(
(InputColumn.POSSIBLE_ACTIONS, schema.List(map_schema())),
(InputColumn.POSSIBLE_NEXT_ACTIONS, schema.List(map_schema())),
)
if self.metrics_to_score:
input_schema += schema.Struct((InputColumn.METRICS, map_schema()))
if self.use_time_since_first:
input_schema += schema.Struct(
(InputColumn.TIME_SINCE_FIRST, schema.Scalar())
)
input_record = net.set_input_record(input_schema)
state, state_presence = self.extract_float_features(
net,
"state",
input_record[InputColumn.STATE_FEATURES],
self.sorted_state_features,
missing_scalar,
)
next_state, next_state_presence = self.extract_float_features(
net,
"next_state",
input_record[InputColumn.NEXT_STATE_FEATURES],
self.sorted_state_features,
missing_scalar,
)
if self.has_sequence_features:
empty_range = self.create_empty_range(init_net)
if self.has_sequence_id_features:
zero_int64 = self.create_const(
init_net, "zero_int64", 0, dtype=core.DataType.INT64
)
state_sequence_id_features = self.extract_sequence_id_features(
net,
"state",
self.sequence_features,
self.sequence_id_features,
input_record[InputColumn.STATE_ID_LIST_FEATURES],
empty_range,
zero_int64,
)
next_state_sequence_id_features = self.extract_sequence_id_features(
net,
"next_state",
self.sequence_features,
self.sequence_id_features,
input_record[InputColumn.NEXT_STATE_ID_LIST_FEATURES],
empty_range,
zero_int64,
)
id_mappings = self.create_id_mappings(init_net, self.id_mapping_configs)
state_sequence_id_features = self.map_sequence_id_features(
net,
"state",
id_mappings,
state_sequence_id_features,
self.sequence_id_features,
)
next_state_sequence_id_features = self.map_sequence_id_features(
net,
"next_state",
id_mappings,
next_state_sequence_id_features,
self.sequence_id_features,
)
else:
state_sequence_id_features = {}
next_state_sequence_id_features = {}
if self.has_sequence_float_features:
zero_float = self.create_const(init_net, "zero_float", 0.0)
state_sequence_float_features = self.extract_sequence_float_features(
net,
"state",
self.sequence_features,
input_record[InputColumn.STATE_ID_SCORE_LIST_FEATURES],
empty_range,
zero_float,
)
next_state_sequence_float_features = self.extract_sequence_float_features(
net,
"next_state",
self.sequence_features,
input_record[InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES],
empty_range,
zero_float,
)
else:
state_sequence_float_features = {}
next_state_sequence_float_features = {}
if self.sorted_action_features:
action, action_presence = self.extract_float_features(
net,
InputColumn.ACTION,
input_record[InputColumn.ACTION],
self.sorted_action_features,
missing_scalar,
)
next_action, next_action_presence = self.extract_float_features(
net,
InputColumn.NEXT_ACTION,
input_record[InputColumn.NEXT_ACTION],
self.sorted_action_features,
missing_scalar,
)
if self.include_possible_actions:
possible_action_features, possible_action_features_presence = self.extract_float_features(
net,
InputColumn.POSSIBLE_ACTIONS,
input_record[InputColumn.POSSIBLE_ACTIONS]["values"],
self.sorted_action_features,
missing_scalar,
)
possible_next_action_features, possible_next_action_features_presence = self.extract_float_features(
net,
InputColumn.POSSIBLE_NEXT_ACTIONS,
input_record[InputColumn.POSSIBLE_NEXT_ACTIONS]["values"],
self.sorted_action_features,
missing_scalar,
)
else:
action_size_plus_one = self.create_const(
init_net,
"action_size_plus_one",
self.max_num_actions + 1,
dtype=core.DataType.INT64,
)
action = self.read_actions_to_mask(
net,
InputColumn.ACTION,
self.max_num_actions,
input_record[InputColumn.ACTION],
action_size_plus_one,
)
next_action = self.read_actions_to_mask(
net,
InputColumn.NEXT_ACTION,
self.max_num_actions,
input_record[InputColumn.NEXT_ACTION],
action_size_plus_one,
)
if self.normalize:
C2.set_net_and_init_net(net, init_net)
state, _ = PreprocessorNet().normalize_dense_matrix(
state,
self.sorted_state_features,
self.state_normalization_parameters,
blobname_prefix="state",
split_expensive_feature_groups=True,
)
next_state, _ = PreprocessorNet().normalize_dense_matrix(
next_state,
self.sorted_state_features,
self.state_normalization_parameters,
blobname_prefix="next_state",
split_expensive_feature_groups=True,
)
if self.sorted_action_features is not None:
action, _ = PreprocessorNet().normalize_dense_matrix(
action,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="action",
split_expensive_feature_groups=True,
)
next_action, _ = PreprocessorNet().normalize_dense_matrix(
next_action,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="next_action",
split_expensive_feature_groups=True,
)
if self.include_possible_actions:
possible_action_features, _ = PreprocessorNet().normalize_dense_matrix(
possible_action_features,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="possible_action",
split_expensive_feature_groups=True,
)
possible_next_action_features, _ = PreprocessorNet().normalize_dense_matrix(
possible_next_action_features,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="possible_next_action",
split_expensive_feature_groups=True,
)
C2.set_net_and_init_net(None, None)
if self.metrics_to_score:
metrics_to_score_idxs = list(range(len(self.metrics_to_score)))
missing_metric = self.create_const(init_net, "MISSING_METRIC", 0.0)
metrics, metrics_presence = self.extract_float_features(
net,
InputColumn.METRICS,
input_record[InputColumn.METRICS],
metrics_to_score_idxs,
missing_metric,
)
if self.use_time_since_first:
time_since_first = net.Cast(
net.ExpandDims(
input_record[InputColumn.TIME_SINCE_FIRST](), 1, dims=[1]
),
net.NextScopedBlob("float_time_since_first"),
to=core.DataType.FLOAT,
)
float_time_diff = net.Cast(
net.ExpandDims(input_record[InputColumn.TIME_DIFF](), 1, dims=[1]),
net.NextScopedBlob("float_time_diff"),
to=core.DataType.FLOAT,
)
next_time_since_first = net.Add(
[time_since_first, float_time_diff],
net.NextScopedBlob("float_next_time_since_first"),
)
if self.time_since_first_normalization_parameters and self.normalize:
C2.set_net_and_init_net(net, init_net)
time_since_first, _ = PreprocessorNet().normalize_dense_matrix(
time_since_first,
[0],
{0: self.time_since_first_normalization_parameters},
blobname_prefix="time_since_first",
split_expensive_feature_groups=True,
)
next_time_since_first, _ = PreprocessorNet().normalize_dense_matrix(
next_time_since_first,
[0],
{0: self.time_since_first_normalization_parameters},
blobname_prefix="next_time_since_first",
split_expensive_feature_groups=True,
)
C2.set_net_and_init_net(None, None)
output_schema = schema.Struct(
(InputColumn.STATE_FEATURES, state),
(InputColumn.STATE_FEATURES_PRESENCE, state_presence),
(InputColumn.NEXT_STATE_FEATURES, next_state),
(InputColumn.NEXT_STATE_FEATURES_PRESENCE, next_state_presence),
(InputColumn.ACTION, action),
(InputColumn.NEXT_ACTION, next_action),
)
if self.sorted_action_features:
output_schema += schema.Struct(
(InputColumn.ACTION_PRESENCE, action_presence),
(InputColumn.NEXT_ACTION_PRESENCE, next_action_presence),
)
output_schema += schema.Struct(
*(
[
(col_name, input_record[col_name])
for col_name, _col_type in pass_through_columns
]
)
)
if self.has_sequence_features:
output_schema += schema.Struct(
(
"state_sequence_features",
self.get_state_sequence_features_schema(
state_sequence_id_features, state_sequence_float_features
),
),
(
"next_state_sequence_features",
self.get_state_sequence_features_schema(
next_state_sequence_id_features,
next_state_sequence_float_features,
),
),
)
if self.use_time_since_first:
output_schema += schema.Struct(
("time_since_first", time_since_first),
("next_time_since_first", next_time_since_first),
)
if self.include_possible_actions:
# Drop the "lengths" blob from possible_actions_mask since we know
# it's just a list of [max_num_actions, max_num_actions, ...]
output_schema += schema.Struct(
(
InputColumn.POSSIBLE_ACTIONS_MASK,
input_record[InputColumn.POSSIBLE_ACTIONS_MASK]["values"],
),
(
InputColumn.POSSIBLE_NEXT_ACTIONS_MASK,
input_record[InputColumn.POSSIBLE_NEXT_ACTIONS_MASK]["values"],
),
)
if self.sorted_action_features is not None:
output_schema += schema.Struct(
(InputColumn.POSSIBLE_ACTIONS, possible_action_features),
(
InputColumn.POSSIBLE_ACTIONS_PRESENCE,
possible_action_features_presence,
),
(InputColumn.POSSIBLE_NEXT_ACTIONS, possible_next_action_features),
(
InputColumn.POSSIBLE_NEXT_ACTIONS_PRESENCE,
possible_next_action_features_presence,
),
)
if self.metrics_to_score:
output_schema += schema.Struct((InputColumn.METRICS, metrics))
net.set_output_record(output_schema)
return FeatureExtractorNet(net, init_net)
class PredictorFeatureExtractor(FeatureExtractorBase):
"""
This class assumes that the action is not in the input unless it is a
parametric action.
The features (of both states & actions, if any) are expected to come in the
following blobs:
- input/float_features.keys
- input/float_features.values
- input/float_features.lengths
TODO: Support int features
"""
def __init__(
self,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Optional[
Dict[int, NormalizationParameters]
] = None,
normalize: bool = True,
set_missing_value_to_zero: bool = False,
model_feature_config: Optional[rlt.ModelFeatureConfig] = None,
use_time_since_first: Optional[bool] = None,
time_since_first_normalization_parameters: Optional[
NormalizationParameters
] = None,
) -> None:
super().__init__(model_feature_config=model_feature_config)
self.state_normalization_parameters = state_normalization_parameters
self.action_normalization_parameters = action_normalization_parameters
self.sorted_state_features, _ = sort_features_by_normalization(
state_normalization_parameters
)
if action_normalization_parameters:
self.sorted_action_features, _ = sort_features_by_normalization(
action_normalization_parameters
)
else:
self.sorted_action_features = None
self.normalize = normalize
self.set_missing_value_to_zero = set_missing_value_to_zero
self.use_time_since_first = use_time_since_first or False
self.time_since_first_normalization_parameters = (
time_since_first_normalization_parameters
)
def extract(self, ws, extract_record):
fetch = partial(self.fetch, ws)
state_features = {"float_features": fetch(extract_record.state.float_features)}
if self.has_sequence_features:
state_features["sequence_features"] = self.fetch_state_sequence_features(
extract_record.state.sequence_features, fetch
)
if self.use_time_since_first:
state_features["time_since_first"] = fetch(
extract_record.state.time_since_first
)
state = rlt.FeatureVector(**state_features)
if self.sorted_action_features is None:
action = None
else:
action = rlt.FeatureVector(float_features=fetch(extract_record.action))
return rlt.RawStateAction(state=state, action=action)
def create_net(self):
net = core.Net("feature_extractor")
init_net = core.Net("feature_extractor_init")
missing_scalar = self.create_const(
init_net,
"MISSING_SCALAR",
0.0 if self.set_missing_value_to_zero else MISSING_VALUE,
)
input_schema = schema.Struct(
(
"float_features",
schema.Map(
keys=core.BlobReference("input/float_features.keys"),
values=core.BlobReference("input/float_features.values"),
lengths_blob=core.BlobReference("input/float_features.lengths"),
),
)
)
if self.has_sequence_id_features:
input_schema += schema.Struct(
(
"id_list_features",
schema.Map(
keys=core.BlobReference(
"input/int_multi_categorical_feature.keys"
),
values=schema.List(
values=core.BlobReference(
"input/int_multi_categorical_feature.values.values"
),
lengths_blob=core.BlobReference(
"input/int_multi_categorical_feature.values.lengths"
),
),
lengths_blob=core.BlobReference(
"input/int_multi_categorical_feature.lengths"
),
),
)
)
if self.has_sequence_float_features:
input_schema += schema.Struct(
(
"id_score_list_features",
schema.Map(
keys=core.BlobReference(
"input/int_weighted_multi_categorical_feature.keys"
),
values=schema.Map(
keys=core.BlobReference(
"input/int_weighted_multi_categorical_feature.values.keys"
),
values=core.BlobReference(
"input/int_weighted_multi_categorical_feature.values.values"
),
lengths_blob=core.BlobReference(
"input/int_weighted_multi_categorical_feature.values.lengths"
),
),
lengths_blob=core.BlobReference(
"input/int_weighted_multi_categorical_feature.lengths"
),
),
)
)
input_record = net.set_input_record(input_schema)
state, state_presence = self.extract_float_features(
net,
"state",
input_record.float_features,
self.sorted_state_features,
missing_scalar,
)
if self.has_sequence_features:
empty_range = self.create_empty_range(init_net)
if self.has_sequence_id_features:
zero_int64 = self.create_const(
init_net, "zero_int64", 0, dtype=core.DataType.INT64
)
state_sequence_id_features = self.extract_sequence_id_features(
net,
"state",
self.sequence_features,
self.sequence_id_features,
input_record.id_list_features,
empty_range,
zero_int64,
)
id_mappings = self.create_id_mappings(init_net, self.id_mapping_configs)
state_sequence_id_features = self.map_sequence_id_features(
net,
"state",
id_mappings,
state_sequence_id_features,
self.sequence_id_features,
)
if self.has_sequence_float_features:
zero_float = self.create_const(init_net, "zero_float", 0.0)
state_sequence_float_features = self.extract_sequence_float_features(
net,
"state",
self.sequence_features,
input_record.id_score_list_features,
empty_range,
zero_float,
)
if self.sorted_action_features:
action, action_presence = self.extract_float_features(
net,
"action",
input_record.float_features,
self.sorted_action_features,
missing_scalar,
)
if self.normalize:
C2.set_net_and_init_net(net, init_net)
state, _ = PreprocessorNet().normalize_dense_matrix(
state,
self.sorted_state_features,
self.state_normalization_parameters,
blobname_prefix="state",
split_expensive_feature_groups=True,
)
if self.sorted_action_features:
action, _ = PreprocessorNet().normalize_dense_matrix(
action,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="action",
split_expensive_feature_groups=True,
)
C2.set_net_and_init_net(None, None)
if self.use_time_since_first:
state_shape = net.Shape(state, 1)
batch_size = net.Slice(state_shape, 1, starts=[0], ends=[1])
time_since_first = net.ExpandDims(
net.ConstantFill(batch_size, 1, value=0.0, input_as_shape=1),
1,
dims=[1],
)
if self.time_since_first_normalization_parameters and self.normalize:
C2.set_net_and_init_net(net, init_net)
time_since_first, _ = PreprocessorNet().normalize_dense_matrix(
time_since_first,
[0],
{0: self.time_since_first_normalization_parameters},
blobname_prefix="time_since_first",
split_expensive_feature_groups=True,
)
C2.set_net_and_init_net(None, None)
output_record = schema.Struct(("state:float_features", state))
if self.has_sequence_features:
output_record += schema.Struct(
(
"state:sequence_features",
self.get_state_sequence_features_schema(
state_sequence_id_features, state_sequence_float_features
),
)
)
if self.use_time_since_first:
output_record += schema.Struct(("state:time_since_first", time_since_first))
if self.sorted_action_features:
output_record += schema.Struct(("action", action))
net.set_output_record(output_record)
return FeatureExtractorNet(net, init_net)
class WorldModelFeatureExtractor(FeatureExtractorBase):
"""
Extract:
- State
- Action
- Next state
- Reward
- Not terminal
"""
def __init__(
self,
seq_len,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Optional[
Dict[int, NormalizationParameters]
] = None,
discrete_action_names: Optional[List[str]] = None,
normalize: Optional[bool] = True,
) -> None:
self.seq_len = seq_len
self.normalize = normalize
self.state_normalization_parameters = state_normalization_parameters
self.action_normalization_parameters = action_normalization_parameters
self.sorted_state_features, _ = sort_features_by_normalization(
state_normalization_parameters
)
self.state_dim = get_num_output_features(self.state_normalization_parameters)
self.state_feature_num = len(self.sorted_state_features)
self.sorted_state_feature_start_indices = get_feature_start_indices(
self.sorted_state_features, state_normalization_parameters
)
if action_normalization_parameters:
self.sorted_action_features, _ = sort_features_by_normalization(
action_normalization_parameters
)
self.action_dim = get_num_output_features(
self.action_normalization_parameters
)
self.action_feature_num = len(self.sorted_action_features)
self.sorted_action_feature_start_indices = get_feature_start_indices(
self.sorted_action_features, action_normalization_parameters
)
else:
self.sorted_action_features = None
assert discrete_action_names is not None
self.action_dim = len(discrete_action_names)
self.action_feature_num = len(discrete_action_names)
self.sorted_action_feature_start_indices = list(
range(len(discrete_action_names))
)
def extract(self, ws, extract_record):
fetch = partial(self.fetch, ws)
state = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.state_features),
presence=fetch(extract_record.state_features_presence),
)
)
if self.sorted_action_features:
action = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.action),
presence=fetch(extract_record.action_presence),
)
)
else:
action = fetch(extract_record.action).byte()
next_state = rlt.FeatureVector(
float_features=rlt.ValuePresence(
value=fetch(extract_record.next_state_features),
presence=fetch(extract_record.next_state_features_presence),
)
)
reward = fetch(extract_record.reward["values"])
not_terminal = fetch(extract_record.not_terminal["values"]).float()
# TODO: Replace with true time diff
time_diff = torch.ones_like(reward).float()
training_input = rlt.RawMemoryNetworkInput(
state=state,
reward=reward,
time_diff=time_diff,
action=action,
not_terminal=not_terminal,
next_state=next_state,
step=None,
)
return rlt.RawTrainingBatch(training_input=training_input, extras=None)
def create_net(self):
net = core.Net("feature_extractor")
init_net = core.Net("feature_extractor_init")
missing_scalar = self.create_const(init_net, "MISSING_SCALAR", MISSING_VALUE)
action_schema = (
schema.List(map_schema())
if self.sorted_action_features
else schema.List(schema.Scalar())
)
pass_through_columns = [
(InputColumn.NOT_TERMINAL, schema.List(schema.Scalar())),
(InputColumn.REWARD, schema.List(schema.Scalar())),
]
input_schema = schema.Struct(
*(
[
(InputColumn.STATE_FEATURES, schema.List(map_schema())),
(InputColumn.ACTION, action_schema),
(InputColumn.NEXT_STATE_FEATURES, schema.List(map_schema())),
]
+ pass_through_columns
)
)
input_record = net.set_input_record(input_schema)
state, state_presence = self.extract_float_features(
net,
"state",
input_record[InputColumn.STATE_FEATURES].value,
self.sorted_state_features,
missing_scalar,
)
next_state, next_state_presence = self.extract_float_features(
net,
"next_state",
input_record[InputColumn.NEXT_STATE_FEATURES].value,
self.sorted_state_features,
missing_scalar,
)
if self.sorted_action_features:
action, action_presence = self.extract_float_features(
net,
InputColumn.ACTION,
input_record[InputColumn.ACTION].value,
self.sorted_action_features,
missing_scalar,
)
else:
action_size_plus_one = self.create_const(
init_net,
"action_size_plus_one",
self.action_dim + 1,
dtype=core.DataType.INT64,
)
action = self.read_actions_to_mask(
net,
InputColumn.ACTION,
self.action_dim,
input_record[InputColumn.ACTION].value,
action_size_plus_one,
)
action_presence = action
if self.normalize:
C2.set_net_and_init_net(net, init_net)
state, _ = PreprocessorNet().normalize_dense_matrix(
state,
self.sorted_state_features,
self.state_normalization_parameters,
blobname_prefix="state",
split_expensive_feature_groups=True,
)
next_state, _ = PreprocessorNet().normalize_dense_matrix(
next_state,
self.sorted_state_features,
self.state_normalization_parameters,
blobname_prefix="next_state",
split_expensive_feature_groups=True,
)
if self.sorted_action_features is not None:
action, _ = PreprocessorNet().normalize_dense_matrix(
action,
self.sorted_action_features,
self.action_normalization_parameters,
blobname_prefix="action",
split_expensive_feature_groups=True,
)
C2.set_net_and_init_net(None, None)
output_schema = schema.Struct(
*(
[
(InputColumn.STATE_FEATURES, state),
(InputColumn.STATE_FEATURES_PRESENCE, state_presence),
(InputColumn.NEXT_STATE_FEATURES, next_state),
(InputColumn.NEXT_STATE_FEATURES_PRESENCE, next_state_presence),
(InputColumn.ACTION, action),
(InputColumn.ACTION_PRESENCE, action_presence),
]
+ [
(col_name, input_record[col_name])
for col_name, _col_type in pass_through_columns
]
)
)
net.set_output_record(output_schema)
return FeatureExtractorNet(net, init_net)
```
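For intuition, a self-contained PyTorch sketch of what read_actions_to_mask() above computes: one-hot encode a discrete action index against num_actions + 1 classes, then slice off the extra "missing action" column. This is an illustrative re-implementation, not part of the Caffe2 net.
```python
import torch
import torch.nn.functional as F

def actions_to_mask(action_idx: torch.Tensor, num_actions: int) -> torch.Tensor:
    """One-hot encode with an extra slot for 'missing', then slice it off."""
    one_hot = F.one_hot(action_idx, num_classes=num_actions + 1)
    return one_hot[:, :num_actions].float()

# The sentinel index `num_actions` maps to an all-zero row:
print(actions_to_mask(torch.tensor([0, 2, 3]), num_actions=3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 0., 0.]])
```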
#### File: test/environment/__init__.py
```python
import logging
from gym.envs.registration import register, registry
from ml.rl.test.environment.linear_dynamics import LinDynaEnv # noqa
logger = logging.getLogger(__name__)
def register_if_not_exists(id, entry_point):
"""
Prevent tests from failing when trying to re-register already-registered environments
"""
if id not in registry.env_specs:
register(id=id, entry_point=entry_point)
register_if_not_exists(
id="LinearDynamics-v0",
entry_point="ml.rl.test.environment.linear_dynamics:LinDynaEnv",
)
```
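A minimal usage sketch of the registration above, assuming gym and the ml.rl package are importable; the random rollout loop is illustrative only.
```python
import gym

import ml.rl.test.environment  # noqa: F401  (runs the registration above)

env = gym.make("LinearDynamics-v0")
obs = env.reset()
done = False
while not done:
    obs, reward, done, info = env.step(env.action_space.sample())
env.close()
```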
#### File: gym/world_model/mdnrnn_gym.py
```python
import argparse
import json
import logging
import sys
from typing import Dict, Optional
import ml.rl.types as rlt
import numpy as np
import torch
from ml.rl.evaluation.world_model_evaluator import (
FeatureImportanceEvaluator,
FeatureSensitivityEvaluator,
)
from ml.rl.json_serialize import json_to_object
from ml.rl.models.mdn_rnn import MDNRNNMemoryPool
from ml.rl.models.world_model import MemoryNetwork
from ml.rl.parameters import MDNRNNParameters, OpenAiGymParameters, OpenAiRunDetails
from ml.rl.test.gym.open_ai_gym_environment import (
EnvType,
ModelType,
OpenAIGymEnvironment,
)
from ml.rl.test.gym.run_gym import dict_to_np, get_possible_actions
from ml.rl.training.rl_dataset import RLDataset
from ml.rl.training.world_model.mdnrnn_trainer import MDNRNNTrainer
logger = logging.getLogger(__name__)
def loss_to_num(losses):
return {k: v.item() for k, v in losses.items()}
def multi_step_sample_generator(
gym_env: OpenAIGymEnvironment,
num_transitions: int,
max_steps: Optional[int],
multi_steps: int,
include_shorter_samples_at_start: bool,
include_shorter_samples_at_end: bool,
):
"""
Convert gym env multi-step sample format to mdn-rnn multi-step sample format
:param gym_env: The environment used to generate multi-step samples
:param num_transitions: # of samples to return
:param max_steps: An episode terminates when the horizon is beyond max_steps
:param multi_steps: # of steps of states and actions per sample
:param include_shorter_samples_at_start: Whether to keep samples of shorter steps
which are generated at the beginning of an episode
:param include_shorter_samples_at_end: Whether to keep samples of shorter steps
which are generated at the end of an episode
"""
samples = gym_env.generate_random_samples(
num_transitions=num_transitions,
use_continuous_action=True,
max_step=max_steps,
multi_steps=multi_steps,
include_shorter_samples_at_start=include_shorter_samples_at_start,
include_shorter_samples_at_end=include_shorter_samples_at_end,
)
for j in range(num_transitions):
sample_steps = len(samples.terminals[j]) # type: ignore
state = dict_to_np(samples.states[j], np_size=gym_env.state_dim, key_offset=0)
action = dict_to_np(
samples.actions[j], np_size=gym_env.action_dim, key_offset=gym_env.state_dim
)
next_actions = np.float32( # type: ignore
[
dict_to_np(
samples.next_actions[j][k],
np_size=gym_env.action_dim,
key_offset=gym_env.state_dim,
)
for k in range(sample_steps)
]
)
next_states = np.float32( # type: ignore
[
dict_to_np(
samples.next_states[j][k], np_size=gym_env.state_dim, key_offset=0
)
for k in range(sample_steps)
]
)
rewards = np.float32(samples.rewards[j]) # type: ignore
terminals = np.float32(samples.terminals[j]) # type: ignore
not_terminals = np.logical_not(terminals)
ordered_states = np.vstack((state, next_states))
ordered_actions = np.vstack((action, next_actions))
mdnrnn_states = ordered_states[:-1]
mdnrnn_actions = ordered_actions[:-1]
mdnrnn_next_states = ordered_states[-multi_steps:]
mdnrnn_next_actions = ordered_actions[-multi_steps:]
# Pad with zeros so that all samples have an equal number of steps.
# The general rule is to pad zeros at the end of a sequence.
# In addition, if the sequence only has one step (i.e., the
# first state of an episode), pad one zero row ahead of the
# sequence, which lets embeddings be generated properly for
# one-step samples.
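# For example (hypothetical numbers): with multi_steps=3 and sample_steps=1
# (the first state of an episode), one zero row is padded on top and one at
# the bottom, so the padded sequence still has 3 rows.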
num_padded_top_rows = 1 if multi_steps > 1 and sample_steps == 1 else 0
num_padded_bottom_rows = multi_steps - sample_steps - num_padded_top_rows
sample_steps_next = len(mdnrnn_next_states)
num_padded_top_rows_next = 0
num_padded_bottom_rows_next = multi_steps - sample_steps_next
yield (
np.pad(
mdnrnn_states,
((num_padded_top_rows, num_padded_bottom_rows), (0, 0)),
"constant",
constant_values=0.0,
),
np.pad(
mdnrnn_actions,
((num_padded_top_rows, num_padded_bottom_rows), (0, 0)),
"constant",
constant_values=0.0,
),
np.pad(
rewards,
((num_padded_top_rows, num_padded_bottom_rows)),
"constant",
constant_values=0.0,
),
np.pad(
mdnrnn_next_states,
((num_padded_top_rows_next, num_padded_bottom_rows_next), (0, 0)),
"constant",
constant_values=0.0,
),
np.pad(
mdnrnn_next_actions,
((num_padded_top_rows_next, num_padded_bottom_rows_next), (0, 0)),
"constant",
constant_values=0.0,
),
np.pad(
not_terminals,
((num_padded_top_rows, num_padded_bottom_rows)),
"constant",
constant_values=0.0,
),
sample_steps,
sample_steps_next,
)
def get_replay_buffer(
num_episodes: int, seq_len: int, max_step: int, gym_env: OpenAIGymEnvironment
) -> MDNRNNMemoryPool:
num_transitions = num_episodes * max_step
replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_transitions)
for (
mdnrnn_state,
mdnrnn_action,
rewards,
next_states,
_,
not_terminals,
_,
_,
) in multi_step_sample_generator(
gym_env,
num_transitions=num_transitions,
max_steps=max_step,
multi_steps=seq_len,
include_shorter_samples_at_start=False,
include_shorter_samples_at_end=False,
):
mdnrnn_state, mdnrnn_action, next_states, rewards, not_terminals = (
torch.tensor(mdnrnn_state),
torch.tensor(mdnrnn_action),
torch.tensor(next_states),
torch.tensor(rewards),
torch.tensor(not_terminals),
)
replay_buffer.insert_into_memory(
mdnrnn_state, mdnrnn_action, next_states, rewards, not_terminals
)
return replay_buffer
def main(args):
parser = argparse.ArgumentParser(
description="Train a Mixture-Density-Network RNN net to learn an OpenAI"
" Gym environment, i.e., predict next state, reward, and"
" terminal signal using current state and action"
)
parser.add_argument("-p", "--parameters", help="Path to JSON parameters file.")
parser.add_argument(
"-g",
"--gpu_id",
help="If set, will use GPU with specified ID. Otherwise will use CPU.",
default=-1,
)
parser.add_argument(
"-l",
"--log_level",
choices=["debug", "info", "warning", "error", "critical"],
help="If set, use logging level specified (debug, info, warning, error, "
"critical). Else defaults to info.",
default="info",
)
parser.add_argument(
"-f",
"--feature_importance",
action="store_true",
help="If set, feature importance will be calculated after the training",
)
parser.add_argument(
"-s",
"--feature_sensitivity",
action="store_true",
help="If set, state feature sensitivity by varying actions will be"
" calculated after the training",
)
parser.add_argument(
"-e",
"--save_embedding_to_path",
help="If a file path is provided, save a RLDataset with states embedded"
" by the trained world model",
)
args = parser.parse_args(args)
logger.setLevel(getattr(logging, args.log_level.upper()))
with open(args.parameters, "r") as f:
params = json_to_object(f.read(), OpenAiGymParameters)
if args.gpu_id != -1:
params = params._replace(use_gpu=True)
mdnrnn_gym(
params,
args.feature_importance,
args.feature_sensitivity,
args.save_embedding_to_path,
)
def mdnrnn_gym(
params: OpenAiGymParameters,
feature_importance: bool = False,
feature_sensitivity: bool = False,
save_embedding_to_path: Optional[str] = None,
seed: Optional[int] = None,
):
assert params.mdnrnn is not None
use_gpu = params.use_gpu
logger.info("Running gym with params")
logger.info(params)
env_type = params.env
env = OpenAIGymEnvironment(
env_type, epsilon=1.0, softmax_policy=True, gamma=0.99, random_seed=seed
)
# create test data once
assert params.run_details.max_steps is not None
test_replay_buffer = get_replay_buffer(
params.run_details.num_test_episodes,
params.run_details.seq_len,
params.run_details.max_steps,
env,
)
test_batch = test_replay_buffer.sample_memories(
test_replay_buffer.memory_size, use_gpu=use_gpu, batch_first=True
)
trainer = create_trainer(params, env, use_gpu)
_, _, trainer = train_sgd(
env,
trainer,
use_gpu,
"{} test run".format(env_type),
params.mdnrnn.minibatch_size,
params.run_details,
test_batch=test_batch,
)
feature_importance_map, feature_sensitivity_map, dataset = None, None, None
if feature_importance:
feature_importance_map = calculate_feature_importance(
env, trainer, use_gpu, params.run_details, test_batch=test_batch
)
if feature_sensitivity:
feature_sensitivity_map = calculate_feature_sensitivity_by_actions(
env, trainer, use_gpu, params.run_details, test_batch=test_batch
)
if save_embedding_to_path:
dataset = RLDataset(save_embedding_to_path)
create_embed_rl_dataset(env, trainer, dataset, use_gpu, params.run_details)
dataset.save()
return env, trainer, feature_importance_map, feature_sensitivity_map, dataset
def calculate_feature_importance(
gym_env: OpenAIGymEnvironment,
trainer: MDNRNNTrainer,
use_gpu: bool,
run_details: OpenAiRunDetails,
test_batch: rlt.PreprocessedTrainingBatch,
):
assert run_details.max_steps is not None
assert run_details.num_test_episodes is not None
assert run_details.seq_len is not None
feature_importance_evaluator = FeatureImportanceEvaluator(
trainer,
discrete_action=gym_env.action_type == EnvType.DISCRETE_ACTION,
state_feature_num=gym_env.state_dim,
action_feature_num=gym_env.action_dim,
sorted_action_feature_start_indices=list(range(gym_env.action_dim)),
sorted_state_feature_start_indices=list(range(gym_env.state_dim)),
)
feature_loss_vector = feature_importance_evaluator.evaluate(test_batch)[
"feature_loss_increase"
]
feature_importance_map = {}
for i in range(gym_env.action_dim):
print(
"action {}, feature importance: {}".format(i, feature_loss_vector[i].item())
)
feature_importance_map[f"action{i}"] = feature_loss_vector[i].item()
for i in range(gym_env.state_dim):
print(
"state {}, feature importance: {}".format(
i, feature_loss_vector[i + gym_env.action_dim].item()
)
)
feature_importance_map[f"state{i}"] = feature_loss_vector[
i + gym_env.action_dim
].item()
return feature_importance_map
def create_embed_rl_dataset(
gym_env: OpenAIGymEnvironment,
trainer: MDNRNNTrainer,
dataset: RLDataset,
use_gpu: bool,
run_details: OpenAiRunDetails,
):
assert run_details.max_steps is not None
old_mdnrnn_mode = trainer.mdnrnn.mdnrnn.training
trainer.mdnrnn.mdnrnn.eval()
num_transitions = run_details.num_state_embed_episodes * run_details.max_steps
device = torch.device("cuda") if use_gpu else torch.device("cpu") # type: ignore
(
state_batch,
action_batch,
reward_batch,
next_state_batch,
next_action_batch,
not_terminal_batch,
step_batch,
next_step_batch,
) = map(
list,
zip(
*multi_step_sample_generator(
gym_env=gym_env,
num_transitions=num_transitions,
max_steps=run_details.max_steps,
# +1 because MDNRNN embeds the first seq_len steps and then
# the embedded state will be concatenated with the last step
multi_steps=run_details.seq_len + 1,
include_shorter_samples_at_start=True,
include_shorter_samples_at_end=False,
)
),
)
def concat_batch(batch):
return torch.cat(
[
torch.tensor(
np.expand_dims(x, axis=1), dtype=torch.float, device=device
)
for x in batch
],
dim=1,
)
# shape: seq_len x batch_size x feature_dim
mdnrnn_state = concat_batch(state_batch)
next_mdnrnn_state = concat_batch(next_state_batch)
mdnrnn_action = concat_batch(action_batch)
next_mdnrnn_action = concat_batch(next_action_batch)
mdnrnn_input = rlt.PreprocessedStateAction.from_tensors(
state=mdnrnn_state, action=mdnrnn_action
)
next_mdnrnn_input = rlt.PreprocessedStateAction.from_tensors(
state=next_mdnrnn_state, action=next_mdnrnn_action
)
# batch-compute state embedding
mdnrnn_output = trainer.mdnrnn(mdnrnn_input)
next_mdnrnn_output = trainer.mdnrnn(next_mdnrnn_input)
for i in range(len(state_batch)):
# Embed the state as the hidden layer's output
# until the previous step + current state
hidden_idx = 0 if step_batch[i] == 1 else step_batch[i] - 2 # type: ignore
next_hidden_idx = next_step_batch[i] - 2 # type: ignore
hidden_embed = (
mdnrnn_output.all_steps_lstm_hidden[hidden_idx, i, :]
.squeeze()
.detach()
.cpu()
)
state_embed = torch.cat(
(hidden_embed, torch.tensor(state_batch[i][hidden_idx + 1])) # type: ignore
)
next_hidden_embed = (
next_mdnrnn_output.all_steps_lstm_hidden[next_hidden_idx, i, :]
.squeeze()
.detach()
.cpu()
)
next_state_embed = torch.cat(
(
next_hidden_embed,
torch.tensor(next_state_batch[i][next_hidden_idx + 1]), # type: ignore
)
)
logger.debug(
"create_embed_rl_dataset:\nstate batch\n{}\naction batch\n{}\nlast "
"action: {},reward: {}\nstate embed {}\nnext state embed {}\n".format(
state_batch[i][: hidden_idx + 1], # type: ignore
action_batch[i][: hidden_idx + 1], # type: ignore
action_batch[i][hidden_idx + 1], # type: ignore
reward_batch[i][hidden_idx + 1], # type: ignore
state_embed,
next_state_embed,
)
)
terminal = 1 - not_terminal_batch[i][hidden_idx + 1] # type: ignore
possible_actions, possible_actions_mask = get_possible_actions(
gym_env, ModelType.PYTORCH_PARAMETRIC_DQN.value, False
)
possible_next_actions, possible_next_actions_mask = get_possible_actions(
gym_env, ModelType.PYTORCH_PARAMETRIC_DQN.value, terminal
)
dataset.insert(
state=state_embed,
action=torch.tensor(action_batch[i][hidden_idx + 1]), # type: ignore
reward=float(reward_batch[i][hidden_idx + 1]), # type: ignore
next_state=next_state_embed,
next_action=torch.tensor(
next_action_batch[i][next_hidden_idx + 1] # type: ignore
),
terminal=torch.tensor(terminal),
possible_next_actions=possible_next_actions,
possible_next_actions_mask=possible_next_actions_mask,
time_diff=torch.tensor(1),
possible_actions=possible_actions,
possible_actions_mask=possible_actions_mask,
policy_id=0,
)
logger.info(
"Insert {} transitions into a state embed dataset".format(len(state_batch))
)
trainer.mdnrnn.mdnrnn.train(old_mdnrnn_mode)
return dataset
def calculate_feature_sensitivity_by_actions(
gym_env: OpenAIGymEnvironment,
trainer: MDNRNNTrainer,
use_gpu: bool,
run_details: OpenAiRunDetails,
test_batch: rlt.PreprocessedTrainingBatch,
seq_len: int = 5,
num_test_episodes: int = 100,
):
assert run_details.max_steps is not None
feature_sensitivity_evaluator = FeatureSensitivityEvaluator(
trainer,
state_feature_num=gym_env.state_dim,
sorted_state_feature_start_indices=list(range(gym_env.state_dim)),
)
feature_sensitivity_vector = feature_sensitivity_evaluator.evaluate(test_batch)[
"feature_sensitivity"
]
feature_sensitivity_map = {}
for i in range(gym_env.state_dim):
feature_sensitivity_map["state" + str(i)] = feature_sensitivity_vector[i].item()
print(
"state {}, feature sensitivity: {}".format(
i, feature_sensitivity_vector[i].item()
)
)
return feature_sensitivity_map
def train_sgd(
gym_env: OpenAIGymEnvironment,
trainer: MDNRNNTrainer,
use_gpu: bool,
test_run_name: str,
minibatch_size: int,
run_details: OpenAiRunDetails,
test_batch: rlt.PreprocessedTrainingBatch,
):
assert run_details.max_steps is not None
train_replay_buffer = get_replay_buffer(
run_details.num_train_episodes,
run_details.seq_len,
run_details.max_steps,
gym_env,
)
valid_replay_buffer = get_replay_buffer(
run_details.num_test_episodes,
run_details.seq_len,
run_details.max_steps,
gym_env,
)
valid_batch = valid_replay_buffer.sample_memories(
valid_replay_buffer.memory_size, use_gpu=use_gpu, batch_first=True
)
valid_loss_history = []
num_batch_per_epoch = train_replay_buffer.memory_size // minibatch_size
logger.info(
"Collected data {} transitions.\n"
"Training will take {} epochs, with each epoch having {} mini-batches"
" and each mini-batch having {} samples".format(
train_replay_buffer.memory_size,
run_details.train_epochs,
num_batch_per_epoch,
minibatch_size,
)
)
for i_epoch in range(run_details.train_epochs):
for i_batch in range(num_batch_per_epoch):
training_batch = train_replay_buffer.sample_memories(
minibatch_size, use_gpu=use_gpu, batch_first=True
)
losses = trainer.train(training_batch, batch_first=True)
logger.info(
"{}-th epoch, {}-th minibatch: \n"
"loss={}, bce={}, gmm={}, mse={} \n"
"cum loss={}, cum bce={}, cum gmm={}, cum mse={}\n".format(
i_epoch,
i_batch,
losses["loss"],
losses["bce"],
losses["gmm"],
losses["mse"],
np.mean(trainer.cum_loss),
np.mean(trainer.cum_bce),
np.mean(trainer.cum_gmm),
np.mean(trainer.cum_mse),
)
)
trainer.mdnrnn.mdnrnn.eval()
valid_losses = trainer.get_loss(
valid_batch, state_dim=gym_env.state_dim, batch_first=True
)
valid_losses = loss_to_num(valid_losses)
valid_loss_history.append(valid_losses)
trainer.mdnrnn.mdnrnn.train()
logger.info(
"{}-th epoch, validate loss={}, bce={}, gmm={}, mse={}".format(
i_epoch,
valid_losses["loss"],
valid_losses["bce"],
valid_losses["gmm"],
valid_losses["mse"],
)
)
latest_loss = valid_loss_history[-1]["loss"]
recent_valid_loss_hist = valid_loss_history[
-1 - run_details.early_stopping_patience : -1
]
# early stopping
if len(valid_loss_history) > run_details.early_stopping_patience and all(
(latest_loss >= v["loss"] for v in recent_valid_loss_hist)
):
break
trainer.mdnrnn.mdnrnn.eval()
test_losses = trainer.get_loss(
test_batch, state_dim=gym_env.state_dim, batch_first=True
)
test_losses = loss_to_num(test_losses)
logger.info(
"Test loss: {}, bce={}, gmm={}, mse={}".format(
test_losses["loss"],
test_losses["bce"],
test_losses["gmm"],
test_losses["mse"],
)
)
logger.info("Valid loss history: {}".format(valid_loss_history))
return test_losses, valid_loss_history, trainer
def create_trainer(
params: OpenAiGymParameters, env: OpenAIGymEnvironment, use_gpu: bool
):
assert params.mdnrnn is not None
assert params.run_details.max_steps is not None
mdnrnn_params = params.mdnrnn
mdnrnn_net = MemoryNetwork(
state_dim=env.state_dim,
action_dim=env.action_dim,
num_hiddens=mdnrnn_params.hidden_size,
num_hidden_layers=mdnrnn_params.num_hidden_layers,
num_gaussians=mdnrnn_params.num_gaussians,
)
if use_gpu:
mdnrnn_net = mdnrnn_net.cuda()
cum_loss_hist_len = (
params.run_details.num_train_episodes
* params.run_details.max_steps
// mdnrnn_params.minibatch_size
)
trainer = MDNRNNTrainer(
mdnrnn_network=mdnrnn_net, params=mdnrnn_params, cum_loss_hist=cum_loss_hist_len
)
return trainer
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().setLevel(logging.INFO)
args = sys.argv
main(args[1:])
```
#### File: test/readers/test_nparray_reader.py
```python
import unittest
from collections import OrderedDict
import numpy as np
import numpy.testing as npt
from ml.rl.readers.nparray_reader import NpArrayReader
class TestNpArrayReader(unittest.TestCase):
def get_test_data(self, n):
return OrderedDict(
[
("states", np.random.randn(n, 10)),
("actions", np.random.randn(n, 10)),
("rewards", np.random.randn(n)),
(
"next",
OrderedDict(
[
("states", np.random.randn(n, 10)),
("actions", np.random.randn(n, 10)),
]
),
),
]
)
def assert_batch_equal(self, data, batch, offset, length):
for k in ["states", "actions", "rewards"]:
npt.assert_array_equal(data[k][offset : offset + length], batch[k])
for k in ["states", "actions"]:
npt.assert_array_equal(
data["next"][k][offset : offset + length], batch["next"][k]
)
def test_basic(self):
data = self.get_test_data(1000)
batch_size = 100
reader = NpArrayReader(data, batch_size=batch_size)
for i, batch in enumerate(reader):
self.assert_batch_equal(data, batch, i * batch_size, batch_size)
self.assertEqual(9, i)
def test_drop_small(self):
data = self.get_test_data(999)
batch_size = 100
reader = NpArrayReader(data, batch_size=batch_size)
for i, batch in enumerate(reader):
self.assert_batch_equal(data, batch, i * batch_size, batch_size)
self.assertEqual(8, i)
def test_not_drop_small(self):
data = self.get_test_data(999)
batch_size = 100
reader = NpArrayReader(data, batch_size=batch_size, drop_small=False)
for i, batch in enumerate(reader):
self.assert_batch_equal(
data, batch, i * batch_size, batch_size if i != 9 else 99
)
self.assertEqual(9, i)
def test_shard(self):
n = 1000
data = self.get_test_data(n)
batch_size = 100
num_shards = 2
shard_size = n // num_shards
reader = NpArrayReader(data, batch_size=batch_size, num_shards=num_shards)
num_batches = 0
for shard in range(num_shards):
for i, batch in enumerate(reader.get_shard(shard)):
self.assert_batch_equal(
data, batch, shard * shard_size + i * batch_size, batch_size
)
num_batches += 1
self.assertEqual(10, num_batches)
def test_shard_drop_small(self):
n = 1000
data = self.get_test_data(n)
batch_size = 100
num_shards = 3
shard_size = n // num_shards + 1
reader = NpArrayReader(data, batch_size=batch_size, num_shards=num_shards)
num_batches = 0
for shard in range(num_shards):
for i, batch in enumerate(reader.get_shard(shard)):
self.assert_batch_equal(
data, batch, shard * shard_size + i * batch_size, batch_size
)
num_batches += 1
self.assertEqual(9, num_batches)
def test_shard_not_drop_small(self):
n = 1000
data = self.get_test_data(n)
batch_size = 100
num_shards = 3
shard_size = n // num_shards + 1
reader = NpArrayReader(
data, batch_size=batch_size, drop_small=False, num_shards=num_shards
)
num_batches = 0
for shard in range(num_shards):
for i, batch in enumerate(reader.get_shard(shard)):
self.assert_batch_equal(
data,
batch,
shard * shard_size + i * batch_size,
batch_size if i != 3 else (34 if shard != 2 else 32),
)
num_batches += 1
self.assertEqual(12, num_batches)
```
#### File: test/simulators/test_recsim.py
```python
import unittest
import torch
from ml.rl.simulators.recsim import RecSim
class RandomPolicy:
def __init__(self, m):
self.w = torch.ones(1, m) / m
self.generator = torch._C.Generator()
self.generator.manual_seed(10101)
def __call__(self, num_active_users, k):
w = self.w.repeat(num_active_users, 1)
action = torch.multinomial(w, k, generator=self.generator)
return action
class TestRecsim(unittest.TestCase):
def test_default(self):
recsim = RecSim()
policy = RandomPolicy(recsim.m)
cum_reward = 0
for _i in range(10000):
active_user_ids, users, candidates = recsim.obs()
action = policy(active_user_ids.shape[0], recsim.k)
num_active_users = recsim.step(action)
cum_reward += num_active_users
if num_active_users == 0:
break
else:
self.fail("Running too long")
self.assertEqual(197877, cum_reward)
``` |
{
"source": "JoshRosen/spark-pr-dashboard",
"score": 2
} |
#### File: JoshRosen/spark-pr-dashboard/appengine_config.py
```python
import site
import os.path
import gae_mini_profiler.profiler
site.addsitedir(os.path.join(os.path.dirname(__file__), 'lib'))
def webapp_add_wsgi_middleware(app):
app = gae_mini_profiler.profiler.ProfilerWSGIMiddleware(app)
return app
def gae_mini_profiler_should_profile_production():
from google.appengine.api import users
return users.is_current_user_admin()
``` |
{
"source": "joshrost/dotfiles",
"score": 3
} |
#### File: polybar/scripts/ricardo.py
```python
import requests
from tabulate import tabulate
RICARDO_URL = "https://www.ricardo.ch"
PERSONAL_ACCOUNT = "neutron66"
def get_product_price(product_url):
"""
Return the price of the item if it has been sold.
Raises an IndexError if the item is unsold.
"""
result = requests.get(RICARDO_URL + product_url)
page = result.text.split('value--1bdsK">')[1]
amount = page.split("</p>")[0]
return amount
def get_products_url_by_user(user):
"""
Return the URLs of all items listed by that user.
"""
result = requests.get("{}/de/shop/{}".format(RICORDO_URL, user))
products = []
all_names = result.text.split('MuiGrid-grid-md-3" href="')
all_names.pop(0)
for name in all_names:
product = name.split('/"><div')[0]
products.append(product)
return products
def get_total_of_products(user_name=PERSONAL_ACCOUNT):
"""
Return the current total of all sold products.
Unsold products are skipped.
"""
total = 0
for product in get_products_url_by_user(user_name):
try:
amount = get_product_price(product)
total = total + float(amount)
except IndexError:
continue
return total
def get_bill_of_total(user_name=PERSONAL_ACCOUNT):
"""
Return an itemized bill with the price of each product and the total.
Unsold products are marked accordingly.
"""
bill = []
total = 0
for product in get_products_url_by_user(user_name):
try:
amount = get_product_price(product)
total = total + float(amount)
bill.append([(product.split("/de/a/")[1].replace("-", " ")), amount])
except IndexError:
bill.append([(product.split("/de/a/")[1].replace("-", " ")), "NOT SOLD"])
bill.append(["Total", total])
return bill
print(get_total_of_products())
# bill = get_bill_of_total()
# print(tabulate(bill, headers=["Product", "Price CHF"]))
``` |
{
"source": "joshroybal/mergesort-python",
"score": 3
} |
#### File: joshroybal/mergesort-python/random_module.py
```python
from random import randint, random
def random_integer(): return randint(0, 2147483648)
def random_real(): return random()
def random_record():
RECSIZ = 128
NORECS = 405995
idx = int(NORECS * random_real())
direct_access = open('/home/slacker/dat/data.dat', 'rb')
direct_access.seek(idx * RECSIZ)
record = direct_access.read(RECSIZ)
direct_access.close()
return record.strip()
def random_list(n, fn):
return [fn() for i in xrange(n)]
``` |
{
"source": "joshroybal/-presidential_election_stats_python",
"score": 3
} |
#### File: joshroybal/-presidential_election_stats_python/report.py
```python
import sys
import statistics
# procedure definitions
def compute_stats(x):
stats_list = []
stats_list.append('%.2f' % min(x))
stats_list.append('%.2f' % max(x))
stats_list.append('%.2f' % statistics.mean(x))
stats_list.append('%.2f' % statistics.sample_standard_deviation(x))
stats_list.append('%.2f' % statistics.median(x))
stats_list.append('%.2f' % statistics.median_deviation(x))
stats_list.append('%.3f' % statistics.skewness(x))
return stats_list
def write_row(outfile, row_list, flag):
if flag == 'flat':
outfile.write('%-14s ' % row_list[0])
outfile.write('%s ' % row_list[1])
for stat in row_list[2:-1]:
outfile.write('%8s' % stat)
outfile.write('%9s' % row_list[-1])
elif flag == 'html':
outfile.write('<tr>')
for x in row_list: outfile.write('<td>' + x + '</td>')
outfile.write('<tr>')
else:
if (flag == 'csv'): outfile.write(','.join(row_list))
if (flag == 'tab'): outfile.write('\t'.join(row_list))
outfile.write('\n')
def print_table(outfile, list_table, flag):
headers = ['STATE','P','MIN','MAX','AVG','STD','MDN','MAD','SKW']
if flag == 'html':
outfile.write('<tr>')
for hdr in headers: outfile.write('<th>' + hdr + '</th>')
outfile.write('</tr>\n')
else:
write_row(outfile, headers, flag)
for state in sorted(list_table):
if state == 'U. S. Total': continue
write_row(outfile, [state, 'D'] + list_table[state]['D'], flag)
write_row(outfile, [state, 'R'] + list_table[state]['R'], flag)
write_row(outfile, [state, 'I'] + list_table[state]['I'], flag)
write_row(outfile, ['U. S. Total', 'D'] + list_table['U. S. Total']['D'], flag)
write_row(outfile, ['U. S. Total', 'R'] + list_table['U. S. Total']['R'], flag)
write_row(outfile, ['U. S. Total', 'I'] + list_table['U. S. Total']['I'], flag)
if flag == 'html': outfile.write('</table>\n')
# main program
formats = ['csv','flat','html','tab']
if len(sys.argv) < 2 or sys.argv[1] not in formats:
print 'Usage: ' + sys.argv[0] + ' csv|flat|html|tab'
sys.exit(0)
# input section
results = {}
infile = open('results.txt', 'rb')
for line in infile:
field_list = line.strip().split(',')
if field_list[0] not in results:
results[field_list[0]] = { 'D':[], 'R':[], 'I':[] }
results[field_list[0]]['D'].append(100.*float(field_list[1]))
results[field_list[0]]['R'].append(100.*float(field_list[2]))
results[field_list[0]]['I'].append(100.*float(field_list[3]))
infile.close()
# core processing section
# compute results statistics
stats_report = {}
for state in results:
if state not in stats_report:
stats_report[state] = { 'D':[], 'R':[], 'I':[] }
stats_report[state]['D'] = compute_stats(results[state]['D'])
stats_report[state]['R'] = compute_stats(results[state]['R'])
stats_report[state]['I'] = compute_stats(results[state]['I'])
# compute swings
swings = {}
for state in stats_report:
if state not in swings:
swings[state] = { 'D':[], 'R':[], 'I':[] }
px = results[state]['D']
py = results[state]['R']
pz = results[state]['I']
n = min(len(px), len(py), len(pz))
for i in range(1, n):
swings[state]['D'].append(px[i] - px[i-1])
swings[state]['R'].append(py[i] - py[i-1])
swings[state]['I'].append(pz[i] - pz[i-1])
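# e.g., if a party's share moves from 48.2% to 51.1% between consecutive
# elections, the recorded swing is +2.9 points (figures are illustrative).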
# compute swings statistics
swings_report = {}
for state in results:
if state not in swings_report:
swings_report[state] = { 'D':[], 'R':[], 'I':[] }
swings_report[state]['D'] = compute_stats(swings[state]['D'])
swings_report[state]['R'] = compute_stats(swings[state]['R'])
swings_report[state]['I'] = compute_stats(swings[state]['I'])
# output section
if sys.argv[1] == 'html':
filename = 'report.html'
elif sys.argv[1] == 'csv':
filename = 'report.csv'
else:
filename = 'report.txt'
outfile = open(filename, 'wb')
if sys.argv[1] == 'html':
outfile.write('<!DOCTYPE html>\n')
outfile.write('<html>\n')
outfile.write('<head>\n')
outfile.write('<link id="styleinfo" media="all">\n')
outfile.write('<script type="text/javascript" src="style.js" defer></script>\n')
outfile.write('</head>\n')
outfile.write('<body>\n')
outfile.write('<p>results report</p>\n')
outfile.write('<table id="stats_table">\n')
else:
outfile.write('results report\n')
print_table(outfile, stats_report, sys.argv[1])
print
if sys.argv[1] == 'html':
outfile.write('<p>swings report</p>\n')
outfile.write('<table id="swings_table">\n')
else:
outfile.write('swings report\n')
print_table(outfile, swings_report, sys.argv[1])
if sys.argv[1] == 'html':
outfile.write('</body>\n')
outfile.write('</html>\n')
outfile.close()
print 'tables written to file ' + filename
``` |
{
"source": "joshroybal/python-words",
"score": 4
} |
#### File: joshroybal/python-words/file_module.py
```python
def read_text_file(filename):
records = []
infile = open(filename, 'r')
for line in infile:
records.append(line.strip())
infile.close()
print('<<', filename)
return records
def read_binary_file(filename):
records = []
infile = open(filename, 'rb')
record = infile.read(32).decode('ascii')
while record:
records.append(record.strip())
record = infile.read(32).decode('ascii')
infile.close()
print('<<', filename)
return records
def write_text_file(records, filename):
outfile = open(filename, 'w')
for record in records:
outfile.write('%s\n' % record)
outfile.close()
print('>>', filename)
def write_binary_file(records, filename):
outfile = open(filename, 'wb')
for record in records:
outfile.write(bytes(record.ljust(32), 'ascii'))
outfile.close()
print('>>', filename)
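# Illustrative round trip (file names are placeholders, not from this module):
#   words = read_text_file('words.txt')
#   write_binary_file(words, 'words.dat')
#   assert read_binary_file('words.dat') == words  # holds for ASCII words up to 32 chars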
``` |
{
"source": "joshroybal/us_presidential_election_stats",
"score": 3
} |
#### File: joshroybal/us_presidential_election_stats/states_pct_stats.py
```python
import math
# subprogram definitions
def avg(pct_list):
return sum(pct_list) / len(pct_list)
def std(pct_list):
n = len(pct_list)
if n <= 1: return 0 # evade divide by zero
m = avg(pct_list)
s = 0.
for x in pct_list:
s += (x - m)**2
return math.sqrt(s / (n - 1))
def med(pct_list):
n = len(pct_list)
s = sorted(pct_list)
if n % 2 != 0:
return s[n/2]
else:
return (s[n/2-1]+s[n/2])/2
def mad(pct_list):
n = len(pct_list)
m = med(pct_list)
dev = []
for x in pct_list:
dev.append(abs(x-m))
return med(dev)
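# Worked example (illustrative): med([1.0, 2.0, 4.0, 7.0]) = 3.0, so the absolute
# deviations are [2.0, 1.0, 1.0, 4.0] and mad([1.0, 2.0, 4.0, 7.0]) = 1.5.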
def compute_table(state_pcts, party):
table = {}
for state in sorted(state_pcts):
pct_list = state_pcts[state][party]
table[state] = {}
table[state]['min'] = min(pct_list)
table[state]['max'] = max(pct_list)
table[state]['avg'] = avg(pct_list)
table[state]['std'] = std(pct_list)
table[state]['med'] = med(pct_list)
table[state]['mad'] = mad(pct_list)
return table
def print_table(table, party):
print
print party + ' %'
print '{:20}'.format('state'),
print '{:>8}'.format('min'),
print '{:>8}'.format('max'),
print '{:>8}'.format('avg'),
print '{:>8}'.format('std'),
print '{:>8}'.format('med'),
print '{:>8}'.format('mad')
for state in sorted(table):
if state == 'U. S.': continue
print '{:20}'.format(state),
print format(table[state]['min'], '8.2f'),
print format(table[state]['max'], '8.2f'),
print format(table[state]['avg'], '8.2f'),
print format(table[state]['std'], '8.2f'),
print format(table[state]['med'], '8.2f'),
print format(table[state]['mad'], '8.2f')
print '{:20}'.format('U. S.'),
print format(table['U. S.']['min'], '8.2f'),
print format(table['U. S.']['max'], '8.2f'),
print format(table['U. S.']['avg'], '8.2f'),
print format(table['U. S.']['std'], '8.2f'),
print format(table['U. S.']['med'], '8.2f'),
print format(table['U. S.']['mad'], '8.2f')
def print_sorted_stats(table, party, stat):
print
print party + ' % sorted by ' + stat
print '{:20}'.format('state'),
print '{:>8}'.format('min'),
print '{:>8}'.format('max'),
print '{:>8}'.format('avg'),
print '{:>8}'.format('std'),
print '{:>8}'.format('med'),
print '{:>8}'.format('mad')
for key, value in sorted(table.items(), key=lambda item: item[1][stat]):
print '{:20}'.format(key),
print format(value['min'], '8.2f'),
print format(value['max'], '8.2f'),
print format(value['avg'], '8.2f'),
print format(value['std'], '8.2f'),
print format(value['med'], '8.2f'),
print format(value['mad'], '8.2f')
# main program
state_pcts = {}
i = 0
infile = open('statepcts.dat', 'r')
for line in infile:
state = line[:20].strip()
if state not in state_pcts:
state_pcts[state] = {}
state_pcts[state]['Democratic'] = []
state_pcts[state]['Republican'] = []
state_pcts[state]['Other'] = []
fields = line[22:].split()
state_pcts[state]['Democratic'].append(float(fields[0]))
state_pcts[state]['Republican'].append(float(fields[1]))
state_pcts[state]['Other'].append(float(fields[2]))
infile.close()
# processing
democratic_table = compute_table(state_pcts, 'Democratic')
republican_table = compute_table(state_pcts, 'Republican')
other_table = compute_table(state_pcts, 'Other')
# output tables
print_table(democratic_table, 'Democratic')
print_table(republican_table, 'Republican')
print_table(other_table, 'Other')
stat_list = [ 'min', 'max', 'avg', 'std', 'med', 'mad' ]
for stat in stat_list: print_sorted_stats(democratic_table, 'Democratic', stat)
for stat in stat_list: print_sorted_stats(republican_table, 'Republican', stat)
for stat in stat_list: print_sorted_stats(other_table, 'Other', stat)
``` |
{
"source": "joshrule/ec",
"score": 3
} |
#### File: ec/bin/dsl2lam.py
```python
from functools import reduce
from random import choice
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from dreamcoder.utilities import parseSExpression as parse
from dreamcoder.program import Abstraction, Index, Application
# classes Appl, Abstr, Index modified from the parent class to implement:
# * normal order reduction
# * len(). the straightforward approach runs into recursion depth limit, so
# we use generators and compute the sum only at the top level len()
class Appl(Application):
def __init__(self, f, x):
super().__init__(f, x)
self.reduced = None
def len_gen(self):
# '(' + len(f) + len(x) +')'
yield 2
yield from self.f.len_gen()
yield from self.x.len_gen()
def __len__(self):
return sum(self.len_gen())
def shift(self, offset, depth=0):
return Appl(self.f.shift(offset, depth),
self.x.shift(offset, depth))
def substitute(self, old, new):
if self == old:
return new
return Appl(
self.f.substitute(
old, new), self.x.substitute(
old, new))
def betaReduce(self):
if self.reduced is not None:
return self.reduced
if self.f.isAbstraction:
b = self.f.body
v = self.x
self.reduced = b.substitute(Index(0), v.shift(1)).shift(-1)
return self.reduced
f = self.f.betaReduce()
if f is not None:
self.reduced = Appl(f, self.x)
return self.reduced
x = self.x.betaReduce()
if x is not None:
self.reduced = Appl(self.f, x)
return self.reduced
return None
class Abstr(Abstraction):
def __init__(self, body):
super().__init__(body)
self.reduced = None
def len_gen(self):
# '(' + 'λ' + len(body) + ')'
yield 3
yield from self.body.len_gen()
def __len__(self):
return sum(self.len_gen())
def shift(self, offset, depth=0):
return Abstr(self.body.shift(offset, depth + 1))
def substitute(self, old, new):
if self == old:
return new
old = old.shift(1)
new = new.shift(1)
return Abstr(self.body.substitute(old, new))
def betaReduce(self):
if self.reduced is not None: return self.reduced
b = self.body.betaReduce()
if b is None: return None
self.reduced = Abstr(b)
return self.reduced
def index_len_gen(self):
yield 1
Index.len_gen = index_len_gen
Index.__len__ = lambda _: 1
class Min:
'''Keeps track of the value with minimum key seen so far.'''
def __init__(self, key=None, val=None):
self.key, self.val = key, val
def update(self, newkey, newval):
if self.key is None or newkey < self.key:
self.key, self.val = newkey, newval
def _encode_num(m, f, x):
enc = x
while m > 0:
enc = Appl(f, enc)
m -= 1
return enc
def encode_num(m):
'''Return Church encoding of m.'''
return Abstr(Abstr(_encode_num(m, Index(1), Index(0))))
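# For example (illustrative), encode_num(2) builds the Church numeral for 2,
# Abstr(Abstr(Appl(Index(1), Appl(Index(1), Index(0))))),
# i.e. (lambda (lambda ($1 ($1 $0)))).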
global_encoding = {}
def translate(expr, encoding=global_encoding):
'''Translates a left-associated list representing an S-expression into a
lambda calculus term.'''
if type(expr) is str:
if expr in encoding:
return encoding[expr]
elif expr.startswith('$'):
return Index(int(expr[1:]))
elif expr.isdecimal():
return encode_num(int(expr))
else:
raise NameError('unknown primitive ' + expr)
else:
if expr[0] == 'lambda':
return Abstr(translate(expr[1], encoding=encoding))
else:
return Appl(translate(expr[0], encoding=encoding), translate(expr[1], encoding=encoding))
def left_associate(expr):
'''Left-associates a nested list representing an S-expression.'''
if type(expr) is not list:
return expr
elif len(expr) == 2:
return [left_associate(expr[0]), left_associate(expr[1])]
else:
return [left_associate(expr[:-1]), left_associate(expr[-1])]
def make_program(expr, encoding=global_encoding):
return translate(left_associate(parse(expr)), encoding=encoding)
def beta_normal_form(term, keepmin=False, maxreds=None):
'''Repeatedly beta-reduces a term, optionally keeping track of the shortest
intermediate reduced form of the term, optionally capping the number of
reductions.'''
t = term
minform = Min()
i = 0
while (term is not None) and ((maxreds is None) or (i < maxreds)):
t = term
if keepmin: minform.update(len(term), term)
term = term.betaReduce()
i += 1
return t, minform
# primitives that start with _ are not used in HL, and are only used here as
# components of more complex primitives
primitives = {
# booleans
'true': '(lambda (lambda $1))',
'false': '(lambda (lambda $0))',
'not': '(lambda (lambda (lambda ($2 $0 $1))))',
'if': '(lambda (lambda (lambda ($2 $1 $0))))',
'and': '(lambda (lambda ($1 $0 $1)))',
'or': '(lambda (lambda ($1 $1 $0)))',
# positive integer arithmetic
'_pred': '(lambda (lambda (lambda ($2 (lambda (lambda ($0 ($1 $3)))) (lambda $1) (lambda $0)))))',
'+': '(lambda (lambda (lambda (lambda ($3 $1 ($2 $1 $0))))))',
'-': '(lambda (lambda ($0 _pred $1)))',
'*': '(lambda (lambda (lambda ($2 ($1 $0)))))',
'_Y': '(lambda ((lambda ($1 ($0 $0))) (lambda ($1 ($0 $0)))))',
'_iszero': '(lambda ($0 (lambda false) true))',
# division taken from the Wikipedia page
'_div': '(lambda (lambda (lambda (lambda (lambda ((lambda (_iszero $0 $1 ($2 ($5 $0 $3 $2 $1)))) (- $3 $2)))))))',
'/': '(lambda (_Y _div (lambda (lambda ($1 ($2 $1 $0))))))',
'_<=': '(lambda (lambda (_iszero (- $1 $0))))',
'_>=': '(lambda (lambda (_iszero (- $0 $1))))',
'<': '(lambda (lambda (not (_>= $1 $0))))',
'>': '(lambda (lambda (not (_<= $1 $0))))',
'==': '(lambda (lambda (and (_>= $1 $0) (_<= $1 $0))))',
'_<=>': '(lambda (lambda (lambda (and (_<= $2 $0) (_>= $1 $0)))))',
'%': '(_Y (lambda (lambda (lambda ((< $1 $0) $1 ($2 (- $1 $0) $0))))))',
'is_even': '(lambda (_iszero (% $0 2)))',
'is_odd': '(lambda (not (is_even $0)))',
# lists
# Scott-encoded for ease of pattern-matching recursion
'[]': '(lambda (lambda $1))',
'empty': '[]',
'cons': '(lambda (lambda (lambda (lambda ($0 $3 $2)))))',
'singleton': '(lambda (cons $0 []))',
# foldr is useful for other primitives but concepts use foldl
'_foldr': '(_Y (lambda (lambda (lambda (lambda '
'($0 $1 (lambda (lambda ($4 ($5 $4 $3 $0) $1)))))))))',
'fold': '(_Y (lambda (lambda (lambda (lambda '
'($0 $1 (lambda (lambda ($5 $4 ($4 $3 $1) $0)))))))))',
'map': '(lambda (_foldr (lambda (lambda (cons ($2 $0) $1))) []))',
'filter': '(lambda (_foldr (lambda (lambda (($2 $0) (cons $0 $1) $1))) []))',
'zip': '(_Y (lambda (lambda (lambda ($1 [] (lambda (lambda ($2 [] '
'(lambda (lambda (cons (cons $3 (cons $1 [])) ($6 $2 $0))))))))))))',
'first': '(lambda ($0 false (lambda (lambda $1))))',
'_tail': '(lambda ($0 [] (lambda (lambda $0))))',
'nth': '(_Y (lambda (lambda (lambda ((== $1 1) (first $0) ($2 (_pred $1) (_tail $0)))))))',
'second': '(nth 2)',
'third': '(nth 3)',
'length': '(_foldr (lambda (lambda (+ 1 $1))) 0)',
'last': '(lambda (nth (length $0) $0))',
'concat': '(lambda (lambda (_foldr (lambda (lambda (cons $0 $1))) $0 $1)))',
'append': '(lambda (lambda (concat $1 (singleton $0))))',
'count': '(lambda (lambda (length (filter (== $1) $0))))',
'cut_vals': '(lambda (filter (lambda (not (== $1 $0)))))',
'is_in': '(lambda (lambda (not (_iszero (count $0 $1)))))',
'flatten': '(_foldr (lambda (lambda (concat $0 $1))) [])',
'_summary': '(lambda (lambda (lambda (_foldr (lambda (lambda ($3 ($4 $0) ($4 $1) $0 $1))) (first $0) $0))))',
'max': '(_summary (lambda $0) >)',
'min': '(_summary (lambda $0) <)',
'product': '(_foldr * 1)',
'reverse': '(fold (lambda (lambda (cons $0 $1))) [])',
'sum': '(_foldr + 0)',
'unique': '(lambda (reverse (fold (lambda (lambda (is_in $1 $0 $1 (cons $0 $1)))) [] $0)))',
'range': '(_Y (lambda (lambda (lambda (lambda ((< $0 $2) [] (cons $2 ($3 (+ $2 $1) $1 $0))))))))',
'repeat': '(lambda (lambda (map (lambda $2) (range 1 1 $0))))',
# zips a list with the list [1, 2, ..., len]. used in most primitives that
# have anything to do with indices
'_zipi': '(lambda (zip (range 1 1 (length $0)) $0))',
'_foldri': '(lambda (lambda (lambda (_foldr (lambda (lambda ($4 (first $0) $1 (second $0)))) $1 (_zipi $0)))))',
'foldi': '(lambda (lambda (lambda (fold (lambda (lambda ($4 (first $0) $1 (second $0)))) $1 (_zipi $0)))))',
'mapi': '(lambda (lambda (map (lambda ($2 (first $0) (second $0))) (_zipi $0))))',
'filteri': '(lambda (lambda (map second (filter (lambda ($2 (first $0) (second $0))) (_zipi $0)))))',
'insert': '(lambda (lambda (_foldri (lambda (lambda (lambda (== $2 $3 (cons $4 (cons $0 $1)) (cons $0 $1))))) [])))',
'replace': '(lambda (lambda (_foldri (lambda (lambda (lambda (== $2 $4 (cons $3 $1) (cons $0 $1))))) [])))',
'cut_idx': '(lambda (_foldri (lambda (lambda (lambda (== $2 $3 $1 (cons $0 $1))))) []))',
'swap': '(lambda (lambda (lambda (_foldri (lambda (lambda (lambda'
'(== $2 $5 (cons (nth $4 $3) $1) (== $2 $4 (cons (nth $5 $3) $1) (cons $0 $1)))))) [] $0))))',
'cut_slice': '(lambda (lambda (_foldri (lambda (lambda (lambda (_<=> $4 $3 $2 $1 (cons $0 $1))))) [])))',
'slice': '(lambda (lambda (_foldri (lambda (lambda (lambda (_<=> $4 $3 $2 (cons $0 $1) $1)))) [])))',
'drop': '(lambda (filteri (lambda (lambda (> $1 $2)))))',
'take': '(lambda (filteri (lambda (lambda (_<= $1 $2)))))',
'droplast': '(lambda (lambda (take (- (length $0) $1) $0)))',
'takelast': '(lambda (lambda (drop (- (length $0) $1) $0)))',
'splice': '(lambda (lambda (lambda (concat (concat (take (_pred $1) $0) $2) (drop (_pred $1) $0)))))',
'find': '(lambda (_foldri (lambda (lambda (lambda ($3 $0 (cons $2 $1) $1)))) []))',
'cut_val': '(lambda (lambda (cut_idx (first (find (== $1) $0)) $0)))',
'group': '(lambda (lambda (map (lambda (filter (lambda (== $1 ($3 $0))) $1)) (unique (map $1 $0)))))',
'_isnil': '(lambda ($0 true (lambda (lambda false))))',
'sort': '(_Y (lambda (lambda (lambda (_isnil $0 []'
'((lambda (concat (repeat $0 (count $0 $1)) ($3 $2 (cut_vals $0 $1)))) (_summary $1 < $0)))))))'
}
for prim in primitives:
global_encoding[prim] = make_program(primitives[prim])
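# Quick sanity check (illustrative):
#   prog = make_program('(+ 1 2)')
#   nf, _ = beta_normal_form(prog)
# nf is then the Church numeral for 3, i.e. (lambda (lambda ($1 ($1 ($1 $0))))).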
alt_defns = {
# booleans
'true': ['(lambda (lambda $1))'],
'false': ['(lambda (lambda $0))'],
'not': ['(lambda (lambda (lambda ($2 $0 $1))))'],
'if': ['(lambda (lambda (lambda ($2 $1 $0))))'],
'and': ['(lambda (lambda ($1 $0 $1)))'],
'or': ['(lambda (lambda ($1 $1 $0)))'],
# positive integer arithmetic
'_pred': ['(lambda (lambda (lambda ($2 (lambda (lambda ($0 ($1 $3)))) (lambda $1) (lambda $0)))))'],
'+': ['(lambda (lambda (lambda (lambda ($3 $1 ($2 $1 $0))))))'],
'-': ['(lambda (lambda ($0 _pred $1)))'],
'*': ['(lambda (lambda (lambda ($2 ($1 $0)))))'],
'_Y': [
'(lambda ((lambda ($1 ($0 $0))) (lambda ($1 ($0 $0)))))',
'((lambda (lambda ($1 $0 $1))) (lambda (lambda ($1 ($0 $1 $0)))))',
'((lambda (lambda ($0 ($1 $1 $0)))) (lambda (lambda ($0 ($1 $1 $0)))))'
],
'_iszero': ['(lambda ($0 (lambda false) true))'],
# division taken from the Wikipedia page
'_div': ['(lambda (lambda (lambda (lambda (lambda ((lambda (_iszero $0 $1 ($2 ($5 $0 $3 $2 $1)))) (- $3 $2)))))))'],
'/': ['(lambda (_Y _div (lambda (lambda ($1 ($2 $1 $0))))))'],
'_<=': ['(lambda (lambda (_iszero (- $1 $0))))'],
'_>=': ['(lambda (lambda (_iszero (- $0 $1))))'],
'<': ['(lambda (lambda (not (_>= $1 $0))))'],
'>': ['(lambda (lambda (not (_<= $1 $0))))'],
'==': [
'(lambda (lambda (and (_>= $1 $0) (_<= $1 $0))))',
'(_Y (lambda (lambda (lambda (_iszero $1 (_iszero $0 true false) (_iszero $0 false '
'($2 (_pred $1) (_pred $0))))))))'
],
'_<=>': ['(lambda (lambda (lambda (and (_<= $2 $0) (_>= $1 $0)))))'],
'%': ['(_Y (lambda (lambda (lambda ((< $1 $0) $1 ($2 (- $1 $0) $0))))))'],
'is_even': ['(lambda (_iszero (% $0 2)))'],
'is_odd': [
'(lambda (not (is_even $0)))',
'(lambda (is_even (+ 1 $0)))'
],
# lists
# Scott-encoded for ease of pattern-matching recursion
'[]': ['(lambda (lambda $1))'],
'empty': ['[]'],
'cons': ['(lambda (lambda (lambda (lambda ($0 $3 $2)))))'],
'singleton': ['(lambda (cons $0 []))'],
# foldr is useful for other primitives but concepts use foldl
'_foldr': ['(_Y (lambda (lambda (lambda (lambda '
'($0 $1 (lambda (lambda ($4 ($5 $4 $3 $0) $1)))))))))'],
'fold': ['(_Y (lambda (lambda (lambda (lambda '
'($0 $1 (lambda (lambda ($5 $4 ($4 $3 $1) $0)))))))))'],
'map': ['(lambda (_foldr (lambda (lambda (cons ($2 $0) $1))) []))'],
'filter': ['(lambda (_foldr (lambda (lambda (($2 $0) (cons $0 $1) $1))) []))'],
'zip': ['(_Y (lambda (lambda (lambda ($1 [] (lambda (lambda ($2 [] '
'(lambda (lambda (cons (cons $3 (cons $1 [])) ($6 $2 $0))))))))))))'],
'first': ['(lambda ($0 false (lambda (lambda $1))))'],
'_tail': ['(lambda ($0 [] (lambda (lambda $0))))'],
'nth': [
'(_Y (lambda (lambda (lambda ((== $1 1) (first $0) ($2 (_pred $1) (_tail $0)))))))',
'(lambda (lambda (first ((_pred $1) _tail $0))))'
],
'second': [
'(nth 2)',
'(lambda (first (_tail $0)))'
],
'third': [
'(nth 3)',
'(lambda (first (_tail (_tail $0))))'
],
'length': ['(_foldr (lambda (lambda (+ 1 $1))) 0)'],
'last': ['(lambda (nth (length $0) $0))'],
'concat': ['(lambda (lambda (_foldr (lambda (lambda (cons $0 $1))) $0 $1)))'],
'append': ['(lambda (lambda (concat $1 (singleton $0))))'],
'count': ['(lambda (lambda (length (filter (== $1) $0))))'],
'cut_vals': ['(lambda (filter (lambda (not (== $1 $0)))))'],
'is_in': ['(lambda (lambda (not (_iszero (count $0 $1)))))'],
'flatten': ['(_foldr (lambda (lambda (concat $0 $1))) [])'],
'_summary': ['(lambda (lambda (lambda (_foldr (lambda (lambda ($3 ($4 $0) ($4 $1) $0 $1))) (first $0) $0))))'],
'max': ['(_summary (lambda $0) >)'],
'min': ['(_summary (lambda $0) <)'],
'product': ['(_foldr * 1)'],
'reverse': ['(fold (lambda (lambda (cons $0 $1))) [])'],
'sum': ['(_foldr + 0)'],
'unique': ['(lambda (reverse (fold (lambda (lambda (is_in $1 $0 $1 (cons $0 $1)))) [] $0)))'],
'range': ['(_Y (lambda (lambda (lambda (lambda ((< $0 $2) [] (cons $2 ($3 (+ $2 $1) $1 $0))))))))'],
'repeat': [
'(lambda (lambda (map (lambda $2) (range 1 1 $0))))',
'(_Y (lambda (lambda (lambda (_iszero $0 [] (cons $1 ($2 $1 (_pred $0))))))))',
'(lambda (lambda ($0 (cons $1) [])))'
],
# zips a list with the list [1, 2, ..., len]. used in most primitives that
# have anything to do with indices
'_zipi': ['(lambda (zip (range 1 1 (length $0)) $0))'],
'_foldri': ['(lambda (lambda (lambda (_foldr (lambda (lambda ($4 (first $0) $1 (second $0)))) $1 (_zipi $0)))))'],
'foldi': ['(lambda (lambda (lambda (fold (lambda (lambda ($4 (first $0) $1 (second $0)))) $1 (_zipi $0)))))'],
'mapi': ['(lambda (lambda (map (lambda ($2 (first $0) (second $0))) (_zipi $0))))'],
'filteri': ['(lambda (lambda (map second (filter (lambda ($2 (first $0) (second $0))) (_zipi $0)))))'],
'insert': [
'(lambda (lambda (_foldri (lambda (lambda (lambda (== $2 $3 (cons $4 (cons $0 $1)) (cons $0 $1))))) [])))',
'(lambda (lambda (lambda (concat (append (take (_pred $1) $0) $2) (drop (_pred $1) $0)))))',
'(lambda (lambda (lambda (concat (take (_pred $1) $0) (cons $2 (drop (_pred $1) $0))))))'
],
'replace': [
'(lambda (lambda (_foldri (lambda (lambda (lambda (== $2 $4 (cons $3 $1) (cons $0 $1))))) [])))',
'(lambda (lambda (lambda (concat (append (take (_pred $2) $0) $1) (drop $2 $0)))))',
'(lambda (lambda (lambda (concat (take (_pred $2) $0) (cons $1 (drop $2 $0))))))'
],
'cut_idx': [
'(lambda (_foldri (lambda (lambda (lambda (== $2 $3 $1 (cons $0 $1))))) []))',
'(lambda (lambda (concat (take (_pred $1) $0) (drop $1 $0))))',
'(lambda (cut_slice $0 $0))'
],
'swap': [
'(lambda (lambda (lambda (_foldri (lambda (lambda (lambda '
'(== $2 $5 (cons (nth $4 $3) $1) (== $2 $4 (cons (nth $5 $3) $1) (cons $0 $1)))))) [] $0))))',
'(lambda (lambda (lambda (concat (append (concat (append '
'(take (_pred $2) $0) (nth $1 $0)) (drop $2 (take (_pred $1) $0))) (nth $2 $0)) (drop $1 $0)))))',
'(lambda (lambda (lambda (concat (concat '
'(take (_pred $2) $0) (cons (nth $1 $0) (drop $2 (take (_pred $1) $0)))) (cons (nth $2 $0) (drop $1 $0))))))'
],
'cut_slice': [
'(lambda (lambda (_foldri (lambda (lambda (lambda (_<=> $4 $3 $2 $1 (cons $0 $1))))) [])))',
'(lambda (lambda (lambda (concat (take (_pred $2) $0) (drop $1 $0)))))',
'(lambda (lambda (filteri (lambda (lambda (not (_<=> $3 $2 $1)))))))',
'(_Y (lambda (lambda (lambda (lambda (_iszero (_pred $2) (_iszero $1 $0 ($3 1 (_pred $1) (_tail $0))) '
'(cons (first $0) ($3 (_pred $2) (_pred $1) (_tail $0)))))))))'
],
'slice': [
'(lambda (lambda (_foldri (lambda (lambda (lambda (_<=> $4 $3 $2 (cons $0 $1) $1)))) [])))',
'(lambda (lambda (lambda (drop (_pred $2) (take $1 $0)))))',
'(lambda (lambda (filteri (lambda (lambda (_<=> $3 $2 $1))))))',
'(_Y (lambda (lambda (lambda (lambda (_iszero (_pred $2) (_iszero $1 [] (cons (first $0) ($3 1 (_pred $1) (_tail $0)))) '
'($3 (_pred $2) (_pred $1) (_tail $0))))))))'
],
'drop': [
'(lambda (filteri (lambda (lambda (> $1 $2)))))',
'(lambda ($1 _tail))',
'(_Y (lambda (lambda (lambda (_iszero $1 $0 ($2 (_pred $1) (_tail $0)))))))',
'(lambda (lambda (slice (+ 1 $1) (length $0) $0)))',
'(lambda (cut_slice 1 $0))'
],
'take': [
'(lambda (filteri (lambda (lambda (_<= $1 $2)))))',
'(_Y (lambda (lambda (lambda (_iszero $1 [] (cons (first $0) ($2 (_pred $1) (_tail $0))))))))',
'(lambda (slice 1 $0))',
'(lambda (lambda (cut_slice (+ 1 $1) (length $0) $0)))'
],
'droplast': [
'(lambda (lambda (take (- (length $0) $1) $0)))',
'(_Y (lambda (lambda (lambda (== (length $0) $1 [] (cons (first $0) ($2 (+ 1 $1) (_tail $0))))))))'
],
'takelast': ['(lambda (lambda (drop (- (length $0) $1) $0)))'],
'splice': ['(lambda (lambda (lambda (concat (concat (take (_pred $1) $0) $2) (drop (_pred $1) $0)))))'],
'find': ['(lambda (_foldri (lambda (lambda (lambda ($3 $0 (cons $2 $1) $1)))) []))'],
'cut_val': [
'(lambda (lambda (cut_idx (first (find (== $1) $0)) $0)))',
'(lambda (lambda (second (fold (lambda (lambda '
'((first $1) (cons true (singleton (append (second $1) $0))) '
'(== $3 $0 (cons true (singleton (second $1))) (cons false (singleton (append (second $1) $0))))))) '
'(cons false (singleton [])) $0))))'
],
'group': ['(lambda (lambda (map (lambda (filter (lambda (== $1 ($3 $0))) $1)) (unique (map $1 $0)))))'],
'_isnil': ['(lambda ($0 true (lambda (lambda false))))'],
'sort': ['(_Y (lambda (lambda (lambda (_isnil $0 [] '
'((lambda (concat (repeat $0 (count $0 $1)) ($3 $2 (cut_vals $0 $1)))) (_summary $1 < $0)))))))']
}
def gen_product(*ls):
return reduce(lambda l1, l2: (x1 + (x2,) for x1 in l1 for x2 in l2), ls[1:], map(lambda x: (x,), ls[0]))
def gen_assignments():
dsl_keys = list(alt_defns.keys())
for values in gen_product(*alt_defns.values()):
prims = {}
for i in range(len(dsl_keys)):
prims[dsl_keys[i]] = values[i]
yield prims
def check_assignment_acyclic(assignment):
prims = set(assignment.keys())
enc = {}
while prims:
done = set()
for prim in prims:
try:
enc[prim] = make_program(assignment[prim], encoding=enc)
done.add(prim)
except NameError:
continue
if len(done) == 0:
return False
prims.difference_update(done)
return True
def gen_random_assignments():
while True:
assignment = {}
assignment.update((prim, choice(alt_defns[prim])) for prim in alt_defns)
yield assignment
def gen_valid_assignments():
yield from filter(check_assignment_acyclic, gen_assignments())
def gen_valid_random_assignments():
yield from filter(check_assignment_acyclic, gen_random_assignments())
``` |
{
"source": "joshs333/jude-tools",
"score": 2
} |
#### File: src/jd_tools/commands.py
```python
import jd_tools.build_env as jmbe
import jd_tools.build_tool as jmbt
import os
import shlex
import subprocess
def workspace_build_env(workspace_config, command, args, dry_run=False, list_options=False):
# check config for vars
if "build_env" not in workspace_config:
raise Exception("build_env variable is not in workspace {}".format(workspace_config["name"]))
if "build_tool" not in workspace_config:
raise Exception("build_tool variable is not in workspace {}".format(workspace_config["name"]))
# Set up the tooling and environment
base_dir = workspace_config["workspace_root"]
build_env = jmbe.get_build_env(workspace_config["build_env"])(workspace_config)
if list_options:
options = build_env.getCommands()
print("Workspace {} uses buildenv {} with commands:".format(workspace_config["name"], type(build_env)))
for o in options:
print("- {}".format(o))
return
# Get the build command to run
enved_build_command = build_env.getCommand(command, ' '.join(args))
# Execute!
if enved_build_command is not None:
print("Executing `{}` in `{}`".format(enved_build_command, base_dir))
if not dry_run:
subprocess.call(enved_build_command, cwd=base_dir, shell=True)
def workspace_build(workspace_config, args, dry_run=False):
# check config for vars
if "build_env" not in workspace_config:
raise Exception("build_env variable is not in workspace {}".format(workspace_config["name"]))
if "build_tool" not in workspace_config:
raise Exception("build_tool variable is not in workspace {}".format(workspace_config["name"]))
# Set up the tooling and environment
base_dir = workspace_config["workspace_root"]
build_env = jmbe.get_build_env(workspace_config["build_env"])(workspace_config)
build_env_setup = build_env.setup()
while True:
if len(build_env_setup) <= 0:
break
m, a = build_env_setup.pop(0)
if m == jmbe.SETUP.READY:
break
if m == jmbe.SETUP.COMMAND:
print("Executing `{}` in `{}`".format(a, base_dir))
if not dry_run:
subprocess.call(a, cwd=base_dir, shell=True)
if m == jmbe.SETUP.MESSAGE:
print("Message from {}: {}".format(workspace_config["name"], a))
if m == jmbe.SETUP.SETUP:
build_env_setup = build_env.setup(a)
if m == jmbe.SETUP.FAIL:
print("Build Env Setup for {} failed: {}".format(workspace_config["name"], a))
return
build_tool = jmbt.get_build_tool(workspace_config["build_tool"])(workspace_config)
# Get the build command to run
build_command = build_tool.getBuildCommand(' '.join(args))
enved_build_command = build_env.renderCommand(build_command)
# Execute!
print("Executing `{}` in `{}`".format(enved_build_command, base_dir))
if not dry_run:
subprocess.call(enved_build_command, cwd=base_dir, shell=True)
``` |
{
"source": "joshs85/bond-api",
"score": 3
} |
#### File: bond-api/bond_api/bond.py
```python
import asyncio
import json
import time
from asyncio import transports
from typing import Any, Callable, List, Optional
from aiohttp import ClientSession, ClientTimeout
from aiohttp.client_exceptions import ServerDisconnectedError
from .action import Action
BPUP_INIT_PUSH_MESSAGE = b"\n"
BPUP_PORT = 30007
BPUP_ALIVE_TIMEOUT = 70
class Bond:
"""Bond API."""
def __init__(
self,
host: str,
token: str,
*,
session: Optional[ClientSession] = None,
timeout: Optional[ClientTimeout] = None,
):
"""Initialize Bond with provided host and token."""
self._host = host
self._api_kwargs = {"headers": {"BOND-Token": token}}
if timeout:
self._api_kwargs["timeout"] = timeout
self._session = session
async def version(self) -> dict:
"""Return the version of hub/bridge reported by API."""
return await self.__get("/v2/sys/version")
async def token(self) -> dict:
"""Return the token after power rest or proof of ownership event."""
return await self.__get("/v2/token")
async def bridge(self) -> dict:
"""Return the name and location of the bridge."""
return await self.__get("/v2/bridge")
async def devices(self) -> List[str]:
"""Return the list of available device IDs reported by API."""
json = await self.__get("/v2/devices")
return [key for key in json if key != "_"]
async def device(self, device_id: str) -> dict:
"""Return main device metadata reported by API."""
return await self.__get(f"/v2/devices/{device_id}")
async def device_properties(self, device_id: str) -> dict:
"""Return device properties reported by API."""
return await self.__get(f"/v2/devices/{device_id}/properties")
async def device_state(self, device_id: str) -> dict:
"""Return current device state reported by API."""
return await self.__get(f"/v2/devices/{device_id}/state")
async def action(self, device_id: str, action: Action) -> None:
"""Execute given action for a given device."""
if action.name == Action.SET_STATE_BELIEF:
path = f"/v2/devices/{device_id}/state"
async def patch(session: ClientSession) -> None:
async with session.patch(
f"http://{self._host}{path}", **self._api_kwargs, json=action.argument
) as response:
response.raise_for_status()
await self.__call(patch)
else:
path = f"/v2/devices/{device_id}/actions/{action.name}"
async def put(session: ClientSession) -> None:
async with session.put(
f"http://{self._host}{path}", **self._api_kwargs, json=action.argument
) as response:
response.raise_for_status()
await self.__call(put)
async def __get(self, path) -> dict:
async def get(session: ClientSession) -> dict:
async with session.get(
f"http://{self._host}{path}", **self._api_kwargs
) as response:
response.raise_for_status()
return await response.json()
return await self.__call(get)
async def __call(self, handler: Callable[[ClientSession], Any]):
if not self._session:
async with ClientSession() as request_session:
return await handler(request_session)
else:
try:
return await handler(self._session)
except ServerDisconnectedError:
# bond has a short connection close time
# so we need to retry if we idled for a bit
return await handler(self._session)
class BPUPSubscriptions:
"""Store BPUP subscriptions."""
def __init__(self):
"""Init and store callbacks."""
self._callbacks = {}
self.last_message_time = 0
@property
def alive(self):
"""Return True if a BPUP message was received within the alive timeout."""
return (time.time() - self.last_message_time) < BPUP_ALIVE_TIMEOUT
def subscribe(self, device_id, callback):
"""Subscribe to BPUP updates."""
self._callbacks.setdefault(device_id, []).append(callback)
def unsubscribe(self, device_id, callback):
"""Unsubscribe from BPUP updates."""
self._callbacks[device_id].remove(callback)
def notify(self, json_msg):
"""Notify subscribers of an update."""
self.last_message_time = time.time()
if json_msg.get("s") != 200:
return
topic = json_msg["t"].split("/")
device_id = topic[1]
for callback in self._callbacks.get(device_id, []):
callback(json_msg["b"])
class BPUProtocol:
"""Implements BPU Protocol."""
def __init__(self, loop, bpup_subscriptions):
"""Create BPU Protocol."""
self.loop = loop
self.bpup_subscriptions = bpup_subscriptions
self.transport = None
self.keep_alive = None
def connection_made(self, transport):
"""Connect or reconnect to the device."""
self.transport = transport
if self.keep_alive:
self.keep_alive.cancel()
self.keep_alive = None
self.send_keep_alive()
def send_keep_alive(self):
"""Send a keep alive every 60 seconds per the protocol."""
self.transport.sendto(BPUP_INIT_PUSH_MESSAGE)
self.keep_alive = self.loop.call_later(60, self.send_keep_alive)
def datagram_received(self, data, addr):
"""Process incoming state changes."""
self.bpup_subscriptions.notify(json.loads(data.decode()[:-1]))
def error_received(self, exc):
"""Ignore errors."""
return
def connection_lost(self, exc):
"""Ignore connection lost."""
return
def stop(self):
"""Stop the client."""
if self.transport:
self.transport.close()
async def start_bpup(host_ip_addr, bpup_subscriptions):
"""Create the socket and protocol."""
loop = asyncio.get_event_loop()
_, protocol = await loop.create_datagram_endpoint(
lambda: BPUProtocol(loop, bpup_subscriptions),
remote_addr=(host_ip_addr, BPUP_PORT),
)
return protocol.stop
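# Illustrative usage sketch (host and token below are placeholders, not values
# from this module):
#   bond = Bond("192.168.1.10", "bond-token")
#   version = await bond.version()
#   device_ids = await bond.devices()
#   state = await bond.device_state(device_ids[0])
#   subscriptions = BPUPSubscriptions()
#   stop_bpup = await start_bpup("192.168.1.10", subscriptions)
#   subscriptions.subscribe(device_ids[0], print)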
```
#### File: bond-api/bond_api/device_type.py
```python
class DeviceType:
"""Bond Device type enumeration."""
CEILING_FAN = "CF"
MOTORIZED_SHADES = "MS"
FIREPLACE = "FP"
AIR_CONDITIONER = "AC"
GARAGE_DOOR = "GD"
BIDET = "BD"
LIGHT = "LT"
GENERIC_DEVICE = "GX"
@staticmethod
def is_fan(device_type: str) -> bool:
"""Checks if specified device type is a fan."""
return device_type == DeviceType.CEILING_FAN
@staticmethod
def is_shades(device_type: str) -> bool:
"""Checks if specified device type is shades."""
return device_type == DeviceType.MOTORIZED_SHADES
@staticmethod
def is_fireplace(device_type: str) -> bool:
"""Checks if specified device type is fireplace."""
return device_type == DeviceType.FIREPLACE
@staticmethod
def is_air_conditioner(device_type: str) -> bool:
"""Checks if specified device type is air conditioner."""
return device_type == DeviceType.AIR_CONDITIONER
@staticmethod
def is_garage_door(device_type: str) -> bool:
"""Checks if specified device type is garage door."""
return device_type == DeviceType.GARAGE_DOOR
@staticmethod
def is_bidet(device_type: str) -> bool:
"""Checks if specified device type is bidet."""
return device_type == DeviceType.BIDET
@staticmethod
def is_light(device_type: str) -> bool:
"""Checks if specified device type is light."""
return device_type == DeviceType.LIGHT
@staticmethod
def is_generic(device_type: str) -> bool:
"""Checks if specified device type is generic."""
return device_type == DeviceType.GENERIC_DEVICE
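# For example, DeviceType.is_fan("CF") returns True while DeviceType.is_light("CF") returns False.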
```
#### File: bond-api/tests/test_action.py
```python
from bond_api import Action
def test_action_eq():
"""Tests that different Action instances compare correctly."""
assert Action("name-1", argument="arg-1") == Action("name-1", argument="arg-1")
assert Action("name-1", argument="arg-1") != Action("name-other", argument="arg-1")
assert Action("name-1", argument="arg-1") != Action("name-1", argument="arg-other")
assert Action("name-1", argument="arg-1") != Action("name-other", argument="arg-other")
``` |
{
"source": "joshsalvi/Healthy-Brain-Network-wearable-evaluation",
"score": 3
} |
#### File: Healthy-Brain-Network-wearable-evaluation/utilities/chart_data.py
```python
from astropy.stats import median_absolute_deviation as mad
from config import config
from datetime import datetime, timedelta
from utilities.fetch_data import fetch_check_data, fetch_data, fetch_hash
from utilities.normalize_acc_data import normalize as norm
import json, numpy as np, os, pandas as pd
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from plotly.graph_objs import *
init_notebook_mode()
import holoviews as hv
hv.extension('bokeh')
with open(os.path.join('./config/device_colors.json')) as fp:
color_key = json.load(fp)
with open(os.path.join('./config/CMI_colors/Color_palette.json')) as fp:
color_palette = json.load(fp)
def bland_altman_plot(data1, data2, *args, **kwargs):
"""
Function to build a Bland-Altman plot.
Parameters
----------
data1, data2 : pandas dataframes
dataframes to compare
*args, **kwargs : various types
additional arguments for plotting
"""
data1 = np.asarray(data1)
data2 = np.asarray(data2)
mean = np.mean([data1, data2], axis=0)
diff = data1 - data2 # Difference between data1 and data2
md = np.mean(diff) # Mean of the difference
sd = np.std(diff, axis=0) # Standard deviation of the difference
plt.scatter(mean, diff, *args, **kwargs)
plt.axhline(md, color='gray', linestyle='--')
plt.axhline(md + 1.96*sd, color='gray', linestyle='--')
plt.axhline(md - 1.96*sd, color='gray', linestyle='--')
def df_devices_qt(devices, sensor, start, stop, acc_hashes={}):
"""
    Function to merge sensor data streams from two or more devices over a time window.
Parameters
----------
devices : list of (subdirectory, device) tuples (len 2)
each string is the name of one of the two devices to compare
sensor : string
the sensor to compare
start : datetime
beginning of time to compare
stop : datetime
end of time to compare
acc_hashes : dictionary
dictionary of cached datafile hashes
Returns
-------
df : pandas dataframe
merged dataframe with a column per device
"""
suffix = '.csv'
s = []
for i, device in enumerate(devices):
s.append(pd.read_csv(fetch_data(config.rawurls[
sensor][device[1]]),
parse_dates=['Timestamp'], infer_datetime_format=True))
s[i] = s[i].loc[(s[i]['Timestamp'] >= start) & (s[i]['Timestamp'] <=
stop)].copy()
s[i] = norm(s[i])
if device[1] == 'ActiGraph wGT3X-BT':
s[i][['Timestamp']] = s[i].Timestamp.apply(lambda x: x -
timedelta(microseconds=1000))
s[i].set_index('Timestamp', inplace=True)
df = s[0].merge(s[1], left_index=True, right_index=True, suffixes=(''.join(
['_', devices[0][1]]), ''.join(['_', devices[1][1]])))
for i in range(2, len(s), 1):
df = df.merge(s[i], left_index=True, right_index=True, suffixes=('', ''.join(['_', devices[i][1]])))
return(df)
def hvplot(device_data, device_names):
"""
    Function to build a holoviews scatter layout from device data from one or
    more devices.
    Parameters
    ----------
    device_data: list of pandas dataframes
        only including columns 'Timestamp' and data to plot
device_names: list
ordered list of names, one per dataframe
"""
data = list()
for i, path in enumerate(device_data):
for column in list(path.columns):
if not column == 'Timestamp':
data.append(hv.Scatter(path, kdims=['Timestamp'], vdims=[column
]))
layout = hv.Layout(data).cols(1)
return(layout)
def linechart(df, plot_label, line=True, full=False):
"""
Function to build a linechart and export a PNG and an SVG of the image.
Parameters
----------
df : pandas dataframe
dataframe to plot
plot_label : string
plot title
line : boolean
True for lineplot, False for scatterplot
full : boolean
        True for ylim=[0, 1], False for ylim=[0, 3×max(mad)]
Returns
-------
plotted : boolean
True if data plotted, False otherwise
Outputs
-------
inline plot
"""
try:
start = min(df.index.values)
except:
print("End of data.")
return False
stop = max(df.index.values)
print("Plotting...")
print(plot_label)
fig = plt.figure(figsize=(10, 8), dpi=75)
plt.rcParams['agg.path.chunksize'] = 10000
ax = fig.add_subplot(111)
ax.set_ylabel('unit cube normalized vector length')
mad_values = []
ci = [0, 0]
cls = list(color_palette.keys())
for i, device in enumerate(list(df.columns)):
if device.startswith('normalized'):
d2 = device[25:]
else:
d2 = device
plot_line = df[[device]].dropna()
mp = mad(plot_line)
if mp > 0:
print(mp)
mad_values.append(mp)
else:
mp = plot_line.std()[0]
if mp > 0:
print(mp)
mad_values.append(mp)
else:
print(max(plot_line[[device]]))
mad_values.append(max(plot_line[[device]]))
        label = d2
        cmap = None
        for c in color_key:
            if c in d2 or d2 in c:
                cmap = color_key[c]
        if not cmap:
cmap = color_palette[cls[ci[0]]][ci[1]]
if ci[1] < len(color_palette[cls[ci[0]]]) - 1:
ci[1] = ci[1] + 1
else:
ci[0] = ci[0] + 1 if ci[0] < (len(color_palette.keys()) - 1) else 0
ci[1] = 0
if line:
ax.plot_date(x=plot_line.index, y=plot_line, alpha=0.4,
label=label, marker="", linestyle="solid",
color=cmap)
else:
ax.plot_date(x=plot_line.index, y=plot_line, alpha=0.4,
label=label, marker="o", linestyle="None",
color=cmap)
ax.legend(loc='best', fancybox=True, framealpha=0.5)
try:
ylim = max(mad_values)
except:
ylim = 0
if full or ylim == 0:
ax.set_ylim([0, 1])
else:
try:
ax.set_ylim([0, 3 * ylim])
except:
ax.set_ylim([0, 1])
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
plt.suptitle(plot_label)
plt.xticks(rotation=65)
plt.show()
return True
def plplot(device_data, device_names):
"""
Function to build a plotly line plot from device data from one or more
devices.
Parameters
----------
device_data: list of pandas dataframes
only including columns 'Timestamp' and data to plot
device_names: list
ordered list of names, one per dataframe
"""
data = list()
for i, path in enumerate(device_data):
for column in list(path.columns):
if not column == 'Timestamp':
data.append(Scatter(x=path['Timestamp'], y=path[column], name=
': '.join([device_names[i], column])))
return(data)
def rolling_window(a, window):
# http://wichita.ogs.ou.edu/documents/python/xcor.py
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def xcorr(x,y):
"""
    c = xcorr(x, y)
    Fast implementation to compute the normalized cross correlation where x and
    y are 1D numpy arrays.
    x is the timeseries.
    y is the template time series.
    Returns a numpy 1D array of correlation coefficients, c.
    The standard deviation algorithm in numpy is the biggest slowdown in this
    method; the issue has been identified and hopefully improvements follow.
http://wichita.ogs.ou.edu/documents/python/xcor.py
"""
N = len(x)
M = len(y)
meany = np.nanmean(y)
stdy = np.nanstd(np.asarray(y))
tmp = rolling_window(x,M)
c = np.nansum((y-meany)*(tmp-np.reshape(np.nanmean(tmp,-1),(N-M+1,1))),-1
)/(M*np.nanstd(tmp,-1)*stdy)
return(c)
# ============================================================================
if __name__ == '__main__':
pass
``` |
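As a quick sanity check of `xcorr` above (a sketch, not part of the module), the snippet below buries a scaled copy of a short template inside a noisy series; the peak of the returned coefficients should land near the index where the template was embedded.
```python
import numpy as np

rng = np.random.default_rng(0)
series = rng.normal(size=200)
template = np.array([0.0, 1.0, 2.0, 1.0, 0.0])
series[50:55] += 5 * template      # embed a scaled copy of the template at index 50

c = xcorr(series, template)        # len(c) == len(series) - len(template) + 1 == 196
print(int(np.nanargmax(c)))        # expected to print an index at or near 50
```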
{
"source": "joshsanz/learned_uncertainty",
"score": 2
} |
#### File: joshsanz/learned_uncertainty/analysis.py
```python
import pickle
import numpy as np
from os import listdir
from os.path import isfile, join
import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
def plot_residuals(runs, model_name):
# Plot residual of predicted vs actual return
guc = np.zeros((100,))
gu1 = np.zeros((100,))
gu2 = np.zeros((100,))
gwc = np.zeros((100,))
gw1 = np.zeros((100,))
gw2 = np.zeros((100,))
for r in runs:
guc += r['gaussian_unbiased_covar']['predicted_return'] - r['gaussian_unbiased_covar']['true_return']
gu1 += r['gaussian_unbiased_l1']['predicted_return'] - r['gaussian_unbiased_l1']['true_return']
gu2 += r['gaussian_unbiased_l2']['predicted_return'] - r['gaussian_unbiased_l2']['true_return']
gwc += r['gaussian_windowed_covar']['predicted_return'] - r['gaussian_windowed_covar']['true_return']
gw1 += r['gaussian_windowed_l1']['predicted_return'] - r['gaussian_windowed_l1']['true_return']
gw2 += r['gaussian_windowed_l2']['predicted_return'] - r['gaussian_windowed_l2']['true_return']
plt.plot(r['gaussian_unbiased_covar']['predicted_return'] - r['gaussian_unbiased_covar']['true_return'], label='gaussian_unbiased_covar', alpha=0.25)
plt.plot(r['gaussian_unbiased_l1']['predicted_return'] - r['gaussian_unbiased_l1']['true_return'], label='gaussian_unbiased_l1', alpha=0.25)
plt.plot(r['gaussian_unbiased_l2']['predicted_return'] - r['gaussian_unbiased_l2']['true_return'], label='gaussian_unbiased_l2', alpha=0.25)
plt.plot(r['gaussian_windowed_covar']['predicted_return'] - r['gaussian_windowed_covar']['true_return'], label='gaussian_windowed_covar', alpha=0.25)
plt.plot(r['gaussian_windowed_l1']['predicted_return'] - r['gaussian_windowed_l1']['true_return'], label='gaussian_windowed_l1', alpha=0.25)
plt.plot(r['gaussian_windowed_l2']['predicted_return'] - r['gaussian_windowed_l2']['true_return'], label='gaussian_windowed_l2', alpha=0.25)
plt.ylabel("Predicted - Actual Return")
plt.xlabel("Time Step")
plt.title("Return Residual for {} Model".format(model_name))
plt.savefig("out/return_residual-{}_model.png".format(model_name))
plt.close()
plt.plot(guc/len(runs), label='gaussian_unbiased_covar', alpha=0.5)
plt.plot(gu1/len(runs), label='gaussian_unbiased_l1', alpha=0.5)
plt.plot(gu2/len(runs), label='gaussian_unbiased_l2', alpha=0.5)
plt.plot(gwc/len(runs), label='gaussian_windowed_covar', alpha=0.5)
plt.plot(gw1/len(runs), label='gaussian_windowed_l1', alpha=0.5)
plt.plot(gw2/len(runs), label='gaussian_windowed_l2', alpha=0.5)
plt.legend()
plt.ylabel("Mean Predicted - Actual Return")
plt.xlabel("Time Step")
plt.title("Mean Return Residual for {} Model".format(model_name))
plt.savefig("out/mean_return_residual-{}_model.png".format(model_name))
plt.show()
def plot_gammas(files, data_model):
models = ['gaussian_unbiased_covar',
'gaussian_unbiased_l1',
'gaussian_unbiased_l2',
'gaussian_windowed_covar',
'gaussian_windowed_l1',
'gaussian_windowed_l2']
returns = {}
for f in files:
igamma = f.find("gamma")
gamma = float(f[igamma+5:-4])
with open("gamma_runs/" + f, 'rb') as fh:
data = pickle.load(fh)
returns[gamma] = {}
returns[gamma]['gaussian_unbiased_covar'] = 0
returns[gamma]['gaussian_unbiased_l1'] = 0
returns[gamma]['gaussian_unbiased_l2'] = 0
returns[gamma]['gaussian_windowed_covar'] = 0
returns[gamma]['gaussian_windowed_l1'] = 0
returns[gamma]['gaussian_windowed_l2'] = 0
for r in data:
returns[gamma]['gaussian_unbiased_covar'] += np.mean(r['gaussian_unbiased_covar']['true_return'])
returns[gamma]['gaussian_unbiased_l1'] += np.mean(r['gaussian_unbiased_l1']['true_return'])
returns[gamma]['gaussian_unbiased_l2'] += np.mean(r['gaussian_unbiased_l2']['true_return'])
returns[gamma]['gaussian_windowed_covar'] += np.mean(r['gaussian_windowed_covar']['true_return'])
returns[gamma]['gaussian_windowed_l1'] += np.mean(r['gaussian_windowed_l1']['true_return'])
returns[gamma]['gaussian_windowed_l2'] += np.mean(r['gaussian_windowed_l2']['true_return'])
gammas = list(returns.keys())
gammas.sort()
for m in models:
mreturns = [returns[g][m] for g in gammas]
plt.plot(gammas, mreturns, label=m, alpha=0.5)
plt.xlabel("Regularization Parameter")
plt.ylabel("Mean Return")
plt.title("Regularization Parameter versus Return, {}".format(data_model))
plt.legend()
plt.savefig("gammma_vs_return-{}.png".format(data_model))
plt.show()
def plot_runs(runs, model_name):
plot_residuals(runs, model_name)
# plot_gamma_sweep(runs, model_name)
def main():
# with open("base_run/simple_gauss.pkl", 'rb') as f:
# simple_gauss = pickle.load(f)
# plot_residuals(simple_gauss, 'Gaussian')
# with open("base_run/ltv_gauss.pkl", 'rb') as f:
# ltv_gauss = pickle.load(f)
# plot_residuals(ltv_gauss, 'Trending-Gaussian')
# with open("base_run/wiener.pkl", 'rb') as f:
# wiener = pickle.load(f)
# plot_residuals(wiener, 'Wiener-Process')
mypath = "gamma_runs"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
simple = []
ltv = []
wiener = []
for f in onlyfiles:
if "ltv" in f:
ltv.append(f)
elif "wiener" in f:
wiener.append(f)
else:
simple.append(f)
plot_gammas(ltv, "Trending-Gaussian")
plot_gammas(wiener, "Wiener-Process")
plot_gammas(simple, "Gaussian")
if __name__ == "__main__":
main()
```
#### File: joshsanz/learned_uncertainty/control_models.py
```python
import cvxpy as cvx
import numpy as np
import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
class ControlModel(object):
"""
Models used to find optimal investment strategies.
"""
def __init__(self):
pass
def run(self, data):
pass
def optima(self):
pass
def variables(self):
pass
class NormModel(ControlModel):
def __init__(self, num_assets, gamma=1.0, regularization=1):
        super().__init__()
self.num_assets = num_assets
self.gamma = gamma
self.regularization = regularization
self.x = None
self.problem = None
self._optima = None
def run(self, data):
mu, sigma = data
self.x = cvx.Variable(self.num_assets)
objective = self.x.T*mu - self.gamma*cvx.norm(self.x, self.regularization)
self.problem = cvx.Problem(cvx.Maximize(objective),
[
cvx.norm(self.x, 1) <= 1,
self.x >= 0
])
self._optima = self.problem.solve()
def optima(self):
return self._optima
def variables(self):
return self.x.value.flatten()
class CovarianceModel(ControlModel):
def __init__(self, num_assets, gamma=1.0):
        super().__init__()
self.num_assets = num_assets
self.gamma = gamma
self.x = None
self.problem = None
self._optima = None
def run(self, data):
mu, sigma = data
self.x = cvx.Variable(self.num_assets)
objective = self.x.T*mu - self.gamma*cvx.quad_form(self.x, sigma)
self.problem = cvx.Problem(cvx.Maximize(objective),
[
cvx.norm(self.x, 1) <= 1,
self.x >= 0
])
self._optima = self.problem.solve()
def optima(self):
return self._optima
def variables(self):
return self.x.value.flatten()
class MultiPeriodModel(ControlModel):
"""
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.116.559&rep=rep1&type=pdf
page 4
"""
def __init__(self, num_assets, L, theta, nu):
self.L = L # planning horizon
self.theta = theta # safety margin on std dev
self.nu = nu # transaction cost
self.num_assets = num_assets
self.R = None
self.xi = cvx.Variable((num_assets+1, L+1))
self.eta = cvx.Variable((num_assets+1, L))
self.zeta = cvx.Variable((num_assets+1, L))
self.omega = cvx.Variable()
self.problem = None
self._optima = None
def run(self, data):
# x0 n x 1 initial state of portfolio,
# returns n x L expected return at each time step,
# sigmas n x n x L variance at each time step
x0, returns, _ = data
self.R = np.cumprod(returns, axis=1)
print("R:",self.R)
objective = cvx.Maximize(self.omega)
constraints = [self.omega <= self.R[:,self.L].T @ self.xi[:,-1],
self.zeta >= 0, self.xi >= 0, self.eta >= 0,
self.xi[:,0] == np.divide(x0, self.R[:,0]),
self.xi[-1,1:] == 0]
A = (1 - self.nu) * self.R
B = (1 + self.nu) * self.R
for l in range(1, self.L + 1):
# Equation 1.9
constraints += [0 == -self.xi[:,l] + self.xi[:,l-1] - self.eta[:,l-1] + self.zeta[:,l-1],
0 <= A[:,l-1].T @ self.eta[:,l-1] - B[:,l-1] @ self.zeta[:,l-1]]
self.problem = cvx.Problem(objective, constraints)
self._optima = self.problem.solve()
print(self.problem.status)
def optima(self):
return self._optima
def variables(self):
zeta = self.zeta.value
eta = self.eta.value
xi = self.xi.value
R = self.R
return xi * R, eta * R[:,1:], zeta * R[:, 1:]
class RobustMultiPeriodModel(ControlModel):
"""
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.116.559&rep=rep1&type=pdf
page 15
Note: Here we use theta as a scaling parameter on the variance, xi^T V xi,
rather than on the standard deviation sqrt(xi^T V xi) to ease implementation in CVX
"""
def __init__(self, num_assets, L, theta, nu):
self.L = L # planning horizon
self.theta = theta # safety margin on std dev
self.nu = nu # transaction cost
self.num_assets = num_assets
self.R = None
self.xi = cvx.Variable((num_assets+1, L+1))
self.zeta = cvx.Variable((num_assets+1, L))
self.eta = cvx.Variable((num_assets+1, L))
self.omega = cvx.Variable()
self.problem = None
self._optima = None
def run(self, data):
# x0 n x 1 initial state of portfolio,
# log_returns n x L expected log return at each time step,
# sigmas n x L expected log variance at each time step
x0, log_returns, log_vars = data
# Expectations
ExpR = np.exp(np.cumsum(log_returns + 0.5 * log_vars, axis=1))
self.R = ExpR
print("R:",self.R)
pl = [ np.concatenate([(1-self.nu) * ExpR[:,l], -(1+self.nu) * ExpR[:,l]]) for l in range(self.L) ]
pLp1 = ExpR[:,-1]
# Covariances
VarR = (np.exp(np.cumsum(2 * log_returns + log_vars, axis=1)) * np.exp(np.cumsum(log_vars, axis=1)) -
np.exp(np.cumsum(2 * log_returns + log_vars, axis=1)))
print("VarR:",VarR)
print(VarR.shape)
Cl = [ np.diag(VarR[:,l]) for l in range(1, self.L+1) ]
Vl = [ np.bmat( [[(1 - self.nu) ** 2 * C, -(1-self.nu) * (1 + self.nu) * C],
[-(1-self.nu) * (1 + self.nu) * C, (1 + self.nu) ** 2 * C]] ) for C in Cl ]
print(len(Vl))
VLp1 = Cl[-1]
objective = cvx.Maximize(self.omega)
constraints = [self.omega <= pLp1 @ self.xi[:,self.L] - self.theta * cvx.quad_form(self.xi[:,-1], VLp1),
self.xi >= 0, self.eta >= 0, self.zeta >= 0,
self.xi[:,0] == np.divide(x0, self.R[:,0]),
self.xi[-1,1:] == 0]
alpha = (1 - self.nu) * self.R
beta = (1 + self.nu) * self.R
for l in range(1, self.L + 1):
# Equation 1.9
constraints += [0 == -self.xi[:,l] + self.xi[:,l-1] - self.eta[:,l-1] + self.zeta[:,l-1],
0 <= (alpha[:,l-1].T @ self.eta[:,l-1] - beta[:,l-1] @ self.zeta[:,l-1] -
self.theta * cvx.quad_form(cvx.bmat([[self.eta[:,l-1], self.zeta[:,l-1]]]).T, Vl[l-1]))
]
self.problem = cvx.Problem(objective, constraints)
self._optima = self.problem.solve()
print(self.problem.status)
def optima(self):
return self._optima
def variables(self):
zeta = self.zeta.value
eta = self.eta.value
xi = self.xi.value
R = self.R
return xi * R, eta * R[:,1:], zeta * R[:, 1:]
# class MultiPeriodModelSimple(ControlModel):
# """
# Equation 1.5
# """
# def __init__(self, num_assets, L, mu, v):
# self.L = L # planning horizon
# self.mu = mu
# self.v = v
# self.num_assets = num_assets
# self.x = cvx.Variable((num_assets, L + 1))
# self.y = cvx.Variable((num_assets, L))
# self.z = cvx.Variable((num_assets, L))
# self.problem = None
# self._optima = None
#
# def run(self, data):
# # TODO (hme): Finish imp.
# x0, r, _ = data
# assert r.shape == (self.num_assets, self.L + 1)
#
# objective = cvx.Maximize(r[:, self.L].T * self.x[:, self.L])
# constraints = [
# self.x >= 0, self.z >= 0, self.y >= 0,
# ]
# for l in range(1, self.L + 1):
# for i in range(1, self.num_assets - 1):
# # Equation 1.5
# constraints += [
# self.x[i, l] == r[i, l-1] @ self.x[i, l-1] - self.y[i, l] + self.z[i, l],
# ]
# # self.x[n+1, l] <= self.x[:, l - 1] + A[:, l - 1].T @ self.y[:, l - 1] - B[:, l - 1] @ self.z[:, l - 1]
#
# self.problem = cvx.Problem(objective, constraints)
# self._optima = self.problem.solve()
# print(self.problem.status)
#
# def optima(self):
# return self._optima
#
# def variables(self):
# x = self.x.value
# y = self.y.value
# z = self.z.value
# return x, y, z
if __name__ == "__main__":
from data_models import GaussianNoise, NoisySine
from prediction_models import UnbiasGaussianEstimator, AutoRegression
num_samples = 1000
num_assets = 3
# mu_truth = np.ones(num_assets)
# sigma_truth = np.diag([0.5, 0.3, 0.2])
#
# sampler = GaussianNoise()
# data = np.zeros(shape=(num_samples, num_assets))
#
# for i in range(num_samples):
# data[i] = sampler.sample((mu_truth, sigma_truth))
#
# sample_mean, sample_covar = UnbiasGaussianEstimator().predict(data)
#
# for i in range(num_assets):
# print(sample_mean[i], sample_covar[i])
# mpc = MultiPeriodModel(num_assets, 2, 2, .1)
# x0 = np.ones((num_assets,)) / num_assets
# sample_mean[0] = 1.1
# sample_mean[1] = 0.9
# means = np.repeat(sample_mean.reshape(-1, 1), 3, 1)
# covs = np.repeat(sample_covar.reshape(-1, 1), 3, 1)
# mpc.run(data=(x0, means, covs))
#
# x, y, z = mpc.variables()
# print("x:", x)
# print("y:", y)
# print("z:", z)
# print(mpc.optima())
#
# cov_model = CovarianceModel(num_assets=num_assets)
# cov_model.run(data=(sample_mean, sample_covar), gamma=1.0)
# print(cov_model.variables())
# print(cov_model.optima())
data = NoisySine()
phase = np.array([1., .5, 2.])
noise = np.array([0.1, 0.03, 0.2])
samples = data.sample((phase, noise, 20))
for i in range(samples.shape[1]):
print(samples.T[i])
L = 5
ar = AutoRegression(L)
ar.fit(samples)
ar_projections, ar_errors = ar.predict(samples, L)
print("Projections:",ar_projections)
print(ar_projections.shape)
print("Errors:",ar_errors)
print(ar_errors.shape)
ar_variances = np.zeros((num_assets, L))
ar_variances[:,1:] = np.repeat(ar_errors.reshape(-1,1), L-1, axis=1)
projections = np.ones((num_assets+1, L))
projections[:-1,:] = ar_projections.T
print(projections)
variances = np.zeros((num_assets+1, L))
variances[:-1,:] = ar_variances
print(variances)
# Run models
mpc = MultiPeriodModel(num_assets, L=4, theta=2, nu=.5)
rmpc = RobustMultiPeriodModel(num_assets, L=4, theta=0, nu=.5)
x0 = np.zeros((num_assets+1,))
x0[-1] = 1.0
mpc.run(data=(x0, projections, None))
x, y, z = mpc.variables()
print("x:",x)
print("y:",y)
print("z:",z)
print(mpc.optima())
rmpc.run(data=(x0, np.log(projections), variances))
rx, ry, rz = rmpc.variables()
print("rx:",rx)
print("ry:",ry)
print("rz:",rz)
print(rmpc.optima())
plt.plot(x.T,label='x')
plt.plot(rx.T,':',label='rx')
plt.legend()
plt.show()
plt.plot(y.T,label='y')
plt.plot(ry.T,':',label='ry')
plt.legend()
plt.show()
plt.plot(z.T,label='z')
plt.plot(rz.T,':',label='rz')
plt.legend()
plt.show()
```
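A minimal sketch of driving the single-period `CovarianceModel` above on its own; the means and covariance are made-up numbers, and `run` maximizes expected return minus a `gamma`-weighted variance penalty subject to the non-negativity and unit-budget constraints defined in the class.
```python
import numpy as np

mu = np.array([1.02, 1.00, 1.05])        # hypothetical expected gross returns
sigma = np.diag([0.02, 0.005, 0.08])     # hypothetical return covariance

model = CovarianceModel(num_assets=3, gamma=1.0)
model.run((mu, sigma))                   # run() expects a (mean, covariance) tuple
weights = model.variables()              # non-negative allocation, L1 norm <= 1
print(weights, model.optima())
```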
#### File: joshsanz/learned_uncertainty/experiments.py
```python
import matplotlib
matplotlib.use('tkagg')
from matplotlib import pyplot as plt
plt.rc('figure', figsize=[10, 6])
import time
from data_models import *
from prediction_models import *
from control_models import *
def error(predicted_return, true_return):
return (predicted_return - true_return)
def get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
sampler = GaussianNoise(seed)
data = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
sampler_input = (true_asset_value, asset_covariance)
data[t] = sampler.sample(sampler_input)
return data
def get_wiener_data(num_samples, true_asset_value, asset_covariance, seed=1):
num_assets = asset_covariance.shape[0]
steps = get_gaussian_data(num_samples, np.zeros((num_assets,)), asset_covariance, seed)
return np.cumsum(steps, axis=0) + true_asset_value
def get_real_data():
sampler = RealData()
return sampler.labels(), sampler.dates(), sampler.sample()
def get_returns(data, investment_strategies, asset_predictions):
num_samples = investment_strategies.shape[0]
predicted_return = np.zeros(shape=(num_samples,))
true_return = np.zeros(shape=(num_samples,))
for t in range(num_samples):
if t <= 2:
continue
observed_asset_value = data[t]
predicted_asset_value = asset_predictions[t]
investment_strategy = investment_strategies[t]
true_return[t] = investment_strategy.dot(observed_asset_value)
predicted_return[t] = investment_strategy.dot(predicted_asset_value)
return predicted_return, true_return
def run_gaussian_norm(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
regularization = control_params['regularization']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = NormModel(num_assets=num_assets, gamma=gamma, regularization=regularization)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_gaussian_covar(data, num_samples, num_assets, pred_params, control_params):
gamma = control_params['gamma']
prediction_model = UnbiasGaussianEstimator()
window = pred_params['window']
cov_model = CovarianceModel(num_assets=num_assets, gamma=gamma)
predicted_asset_values = np.zeros(shape=(num_samples, num_assets))
investment_strategies = np.zeros(shape=(num_samples, num_assets))
for t in range(num_samples):
if t <= 2:
continue
if window is None:
past_data = data[:t]
else:
past_data = data[max(0, t-window):t]
predicted_asset_value, predicted_asset_variance = prediction_model.predict(past_data)
predicted_asset_values[t] = predicted_asset_value
control_input = (predicted_asset_value, predicted_asset_variance)
cov_model.run(control_input)
investment_strategy = cov_model.variables()
investment_strategies[t] = investment_strategy
return predicted_asset_values, investment_strategies
def run_simple_gaussian_experiments(params, real_data=False, plot=False, seed=1):
if not real_data:
num_samples = 100
true_asset_value = params['asset_value']
asset_covariance = params['asset_covariance']
data = get_gaussian_data(num_samples, true_asset_value, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
else:
data_labels, data_dates, data = get_real_data()
print("date range:", data_dates[0][0], "-", data_dates[0][-1])
num_samples = data.shape[0]
gamma = params['gamma']
window = params['window']
num_assets = data.shape[1]
if plot:
if real_data:
for i in range(num_assets):
plt.plot(data.T[i], label=data_labels[i])
else:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.5)
# In final plots, predicted return may not be relevant.
# plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_ltv_gaussian_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
true_asset_delta = params['asset_delta']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
true_asset_value = true_asset_v0 + (true_asset_delta.T @ np.arange(0,num_samples).reshape(-1,1).T).T
data = get_gaussian_data(num_samples, np.zeros((3,)), asset_covariance, seed) + true_asset_value
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
def run_wiener_experiments(params, plot=False, seed=1):
num_samples = 100
true_asset_v0 = params['asset_value']
asset_covariance = params['asset_covariance']
gamma = params['gamma']
window = params['window']
data = get_wiener_data(num_samples, true_asset_v0, asset_covariance, seed)
data = np.clip(data, 1e-3, None)
num_assets = data.shape[1]
if plot:
plt.plot(data, label='Asset Values')
plt.legend()
plt.title('Input Data')
plt.show()
# Add experiments to run here.
experiments = [
("gaussian_unbiased_covar", run_gaussian_covar, {'window': None}, {"gamma": gamma}),
("gaussian_unbiased_l1", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 1}),
("gaussian_unbiased_l2", run_gaussian_norm, {'window': None}, {"gamma": gamma, "regularization": 2}),
("gaussian_windowed_covar", run_gaussian_covar, {'window': window}, {"gamma": gamma}),
("gaussian_windowed_l1", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 1}),
("gaussian_windowed_l2", run_gaussian_norm, {'window': window}, {"gamma": gamma, "regularization": 2}),
]
bar_plot_mean = []
bar_plot_std = []
results = {}
results['true_values'] = data
for name, experiment_func, pred_params, control_params in experiments:
predicted_asset_values, investment_strategies = experiment_func(data,
num_samples,
num_assets,
pred_params,
control_params)
predicted_return, true_return = get_returns(data, investment_strategies, predicted_asset_values)
results[name] = {}
results[name]['predicted_return'] = predicted_return
results[name]['strategies'] = investment_strategies
results[name]['predicted_values'] = predicted_asset_values
results[name]['true_return'] = true_return
print(name, np.sum(true_return))
bar_plot_mean.append(np.mean(true_return))
bar_plot_std.append(np.std(true_return))
# all_error = error(predicted_return, true_return)
# window = 10
# for i in range(0, num_samples-window, window):
# print(name, np.mean(all_error[i:i + window]))
if plot:
# We really just care about how well the investment strategies actually do,
# which is given by true_return.
plt.plot(np.arange(3, num_samples), true_return[3:], label=name + ' true return', alpha=0.33)
# In final plots, predicted return may not be relevant.
plt.plot(np.arange(3, num_samples), predicted_return[3:], label=name + ' predicted return')
if plot:
plt.legend()
plt.show()
plt.bar(np.arange(len(experiments)), height=bar_plot_mean, yerr=bar_plot_std)
plt.show()
return results
if __name__ == "__main__":
run_simple_gaussian_experiments(params={'gamma': 1,
'window': 10},
real_data=True,
plot=True, seed=int(time.time()))
run_simple_gaussian_experiments(params={'asset_value': np.array([0.8, 1.0, 1.1]),
'asset_covariance': np.diag([0.02, 0.01, 0.03]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_ltv_gaussian_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'asset_delta': np.array([[0.002, -0.003, 0.001]]),
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
run_wiener_experiments(params={'asset_value': np.array([0.9, 1.2, 1.0]),
'asset_covariance': np.diag([1.0, 1.0, 0.2]) * 0.02,
'gamma': 1,
'window': 10},
plot=True, seed=int(time.time()))
``` |
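For downstream analysis such as the residual and gamma plots in analysis.py earlier, each experiment run returns a nested dict; a sketch of the keys it exposes (the parameter values below are just the ones used in this file's own `__main__` block):
```python
import numpy as np

results = run_simple_gaussian_experiments(
    params={
        "asset_value": np.array([0.8, 1.0, 1.1]),
        "asset_covariance": np.diag([0.02, 0.01, 0.03]),
        "gamma": 1,
        "window": 10,
    },
    plot=False,
)
# results["true_values"]                          -> simulated asset prices
# results["gaussian_windowed_l2"]["true_return"]  -> realized return at each time step
# results["gaussian_windowed_l2"]["strategies"]   -> allocation chosen at each time step
```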
{
"source": "JoshSB/snmp2graphite",
"score": 3
} |
#### File: JoshSB/snmp2graphite/snmp2graphite.py
```python
import os
import netsnmp
import time
import socket
import threading
import traceback
import cherrypy
INTERVAL = 60
SNMP_VERSION = 2
# /USER VARIABLES/
PER_SECOND = 1
SNMP_COMMUNITY = "my_community"
CARBON_SERVER = "carbon_server"
CARBON_PORT = 2003
GRAPHITE_SERVER = "graphite_server"
GRAPHITE_PREFIX = "network.switches."
WEB_SERVER_HOST = "0.0.0.0"
WEB_SERVER_PORT = 8111
IFACE_EXCEPT = [ "Vlan", "Null", ".0", "bme", "vcp", "lsi", "dsc", "lo0", "vlan", "tap", "gre", "ipip", "pime", "pimd", "mtun" ]
HOST_LIST = [ "router1", "switch1", "linux-box1" ]
port_list = {} #this is dynamic
for hn in HOST_LIST:
port_list[hn] = []
cherrypy.config.update({'server.socket_host': WEB_SERVER_HOST,
'server.socket_port': WEB_SERVER_PORT,
})
class BaseCP(object):
@cherrypy.expose
def default(self,*args,**kwargs):
try:
args[0]
except:
html_body = ""
html_top = "<html><head><title>Switch Graphs</title></head><body>"
for x in HOST_LIST:
html_body+="<a href=\"http://" + cherrypy.request.headers['Host'] + "/" + x + "\">" + x + "</a><br>"
html_end = "</body></html>"
return html_top + html_body + html_end
try:
port_list[args[0]]
except:
return "host not found"
else:
html_body = ""
html_top = "<html><head><title>" + args[0] + "</title></head><body>"
for x in port_list[args[0]]:
if (PER_SECOND == 0):
html_body+="<img src=\"http://" + GRAPHITE_SERVER + "/render?target=nonNegativeDerivative%28" + GRAPHITE_PREFIX + args[0] + ".if." + x + "-*,18446744073709551615%29&from=-60min&height=200&width=600\">"
if (PER_SECOND == 1):
html_body+="<img src=\"http://" + GRAPHITE_SERVER + "/render?target=perSecond%28" + GRAPHITE_PREFIX + args[0] + ".if." + x + "-*%29&from=-60min&height=200&width=600\">"
html_end = "</body></html>"
return html_top + html_body + html_end
def schedule_collect(interval, collector, hst, vrs, comm, num_runs = 0):
if num_runs != 1:
threading.Timer(interval, schedule_collect, [interval, collector, hst, vrs, comm, 0 if num_runs == 0 else num_runs-1]).start()
collector(hst, vrs, comm)
def do_collect(hst, vrs, comm):
port_list[hst] = []
sock = socket.socket()
try:
sock.connect( (CARBON_SERVER,CARBON_PORT) )
except:
traceback.print_exc()
now = int(time.time())
args = {
"Version": SNMP_VERSION,
"DestHost": hst,
"Community": comm
}
sess = netsnmp.Session (**args)
INDEX_POS = 0
MIB_ROOT = "ifIndex"
MIB_CURR = MIB_ROOT
RESULTS = {}
while (MIB_ROOT == MIB_CURR):
vars = netsnmp.VarList(netsnmp.Varbind(MIB_CURR,INDEX_POS))
vals = sess.getbulk(0,16,vars)
for i in vars:
if (i.tag == MIB_CURR):
KEY = i.iid
RESULTS[KEY] = i
INDEX_POS = int(vars[-1].iid)
MIB_CURR = vars[-1].tag
for idx in RESULTS:
descr, oper, cin, cout = netsnmp.snmpget(
netsnmp.Varbind("IF-MIB::ifDescr", idx),
netsnmp.Varbind("IF-MIB::ifOperStatus", idx),
netsnmp.Varbind("IF-MIB::ifHCInOctets", idx),
netsnmp.Varbind("IF-MIB::ifHCOutOctets", idx),
**args)
assert(descr is not None and
cin is not None and
cout is not None)
if descr == "lo":
continue
if oper != "1":
continue
skip = 0
for term in IFACE_EXCEPT:
if term in descr:
skip = 1
if skip == 0:
descr = descr.replace("/","-")
descr = descr.replace("SuperBlade Gigabit Switch BMB-GEM-003, Port #","port-")
port_list[hst].append(descr)
graphiteMessage = GRAPHITE_PREFIX + "%s.if.%s-%s %s %d\n" % (hst,descr,"in",cin,now)
sock.sendall(graphiteMessage)
graphiteMessage = GRAPHITE_PREFIX + "%s.if.%s-%s %s %d\n" % (hst,descr,"out",cout,now)
sock.sendall(graphiteMessage)
sock.close()
for myhost in HOST_LIST:
schedule_collect(INTERVAL, do_collect, myhost, SNMP_VERSION, SNMP_COMMUNITY, 0)
if __name__ == "__main__":
    serverThread = threading.Thread(target=cherrypy.quickstart, args=(BaseCP(),))
serverThread.start()
``` |
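The script speaks Graphite's plaintext protocol: one `metric value timestamp` line per datapoint, newline terminated. A hedged sketch of what a single poll of one interface on a hypothetical host would put on the carbon socket:
```python
import time

GRAPHITE_PREFIX = "network.switches."
hst, descr = "router1", "port-1"            # example host and interface
cin, cout = "123456789", "987654321"        # example octet counters from SNMP
now = int(time.time())

lines = [
    GRAPHITE_PREFIX + "%s.if.%s-%s %s %d\n" % (hst, descr, "in", cin, now),
    GRAPHITE_PREFIX + "%s.if.%s-%s %s %d\n" % (hst, descr, "out", cout, now),
]
print("".join(lines))
# network.switches.router1.if.port-1-in 123456789 <epoch seconds>
# network.switches.router1.if.port-1-out 987654321 <epoch seconds>
```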
{
"source": "joshschmelzle/lswifi",
"score": 2
} |
#### File: lswifi/lswifi/helpers.py
```python
import itertools
import json
import random
import re
from base64 import b64encode
from .constants import _20MHZ_CHANNEL_LIST
__control_chars = "".join(
map(chr, itertools.chain(range(0x00, 0x20), range(0x7F, 0xA0)))
)
__control_char_re = re.compile("[%s]" % re.escape(__control_chars))
def remove_control_chars(text: str) -> str:
return __control_char_re.sub("", text)
def escape_control_chars(text: str) -> str:
if isinstance(text, str):
try:
return text.encode("unicode_escape").decode("utf-8")
except UnicodeDecodeError:
return text.encode("unicode_escape").decode("latin-1")
if isinstance(text, bytes):
try:
return text.decode("utf-8").encode("unicode_escape").decode("utf-8")
except UnicodeDecodeError:
return text.decode("latin-1").encode("unicode_escape").decode("latin-1")
return text
def generate_pretty_separator(_len, separators, begin, end):
out = begin
count = 0
custom = _len - 2
while count < custom:
out += random.choice(separators)
count = count + 1
out = out + end
return out
def get_attr_max_len(ies, attr):
_list = []
for ie in ies:
if isinstance(getattr(ie, attr), str):
# getattr(ie, attr)
_list.append(getattr(ie, attr))
else:
_list.append(str(getattr(ie, attr)))
return max(len(x) for x in _list)
def bytes_to_int(x_bytes):
return int.from_bytes(x_bytes, "big")
def int_to_bytes(_int):
return _int.to_bytes((_int.bit_length() + 7) // 8, "big")
def format_bytes_as_hex(_bytes):
"""
    format bytes as a space-separated two digit hex string
doesn't seem to work with lists
TODO: add exception handling?
"""
out = ""
for _int in _bytes:
out = out + f"{_int:02x} "
return out.upper().strip()
def flag_last_object(seq):
"""treat the last object in an iterable differently"""
seq = iter(seq) # ensure this is an iterator
a = next(seq)
for b in seq:
yield a, False
a = b
yield a, True
def get_bit(byteval, index) -> bool:
"""retrieve bit value from byte at provided index"""
return (byteval & (1 << index)) != 0
def bools_to_binary_string(_list: list) -> str:
return "0b" + "".join(["1" if x else "0" for x in _list])
def binary_string_to_int(binary_string: str) -> int:
return int(binary_string[2:], 2)
def trim_most_significant_bit(byteval: int) -> int:
"""trim the most significant bit"""
return byteval & 0x7F
def convert_mac_address_to_string(mac) -> str:
"""returns a MAC address in string format
input can be a list or a c_ubyte from the wlanapi.h
"""
return ":".join("%02x" % x for x in mac)
def strip_mac_address_format(mac):
"""normalizes the various mac address formats"""
return mac.lower().replace("-", "").replace(".", "").replace(":", "")
def __get_digit(number, n):
"""internal helper to get the value of a number at a certain position"""
return number // 10 ** n % 10
def __num_digits(num: int):
"""internal helper to get the number of digits"""
return len(str(num))
def is_two_four_band(frequency: int) -> bool:
"""determines if a channel frequency is in the 2.4 GHz ISM band"""
if __get_digit(frequency, __num_digits(frequency) - 1) == 2:
return True
else:
return False
def is_five_band(frequency: int) -> bool:
"""determines if a channel frequency is in the 5.0 GHz ISM band"""
if __get_digit(frequency, __num_digits(frequency) - 1) == 5:
return True
else:
return False
def is_six_band(frequency: int) -> bool:
"""determines if a channel frequency is in the 6.0-7.125 GHz ISM band"""
if frequency > 5900 and frequency < 7125:
return True
else:
return False
def get_channel_number_from_frequency(frequency):
"""gets the 802.11 channel for a corresponding frequency
in units of kilohertz (kHz). does not support FHSS."""
try:
return _20MHZ_CHANNEL_LIST.get(frequency, "Unknown")
except KeyError:
return "Unknown"
class Base64Encoder(json.JSONEncoder):
"""A Base64 encoder for JSON"""
# example usage: json.dumps(bytes(frame), cls=Base64Encoder)
# pylint: disable=method-hidden
def default(self, obj):
"""Perform default Base64 encode"""
if isinstance(obj, bytes):
return b64encode(obj).decode()
return json.JSONEncoder.default(self, obj)
```
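A few illustrative calls against the helpers above (the input values are arbitrary examples):
```python
import json

print(convert_mac_address_to_string([0xAA, 0xBB, 0xCC, 0x00, 0x11, 0x22]))  # aa:bb:cc:00:11:22
print(strip_mac_address_format("AA-BB-CC-00-11-22"))                        # aabbcc001122
print(get_bit(0b00010100, 2), get_bit(0b00010100, 3))                       # True False
print(is_two_four_band(2412), is_five_band(5180), is_six_band(5955))        # True True True
print(json.dumps({"frame": b"\x80\x00"}, cls=Base64Encoder))                # bytes emitted as base64
```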
#### File: lswifi/lswifi/__main__.py
```python
import asyncio
import logging
import os
import platform
import sys
# hard set no support for non win32 platforms
if sys.platform == "win32":
pass
else:
print(f"{os.path.basename(__file__)} only works on win32... exiting...")
sys.exit(-1)
# hard set no support for Python versions < 3.7
if sys.version_info < (3, 7):
print(
f"{os.path.basename(__file__)} requires Python 3.7+. "
f"your active Python version is {platform.python_version()}. "
f"exiting..."
)
sys.exit(-1)
# app imports
from . import appsetup, core
from .__version__ import __title__
from .constants import APNAMEACKFILE, APNAMEJSONFILE
def app_path() -> None:
appdata_path = os.path.join(os.getenv("LOCALAPPDATA"), __title__)
path_exists = os.path.isdir(appdata_path)
if not path_exists:
os.mkdir(appdata_path)
print(f"{appdata_path}")
def main():
is_apname_ack_stored = False
parser = appsetup.setup_parser()
args = parser.parse_args()
log = appsetup.setup_logger(args)
log.debug(f"args {args}")
log.debug(f"{sys.version}")
if args.data_location:
app_path()
sys.exit()
if args.apnames:
is_apname_ack_stored = user_ack_apnames_disclaimer()
log.debug(
f"is there a stored ack for caching apnames on local machine? ({is_apname_ack_stored})"
)
try:
asyncio.run(core.scan(args, storedack=is_apname_ack_stored))
except KeyboardInterrupt:
log.warning("caught KeyboardInterrupt... stopping...")
except SystemExit:
pass
def user_ack_apnames_disclaimer() -> bool:
"""retrieve ack from user that BSSIDs and discovered apnames will be cached in appdata"""
logger = logging.getLogger(__name__)
if os.getenv("LOCALAPPDATA"):
appdata_folder = os.path.join(os.getenv("LOCALAPPDATA"), __title__)
else:
raise OSError
is_path = os.path.isdir(appdata_folder)
if not is_path:
os.mkdir(appdata_folder)
logger.debug("%s created? %s", appdata_folder, os.path.isdir(appdata_folder))
ack = os.path.join(appdata_folder, APNAMEACKFILE)
apnames = os.path.join(appdata_folder, APNAMEJSONFILE)
if os.path.isfile(ack):
return True
else:
print(
"---\n"
"AP (Access Point) names are not contained in every scan result.\n\n"
"What?\n"
" - This feature locally caches BSSIDs and any detected corresponding AP names.\n"
" - Caching this information helps to more consistently provide AP names in output.\n"
"Why?\n"
" - AP names are typically identified in beacon frames.\n"
" - Dwell time varies per channel meaning it could be less than the beacon interval.\n"
" - Retrieved scan results are a combination of beacons, probe responses, or sometimes a merged frame.\n"
"Where?\n"
" - Data is stored and read from a JSON file on your local device here:\n\n"
f"{apnames}\n"
"---\n"
)
text = input("Do you want to enable this feature? yes/no: ")
if "y" in text.lower()[:1]:
with open(ack, "w") as file:
pass # we only need a placeholder file
print(
"---\n"
"This feature has been enabled and your response stored here: \n\n"
f"{ack}\n\n"
"Want to disable this feature? Delete the file above\n"
"---"
)
return True
else:
return False
if __name__ == "__main__":
main()
```
#### File: lswifi/schemas/beacon.py
```python
from .out import *
class BeaconInterval(OutObject):
"""Base class for Beacon Interval"""
def __init__(self, **kwargs):
self.value = self.get_beacon_interval(kwargs.get("value"))
super(BeaconInterval, self).__init__(**kwargs)
def __str__(self):
return f"{self.value}ms"
def get_beacon_interval(self, beaconperiod):
"""
Beacons are sent by the AP at a regular interval defined as the Target Beacon Transmission Time (TBTT).
The TBTT is a time interval measured in time units (TUs). A TU is equal to 1024 microseconds.
The TU is often confused with 1 ms.
The reality is seen in the definition of a time unit in the 802.11-2012 standard document, which reads, "A measurement of time equal to 1024 µs."
"""
return (1024 * beaconperiod) / 1000
```
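A quick worked example of the conversion in `get_beacon_interval` above, using the common default beacon period of 100 time units:
```python
beacon_period_tu = 100                        # typical TBTT advertised by an AP
interval_ms = (1024 * beacon_period_tu) / 1000
print(f"{interval_ms}ms")                     # 102.4ms, not the often-assumed 100ms
```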
#### File: lswifi/schemas/phy.py
```python
from lswifi import wlanapi as WLAN_API
from .out import *
class PHYType:
"""Base class for PHY Type"""
def __init__(self, bss_entry):
self.value = WLAN_API.DOT11_PHY_TYPE_DICT[bss_entry.dot11BssPhyType]
self.header = Header("PHY")
self.subheader = SubHeader(".11")
def out(self):
return OUT_TUPLE(self.__str__(), self.header, self.subheader)
def __str__(self):
return str(self.amendment)
@property
def generation(self):
"""Get the WFA Generation"""
return self.get_wfa_generation()
@property
def amendment(self):
"""Get the 802.11 amendment"""
return self.get_amendment()
@property
def name(self):
"""Get the current name"""
return self.value
@name.setter
def name(self, value):
self.value = value
def get_amendment(self):
if self.value == "HE":
return "ax"
if self.value == "VHT":
return "ac"
if self.value == "HT":
return "n"
if self.value == "ERP":
return "g"
if self.value.replace("-", "") == "HRDSSS":
return "b"
if self.value == "OFDM":
return "a"
return ""
def get_wfa_generation(self):
"""
https://www.wi-fi.org/discover-wi-fi/wi-fi-certified-6
"""
if self.value == "HE":
return "6"
if self.value == "VHT":
return "5"
if self.value == "HT":
return "4"
if self.value == "ERP":
return "3"
if self.value.replace("-", "") == "HRDSSS":
return "2"
if self.value == "OFDM":
return "1"
return "-"
def __repr__(self):
return self.value
```
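For reference, the mapping that `get_amendment` and `get_wfa_generation` above encode, laid out as plain data rather than by instantiating the class (the constructor needs a real BSS entry from a Wi-Fi scan):
```python
phy_mapping = {
    "HE": ("ax", "6"),
    "VHT": ("ac", "5"),
    "HT": ("n", "4"),
    "ERP": ("g", "3"),
    "HR-DSSS": ("b", "2"),
    "OFDM": ("a", "1"),
}
for phy, (amendment, generation) in phy_mapping.items():
    print(f"{phy}: 802.11{amendment} / Wi-Fi {generation}")
```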
#### File: lswifi/schemas/security.py
```python
from .out import *
class Security(OutObject):
"""Base class for Security"""
def __init__(self, capabilities):
if capabilities.ci.bits.PRIVACY == 1:
self.value = "WEP"
else:
self.value = "NONE"
self.header = Header("SECURITY", Alignment.LEFT)
self.subheader = SubHeader("[auth/unicast/group]")
def __format__(self, fmt):
return f"{self.value:{fmt}}"
```
#### File: lswifi/tests/test_elements.py
```python
import sys
import pytest
import lswifi
# WirelessNetworkBss
class TestElements:
def test_parse_rates(self):
test1 = lswifi.elements.OutObject(
value="1(b) 2(b) 5.5(b) 11(b) 6(b) 9 12(b) 18 24(b) 36 48 54"
)
test2 = lswifi.elements.OutObject(
value="1(b) 2(b) 5.5(b) 11(b) 18 24 36 54 6 9 12 48"
)
test3 = lswifi.elements.OutObject(value="6(b) 9 12(b) 18 24(b) 36 48 54")
assert (
lswifi.elements.WirelessNetworkBss.parse_rates(test1)
== "1(B) 2(B) 5.5(B) 6(B) 9 11(B) 12(B) 18 24(B) 36 48 54"
)
assert (
lswifi.elements.WirelessNetworkBss.parse_rates(test2)
== "1(B) 2(B) 5.5(B) 6 9 11(B) 12 18 24 36 48 54"
)
assert (
lswifi.elements.WirelessNetworkBss.parse_rates(test3)
== "6(B) 9 12(B) 18 24(B) 36 48 54"
)
def test_convert_timestamp_to_uptime(self):
assert (
lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(
13667420576596
)
== "158d 4:30:20"
)
assert (
lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(179295494144)
== "02d 1:48:15"
)
assert (
lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(285837076)
== "00d 0:04:45"
)
``` |
{
"source": "joshschmelzle/resequence_esx_ap_numbers",
"score": 3
} |
#### File: joshschmelzle/resequence_esx_ap_numbers/resequenceAPs.py
```python
__author__ = "<NAME>"
__version__ = "0.0.1"
__status__ = "Alpha"
import argparse
import json
import sys
def initialize():
parser = argparse.ArgumentParser(
description="Renames and resequences Ekahau Site Surveys accessPoints.json.",
epilog="Made with Python by {}".format(__author__),
fromfile_prefix_chars='@'
)
parser.add_argument('command', nargs="?", default="rename", help="default argument. initiates the renaming of AP names in accessPoints.json", choices=['rename'])
parser.add_argument('-V', '--version', action="version", version="%(prog)s {}".format(__version__))
return parser
def main():
data = object()
filename = 'accessPoints.json'
try:
with open(filename) as file:
data = json.load(file)
x = 1
for i in data['accessPoints']:
old = i["name"]
i["name"] = "AP{}".format(x)
new = i["name"]
x = x + 1
print("old: {} - new: {}".format(old, new))
# write modified json to new file
with open('accessPoints-resequenced.json', 'w') as out:
json.dump(data, out, indent=4)
except FileNotFoundError:
print("could not find {}".format(filename))
except ValueError:
print("could not decode {} as json".format(filename))
if __name__ == '__main__':
parser = initialize()
try:
args = parser.parse_args()
if args.command == 'rename':
main()
except KeyboardInterrupt:
        print("stop requested...")
sys.exit(-1)
sys.exit(0)
``` |
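The script only touches the `name` field of each entry under the top-level `accessPoints` key; a minimal hypothetical input shape (real Ekahau exports carry many more keys per entry, which pass through unchanged):
```python
example = {
    "accessPoints": [
        {"name": "Measured AP-33:71"},   # only "name" is read and rewritten
        {"name": "Measured AP-8c:12"},   # all other keys in each entry are preserved
    ]
}
# After a run, the names become "AP1", "AP2", ... and the result is written
# to accessPoints-resequenced.json alongside the original file.
```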
{
"source": "joshschmelzle/runcommand",
"score": 2
} |
#### File: runcommand/runcommand/__main__.py
```python
import getpass
import inspect
import logging
import logging.handlers
import sys
import threading
import time
from datetime import datetime
# third-party imports
import netmiko
# app imports
from . import helpers
class Worker(threading.Thread):
def __init__(
self,
platform: str,
ip_address: str,
command: str,
decrypt: bool,
username: str,
password: str,
counter: int,
):
threading.Thread.__init__(self)
self.thread_id = counter
self.platform = platform
self.ip_address = ip_address
self.command = command
self.decrypt = decrypt
self.username = username
self.password = password
def run(self):
start_time = datetime.now()
log = logging.getLogger(inspect.stack()[0][3])
log.info(
"thread {0} - started {1} at {2}".format(
self.thread_id, self.ip_address, datetime.now()
)
)
run_cmd(
self.platform,
self.ip_address,
self.command,
self.decrypt,
self.username,
self.password,
self.thread_id,
)
log.info(
"thread {0} - finished {1} in {2}".format(
self.thread_id, self.ip_address, datetime.now() - start_time
)
)
def run_cmd(
platform: str,
ip_address: str,
command_set: str,
decrypt: bool,
username: str,
password: str,
thread_id: int,
):
log = logging.getLogger(inspect.stack()[0][3])
try:
log.info(
f"thread {thread_id} - connecting to {ip_address} with user {username}"
)
wlc = netmiko.ConnectHandler(
device_type=platform, ip=ip_address, username=username, password=password
)
if decrypt:
wlc.send_command("encrypt disable")
hostname = wlc.send_command("show hostname")
hostname = hostname.split(" ")[2].strip()
results = []
for command in command_set:
results.append(f"\n# command: {command}\n\n")
results.append("```")
results.append(wlc.send_command(command).strip())
results.append("```")
if isinstance(results, list):
log.info(
"thread {0} - retrieved results from {1}".format(thread_id, hostname)
)
build_output_file(results, hostname, ip_address, thread_id)
except netmiko.ssh_exception.NetMikoTimeoutException as ex:
log.error("{0}.".format(ex))
sys.exit(-1)
except netmiko.ssh_exception.NetMikoAuthenticationException as ex:
log.error("{0}.".format(ex))
sys.exit(-1)
def getresults(args):
log = logging.getLogger(inspect.stack()[0][3])
counter = 1
controllers = []
command_set = []
iplist = args.iplist
cmdlist = args.cmdlist
if cmdlist:
with open(cmdlist) as file:
for line in file:
line = line.strip()
if line == "":
continue
command_set.append(line)
else:
command_set.append(args.cmd)
decrypt = args.decrypt
if helpers.validateinput(args):
if iplist:
with open(iplist) as file:
for line in file:
line = line.strip()
if line == "":
continue
if helpers.is_valid_ipv4_address(line):
controllers.append(line)
else:
controllers.append(args.ip)
log.info(f"controllers: {controllers}")
if not controllers:
log.error("no controllers, or valid IPv4 addresses provided.")
sys.exit(-1)
if controllers:
username = input("username: ")
log.info(f"user: {username}")
password = getpass.getpass(prompt="password: ")
for ip_address in controllers:
worker = Worker(
"aruba_os",
ip_address,
command_set,
decrypt,
username,
password,
counter,
)
worker.start()
if args.syn:
worker.join()
counter += 1
def build_output_file(results: list, hostname: str, ip_address: str, thread_id: int):
"""
    - naming convention: runcommand-[hostname]-[ip]-[time].md
- write to same directory
"""
log = logging.getLogger(inspect.stack()[0][3])
curtime = time.strftime("%Y%m%dt%H%M")
output_filename = (
"runcommand-" + hostname + "-" + ip_address + "-" + curtime + ".md"
)
log.info(f"thread {thread_id} - writing results to {output_filename}")
out_file = open(output_filename, "w")
for result in results:
lines = result.splitlines()
for line in lines:
out_file.write(line + "\n")
out_file.close()
def main() -> None:
parser = helpers.setup_parser()
args = parser.parse_args()
log = helpers.setup_logger(args)
log.info("args {0}".format(args))
log.info("{0}".format(sys.version))
getresults(args)
if __name__ == "__main__":
main()
``` |
{
"source": "joshschmelzle/wlanpi-chat-bot",
"score": 2
} |
#### File: wlanpi-chat-bot/wlanpi_commands/exec_reboot.py
```python
from .command import Command
import os
class ExecReboot(Command):
def __init__(self, telegram_object, conf_obj):
super().__init__(telegram_object, conf_obj)
self.command_name = "exec_reboot"
def run(self, args_list):
os.system('shutdown -r')
return self._render("Attempting reboot in 1 minute...")
``` |
{
"source": "joshschmelzle/wlanpi-fpms",
"score": 3
} |
#### File: modules/apps/scanner.py
```python
import os
import subprocess
import threading
from typing import List
import textfsm
import fpms.modules.wlanpi_oled as oled
from fpms.modules.constants import IP_FILE, IW_FILE, IWCONFIG_FILE, MAX_TABLE_LINES
from fpms.modules.pages.alert import Alert
from fpms.modules.pages.pagedtable import PagedTable
IFACE = "wlan0"
class Scanner(object):
def __init__(self, g_vars):
# load textfsm template to parse iw output
with open(
os.path.realpath(os.path.join(os.getcwd(), "modules/apps/iw_scan.textfsm"))
) as f:
self.iw_textfsm_template = textfsm.TextFSM(f)
# create paged table
self.paged_table_obj = PagedTable(g_vars)
# create alert
self.alert_obj = Alert(g_vars)
def freq_to_channel(self, freq_mhz):
"""
Converts frequency (MHz) to channel number
"""
if freq_mhz == 2484:
return 14
elif freq_mhz >= 2412 and freq_mhz <= 2484:
return int(((freq_mhz - 2412) / 5) + 1)
elif freq_mhz >= 5160 and freq_mhz <= 5885:
return int(((freq_mhz - 5180) / 5) + 36)
elif freq_mhz >= 5955 and freq_mhz <= 7115:
return int(((freq_mhz - 5955) / 5) + 1)
return None
def parse(self, iw_scan_output: str) -> List:
"""
Returns a string containing a list of wireless networks
Fields:
[["bssid","frequency","rssi","ssid"]]
Example:
[["aa:bb:cc:00:11:22", "2412", "-69", "Outlaw"]]
...
"""
self.iw_textfsm_template.Reset()
return self.iw_textfsm_template.ParseText(iw_scan_output)
def scan(self, g_vars):
g_vars["scanner_status"] = True
cmd = f"{IW_FILE} {IFACE} scan"
try:
scan_output = subprocess.check_output(cmd, shell=True).decode().strip()
networks = self.parse(scan_output)
# Sort results by RSSI
networks.sort(key = lambda x: x[2])
results = []
for network in networks:
# BSSID
bssid = network[0].upper()
# Freq
freq = int(network[1])
channel = self.freq_to_channel(freq)
# RSSI
rssi = int(network[2])
# SSID
ssid = network[3]
if len(ssid) == 0:
ssid = "Hidden Network"
ssid = ssid[:17]
results.append("{} {}".format("{0: <17}".format(ssid), rssi))
results.append(
"{} {}".format("{0: <17}".format(bssid), "{0: >3}".format(channel))
)
results.append("---")
g_vars["scanner_results"] = results
except Exception as e:
print(e)
finally:
g_vars["scanner_status"] = False
def scanner_scan(self, g_vars):
# Check if this is the first time we run
if g_vars["result_cache"] == False:
# Mark results as cached (but we will keep updating in the background)
g_vars["result_cache"] = True
g_vars["scanner_results"] = []
g_vars["scanner_status"] = False
self.paged_table_obj.display_list_as_paged_table(
g_vars, "", title="Networks"
)
self.alert_obj.display_popup_alert(g_vars, "Scanning...")
# Configure interface
try:
cmd = f"{IP_FILE} link set {IFACE} down && {IWCONFIG_FILE} {IFACE} mode managed && {IP_FILE} link set {IFACE} up"
subprocess.run(cmd, shell=True)
except Exception as e:
print(e)
else:
if g_vars["scanner_status"] == False:
# Run a scan in the background
thread = threading.Thread(target=self.scan, args=(g_vars,), daemon=True)
thread.start()
# Check and display the results
results = g_vars["scanner_results"]
if len(results) > 0:
# Build the table that will display the results
table_display_max = MAX_TABLE_LINES + int(MAX_TABLE_LINES / 3)
pages = []
while results:
slice = results[:table_display_max]
pages.append(slice)
results = results[table_display_max:]
table_data = {"title": "Networks", "pages": pages}
# Display the results
self.paged_table_obj.display_paged_table(g_vars, table_data, justify=False)
```
#### File: fpms/modules/modes.py
```python
import time
import os.path
import subprocess
import fpms.modules.wlanpi_oled as oled
from fpms.modules.pages.alert import Alert
from fpms.modules.pages.simpletable import SimpleTable
from fpms.modules.constants import (
WCONSOLE_SWITCHER_FILE,
HOTSPOT_SWITCHER_FILE,
WIPERF_SWITCHER_FILE,
SERVER_SWITCHER_FILE,
)
class Mode(object):
def __init__(self, g_vars):
# create simple table
self.simple_table_obj = SimpleTable(g_vars)
# create alert
self.alert_obj = Alert(g_vars)
def switcher(self, g_vars, resource_title, resource_switcher_file, mode_name):
'''
Function to perform generic set of operations to switch wlanpi mode
'''
reboot_image = g_vars['reboot_image']
# check resource is available
if not os.path.isfile(resource_switcher_file):
self.alert_obj.display_alert_error(g_vars, '{} mode not available.'.format(resource_title))
g_vars['display_state'] = 'page'
return
# Resource switcher was detected, so assume it's installed
if g_vars['current_mode'] == "classic":
# if in classic mode, switch to the resource
alert_msg = 'Switching to {} mode (rebooting...)'.format(resource_title)
switch = "on"
elif g_vars['current_mode'] == mode_name:
alert_msg = 'Switching to Classic mode (rebooting...)'
switch = "off"
else:
self.alert_obj.display_alert_error(g_vars, 'Unknown mode: {}'.format(g_vars['current_mode']))
g_vars['display_state'] = 'page'
return False
# Flip the mode
self.alert_obj.display_alert_info(g_vars, alert_msg, title="Success")
g_vars['shutdown_in_progress'] = True
time.sleep(2)
oled.drawImage(g_vars['reboot_image'])
g_vars['screen_cleared'] = True
try:
alert_msg = subprocess.check_output("{} {}".format(resource_switcher_file, switch), shell=True).decode() # reboots
time.sleep(1)
except subprocess.CalledProcessError as exc:
alert_msg = exc.output.decode()
# We only get to this point if the switch has failed for some reason
# (Note that the switcher script reboots the WLANPi)
g_vars['shutdown_in_progress'] = False
g_vars['screen_cleared'] = False
self.alert_obj.display_alert_error(g_vars, alert_msg)
g_vars['display_state'] = 'menu'
        # allow a few secs to view failure msg
time.sleep(3)
# move back up to menu branch
g_vars['current_menu_location'].pop()
return False
def wconsole_switcher(self, g_vars):
wconsole_switcher_file = WCONSOLE_SWITCHER_FILE
resource_title = "Wi-Fi Console"
mode_name = "wconsole"
resource_switcher_file = wconsole_switcher_file
# switch
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def hotspot_switcher(self, g_vars):
hotspot_switcher_file = HOTSPOT_SWITCHER_FILE
resource_title = "Hotspot"
mode_name = "hotspot"
resource_switcher_file = hotspot_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def wiperf_switcher(self, g_vars):
wiperf_switcher_file = WIPERF_SWITCHER_FILE
resource_title = "Wiperf"
mode_name = "wiperf"
resource_switcher_file = wiperf_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def server_switcher(self, g_vars):
server_switcher_file = SERVER_SWITCHER_FILE
resource_title = "Server"
mode_name = "server"
resource_switcher_file = server_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
``` |
{
"source": "joshschmelzle/wlanpi-profiler",
"score": 2
} |
#### File: wlanpi-profiler/profiler/profiler.py
```python
import csv
import inspect
import json
import logging
import os
import signal
import sys
import time
from difflib import Differ
from time import strftime
from typing import Dict, List, Tuple
# third party imports
from manuf import manuf # type: ignore
from scapy.all import Dot11, RadioTap, wrpcap # type: ignore
# app imports
from .__version__ import __version__
from .constants import (
_20MHZ_FREQUENCY_CHANNEL_MAP,
EXT_CAPABILITIES_IE_TAG,
FT_CAPABILITIES_IE_TAG,
HE_6_GHZ_BAND_CAP_IE_EXT_TAG,
HE_CAPABILITIES_IE_EXT_TAG,
HE_SPATIAL_REUSE_IE_EXT_TAG,
HT_CAPABILITIES_IE_TAG,
IE_EXT_TAG,
POWER_MIN_MAX_IE_TAG,
RM_CAPABILITIES_IE_TAG,
RSN_CAPABILITIES_IE_TAG,
SSID_PARAMETER_SET_IE_TAG,
SUPPORTED_CHANNELS_IE_TAG,
SUPPORTED_OPERATING_CLASSES_IE_TAG,
VENDOR_SPECIFIC_IE_TAG,
VHT_CAPABILITIES_IE_TAG,
)
from .helpers import Base64Encoder, Capability, flag_last_object, get_bit, is_randomized
class Profiler(object):
"""Code handling analysis of client capablities"""
def __init__(self, config=None, queue=None):
self.log = logging.getLogger(inspect.stack()[0][1].split("/")[-1])
self.parent_pid = os.getppid()
self.log.debug("profiler pid: %s; parent pid: %s", os.getpid(), self.parent_pid)
self.analyzed_hash = {}
self.config = config
if config:
channel = config.get("GENERAL").get("channel")
if channel:
self.channel = int(channel)
else:
self.log.warning("profiler cannot determine channel from config")
self.listen_only = config.get("GENERAL").get("listen_only")
self.files_path = config.get("GENERAL").get("files_path")
self.pcap_analysis = config.get("GENERAL").get("pcap_analysis")
self.ft_disabled = config.get("GENERAL").get("ft_disabled")
self.he_disabled = config.get("GENERAL").get("he_disabled")
self.reports_dir = os.path.join(self.files_path, "reports")
self.clients_dir = os.path.join(self.files_path, "clients")
self.csv_file = os.path.join(
self.reports_dir, f"profiler-{time.strftime('%Y-%m-%d')}.csv"
)
self.client_profiled_count = 0
self.lookup = manuf.MacParser(update=False)
self.last_manuf = "N/A"
self.running = True
self.run(queue)
def run(self, queue) -> None:
"""Runner which performs checks prior to profiling an association request"""
if queue:
buffer: "Dict" = {}
buffer_squelch = 3
while self.running:
frame = queue.get()
if frame:
if isinstance(frame, RadioTap) or isinstance(frame, Dot11):
if frame.addr2 in buffer:
toc = time.time() - buffer[frame.addr2]
if toc < buffer_squelch:
self.log.debug(
"already seen %s %s seconds ago; not sending to profiler process",
frame.addr2,
f"{toc:.2f}",
)
continue
else:
buffer[frame.addr2] = time.time()
else:
buffer[frame.addr2] = time.time()
self.profile(frame)
if queue.empty():
if self.pcap_analysis:
# if nothing is left in the queue and we're only analyzing a pcap file
self.log.info(
"exit because we were told to only analyze %s",
self.pcap_analysis,
)
sys.exit(signal.SIGTERM)
def profile(self, frame) -> None:
"""Handle profiling clients as they come into the queue"""
# we should determine the channel from frame itself, not from the profiler config
freq = frame.ChannelFrequency
self.log.debug("detected freq from assoc is %s", freq)
channel = _20MHZ_FREQUENCY_CHANNEL_MAP.get(freq, 0)
"""
All radio tap headers are malformed from some adapters on certain kernels.
This has been observed in 5.15rc2 up to 5.15.1 with MediaTek adapters for example.
If that is the case, we are unable to detect the frequency/channel from the association.
---------------------------------------------
- Client MAC: 6e:1d:8a:28:32:51
- OUI manufacturer lookup: Apple (Randomized MAC)
- Frequency band: Unknown
- Capture channel: 0
---------------------------------------------
"""
if channel == 0:
self.log.warning(
"COULD NOT MAP FREQUENCY FROM RADIO TAP HEADER FOUND IN ASSOCIATION FRAME"
)
else:
self.log.debug("detected freq from assoc maps to channel %s", channel)
is_6ghz = False
if freq > 2411 and freq < 2485:
band = "2.4GHz"
elif freq > 5100 and freq < 5900:
band = "5.8GHz"
elif freq > 5900 and freq < 7120:
band = "6.0GHz"
is_6ghz = True
else:
band = "unknown"
ssid, oui_manuf, capabilities = self.analyze_assoc_req(frame, is_6ghz)
analysis_hash = hash(f"{frame.addr2}: {capabilities}")
if analysis_hash in self.analyzed_hash.keys():
self.log.info(
"already seen %s (capabilities hash=%s) this session, ignoring...",
frame.addr2,
analysis_hash,
)
else:
randomized = is_randomized(frame.addr2)
text_report_oui_manuf = oui_manuf
if randomized:
if oui_manuf is None:
text_report_oui_manuf = "Randomized MAC"
else:
text_report_oui_manuf = "{0} (Randomized MAC)".format(oui_manuf)
self.last_manuf = oui_manuf
self.analyzed_hash[analysis_hash] = frame
if self.listen_only:
self.log.info(
"discovered association request for %s to %s",
frame.addr2,
ssid,
)
# generate text report
text_report = self.generate_text_report(
text_report_oui_manuf,
capabilities,
frame.addr2,
channel,
band,
ssid,
self.listen_only,
)
self.log.info("generating text report for %s\n%s", frame.addr2, text_report)
self.log.debug(
"writing textual reports for %s (capabilities hash=%s) to %s",
frame.addr2,
analysis_hash,
os.path.join(self.clients_dir, frame.addr2.replace(":", "-")),
)
self.write_analysis_to_file_system(
text_report,
capabilities,
frame,
oui_manuf,
randomized,
band,
channel,
self.listen_only,
)
self.client_profiled_count += 1
self.log.debug(
"%s clients profiled this session", self.client_profiled_count
)
@staticmethod
def generate_text_report(
oui_manuf: str,
capabilities: list,
client_mac: str,
channel: int,
band: str,
ssid: str,
listen_only: bool,
) -> str:
"""Generate a report for output"""
# start report
text_report = "-" * 45
if listen_only:
text_report += f"\n - SSID: {ssid}"
text_report += f"\n - Client MAC: {client_mac}"
text_report += f"\n - OUI manufacturer lookup: {oui_manuf or 'Unknown'}"
band_label = ""
if band[0] == "2":
band_label = "2.4 GHz"
elif band[0] == "5":
band_label = "5 GHz"
elif band[0] == "6":
band_label = "6 GHz"
else:
band_label = "Unknown"
text_report += f"\n - Frequency band: {band_label}"
text_report += f"\n - Capture channel: {channel}\n"
text_report += "-" * 45
text_report += "\n"
for capability in capabilities:
if capability.name and capability.value:
out = "{0:<22} {1:<22}".format(capability.name, capability.value)
if out.strip():
text_report += out + "\n"
text_report += "\nKey: [X]: Supported, [ ]: Not supported"
text_report += "\n* Reported client capabilities are dependent on available features at the time of client association."
text_report += "\n** Reported channels do not factor local regulatory domain. Detected channel sets are assumed contiguous."
return text_report
def write_analysis_to_file_system(
self,
text_report,
capabilities,
frame,
oui_manuf,
randomized: bool,
band,
channel,
listen_only,
):
"""Write report files out to a directory on the WLAN Pi"""
log = logging.getLogger(inspect.stack()[0][3])
# dump out the text to a file
client_mac = frame.addr2.replace(":", "-", 5)
dest = os.path.join(self.clients_dir, client_mac)
if not os.path.isdir(dest):
try:
os.mkdir(dest)
except OSError:
log.exception("problem creating %s directory", dest)
sys.exit(signal.SIGHUP)
data = {}
data["mac"] = client_mac
data["is_laa"] = randomized
data["manuf"] = oui_manuf
if band[0] == "2":
band_db = 2
elif band[0] == "5":
band_db = 5
elif band[0] == "6":
band_db = 6
else:
band_db = 0
data["band"] = band_db
data["capture_channel"] = channel
data["listen_only"] = listen_only
features = {}
for capability in capabilities:
if capability.db_key:
features[capability.db_key] = capability.db_value
data["features"] = features
data["pcap"] = json.dumps(bytes(frame), cls=Base64Encoder)
data["schema_version"] = 1
data["profiler_version"] = __version__
# if there is a malformed radiotap header
if band == "unknown":
band = ""
else:
band = f"_{band}"
text_filename = os.path.join(dest, f"{client_mac}{band}.txt")
json_filename = os.path.join(dest, f"{client_mac}{band}.json")
try:
same = False
write_time = strftime("%Y%m%dt%H%M%S")
if os.path.exists(json_filename):
with open(json_filename, "r") as _file:
existing_json = json.load(_file)
if hash(str(json.dumps(existing_json.get("features")))) == hash(
str(json.dumps(features))
):
# previously profiled client has the same features
same = True
if not same:
json_filename = json_filename.replace(
".json", f"_diff.{write_time}.json"
)
log.debug("writing json report to %s", json_filename)
with open(json_filename, "w") as write_json_file:
json.dump(data, write_json_file)
if os.path.exists(text_filename):
with open(text_filename, "r") as read_file:
existing_text = read_file.readlines()
temp = []
for line in existing_text:
temp.append(line.replace("\n", ""))
existing_text = temp
if not same:
text_report = list(
Differ().compare(existing_text, text_report.split("\n"))
)
text_filename = text_filename.replace(
".txt", f"_diff.{write_time}.txt"
)
text_report = "\n".join(text_report)
log.debug("writing to %s", text_filename)
with open(text_filename, "w") as file_writer:
file_writer.write(text_report)
except OSError:
log.exception(
"error creating flat files to dump client info (%s)", text_filename
)
sys.exit(signal.SIGHUP)
out_row = {"Client_Mac": client_mac, "OUI_Manuf": oui_manuf}
out_fieldnames = ["Client_Mac", "OUI_Manuf"]
for capability in capabilities:
if capability.db_key:
features[capability.db_key] = capability.db_value
for capability in capabilities:
if capability.db_key is not None and capability.db_value is not None:
out_fieldnames.append(capability.db_key)
out_row[capability.db_key] = capability.db_value
# dump out the frame to a file
pcap_filename = os.path.splitext(text_filename)[0] + ".pcap"
log.debug("writing to %s", pcap_filename)
wrpcap(pcap_filename, [frame])
# check if csv file exists
if not os.path.exists(self.csv_file):
# create file with csv headers
with open(self.csv_file, mode="w") as file_obj:
csv_writer = csv.DictWriter(file_obj, fieldnames=out_fieldnames)
csv_writer.writeheader()
# append data to csv file
with open(self.csv_file, mode="a") as file_obj:
csv_writer = csv.DictWriter(file_obj, fieldnames=out_fieldnames)
csv_writer.writerow(out_row)
@staticmethod
def process_information_elements(buffer: bytes) -> dict:
"""
Parse a 802.11 payload and returns a dict of IEs
Does not handle headers or FCS.
You must strip those before passing the payload in.
"""
# init element vars
information_elements: "Dict" = {}
element_id = 0
element_length = 0
element_data = []
# loop tracking vars
is_index_byte = True
is_length_byte = True
index = 0
for byte, last in flag_last_object(buffer):
if is_index_byte:
element_id = byte
is_index_byte = False
continue
if is_length_byte:
element_length = byte
is_length_byte = False
continue
if index < element_length:
index += 1
element_data.append(byte)
else:
if element_id in [VENDOR_SPECIFIC_IE_TAG, IE_EXT_TAG]:
# map a list of data items to the key
if element_id in information_elements:
information_elements[element_id].append(element_data)
else:
information_elements[element_id] = [element_data]
else:
# map the data to the key
information_elements[element_id] = element_data
# reset vars to decode next information element
index = 0
is_index_byte = True
is_length_byte = True
element_data = []
element_id = 0
element_length = 0
# current byte should be next index byte
element_id = byte
is_index_byte = False
continue
if last:
if element_id in [VENDOR_SPECIFIC_IE_TAG, IE_EXT_TAG]:
# map a list of data items to the key
if element_id in information_elements:
information_elements[element_id].append(element_data)
else:
information_elements[element_id] = [element_data]
else:
# map the data to the key
information_elements[element_id] = element_data
return information_elements
def resolve_oui_manuf(self, mac: str, dot11_elt_dict) -> str:
"""Resolve client's manuf using manuf database and other heuristics"""
log = logging.getLogger(inspect.stack()[0][3])
# log.debug("starting oui lookup for %s", mac)
oui_manuf = self.lookup.get_manuf(mac)
# vendor OUI that we possibly want to check for a more clear OUI match
low_quality = "muratama"
sanitize = {"intelwir": "Intel", "intelcor": "Intel", "samsunge": "Samsung"}
if oui_manuf is None or oui_manuf.lower().startswith(low_quality):
# inspect vendor specific IEs and see if there's an IE with
# an OUI that we know can only be included if the manuf
# of the client is the vendor that maps to that OUI
if VENDOR_SPECIFIC_IE_TAG in dot11_elt_dict.keys():
for element_data in dot11_elt_dict[VENDOR_SPECIFIC_IE_TAG]:
vendor_mac = "{0:02X}:{1:02X}:{2:02X}:00:00:00".format(
element_data[0], element_data[1], element_data[2]
)
oui_manuf_vendor = self.lookup.get_manuf(vendor_mac)
if oui_manuf_vendor is not None:
# Matches are vendor specific IEs we know are client specific
# e.g. Apple vendor specific IEs can only be found in Apple devices
# Samsung may follow similar logic based on S10 5G testing and S21 5G Ultra but unsure of consistency
matches = ("apple", "samsung", "intel")
if oui_manuf_vendor.lower().startswith(matches):
if oui_manuf_vendor.lower() in sanitize:
oui_manuf = sanitize.get(
oui_manuf_vendor.lower(), oui_manuf_vendor
)
else:
oui_manuf = oui_manuf_vendor
log.debug("finished oui lookup for %s: %s", mac, oui_manuf)
return oui_manuf
@staticmethod
def analyze_ssid_ie(dot11_elt_dict) -> str:
"""Check the SSID parameter to determine network name"""
out = ""
if SSID_PARAMETER_SET_IE_TAG in dot11_elt_dict.keys():
try:
ssid = bytes(dot11_elt_dict[SSID_PARAMETER_SET_IE_TAG]).decode("utf-8")
except UnicodeDecodeError:
ssid = bytes(dot11_elt_dict[SSID_PARAMETER_SET_IE_TAG]).decode(
"latin-1"
)
out = f"{ssid}"
return out
@staticmethod
def analyze_ht_capabilities_ie(dot11_elt_dict) -> List:
"""Check for 802.11n support"""
dot11n = Capability(
name="802.11n", value="Not reported*", db_key="dot11n", db_value=0
)
dot11n_nss = Capability(db_key="dot11n_nss", db_value=0)
if HT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
spatial_streams = 0
# mcs octets 1 - 4 indicate # streams supported (up to 4 streams only)
for mcs_octet in range(3, 7):
mcs_octet_value = dot11_elt_dict[HT_CAPABILITIES_IE_TAG][mcs_octet]
if mcs_octet_value & 255:
spatial_streams += 1
dot11n.value = f"Supported ({spatial_streams}ss)"
dot11n.db_value = 1
dot11n_nss.db_value = spatial_streams
return [dot11n, dot11n_nss]
@staticmethod
def analyze_vht_capabilities_ie(dot11_elt_dict) -> List:
"""Check for 802.11ac support"""
dot11ac = Capability(
name="802.11ac", value="Not reported*", db_key="dot11ac", db_value=0
)
dot11ac_nss = Capability(db_key="dot11ac_nss", db_value=0)
dot11ac_mcs = Capability(db_key="dot11ac_mcs", db_value="")
dot11ac_su_bf = Capability(db_key="dot11ac_su_bf", db_value=0)
dot11ac_mu_bf = Capability(db_key="dot11ac_mu_bf", db_value=0)
dot11ac_160_mhz = Capability(db_key="<KEY>", db_value=0)
if VHT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
# determine number of spatial streams (NSS) supported
mcs_upper_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][5]
mcs_lower_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][4]
nss = 0
mcs = []
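            # Worked example (illustrative, not from the source): mcs_lower_octet = 0xFA and
            # mcs_upper_octet = 0xFF decode as two streams at MCS 0-9 (bit pairs 0b10), with the
            # remaining pairs set to 0b11 (not supported), giving nss = 2 and mcs = ["0-9"].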
for octet in [mcs_lower_octet, mcs_upper_octet]:
for bit_position in [0, 2, 4, 6]:
bit1 = get_bit(octet, bit_position)
bit2 = get_bit(octet, bit_position + 1)
if (bit1 == 1) and (bit2 == 1): # (0x3) Not supported
continue
if (bit1 == 0) and (bit2 == 0): # (0x0) MCS 0-7
nss += 1
mcs.append("0-7")
continue
if (bit1 == 1) and (bit2 == 0): # (0x1) MCS 0-8
nss += 1
mcs.append("0-8")
continue
if (bit1 == 0) and (bit2 == 1): # (0x2) MCS 0-9
nss += 1
mcs.append("0-9")
continue
mcs = sorted(set(mcs))
mcs_list = ", ".join(mcs) if len(mcs) > 1 else mcs[0]
dot11ac.value = f"Supported ({nss}ss), MCS {mcs_list}"
dot11ac_nss.db_value = nss
dot11ac_mcs.db_value = mcs_list
# check for SU & MU beam formee support
mu_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][2]
su_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][1]
onesixty = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][0]
# 160 MHz
if get_bit(onesixty, 2):
dot11ac_160_mhz.db_value = 1
dot11ac.value += ", [X] 160 MHz"
else:
dot11ac.value += ", [ ] 160 MHz"
# bit 4 indicates support for both octets (1 = supported, 0 = not supported)
beam_form_mask = 16
# SU BF
if su_octet & beam_form_mask:
dot11ac.value += ", [X] SU BF"
dot11ac_su_bf.db_value = 1
else:
dot11ac.value += ", [ ] SU BF"
# MU BF
if mu_octet & beam_form_mask:
dot11ac.value += ", [X] MU BF"
dot11ac_mu_bf.db_value = 1
else:
dot11ac.value += ", [ ] MU BF"
return [
dot11ac,
dot11ac_nss,
dot11ac_160_mhz,
dot11ac_mcs,
dot11ac_su_bf,
dot11ac_mu_bf,
]
@staticmethod
def analyze_rm_capabilities_ie(dot11_elt_dict) -> List:
"""Check for 802.11k support"""
dot11k = Capability(
name="802.11k",
value="Not reported* - treat with caution, many clients lie about this",
db_key="dot11k",
db_value=0,
)
if RM_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
dot11k.value = "Supported"
dot11k.db_value = 1
return [dot11k]
@staticmethod
def analyze_ft_capabilities_ie(dot11_elt_dict, ft_disabled: bool) -> List:
"""Check for 802.11r support"""
dot11r = Capability(
name="802.11r", value="Not reported*", db_key="dot11r", db_value=0
)
if ft_disabled:
dot11r.value = "Reporting disabled (--no11r option used)"
elif FT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
dot11r.value = "Supported"
dot11r.db_value = 1
else:
pass
return [dot11r]
@staticmethod
def analyze_ext_capabilities_ie(dot11_elt_dict) -> List:
"""Check for 802.11v support"""
dot11v = Capability(
name="802.11v", value="Not reported*", db_key="dot11v", db_value=0
)
if EXT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
ext_cap_list = dot11_elt_dict[EXT_CAPABILITIES_IE_TAG]
# check octet 3 exists
if 3 <= len(ext_cap_list):
# bit 4 of octet 3 in the extended capabilites field
octet3 = ext_cap_list[2]
bss_trans_support = int("00001000", 2)
# 'And' octet 3 to test for bss transition support
if octet3 & bss_trans_support:
dot11v.value = "Supported"
dot11v.db_value = 1
return [dot11v]
@staticmethod
def analyze_rsn_capabilities_ie(dot11_elt_dict) -> List:
"""Check for 802.11w support"""
dot11w = Capability(
name="802.11w", value="Not reported", db_key="dot11w", db_value=0
)
if RSN_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():
rsn_cap_list = dot11_elt_dict[RSN_CAPABILITIES_IE_TAG]
rsn_len = len(rsn_cap_list) - 2
pmf_oct = rsn_cap_list[rsn_len]
            # bit 8 of 2nd last octet in the rsn capabilities field
            if pmf_oct & 0b10000000:
dot11w.value = "Supported"
dot11w.db_value = 1
return [dot11w]
@staticmethod
def analyze_power_capability_ie(dot11_elt_dict) -> List:
"""Check for supported power capabilities"""
max_power_cap = Capability(
name="Max Power",
value="Not reported",
db_key="max_power",
db_value=0,
)
min_power_cap = Capability(
# name="Min Power",
# value="Not reported",
db_key="min_power",
db_value=0,
)
if POWER_MIN_MAX_IE_TAG in dot11_elt_dict.keys():
# octet 3 of power capabilites
max_power = dot11_elt_dict[POWER_MIN_MAX_IE_TAG][1]
min_power = dot11_elt_dict[POWER_MIN_MAX_IE_TAG][0]
# check if signed
if min_power > 127:
signed_min_power = (256 - min_power) * (-1)
else:
signed_min_power = min_power
max_power_cap.value = f"{max_power} dBm"
max_power_cap.db_value = max_power
min_power_cap.value = f"{signed_min_power} dBm"
min_power_cap.db_value = signed_min_power
return [max_power_cap, min_power_cap]
@staticmethod
def analyze_supported_channels_ie(dot11_elt_dict, is_6ghz: bool) -> List:
"""Check supported channels"""
supported_channels = Capability(
name="Supported Channels",
value="Not reported",
db_key="supported_channels",
db_value=[],
)
number_of_supported_channels = Capability(
name="Number of Channels",
value=0,
)
if SUPPORTED_CHANNELS_IE_TAG in dot11_elt_dict.keys():
channel_sets_list = dot11_elt_dict[SUPPORTED_CHANNELS_IE_TAG]
channel_list = []
is_2ghz = False
is_5ghz = False
while channel_sets_list:
start_channel = channel_sets_list.pop(0)
channel_range = channel_sets_list.pop(0)
if start_channel > 14 or is_6ghz:
if not is_6ghz:
is_5ghz = True
channel_multiplier = 4
else:
is_2ghz = True
channel_multiplier = 1
number_of_supported_channels.value += channel_range
for i in range(channel_range):
channel_list.append(start_channel + (i * channel_multiplier))
ranges = []
placeholder = []
for index, channel in enumerate(channel_list):
if index == 0:
placeholder.append(channel)
continue
if is_2ghz and is_5ghz:
if channel < 15:
channel_multiplier = 1
else:
channel_multiplier = 4
if channel - placeholder[-1] == channel_multiplier:
placeholder.append(channel)
# are we at last index? add last list to ranges
if channel == channel_list[-1]:
ranges.append(placeholder)
else:
ranges.append(placeholder)
placeholder = []
placeholder.append(channel)
channel_ranges = []
for _range in ranges:
channel_ranges.append(f"{_range[0]}-{_range[-1]}")
supported_channels.value = f"{', '.join(channel_ranges)}**"
supported_channels.db_value = channel_list
return [supported_channels, number_of_supported_channels]
@staticmethod
def analyze_operating_classes(dot11_elt_dict) -> List:
"""Check if 6 GHz is a supported alternative operating class"""
six_ghz_operating_class_cap = Capability(
db_key="six_ghz_operating_class_supported",
db_value=0,
)
supported_6ghz_alternative_operating_classes = []
six_ghz_alternative_operating_classes = [131, 132, 133, 134, 135]
if SUPPORTED_OPERATING_CLASSES_IE_TAG in dot11_elt_dict.keys():
supported_operating_classes = dot11_elt_dict[
SUPPORTED_OPERATING_CLASSES_IE_TAG
]
            # remove the current operating class (first octet) from the list
            supported_operating_classes.pop(0)
for alternative_operating_class in supported_operating_classes:
if alternative_operating_class in six_ghz_alternative_operating_classes:
supported_6ghz_alternative_operating_classes.append(
alternative_operating_class
)
if supported_6ghz_alternative_operating_classes:
six_ghz_operating_class_cap.name = "6 GHz Operating Class"
six_ghz_operating_class_cap.value = "Supported"
six_ghz_operating_class_cap.db_value = 1
return [six_ghz_operating_class_cap]
@staticmethod
def analyze_extension_ies(dot11_elt_dict, he_disabled: bool) -> List:
"""Check for 802.11ax support"""
dot11ax = Capability(
name="802.11ax",
value="Not supported",
db_key="dot11ax",
db_value=0,
)
dot11ax_six_ghz = Capability(
db_key="dot11ax_six_ghz",
db_value=0,
)
dot11ax_punctured_preamble = Capability(
db_key="dot11ax_punctured_preamble", db_value=0
)
dot11ax_he_su_beamformer = Capability(
db_key="dot11ax_he_su_beamformer", db_value=0
)
dot11ax_he_su_beamformee = Capability(
db_key="dot11ax_he_su_beamformee", db_value=0
)
dot11ax_nss = Capability(db_key="dot11ax_nss", db_value=0)
dot11ax_mcs = Capability(db_key="dot11ax_mcs", db_value="")
dot11ax_twt = Capability(db_key="dot11ax_twt", db_value=0)
dot11ax_uora = Capability(db_key="dot11ax_uora", db_value=0)
dot11ax_bsr = Capability(db_key="dot11ax_bsr", db_value=0)
dot11ax_he_er_su_ppdu = Capability(db_key="dot11ax_he_er_su_ppdu", db_value=0)
dot11ax_spatial_reuse = Capability(db_key="dot11ax_spatial_reuse", db_value=0)
dot11ax_160_mhz = Capability(db_key="dot11ax_160_mhz", db_value=0)
if he_disabled:
dot11ax.value = "Reporting disabled (--no11ax option used)"
else:
if IE_EXT_TAG in dot11_elt_dict.keys():
for element_data in dot11_elt_dict[IE_EXT_TAG]:
ext_ie_id = int(str(element_data[0]))
if ext_ie_id == HE_CAPABILITIES_IE_EXT_TAG:
# dot11ax is supported
dot11ax.value = "Supported"
dot11ax.db_value = 1
# determine number of spatial streams (NSS) supported
mcs_upper_octet = element_data[19]
mcs_lower_octet = element_data[18]
nss = 0
mcs = []
for octet in [mcs_lower_octet, mcs_upper_octet]:
for bit_position in [0, 2, 4, 6]:
bit1 = get_bit(octet, bit_position)
bit2 = get_bit(octet, bit_position + 1)
if (bit1 == 1) and (bit2 == 1): # (0x3) Not supported
continue
if (bit1 == 0) and (bit2 == 0): # (0x0) MCS 0-7
nss += 1
mcs.append("0-7")
continue
if (bit1 == 1) and (bit2 == 0): # (0x1) MCS 0-9
nss += 1
mcs.append("0-9")
continue
if (bit1 == 0) and (bit2 == 1): # (0x2) MCS 0-11
nss += 1
mcs.append("0-11")
continue
mcs = sorted(set(mcs))
mcs = ", ".join(mcs) if len(mcs) > 1 else mcs[0] # type: ignore
dot11ax.value = f"Supported ({nss}ss), MCS {mcs}"
dot11ax_mcs.db_value = mcs
dot11ax_nss.db_value = nss
onesixty_octet = element_data[7]
if get_bit(onesixty_octet, 3):
dot11ax.value += ", [X] 160 MHz"
dot11ax_160_mhz.db_value = 1
else:
dot11ax.value += ", [ ] 160 MHz"
twt_octet = element_data[1]
if get_bit(twt_octet, 1):
dot11ax_twt.db_value = 1
dot11ax.value += ", [X] TWT"
else:
dot11ax.value += ", [ ] TWT"
punctured_preamble_octet = element_data[8]
punctured_preamble_octet_binary_string = ""
for bit_position in range(8):
punctured_preamble_octet_binary_string += f"{int(get_bit(punctured_preamble_octet, bit_position))}"
punctured_bit_booleans = [
bool(int(bit))
for bit in punctured_preamble_octet_binary_string[0:4]
]
puncture_preamble_support = any(punctured_bit_booleans)
if puncture_preamble_support:
dot11ax_punctured_preamble.db_value = 1
dot11ax.value += ", [X] Punctured Preamble"
else:
dot11ax_punctured_preamble.db_value = 0
dot11ax.value += ", [ ] Punctured Preamble"
su_beamformer_octet = element_data[10]
su_beamformer_octet_binary_string = ""
for bit_position in range(8):
su_beamformer_octet_binary_string += (
f"{int(get_bit(su_beamformer_octet, bit_position))}"
)
if int(su_beamformer_octet_binary_string[7]):
su_beamformer_support = True
else:
su_beamformer_support = False
if su_beamformer_support:
dot11ax_he_su_beamformer.db_value = 1
dot11ax.value += ", [X] SU Beamformer"
else:
dot11ax_he_su_beamformer.db_value = 0
dot11ax.value += ", [ ] SU Beamformer"
su_beamformee_octet = element_data[11]
su_beamformee_octet_binary_string = ""
for bit_position in range(8):
su_beamformee_octet_binary_string += (
f"{int(get_bit(su_beamformee_octet, bit_position))}"
)
if int(su_beamformee_octet_binary_string[0]):
su_beamformee_support = True
else:
su_beamformee_support = False
if su_beamformee_support:
dot11ax_he_su_beamformee.db_value = 1
dot11ax.value += ", [X] SU Beamformee"
else:
dot11ax_he_su_beamformee.db_value = 0
dot11ax.value += ", [ ] SU Beamformee"
he_er_su_ppdu_octet = element_data[15]
he_er_su_ppdu_octet_binary_string = ""
for bit_position in range(8):
he_er_su_ppdu_octet_binary_string += (
f"{int(get_bit(he_er_su_ppdu_octet, bit_position))}"
)
if int(he_er_su_ppdu_octet_binary_string[0]):
he_er_su_ppdu_support = True
else:
he_er_su_ppdu_support = False
if he_er_su_ppdu_support:
dot11ax_he_er_su_ppdu.db_value = 1
dot11ax.value += ", [X] HE ER SU PPDU"
else:
dot11ax_he_er_su_ppdu.db_value = 0
dot11ax.value += ", [ ] HE ER SU PPDU"
uora_octet = element_data[4]
uora_octet_binary_string = ""
for bit_position in range(8):
uora_octet_binary_string += (
f"{int(get_bit(uora_octet, bit_position))}"
)
if int(uora_octet_binary_string[2]):
uora_support = True
else:
uora_support = False
if uora_support:
dot11ax_uora.db_value = 1
dot11ax.value += ", [X] UORA"
else:
dot11ax_uora.db_value = 0
dot11ax.value += ", [ ] UORA"
bsr_octet = element_data[3]
bsr_octet_binary_string = ""
for bit_position in range(8):
bsr_octet_binary_string += (
f"{int(get_bit(bsr_octet, bit_position))}"
)
if int(bsr_octet_binary_string[3]):
bsr_support = True
else:
bsr_support = False
if bsr_support:
dot11ax_bsr.db_value = 1
dot11ax.value += ", [X] BSR"
else:
dot11ax_bsr.db_value = 0
dot11ax.value += ", [ ] BSR"
continue
if ext_ie_id == HE_SPATIAL_REUSE_IE_EXT_TAG:
dot11ax_spatial_reuse.db_value = 1
if ext_ie_id == HE_6_GHZ_BAND_CAP_IE_EXT_TAG:
dot11ax_six_ghz.name = "6 GHz Capability"
dot11ax_six_ghz.value = "Supported"
dot11ax_six_ghz.db_value = 1
return [
dot11ax,
dot11ax_nss,
dot11ax_mcs,
dot11ax_twt,
dot11ax_uora,
dot11ax_bsr,
dot11ax_punctured_preamble,
dot11ax_he_su_beamformer,
dot11ax_he_su_beamformee,
dot11ax_he_er_su_ppdu,
dot11ax_six_ghz,
dot11ax_160_mhz,
]
def analyze_assoc_req(self, frame, is_6ghz: bool) -> Tuple[str, str, list]:
"""Tear apart the association request for analysis"""
log = logging.getLogger(inspect.stack()[0][3])
# log.debug("processing information elements for client MAC %s", frame.addr2)
# strip radiotap
ie_buffer = bytes(frame.payload)
# strip dot11
ie_buffer = ie_buffer[24:]
# strip params
ie_buffer = ie_buffer[4:]
# strip fcs
ie_buffer = ie_buffer[:-4]
# convert buffer to ie dict
dot11_elt_dict = self.process_information_elements(ie_buffer)
log.debug(
"%s IEs detected in assoc req from %s: %s",
len(dot11_elt_dict),
frame.addr2,
dot11_elt_dict.keys(),
)
# resolve manufacturer
oui_manuf = self.resolve_oui_manuf(frame.addr2, dot11_elt_dict)
ssid = self.analyze_ssid_ie(dot11_elt_dict)
# dictionary to store capabilities as we decode them
capabilities = []
# check if 11k supported
capabilities += self.analyze_rm_capabilities_ie(dot11_elt_dict)
# check if 11r supported
capabilities += self.analyze_ft_capabilities_ie(
dot11_elt_dict, self.ft_disabled
)
# check if 11v supported
capabilities += self.analyze_ext_capabilities_ie(dot11_elt_dict)
# check if 11w supported
capabilities += self.analyze_rsn_capabilities_ie(dot11_elt_dict)
# check for 11n support
capabilities += self.analyze_ht_capabilities_ie(dot11_elt_dict)
# check for 11ac support
capabilities += self.analyze_vht_capabilities_ie(dot11_elt_dict)
# check for ext tags (e.g. 802.11ax draft support)
capabilities += self.analyze_extension_ies(dot11_elt_dict, self.he_disabled)
# check supported operating classes for 6 GHz
capabilities += self.analyze_operating_classes(dot11_elt_dict)
# check supported power capabilities
capabilities += self.analyze_power_capability_ie(dot11_elt_dict)
# check supported channels
capabilities += self.analyze_supported_channels_ie(dot11_elt_dict, is_6ghz)
return ssid, oui_manuf, capabilities
``` |
{
"source": "joshschmelzle/wlanpi-webui",
"score": 3
} |
#### File: wlanpi_webui/network/network.py
```python
import os
import queue
import subprocess
import threading
from flask import current_app, render_template
from wlanpi_webui.network import bp
@bp.route("/network")
def network():
"""fpms screen"""
FPMS_QUEUE = queue.Queue()
def storeInQueue(f):
def wrapper(*args):
FPMS_QUEUE.put(f(*args))
return wrapper
@storeInQueue
def get_script_results(script):
name = script.strip().split("/")[-1]
return name, run(script)
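    # e.g. get_script_results("/opt/wlanpi-common/networkinfo/publicip.sh") runs in a worker
    # thread below and, via the decorator, pushes ("publicip.sh", "<script output>") onto
    # FPMS_QUEUE (illustrative output)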
def run(script: str) -> str:
result = ""
if os.path.exists(script):
content = subprocess.run(script, capture_output=True)
result = str(content.stdout, "utf-8")
result = result.replace("\n", "<br />")
else:
result = f"Error: required {script.strip().split('/')[-1]} not found."
return result
def dumpQueue(queue):
results = []
while not queue.empty():
results.append(queue.get())
return results
reachability = (
"/opt/wlanpi-common/networkinfo/reachability.sh"
)
publicip = "/opt/wlanpi-common/networkinfo/publicip.sh"
ipconfig = "/opt/wlanpi-common/networkinfo/ipconfig.sh"
threads = []
for script in [reachability, publicip, ipconfig]:
thread = threading.Thread(target=get_script_results, args=(script,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
def readlines(_file):
out = ""
if os.path.exists(_file):
with open(_file, "r") as reader:
for line in reader.readlines():
line = line.replace("\n", "<br />")
out += line
else:
out += f"Error: required {_file} not found."
return out
cdpneigh = "/tmp/cdpneigh.txt"
lldpneigh = "/tmp/lldpneigh.txt"
cdp = readlines(cdpneigh)
lldp = readlines(lldpneigh)
script_results = dumpQueue(FPMS_QUEUE)
for result in script_results:
if "reachability" in str(result):
reachability = result[1]
if "publicip" in str(result):
publicip = result[1]
if "ipconfig" in str(result):
ipconfig = result[1]
return render_template(
"public/network.html",
hostname=current_app.config["HOSTNAME"],
title=current_app.config["TITLE"],
wlanpi_version=current_app.config["WLANPI_VERSION"],
webui_version=current_app.config["WEBUI_VERSION"],
reachability=reachability,
publicip=publicip,
ipconfig=ipconfig,
lldp=lldp,
cdp=cdp,
)
``` |
{
"source": "JoshSchreuder/advent-of-code-2018",
"score": 3
} |
#### File: advent-of-code-2018/2/2b_test.py
```python
import b
def load_main():
with open("2/input.txt", 'r') as myfile:
return myfile.read().splitlines()
def test_1():
assert b.similar(
lambda: ["abcde", "fghij", "klmno", "pqrst", "fguij", "axcye", "wvxyz"]) == "fgij"
def test_main_case():
assert b.similar(load_main) == "tjxmoewpdkyaihvrndfluwbzc"
```
#### File: advent-of-code-2018/2/b.py
```python
def similar(getBoxesFunc):
boxes = getBoxesFunc()
for i in range(0, len(boxes)):
for j in range(i, len(boxes)):
[matchCount, nonMatching] = overlap(boxes[i], boxes[j])
if matchCount == (len(boxes[i]) - 1):
return boxes[i].replace(nonMatching, "")
return ""
def overlap(string1, string2):
count = 0
nonMatching = ""
for i in range(min(len(string1), len(string2))):
if string1[i] == string2[i]:
count = count + 1
else:
nonMatching = string1[i]
return [count, nonMatching]
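# Worked example (from the test data above): overlap("fghij", "fguij") -> [4, "h"], so
# similar() strips the differing "h" and returns "fgij".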
``` |
{
"source": "JoshSchwarz/attack_range_local",
"score": 2
} |
#### File: attack_range_local/modules/VagrantController.py
```python
from jinja2 import Environment, FileSystemLoader
import vagrant
from tabulate import tabulate
import re
import ansible_runner
import sys
class VagrantController():
def __init__(self, config, log):
self.config = config
self.log = log
self.vagrantfile = 'Vagrant.configure("2") do |config| \n \n'
if config['phantom_server'] == '1':
self.vagrantfile += self.read_vagrant_file('phantom-server/Vagrantfile')
self.vagrantfile += '\n\n'
self.vagrantfile += self.read_vagrant_file('splunk_server/Vagrantfile')
self.vagrantfile += '\n\n'
if config['windows_domain_controller'] == '1':
self.vagrantfile += self.read_vagrant_file('windows-domain-controller/Vagrantfile')
self.vagrantfile += '\n\n'
if config['windows_client'] == '1':
self.vagrantfile += self.read_vagrant_file('windows10/Vagrantfile')
self.vagrantfile += '\n\n'
if config['windows_server'] == '1':
self.vagrantfile += self.read_vagrant_file('windows-server/Vagrantfile')
self.vagrantfile += '\n\n'
if config['kali_machine'] == '1':
self.vagrantfile += self.read_vagrant_file('kali-machine/Vagrantfile')
self.vagrantfile += '\n\n'
self.vagrantfile += '\nend'
with open('vagrant/Vagrantfile', 'w') as file:
file.write(self.vagrantfile)
def read_vagrant_file(self, path):
j2_env = Environment(loader=FileSystemLoader('vagrant'),trim_blocks=True)
template = j2_env.get_template(path)
vagrant_file = template.render(self.config)
return vagrant_file
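    # Illustrative sketch (hypothetical template variable): if self.config contains
    # {"splunk_server_private_ip": "10.0.1.12"}, a template line such as
    #   config.vm.network :private_network, ip: "{{ splunk_server_private_ip }}"
    # is rendered with the address substituted before being appended to self.vagrantfile.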
def build(self):
self.log.info("[action] > build\n")
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False, quiet_stderr=False)
try:
v1.up(provision=True, provider="virtualbox")
except:
self.log.error("vagrant failed to build")
sys.exit(1)
self.log.info("attack_range has been built using vagrant successfully")
self.list_machines()
def destroy(self):
self.log.info("[action] > destroy\n")
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)
v1.destroy()
self.log.info("attack_range has been destroy using vagrant successfully")
def stop(self):
print("[action] > stop\n")
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)
v1.halt()
def resume(self):
print("[action] > resume\n")
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)
v1.up()
def simulate(self, target, simulation_techniques, simulation_atomics):
        # if specific atomics are used, only a single simulation technique is allowed
techniques_arr = simulation_techniques.split(',')
if (len(techniques_arr) > 1) and (simulation_atomics != 'no'):
self.log.error('ERROR: if simulation_atomics are used, only a single simulation_technique is allowed.')
sys.exit(1)
run_specific_atomic_tests = 'True'
if simulation_atomics == 'no':
run_specific_atomic_tests = 'False'
# get ip address from machine
self.check_targets_running_vagrant(target, self.log)
target_ip = self.get_ip_address_from_machine(target)
runner = ansible_runner.run(private_data_dir='.',
cmdline=str('-i ' + target_ip + ', '),
roles_path="ansible/roles",
playbook='ansible/atomic_red_team.yml',
extravars={'art_branch': self.config['art_branch'], 'art_repository': self.config['art_repository'], 'run_specific_atomic_tests': run_specific_atomic_tests, 'art_run_tests': simulation_atomics, 'art_run_techniques': simulation_techniques, 'ansible_user': 'Vagrant', 'ansible_password': '<PASSWORD>', 'ansible_port': 5985, 'ansible_winrm_scheme': 'http'},
verbosity=0)
if runner.status == "successful":
self.log.info("successfully executed technique ID {0} against target: {1}".format(simulation_techniques, target))
else:
self.log.error("failed to executed technique ID {0} against target: {1}".format(simulation_techniques, target))
sys.exit(1)
def get_ip_address_from_machine(self, box):
        pattern = r'config.vm.define "' + box + r'"[\s\S]*?:private_network, ip: "([^"]+)'
match = re.search(pattern, self.vagrantfile)
return match.group(1)
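    # e.g. a rendered Vagrantfile block like
    #   config.vm.define "splunk-server" ... :private_network, ip: "10.0.1.12"
    # yields "10.0.1.12" for box "splunk-server" (illustrative values)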
def check_targets_running_vagrant(self, target, log):
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)
status = v1.status()
found_box = False
for stat in status:
if stat.name == target:
found_box = True
if not (stat.state == 'running'):
log.error(target + ' not running.')
sys.exit(1)
break
if not found_box:
log.error(target + ' not found as vagrant box.')
sys.exit(1)
def list_machines(self):
print()
print('Vagrant Status\n')
v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)
response = v1.status()
status = []
for stat in response:
status.append([stat.name, stat.state, self.get_ip_address_from_machine(stat.name)])
print(tabulate(status, headers=['Name','Status','IP Address']))
print()
``` |
{
"source": "JoshScragg/DisWrapper",
"score": 3
} |
#### File: DisWrapper/diswrapper/wrapper.py
```python
from websocket import create_connection
import requests, json, threading, select, multiprocessing, time, os
from datetime import datetime
class User:
def __init__(self, user_id, username, avatar, discriminator, public_flags, nick=None):
self.user_id = user_id
self.username = username
self.nick = nick
self.avatar = avatar
self.discriminator = discriminator
self.public_flags = public_flags
class Message:
    def __init__(self, message_id, message_type, content, channel_id, author, attachments, embeds,
                 mentions, mention_roles, pinned, mention_everyone, tts, timestamp, edited_timestamp, flags):
        self.message_id = message_id
        self.type = message_type
        self.content = content
        self.channel_id = channel_id
        self.author = author
        self.attachments = attachments
        self.embeds = embeds
        self.mentions = mentions
        self.mention_roles = mention_roles
        self.pinned = pinned
        self.mention_everyone = mention_everyone
        self.tts = tts
        self.timestamp = timestamp
        self.edited_timestamp = edited_timestamp
        self.flags = flags
class DisWrapper:
def __init__(self):
self.requester = requests.Session()
self.request_logging = False
self.ws_gateway_query_params = '/?encoding=json&v=6'
self.ws = None
self.ws_send_queue = multiprocessing.Queue()
def sendReq(self, method, url, data=None, headers={}, params=None):
res = self.requester.request(method, url, headers=headers, json=data, params=params)
if self.request_logging: print(f"[{method}] {url} - {res.status_code}: {res.text}")
return res
def auth(self, email, password):
header = {
"content-type": "application/json"
}
payload = {"email": email, "password": password}
url = "https://discord.com/api/v6/auth/login"
req = self.sendReq("POST", url, payload, header).text
json_request = json.loads(req)
self.theme = json_request["user_settings"]["theme"]
self.locale = json_request["user_settings"]["locale"]
self.token = json_request["token"]
return self.token
def sendMessage(self, channel_id, message):
headers = {
"Authorization": self.token,
"content-type": "application/json"
}
payload = {"content": message}
url=f"https://discordapp.com/api/channels/{channel_id}/messages"
req = self.sendReq("POST", url, payload, headers)
self.last_message_request = req
self.last_message_reponse = json.loads(req.text)
return json.loads(req.text)
def readMessage(self, num_messages, channel_id):
messages = []
headers = {
"Authorization": self.token
}
url=f"https://discordapp.com/api/v6/channels/{channel_id}/messages?limit={num_messages}"
req = self.sendReq("GET", url, headers=headers)
request_json = json.loads(req.text)
for message in request_json:
new_author = User(message["author"]["id"], message["author"]["username"], message["author"]["avatar"],
message["author"]["discriminator"], message["author"]["public_flags"])
new_message = Message(message["id"], message["type"], message["content"], message["channel_id"], new_author,
message["attachments"], message["embeds"], message["mentions"], message["mention_roles"],
message["pinned"], message["mention_everyone"], message["tts"], message["timestamp"],
message["edited_timestamp"], message["flags"])
messages.append(new_message)
messages.reverse()
return messages
def typing(self, channel_id):
headers = {
"Authorization": self.token
}
url=f"https://discord.com/api/v6/channels/{channel_id}/typing"
req = self.sendReq("POST", url, headers=headers)
return
def createInvite(self, max_age, max_uses, channel_id):
headers = {
"Authorization": self.token
}
payload = {
"max_age":max_age,
"max_uses":max_uses
}
url=f"https://discord.com/api/v6/channels/{channel_id}/invites"
req = self.sendReq("POST", url, payload, headers)
def setStatus(self, status):
headers = {"Authorization": self.token}
payload = {"custom_status":{"text":status}}
url = "https://discord.com/api/v6/users/@me/settings"
req = self.sendReq("PATCH", url, payload, headers)
def setNick(self, server_id, user_id, nickname):
headers = {"Authorization": self.token}
payload = {"nick": nickname}
url = f"https://discordapp.com/api/v6/guilds/{server_id}/members/{user_id}"
req = self.sendReq("PATCH", url, payload, headers)
if req.status_code == 200:
return True
return False
def getGuildUserInfo(self, server_id, user_id):
headers = {"Authorization": self.token}
url = f"https://discordapp.com/api/v6/guilds/{server_id}/members/{user_id}"
req = self.sendReq("GET", url, headers=headers)
req_json = json.loads(req.text)
new_user = User(req_json["user"]["id"], req_json["user"]["username"], req_json["user"]["avatar"],
req_json["user"]["discriminator"], req_json["user"]["public_flags"], req_json["nick"])
return new_user
def getUserInfo(self, user_id):
headers = {"Authorization": self.token}
url = f"https://discordapp.com/api/v6/users/{user_id}"
req = self.sendReq("GET", url, headers=headers)
req_json = json.loads(req.text)
new_user = User(req_json["id"], req_json["username"], req_json["avatar"],
req_json["discriminator"], req_json["public_flags"])
return new_user
def get_websocket_gateway(self):
req = self.sendReq("GET", "https://discordapp.com/api/v6/auth/login")
``` |
{
"source": "joshseides/pCoAP",
"score": 2
} |
#### File: joshseides/pCoAP/clientGET.py
```python
import logging
import asyncio
from aiocoap import *
logging.basicConfig(level=logging.INFO)
async def main():
protocol = await Context.create_client_context()
request = Message(code=GET, uri='coap://127.0.0.1/time')
try:
response = await protocol.request(request).response
except Exception as e:
print('Failed to fetch resource:')
print(e)
else:
print('Result: %s\n%r'%(response.code, response.payload))
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
``` |
{
"source": "joshsharp/mtn",
"score": 3
} |
#### File: joshsharp/mtn/objects.py
```python
from rpython.rlib.objectmodel import r_dict, compute_hash
from rply.token import BaseBox
from errors import *
def dict_eq(key, other):
# we need to implement rdict method to find key equality
return key._eq(other)
def dict_hash(key):
    # we need to implement rdict method to compute the key hash
return key._hash()
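# Usage sketch (illustrative): RPython's r_dict takes the equality and hash callbacks
# explicitly, e.g.
#   d = r_dict(dict_eq, dict_hash)
#   d[String("answer")] = Integer(42)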
class Null(BaseBox):
def __init__(self):
pass
def to_string(self):
return "<null>"
def dump(self):
return "<null>"
class Function(BaseBox):
def __init__(self, name, code):
self.name = name
self.code = code
def to_string(self):
return "<function %s>" % self.name
def dump(self):
return "<function %s>" % self.name
def add(self, right):
raise Exception("Cannot add that to function %s" % self.name)
class ExternalFunction(BaseBox):
def __init__(self, name, fn, args):
self.name = name
self.fn = fn
self.args = args
def to_string(self):
return "<function %s>" % self.name
def dump(self):
return "<function %s>" % self.name
def add(self, right):
raise Exception("Cannot add that to function %s" % self.name)
class Array(BaseBox):
def __init__(self, args):
self.values = args
def dump(self):
return self.to_string()
def map(self, fun, ls):
nls = []
for l in ls:
nls.append(fun(l))
return nls
def push(self, statement):
self.values.insert(0,statement)
def append(self, statement):
self.values.append(statement)
def index(self, right):
if isinstance(right, Integer):
return self.values[right.value]
raise LogicError("Cannot index with that value")
def add(self, right):
if isinstance(right, Array):
result = Array([])
result.values.extend(self.values)
result.values.extend(right.values)
return result
raise LogicError("Cannot add that to array")
def sub(self,right):
if isinstance(right,Integer):
result = [val for val in self.values]
del result[right.intvalue]
return Array(result)
raise LogicError("Cannot remove that index from array")
def to_string(self):
return '[%s]' % (", ".join(self.map(lambda x: x.to_string(),self.values)))
class Dict(BaseBox):
def __init__(self, args):
self.values = args
def dump(self):
return self.to_string()
def map(self, fun, ls):
nls = []
for l in ls:
nls.append(fun(l))
return nls
def update(self, key, val):
self.values[key] = val
def index(self, right):
if isinstance(right, Integer):
return self.values[right]
if isinstance(right, String):
return self.values[right]
if isinstance(right, Float):
return self.values[right]
if isinstance(right, Boolean):
return self.values[right]
raise LogicError("Cannot index with that value")
def add(self, right):
if isinstance(right, Dict):
result = Dict(r_dict(dict_eq, dict_hash))
for key, val in self.values.iteritems():
result.values[key] = val
for key, val in right.values.iteritems():
result.values[key] = val
return result
raise LogicError("Cannot add that to dict")
def sub(self,right):
result = r_dict(dict_eq, dict_hash)
for key, val in self.values.iteritems():
result[key] = val
del result[right]
return Dict(result)
def to_string(self):
return '{%s}' % (", ".join(self.map(lambda k: "%s: %s" % (k[0].to_string(), k[1].to_string()),self.values.iteritems())))
class Boolean(BaseBox):
def __init__(self, value):
self.boolvalue = bool(value)
@property
def value(self):
return bool(self.boolvalue)
def __hash__(self):
return compute_hash(self.boolvalue)
def __eq__(self, other):
if(isinstance(other,Boolean)):
return self.boolvalue == other.boolvalue
return False
def _hash(self):
return compute_hash(self.boolvalue)
def _eq(self, other):
if(isinstance(other,Boolean)):
return self.boolvalue == other.boolvalue
return False
def equals(self, right):
if isinstance(right, Boolean):
return Boolean(self.value == right.value)
if isinstance(right, Integer):
return Boolean(self.to_int() == right.value)
if isinstance(right, Float):
return Boolean(self.to_int() == right.value)
else:
return Boolean(False)
raise LogicError("Cannot compare that to boolean")
def lte(self, right):
if isinstance(right, Boolean):
return Boolean(self.value == right.value)
raise LogicError("Cannot compare that to boolean")
def lt(self, right):
raise LogicError("Cannot compare boolean that way")
def gt(self, right):
raise LogicError("Cannot compare boolean that way")
def gte(self, right):
if isinstance(right, Boolean):
return Boolean(self.value == right.value)
raise LogicError("Cannot compare that to boolean")
def add(self, right):
raise LogicError("Cannot add that to boolean")
def sub(self, right):
raise LogicError("Cannot sub that from boolean")
def mul(self, right):
raise LogicError("Cannot mul that to boolean")
def div(self, right):
raise LogicError("Cannot div that from boolean")
def to_string(self):
if self.value:
return "true"
return "false"
def to_int(self):
if self.value:
return 1
return 0
def dump(self):
return self.to_string()
class Integer(BaseBox):
def __init__(self, value):
self.intvalue = int(value)
@property
def value(self):
return int(self.intvalue)
def __hash__(self):
return compute_hash(self.intvalue)
def __eq__(self, other):
if(isinstance(other,Integer)):
return (self.intvalue) == (other.intvalue)
return False
def _hash(self):
return compute_hash(self.intvalue)
def _eq(self, other):
if(isinstance(other,Integer)):
return self.intvalue == other.intvalue
return False
def to_string(self):
return str(self.value)
def dump(self):
return str(self.value)
def equals(self, right):
if isinstance(right,Float):
return Boolean(float(self.value) == right.value)
if isinstance(right, Integer):
return Boolean(self.value == right.value)
if isinstance(right, Boolean):
return Boolean(self.value == right.to_int())
raise LogicError("Cannot compare that to integer")
def lte(self, right):
if isinstance(right, Integer):
return Boolean(self.value <= right.value)
if isinstance(right,Float):
return Boolean(float(self.value) <= right.value)
raise LogicError("Cannot compare that to integer")
def lt(self, right):
if isinstance(right, Integer):
return Boolean(self.value < right.value)
if type(right) is Float:
return Boolean(float(self.value) < right.value)
raise LogicError("Cannot compare integer that way")
def gt(self, right):
if isinstance(right, Integer):
return Boolean(self.value > right.value)
if isinstance(right,Float):
return Boolean(float(self.value) > right.value)
raise LogicError("Cannot compare integer that way")
def gte(self, right):
if isinstance(right, Integer):
return Boolean(self.value >= right.value)
if isinstance(right,Float):
return Boolean(float(self.value) >= right.value)
raise LogicError("Cannot compare integer that way")
def add(self, right):
if isinstance(right, Integer):
return Integer(self.value + right.value)
if isinstance(right,Float):
return Float(float(self.value) + right.value)
raise LogicError("Cannot add %s to integer" % str(right.__class__.__name__))
def sub(self, right):
if isinstance(right, Integer):
return Integer(self.value - right.value)
if isinstance(right,Float):
return Float(float(self.value) - right.value)
raise LogicError("Cannot sub from int")
def mul(self, right):
if isinstance(right, Integer):
return Integer(self.value * right.value)
if isinstance(right,Float):
return Float(float(self.value) * right.value)
raise LogicError("Cannot mul that to int")
def div(self, right):
if isinstance(right, Integer):
return Integer(self.value / right.value)
if isinstance(right,Float):
return Float(float(self.value) / right.value)
raise LogicError("Cannot div that with int")
class Float(BaseBox):
def __init__(self, val):
self.floatvalue = float(val)
@property
def value(self):
return float(self.floatvalue)
def __hash__(self):
return compute_hash(self.value)
def __eq__(self, other):
return (self.value) == (other.value)
def _hash(self):
return compute_hash(self.floatvalue)
def _eq(self, other):
if(isinstance(other,Float)):
return self.floatvalue == other.floatvalue
return False
def to_string(self):
return str(self.value)
def equals(self, right):
if isinstance(right,Float):
return Boolean(self.value == right.value)
if isinstance(right, Integer):
return Boolean(self.value == float(right.value))
if isinstance(right, Boolean):
return Boolean(self.value == float(right.to_int()))
raise LogicError("Cannot compare that to float")
def lte(self, right):
if isinstance(right, Integer):
return Boolean(self.value <= float(right.value))
if isinstance(right,Float):
return Boolean(self.value <= right.value)
raise LogicError("Cannot compare that to integer")
def lt(self, right):
if isinstance(right, Integer):
return Boolean(self.value < float(right.value))
if type(right) is Float:
return Boolean(self.value < right.value)
raise LogicError("Cannot compare integer that way")
def gt(self, right):
if isinstance(right, Integer):
return Boolean(self.value > float(right.value))
if isinstance(right,Float):
return Boolean(self.value > right.value)
raise LogicError("Cannot compare integer that way")
def gte(self, right):
if isinstance(right, Integer):
return Boolean(self.value >= float(right.value))
if isinstance(right,Float):
return Boolean(self.value >= right.value)
raise LogicError("Cannot compare integer that way")
def add(self, right):
if isinstance(right, Integer):
return Float(self.value + float(right.value))
if isinstance(right,Float):
return Float(self.value + right.value)
raise LogicError("Cannot add that to float")
def sub(self, right):
if isinstance(right,Float):
return Float(self.value - right.value)
if isinstance(right, Integer):
return Float(self.value - float(right.value))
raise LogicError("Cannot sub string")
def mul(self, right):
if isinstance(right, Integer):
return Float(self.value * float(right.value))
if isinstance(right,Float):
return Float(self.value * right.value)
raise LogicError("Cannot mul that to float")
def div(self, right):
if isinstance(right, Integer):
return Float(self.value / float(right.value))
if isinstance(right,Float):
return Float(self.value / right.value)
raise LogicError("Cannot div that with float")
def dump(self):
return str(self.value)
class String(BaseBox):
def __init__(self, value):
self.value = str(value)
def __hash__(self):
return compute_hash(self.value)
def __eq__(self, other):
return (self.value) == (other.value)
def _hash(self):
return compute_hash(self.value)
def _eq(self, other):
if(isinstance(other,String)):
return self.value == other.value
return False
def to_string(self):
return str(self.value)
def equals(self, right):
if isinstance(right, String):
return Boolean(self.value == right.value)
if isinstance(right, Boolean):
length = int(len(self.value) != 0)
return Boolean(length == right.to_int())
raise LogicError("Cannot compare that to string")
def lte(self, right):
if isinstance(right, String):
            return Boolean(self.value <= right.value)
raise LogicError("Cannot compare that to string")
def lt(self, right):
raise LogicError("Cannot compare string that way")
def gt(self, right):
raise LogicError("Cannot compare string that way")
def gte(self, right):
if isinstance(right, String):
            return Boolean(self.value >= right.value)
raise LogicError("Cannot compare that to string")
def add(self, right):
if isinstance(right, Integer):
return String(self.value + str(right.value))
if isinstance(right,Float):
return String("%s%s" % (self.value,right.value))
if isinstance(right, String):
return String(self.value + right.value)
raise LogicError("Cannot add that to string")
def sub(self, right):
if isinstance(right, Integer):
sli = len(self.value) - right.value
assert(sli >= 0)
return String(self.value[:sli])
raise LogicError("Cannot sub string")
def mul(self, right):
if isinstance(right, Integer):
return String(self.value * right.value)
raise LogicError("Cannot multiply string with that")
def div(self, right):
raise LogicError("Cannot divide a string")
def index(self, right):
if isinstance(right, Integer):
if right.value >= 0:
return String(str(self.value[right.value]))
raise LogicError("Cannot index with that")
def dump(self):
return str(self.value)
class Variable(BaseBox):
def __init__(self, name, value):
self.name = str(name)
self.value = value
def dump(self):
return self.value.dump()
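# --- Added illustrative sketch (not part of the original source) ---
# A minimal demonstration of how the boxed value types above interoperate;
# it assumes the Boolean and LogicError classes referenced above are defined
# elsewhere in this module.
def _value_box_demo():
    total = Float(2.0).add(Integer(1))      # Float(3.0): float + int stays a Float
    shout = String("ha").mul(Integer(3))    # String("hahaha"): string repetition
    same = Float(3.0).equals(total)         # Boolean wrapping True
    return total.to_string(), shout.to_string(), same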
``` |
{
"source": "joshshep/golfr",
"score": 3
} |
#### File: golfr/scripts/dl_sample_imgs.py
```python
from __future__ import print_function
import os
import errno
import requests
import zipfile
#ROOT_DIR = '/home/josh/r/golfr'
from golfr.utils import ensure_path_exists
def dl_img_zip(img_dir, tmp_zip_pathname):
print('Downloading sample images zip to "{}" ...'.format(img_dir))
# link expired :/ ... imgur has betrayed me
#IMG_ALBUM_URL = 'http://imgur.com/a/SOIUE/zip'
IMG_ALBUM_URL = 'https://drive.google.com/uc?export=download&id=0B01ArorP31gEbi1sSnE0aFZVYU0'
CHUNK_SIZE = 1024*1024
#r = requests.head(IMG_ALBUM_URL)
#file_size = int(r.headers['content-length'])
r = requests.get(IMG_ALBUM_URL, stream=True)
#fmt_file_size = '{:.2f}'.format(file_size/CHUNK_SIZE)
ensure_path_exists(img_dir)
file_index = 0
with open(tmp_zip_pathname, 'wb') as fd:
print('0.0 MB', end='\r')
for chunk in r.iter_content(CHUNK_SIZE):
file_index += len(chunk)
            fmt_file_index = '{:.2f}'.format(float(file_index)/CHUNK_SIZE)
print(fmt_file_index+' MB', end='\r')
fd.write(chunk)
print()
print('done')
def extract_sample_imgs(tmp_zip_pathname, img_dir):
    print('Extracting sample images ... ', end='\r')
    with zipfile.ZipFile(tmp_zip_pathname, 'r') as zip_ref:
        zip_ref.extractall(img_dir)
    print('Extracting sample images ... done')
if __name__ == '__main__':
img_dir = os.path.abspath('../imgs/') #os.path.join(ROOT_DIR, 'sample_imgs/')
tmp_zip_pathname = os.path.join(img_dir,'sample_imgs.zip')
dl_img_zip(img_dir, tmp_zip_pathname)
    extract_sample_imgs(tmp_zip_pathname, img_dir)
os.remove(tmp_zip_pathname)
```
#### File: golfr/tests/test_filter_points.py
```python
from __future__ import print_function
from golfr.find_grid_points.filter_points import group_points
from os import listdir
from os.path import abspath, basename, join, dirname
import sys, traceback
import pandas as pd
import cv2
def test_filter_points():
data_dir = abspath(join(dirname(__file__),'filter_pnts_data/'))
df = pd.read_csv(join(data_dir, 'pnts_unfiltered.csv'), index_col=False)
centroids = df.values#.tolist()
hori_lines = cv2.imread(join(data_dir, 'hori_lines.jpg'), 1)
vert_lines = cv2.imread(join(data_dir, 'vert_lines.jpg'), 1)
try:
group_points(centroids, vert_lines, hori_lines)
except:
print('Exception: couldn\'t group points')
print ('-'*60)
traceback.print_exc(file=sys.stdout)
print ('-'*60)
print('test FAILED')
return
print('test not necessarily failed')
return True
if __name__ == '__main__':
test_filter_points()
```
#### File: golfr/tests/test_find_grid_points.py
```python
from __future__ import print_function
from golfr.find_grid_points.find_grid_points import find_grid_points
import sys, traceback
from os.path import abspath, join, dirname
def test_find_grid_points():
fname = abspath(join(dirname(__file__),'../imgs/golfr_test_imgs/ex0.jpg'))
try:
find_grid_points(fname)
except:
print('Exception: in finding grid points of {}'.format(fname))
print ('-'*60)
traceback.print_exc(file=sys.stdout)
print ('-'*60)
if __name__ == '__main__':
test_find_grid_points()
``` |
{
"source": "joshsh/twitlogic",
"score": 3
} |
#### File: python/places/map_places.py
```python
import json
import urllib2
def wikipedia_name(name):
w = name.replace(' ', '_')
return w
f = open('/tmp/new_places-json.txt', 'r')
for line in f:
try:
o = json.loads(line)
except:
# TODO: output an error message
continue
place_type = o['place_type']
name = o['name']
full_name = o['full_name']
id = o['id']
country_code = o['country_code']
#if 'US' != country_code:
# print country_code
q = 'http://en.wikipedia.org/w/api.php?action=opensearch&format=json&search=' + urllib2.quote(name)
response = urllib2.urlopen(q)
text = response.read()
w = json.loads(text)
a = w[1]
for p in a:
wn = wikipedia_name(p)
st = '<http://twitlogic.fortytwo.net/location/twitter/' + id + '> owl:sameAs <' + wn + "> . # " + place_type
wu = 'http://en.wikipedia.org/wiki/' + wn
out = id + '\t' + place_type + '\t' + full_name + '\t' + p + '\t' + wu + '\t' + st
try:
print out
except:
# TODO: output an error message
continue
``` |
{
"source": "joshSi/4YearPlan",
"score": 3
} |
#### File: joshSi/4YearPlan/course.py
```python
class Course:
'''Course superclass for any course'''
def __init__(self, dept, num, name='',
units=4, prereq=set(), restr=set(), coclass=None):
self.dept, self.num, self.name = dept, num, name
self.units = units
self.prereq, self.restr = prereq, restr
if coclass:
self.coclass = coclass
else:
self.coclass = 'No co-classes required'
def __repr__(self):
return self.dept+' '+self.num
def get_info(self):
return self.__repr__()+': '+self.name\
+'\nPrerequisites: '+', '.join(self.prereq)\
+ '\nCoclasses: '+self.coclass
class GE(Course):
'''GE class'''
def __init__(self, dept, num, name='',
units=4, prereq=set(), restr=set(), coclass=None, section=set()):
self.section = section
        super().__init__(dept, num, name, units, prereq, restr, coclass)
``` |
{
"source": "joshsia/dsci532-2022-ia1-joshsia",
"score": 3
} |
#### File: dsci532-2022-ia1-joshsia/app/app.py
```python
from dash import Dash, html, dcc, Input, Output
import pandas as pd
import altair as alt
from altair import pipe, limit_rows, to_values
from si_prefix import si_format
alt.renderers.enable('html')
t = lambda data: pipe(data, limit_rows(max_rows=1_000_000), to_values)
alt.data_transformers.register('custom', t)
alt.data_transformers.enable('custom')
# Read in global data
data = pd.read_csv("data/processed/cleaned_salaries.csv")
country_names = data["Country"].unique()
country_names.sort()
education_order = ["Less than bachelor's degree", "Bachelor's degree",
"Master's degree", "Doctoral degree"]
# Setup app
app = Dash(
__name__,
external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css']
)
server = app.server
app.layout = html.Div([
html.H2("Data Science Salaries Dashboard"),
html.Div([
html.P("Distribution of salaries for different countries"),
html.Iframe(
id="boxplot-countries",
style={'border-width': '0', 'width': '100%', 'height': '455px'}
),
html.P("Set y-axis limits"),
dcc.RangeSlider(
id="y-axis-widget", allowCross=False,
min=0, max=2_600_000, value=[0, 2_600_000],
marks={i: str(si_format(i, precision=1)) for i in range(0, 2_600_000, 400_000)}
)], style={'width': '48%', 'display': 'inline-block'}),
html.Div([], style={'width': '4%', 'display': 'inline-block'}),
html.Div([
html.P("Histogram of selected country"),
dcc.Dropdown(
id="select-country",
value="Canada",
options=[{"label": country, "value": country} for country in country_names]
),
html.Div(style={'height': '50px'}),
html.Iframe(
id="histogram-country",
style={'border-width': '0', 'width': '100%', 'height': '400px'}
)
], style={'width': '48%', 'display': 'inline-block'})
])
# Plotting functions
@app.callback(
Output("boxplot-countries", "srcDoc"),
Input("y-axis-widget", "value")
)
def countries_boxplot(value):
boxplot_order = (data.groupby("Country")["Salary_USD"]
.median().sort_values(ascending=False).index.tolist())
chart = (alt.Chart(data).mark_boxplot(clip=True).encode(
x=alt.X("Country", sort=boxplot_order),
y=alt.Y("Salary_USD", title="Salary in USD",
scale=alt.Scale(
domain=(value[0], value[1])
),
axis=alt.Axis(format='~s')
)
)
)
return chart.to_html()
@app.callback(
Output("histogram-country", "srcDoc"),
Input("select-country", "value")
)
def country_hist(value):
    # Work on a copy so the global dataframe is never mutated, and collapse any
    # education level outside the defined order into "Less than bachelor's degree"
    country = data.query("Country == @value").copy()
    for idx, i in enumerate(country["FormalEducation"]):
        if i not in education_order[1:]:
            country.loc[country.index[idx], "FormalEducation"] = "Less than bachelor's degree"
chart = (alt.Chart(country).mark_bar().encode(
x=alt.X("Salary_USD", bin=alt.Bin(maxbins=20), title="Salary in USD"),
y=alt.Y("count()", title="Counts"),
color=alt.Color("FormalEducation", sort=education_order,
title="Education level"),
order=alt.Order('education_order:Q')
).configure_legend(
orient='bottom'
)
)
return chart.to_html()
if __name__ == '__main__': app.run_server(debug=True)
``` |
{
"source": "joshsia/random-maze-rl",
"score": 3
} |
#### File: joshsia/random-maze-rl/agent.py
```python
import numpy as np
import torch
from collections import deque
import math
import random
class Agent:
# Function to initialise the agent
def __init__(self):
self.dqn = DQN()
self.buffer = ReplayBuffer()
# True when the replay buffer has been filled
self.filledBuffer = False
# Set the minibatch size
self.minibatch_size = 850
# Set the episode length
self.episode_length = 150
# Reset the total number of steps which the agent has taken
self.num_steps_taken = 0
# The state variable stores the latest state of the agent in the environment
self.state = [-999, -999]
# The action variable stores the latest action which the agent has applied to the environment
self.action = None
# Set the initial policy
self.policy = 0.25 * np.ones((100, 100, 4))
self.set_init_policy()
# Discount factor used in Bellman equation
self.gamma = 0.95
# Initial epsilon value which allows for exploration
self.epsilon = 0.6
# Tracks the total number of episodes completed
self.episodes = 0
# Tracks the number of steps between each target network update
self.update_t = 0
# Used to fill the replay buffer
self.reachGoal = False
# Used to find out how many steps it took the greedy policy to reach goal
self.steps = 0
# True if the greedy policy reached the goal within 100 steps
self.greedyGoal = False
# Function to check whether the agent has reached the end of an episode
def has_finished_episode(self):
if( (self.num_steps_taken % self.episode_length == 0) and (self.num_steps_taken>0) ):
if(self.filledBuffer):
if not(self.greedyGoal):
self.episode_length = 150
self.episodes += 1
self.num_steps_taken = 0
self.steps = 0
# Increase the episode length to encourage more exploration
if(self.episodes > 30):
self.episode_length = 175
if(self.episodes > 45):
self.episode_length = 200
# Check the greedy policy every 15 episodes
if((self.episodes % 15) == 0 and (self.episodes > 0)):
self.episode_length = 100
# Find the GLIE epsilon value
# dec_eps = 0 every 15 episodes to get the greedy policy
dec_eps = self.get_eps()
# Update the policy when an episode ends
self.update_policy(dec_eps)
return True
else:
# Filling in the buffer
if(self.reachGoal):
self.reachGoal = False
return True
else:
return False
else:
return False
# GLIE: Function to decrease epsilon as episode number increases
def get_eps(self):
if(self.episodes < 6):
dec_eps = self.epsilon
else:
dec_eps = (self.epsilon) / ((self.episodes-4)**0.35)
# Keeps epsilon high if the greedy policy fails to reach goal to encourage exploration
if(self.episodes > 30):
dec_eps = (self.epsilon) / ((self.episodes - 30)**0.35)
if(self.episodes > 45):
dec_eps = (self.epsilon) / ((self.episodes - 45)**0.35)
if(self.episodes > 60):
dec_eps = (self.epsilon) / ((self.episodes - 60)**0.35)
if(self.episodes > 75):
dec_eps = (self.epsilon) / ((self.episodes - 75)**0.35)
if(self.episodes > 90):
dec_eps = (self.epsilon) / ((self.episodes - 90)**0.35)
# Greedy policy every 15 episodes
if((self.episodes % 15) == 0 and (self.episodes > 0)):
dec_eps = 0
return dec_eps
# Function to update the current policy every time an episode ends
def update_policy(self, dec_eps):
q_vals = self.dqn.get_q_values(self.buffer)
max_actions = q_vals.argmax(axis=2)
suboptimal_actions = dec_eps / 4
for i in range(len(q_vals)):
for j in range(len(q_vals[0])):
new_policy = suboptimal_actions * np.ones(4)
my_action = max_actions[i][j]
new_policy[my_action] = (1 - dec_eps) + (dec_eps / 4)
self.policy[i][j] = new_policy
# Function to get the next action
def get_next_action(self, state):
action = self.choose_action()
action = self.to_cont_action(action)
self.num_steps_taken += 1
self.state = state
self.action = action
return action
# Function to set the next state and distance, which resulted from applying action self.action at state self.state
def set_next_state_and_distance(self, next_state, distance_to_goal):
self.steps += 1
action = self.to_disc_action(self.action)
# Convert the distance to a reward
reward = self.get_reward(next_state, distance_to_goal, action)
transition = (self.state, action, reward, next_state)
self.buffer.append(transition)
len_buffer = self.buffer.get_length()
# Append weights index list
self.buffer.weights_index.append(len_buffer-1)
# Append weights of new transition as max of current weights
if(len_buffer == 1):
self.buffer.weights.append(2)
else:
if not(max(self.buffer.weights) > 1.99):
max_weight = max(self.buffer.weights) + (0.02 * self.state[0])
else:
max_weight = max(self.buffer.weights)
self.buffer.weights.append(max_weight)
if(len_buffer < 15000):
# Only filling in the replay buffer
self.filledBuffer = False
if(distance_to_goal < 0.02):
self.reachGoal = True
self.num_steps_taken = self.episode_length
else:
# Finished filling in the replay buffer. Start training
self.filledBuffer = True
# If not executing the greedy policy, train the network
if (not(self.greedyGoal) and not((self.episodes % 15) == 0) ):
# Weighted sampling
samples, indexes = self.buffer.sampling(self.minibatch_size)
# Add a small positive constant to the weights which is 5% of the largest weight
self.dqn.w_epsilon = max(self.buffer.weights) * 0.05
# Train the network
delta = self.dqn.train_q_network(samples, self.gamma)
# Update the transition weights based on delta
for x,y in zip(indexes, delta):
self.buffer.weights[x] = y
self.update_t += 1
# Update the target network
if(self.update_t == 10):
self.dqn.update_target()
self.update_t = 0
# Executing the greedy policy
if((self.episodes % 15) == 0):
if( (math.isclose(next_state[0],self.state[0], rel_tol = 0.004)) and (math.isclose(next_state[1],self.state[1], rel_tol = 0.004)) and not(self.greedyGoal)):
self.num_steps_taken = self.episode_length
#print("Greedy STUCK; Dist: {}".format(distance_to_goal))
# Greedy policy is stuck
if((distance_to_goal < 0.03) and (self.steps < 100) and not(self.greedyGoal)):
print("GREEDY Reached Goal in {} steps; Episode {}".format(self.steps, self.episodes))
self.greedyGoal = True
self.num_steps_taken = self.episode_length
# Stop training
# Function to determine how much reward is associated to a transition
def get_reward(self, sprime, dist, action):
# Reward 1 based on distance to goal
# Reward 2 based on x position (higher x position means higher rewards)
# Total reward = (a * Reward 1) + (b * Reward 2)
# Where a and b are weighting factors
a = 0.35
b = 0.65
# Reward based on distance to goal
factor1 = -1.5
power1 = -1.5
reward1 = factor1/((dist)**power1)
# Reward based on x position
factor2 = -2
reward2 = factor2 * (1 - self.state[0])
total_reward = (a * reward1) + (b * reward2)
# If the agent is very close to the goal, add more rewards
if(dist < 0.1):
total_reward = total_reward + 0.4
if(dist < 0.05):
total_reward = total_reward + 0.6
# If agent moves West, that's not good
if (action == 3):
total_reward = total_reward - 0.2
# If agent stays in the same state, that's not good
if( (math.isclose(sprime[0],self.state[0], rel_tol = 0.004)) and (math.isclose(sprime[1],self.state[1], rel_tol = 0.004)) ):
total_reward = total_reward - 0.03
return total_reward
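    # Worked example (added for illustration): with a = 0.35, b = 0.65, an agent
    # at x = 0.6 and dist = 0.2 gets reward1 = -1.5 * 0.2**1.5 ~= -0.134 and
    # reward2 = -2 * (1 - 0.6) = -0.8, so total_reward ~= 0.35*(-0.134) + 0.65*(-0.8)
    # ~= -0.57 before the proximity bonuses and the West/stuck penalties apply.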
# Function to get the greedy action for a particular state
def get_greedy_action(self, state):
row, col = self.state_to_rowcol(state)
policy = self.policy[round(col)][round(row)]
action = np.argmax(policy)
#print("({},{}); A: {}".format(round(col),round(row),action))
action = self.to_cont_action(action)
return action
# Defining the initial policy
def set_init_policy(self):
init_pol = [0.275, 0.3, 0.275, 0.15]
for i in range(len(self.policy)):
for j in range(len(self.policy[0])):
self.policy[i][j] = init_pol
# Choose an action based on the current policy
def choose_action(self):
if( (self.state[0]==-999) and (self.state[1]==-999) ):
action = 1
else:
row, col = self.state_to_rowcol(self.state)
policy = self.policy[round(col)][round(row)]
action = random.choices(np.arange(4), weights = policy)[0]
return action
# Mapping the discrete actions to continuous actions
def to_cont_action(self, action):
if action == 0:
cont_action = np.array([0, 0.02], dtype=np.float32)
elif action == 1:
cont_action = np.array([0.02, 0], dtype=np.float32)
elif action == 2:
cont_action = np.array([0, -0.02], dtype=np.float32)
elif action == 3:
cont_action = np.array([-0.02, 0], dtype=np.float32)
else:
print("Invalid action")
cont_action = np.array([0, 0], dtype=np.float32)
return cont_action
# Mapping the continuous actions to discrete actions
def to_disc_action(self, action):
if( (action[0]==0) and (action[1]>0) ):
disc_action = 0
elif( (action[0]>0) and (action[1]==0) ):
disc_action = 1
elif( (action[0]==0) and (action[1]<0) ):
disc_action = 2
elif( (action[0]<0) and (action[1]==0) ):
disc_action = 3
else:
print("Invalid action")
disc_action = 4
return disc_action
# Mapping the states to columns and rows of the Q function for easy indexing
def state_to_rowcol(self, state):
col = math.floor(state[0] * 100)
row = math.floor(state[1] * 100)
return row, col
class Network(torch.nn.Module):
def __init__(self, input_dimension, output_dimension):
super(Network, self).__init__()
self.layer_1 = torch.nn.Linear(in_features=input_dimension, out_features=90)
self.layer_2 = torch.nn.Linear(in_features=90, out_features=125)
self.layer_3 = torch.nn.Linear(in_features=125, out_features=90)
self.output_layer = torch.nn.Linear(in_features=90, out_features=output_dimension)
def forward(self, input):
layer_1_output = torch.nn.functional.relu(self.layer_1(input))
layer_2_output = torch.nn.functional.relu(self.layer_2(layer_1_output))
layer_3_output = torch.nn.functional.relu(self.layer_3(layer_2_output))
output = self.output_layer(layer_3_output)
return output
class DQN:
def __init__(self):
self.q_network = Network(input_dimension=2, output_dimension=4)
self.t_network = Network(input_dimension=2, output_dimension=4)
self.optimiser = torch.optim.Adam(self.q_network.parameters(), lr=0.002)
self.t_network.load_state_dict(self.q_network.state_dict())
self.w_epsilon = 0.05
def update_target(self):
self.t_network.load_state_dict(self.q_network.state_dict())
def train_q_network(self, transition, gamma):
self.optimiser.zero_grad()
loss, delta = self._calculate_loss(transition, gamma)
loss.backward()
self.optimiser.step()
return delta
def _calculate_loss(self, transition, gamma):
# Double Q Deep Learning
s_tensor, a_tensor, r_tensor, sprime_tensor = zip(*transition)
s_tensor = torch.tensor(s_tensor, dtype=torch.float32)
a_tensor = torch.tensor(a_tensor)
r_tensor = torch.tensor(r_tensor, dtype=torch.float32)
sprime_tensor = torch.tensor(sprime_tensor, dtype=torch.float32)
# Get Q value from Q network indexed by action taken
s_prediction = self.q_network.forward(s_tensor).gather(dim=1, index=a_tensor.unsqueeze(-1)).squeeze(-1)
# Get max actions from target network Q s_prime predictions
with torch.no_grad():
max_actions = (self.t_network.forward(sprime_tensor)).argmax(1)
# Get Q value using max action from target network
sprime_prediction = self.q_network.forward(sprime_tensor).gather(dim=1, index=max_actions.unsqueeze(-1)).squeeze(-1)
sprime_prediction = r_tensor + (gamma * sprime_prediction)
# New weights
delta = abs(sprime_prediction - s_prediction) + self.w_epsilon
delta = delta.detach().numpy()
# Calculate loss
loss = torch.nn.MSELoss()(s_prediction, sprime_prediction)
return loss, delta
def get_q_values(self, buffer):
q_vals = np.zeros((100, 100, 4))
init_q = [-50, -40, -50, -60]
for i in range(len(q_vals)):
for j in range(len(q_vals[0])):
q_vals[i][j] = init_q
samples = buffer.get_all()
state_tensor = [i[0] for i in samples]
state_tensor = np.unique(state_tensor, axis=0)
for i in state_tensor:
col = math.floor(i[0] * 100)
row = math.floor(i[1] * 100)
state = torch.tensor(i)
q_vals[round(col)][round(row)] = self.q_network.forward(state).detach().numpy()
return q_vals
class ReplayBuffer:
def __init__(self):
self.buffer = deque(maxlen=75000)
self.weights = []
self.weights_index = []
def append(self, input_tuple):
self.buffer.append(input_tuple)
def sampling(self, minibatch_size):
if(minibatch_size > len(self.buffer)):
print("Buffer too short")
else:
# Weighted sampling
indexes = random.choices(self.weights_index, self.weights, k=minibatch_size)
samples = [self.buffer[i] for i in indexes]
return samples, indexes
def get_length(self):
return len(self.buffer)
def get_all(self):
return self.buffer
``` |
{
"source": "joshsimmons/animportantdate",
"score": 2
} |
#### File: wedding/migrations/0010_auto_20170527_1704.py
```python
from __future__ import unicode_literals
import string
from django.db import migrations
def create_short_name(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
Event = apps.get_model('wedding', 'Event')
for event in Event.objects.all():
short_name = "".join(
i for i in event.name.lower() if i in string.ascii_lowercase
)
short_name = short_name[:20]
event.short_name = short_name
event.save()
class Migration(migrations.Migration):
dependencies = [
('wedding', '0009_auto_20170527_1704'),
]
operations = [
migrations.RunPython(create_short_name),
]
``` |
{
"source": "joshsisto/Pi_Weather_Station",
"score": 3
} |
#### File: Pi_Weather_Station/src/weather.py
```python
import datetime
import time
import json
import requests
import csv
import os
from math import log
from sense_hat import SenseHat
from flask import Flask, request, render_template
from sendEmail import send_email
import ast
import tablib
import pandas as pd
sense = SenseHat()
RED = [155, 0, 0]
BRED = [255, 0, 0]
ORANGE = [255, 127, 0]
YELLOW = [155, 155, 0]
GREEN = [0, 155, 0]
BLUE = [0, 0, 155]
PURPLE = [128, 0, 128]
WHITE = [155, 155, 155]
BRIGHT_WHITE = [255, 255, 255]
dataset = tablib.Dataset()
def convert_epoch(epoch_time):
converted_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_time))
return converted_time
def epoch_to_day(epoch_time):
converted_time = time.strftime('%A', time.localtime(epoch_time))
return converted_time
def get_csv_data():
"""Open the daily csv log and return the content"""
csv_list = []
day = get_timestamp().split()[0]
csv_path = os.path.join(os.path.dirname(__file__) + '/logs/', day + '.csv')
# csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'
with open(csv_path, 'r') as csv_file:
# content = f.read()
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# print(row)
csv_list.append(row)
return csv_list
# print(get_csv_data())
def get_dark_sky():
"""Read the most recent dark sky log and return a list of the stats"""
csv_content = get_csv_data()
most_recent = csv_content[-1]
dark_sky_string = most_recent[9]
dark_sky_list = dark_sky_string.strip('][').split(', ')
ds_temp = dark_sky_list[0]
ds_cond = dark_sky_list[1].strip("'")
ds_fore = dark_sky_list[2].strip("'")
return [ds_temp, ds_cond, ds_fore]
# print(get_dark_sky())
def get_gov_aqi():
"""Read the most recent aqi log and return the stats"""
csv_content = get_csv_data()
most_recent = csv_content[-1]
aqi_string = most_recent[10]
aqi_list = aqi_string.strip('][').split(', ')
aqi = aqi_list[0]
air_cond = aqi_list[1].strip("'")
return [aqi, air_cond]
# print(get_gov_aqi())
def get_timestamp():
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return st
def get_xyz():
"""Get orientation data X,Y,Z"""
sense.clear()
acceleration = sense.get_accelerometer_raw()
x = round(acceleration['x'], 2)
y = round(acceleration['y'], 2)
z = round(acceleration['z'], 2)
return [x, y, z]
def set_orientation():
"""Set screen orientation based on x,y sensor reading"""
sense.clear()
acceleration = sense.get_accelerometer_raw()
x = round(acceleration['x'], 0)
y = round(acceleration['y'], 0)
if x == -1:
sense.set_rotation(90)
elif y == 1:
sense.set_rotation(0)
elif y == -1:
sense.set_rotation(180)
else:
sense.set_rotation(180)
def set_screen_color(fahrenheit):
"""Set screen color based on temperature"""
if 20 <= int(fahrenheit) <= 80:
bg_color = BLUE
elif 81 <= int(fahrenheit) <= 90:
bg_color = GREEN
elif 91 <= int(fahrenheit) <= 100:
bg_color = YELLOW
elif 101 <= int(fahrenheit) <= 102:
bg_color = ORANGE
elif 103 <= int(fahrenheit) <= 104:
bg_color = RED
elif 105 <= int(fahrenheit) <= 109:
bg_color = BRED
elif 110 <= int(fahrenheit) <= 120:
bg_color = WHITE
else:
bg_color = GREEN
return bg_color
def weather():
"""Display SenseHAT data on the 8x8 LED grid"""
    data_lst = get_csv_data()[-1]  # most recent row of the daily log
sense_data = ("{} AQI:{} "
.format(data_lst[9], data_lst[-1]))
print(sense_data)
set_orientation()
# bg_color = set_screen_color(data_lst[9][0])
sense.show_message(sense_data, scroll_speed=0.10, back_colour=PURPLE, text_colour=BRIGHT_WHITE)
while __name__ == '__main__':
weather()
```
#### File: Pi_Weather_Station/src/window_alerts.py
```python
import datetime
import time
import csv
import os
import ast
import glob
import json
from math import log
from sense_hat import SenseHat
from weather import get_timestamp
from sendText import *
def get_csv_data():
"""Open the daily csv log and return the content"""
global csv_path
csv_list = []
day = get_timestamp().split()[0]
csv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)) + '/logs/', day + '.csv')
# csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'
with open(csv_path, 'r') as csv_file:
# content = f.read()
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# print(row)
csv_list.append(row)
return csv_list
get_csv_data()
def get_dark_sky():
"""Read the most recent dark sky log and return a list of the stats"""
csv_content = get_csv_data()
most_recent = csv_content[-1]
dark_sky_string = most_recent[9]
dark_sky_list = dark_sky_string.strip('][').split(', ')
ds_temp = dark_sky_list[0]
ds_cond = dark_sky_list[1].strip("'")
ds_fore = dark_sky_list[2].strip("'")
return [ds_temp, ds_cond, ds_fore]
# print(os.path.basename(csv_path))
# print(csv_path[:-3] + 'alert')
def check_min():
global current_temp
global daily_high_temp
try:
# alert_cont = read_alert()
with open('/home/pi/Pi_Weather_Station/src/weather.json') as json_file:
data = json.load(json_file)
today_temp_hi = data['daily']['data'][0]['temperatureHigh']
minimum_temp = 74
current_temp = get_dark_sky()[0]
current_temp = float(current_temp)
daily_high_temp = float(today_temp_hi)
if daily_high_temp >= 80 and current_temp >= 74:
print('It is 74 degrees or warmer! Time to close the windows')
return True
else:
print('Temperature is within limit set')
return False
except:
print('That did not work.')
print('probably did not have a value set for minimum temp')
print(current_temp, daily_high_temp)
alert_file_path = csv_path[:-3] + 'alert'
if not os.path.exists(alert_file_path):
    print('no alert file detected')
    if check_min():
        print("Temp reached! creating alert flag")
        # Touch an empty flag file so the alert is only sent once per day
        open(alert_file_path, 'w+').close()
print("Sending Text")
email_message = 'It is {} outside with a High of {}. You should close the windows to keep the house cool. Love you!'.format(current_temp, daily_high_temp)
print(email_message)
send_email(email_message)
print('need to print something')
print(alert_file_path)
``` |
{
"source": "joshsizer/free_code_camp",
"score": 3
} |
#### File: algorithms/pairwise/pairwise_test.py
```python
from pairwise import pairwise
class TestPairwise:
def test_pairwise1(self):
assert pairwise([1, 4, 2, 3, 0, 5], 7) == 11
def test_pairwise2(self):
assert pairwise([1, 3, 2, 4], 4) == 1
def test_pairwise3(self):
assert pairwise([1, 1, 1], 2) == 1
def test_pairwise4(self):
assert pairwise([0, 0, 0, 0, 1, 1], 1) == 10
    def test_pairwise5(self):
assert pairwise([], 100) == 0
```
#### File: algorithms/selection_sort/selection_sort.py
```python
def selection_sort(arr):
"""Sort the given array using selection sort.
All cases is O(n^2).
Arguments:
arr: The array to sort.
Returns:
The same array, arr, but sorted in
increasing order.
"""
# The array is separated into the sorted
# section, with indices less than i, and the
# unsorted, with indices greater than or
# equal to i. Each pass of the array finds
# the minimum in the unsorted section, and
# appends it to the end of the sorted section.
for i in range(len(arr) - 1):
min_idx = i
for k in range(i + 1, len(arr)):
if arr[k] < arr[min_idx]:
min_idx = k
temp = arr[i]
arr[i] = arr[min_idx]
arr[min_idx] = temp
return arr
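# Added illustrative usage (a minimal sketch, not part of the original exercise):
# the list is sorted in place and the very same object is returned.
def _selection_sort_demo():
    data = [5, 2, 4, 1]
    result = selection_sort(data)
    assert result is data and result == [1, 2, 4, 5]
    return result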
``` |
{
"source": "joshsizer/mnist_digit_classifier",
"score": 3
} |
#### File: joshsizer/mnist_digit_classifier/from_scratch.py
```python
import pandas as pd
import numpy as np
import numpy.random as nprand
nprand.seed(3321)
import math
def full_print(arr):
"""Disable the corners only functionality for
this specific print job.
"""
np.set_printoptions(threshold=np.inf)
print(arr)
np.set_printoptions(threshold=1000)
def to_categorical(Y):
"""Convert a single number to a vector, where
the value of 1 is set for the index equal to the
single number, and 0 otherwise.
"""
new_arr = np.zeros((len(Y), 10), Y.dtype)
for i in range(len(Y)):
new_arr[i][Y[i][0]] = 1
return new_arr
def from_categorical(Y):
"""Turn a categorical output into a single
digit classification.
"""
new_arr = np.zeros((len(Y), 1), Y.dtype)
for i in range(len(Y)):
new_arr[i][0] = np.argmax(Y[i])
return new_arr
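# Added illustrative sketch: to_categorical and from_categorical are inverses
# for a column vector of digit labels (assumes numpy is imported as np above).
def _categorical_roundtrip_demo():
    labels = np.array([[3], [0], [7]])
    one_hot = to_categorical(labels)       # shape (3, 10), one 1 per row
    recovered = from_categorical(one_hot)  # back to shape (3, 1)
    assert (recovered == labels).all()
    return one_hot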
def sigmoid(x):
"""An implementation of sigmoid function.
Thanks to source:
https://stackoverflow.com/questions/3985619/how-to-calculate-a-logistic-sigmoid-function-in-python
"""
    # Clip as in sigmoid_derivative below to avoid overflow in exp()
    x = np.clip(x, -500, 500)
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
# Avoid overflow by limiting the range of X
x = np.clip(x, -500, 500)
sig_x = sigmoid(x)
return sig_x * (1-sig_x)
def relu(x):
"""An implementation of a ReLU (rectified
linear unit) function.
Thanks to source:
https://stackoverflow.com/questions/32109319/how-to-implement-the-relu-function-in-numpy
"""
return np.maximum(x, 0)
def relu_derivative(x):
"""An implementation of the ReLU derivative.
Thanks to source:
https://stackoverflow.com/questions/46411180/implement-relu-derivative-in-python-numpy
"""
    # Work on a copy so the caller's array is not modified in place
    x = np.copy(x)
    x[x<=0] = 0
    x[x>0] = 1
    return x
def softmax(x):
"""An implementation of the softmax function
Thanks to source:
https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
"""
e_x = np.exp(x-np.max(x))
return e_x / e_x.sum()
def softmax_derivative(x):
pass
def initialize_layer_weights(n, m, init_type="xavier"):
"""Generates a distribution of random numbers in some range,
specified by the init_type parameter.
n is the number of input nodes
m is the number of output nodes.
U is the uniform distribution.
G is the gaussian or normal distribution.
Thanks to source:
https://machinelearningmastery.com/weight-initialization-for-deep-learning-neural-networks/
Initialization type (init_type) can be:
Good for Sigmoid and Tanh activation functions.
"xavier"
Xavier Glorot uses the formula:
weight = U[-(1/sqrt(n)), 1/sqrt(n)]
"nxavier"
Normalized Xavier Glorot uses the formula:
weight = U[-(sqrt(6)/sqrt(n+m)), sqrt(6)/sqrt(n+m)]
Good for ReLU activation funtions.
"he"
Kaiming He uses the formula:
weight = G(0.0, sqrt(2/n))
"""
if "xavier" in init_type:
numbers = nprand.rand(m, n)
if init_type == "xavier":
lower, upper = -(1.0 / math.sqrt(n)), (1.0 / math.sqrt(n))
else:
lower, upper = -(math.sqrt(6.0) / math.sqrt(n + m)), (math.sqrt(6.0) / math.sqrt(n + m))
scaled = lower + numbers * (upper - lower)
else:
std = math.sqrt(2.0 / n)
numbers = nprand.randn(m, n)
scaled = numbers * std
return scaled
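# Added illustrative sketch: the weight matrix comes back shaped (m, n), i.e.
# (outputs, inputs), so it can left-multiply a column of n input activations.
def _weight_init_demo():
    w = initialize_layer_weights(784, 128, "he")  # 784 inputs -> 128 outputs
    assert w.shape == (128, 784)
    return w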
def shuffle_two(a, b):
"""Shuffle two arrays in the same way so as to
keep them correctly aligned with each other.
For example:
shuffle_two([1, 2, 3], [3, 2, 1])
could produce [1, 3, 2], [3, 1, 2]
"""
rnd_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rnd_state)
np.random.shuffle(b)
# Load train.csv
df = pd.read_csv("data/train.csv")
dfn = df.to_numpy()
# Split into X and Y
dfn = np.hsplit(dfn, [1])
# X are the input objects (MNIST digits) and Y are
# the correct label for each object.
X_train = dfn[1]
Y_train = dfn[0]
# Normalize our inputs and outputs
X_train = X_train / 255
Y_train = to_categorical(Y_train)
# Define our layers
layers = [X_train.shape[1], 128, 10]
# Initialize our layer 1 and layer 2 weights. Layer
# 1 uses a ReLU function, so use "he" init type.
# Layer 2 uses a sigmoid function, so use the
# xavier or nxavier type.
l1_w = initialize_layer_weights(layers[0], layers[1], "he")
l2_w = initialize_layer_weights(layers[1], layers[2], "nxavier")
# Initialize our bias to 0 for all layers
l1_b = np.zeros((layers[1], 1))
l2_b = np.zeros((layers[2], 1))
X_total = X_train
Y_total = Y_train
X_validate = None
Y_validate = None
# Do the learning
mini_batch_size = 32
epochs = 10
for epoch in range(epochs):
# Shuffle our dataset
shuffle_two(X_total, Y_total)
# Split our dataset into validation and train
# sets.
split_point = int(len(X_total) * 0.3)
X_validate = X_total[0:split_point]
X_train = X_total[split_point:]
Y_validate = Y_total[0:split_point]
Y_train = Y_total[split_point:]
for i in range(0, len(X_train), mini_batch_size):
mini_batch_x = X_train[i: i + mini_batch_size].T
mini_batch_y = Y_train[i: i + mini_batch_size].T
z1 = l1_w.dot(mini_batch_x) + l1_b
a1 = relu(z1)
z2 = l2_w.dot(a1) + l2_b
a2 = sigmoid(z2)
cost = np.mean(np.square(a2 - mini_batch_y))
#print(cost)
da2 = (a2 - mini_batch_y)
dz2 = da2 * sigmoid_derivative(z2)
nabla_l2_b = (1/mini_batch_size) * np.sum(dz2, axis=1, keepdims=True)
nabla_l2_w = (1/mini_batch_size) * np.dot(dz2, a1.T)
da1 = np.dot(l2_w.T, dz2)
dz1 = da1 * relu_derivative(a1)
nabla_l1_b = (1/mini_batch_size) * np.sum(dz1, axis=1, keepdims=True)
nabla_l1_w = (1/mini_batch_size) * np.dot(dz1, mini_batch_x.T)
learning_rate = 0.2
l1_w = l1_w - learning_rate * nabla_l1_w
l2_w = l2_w - learning_rate * nabla_l2_w
l1_b = l1_b - learning_rate * nabla_l1_b
l2_b = l2_b - learning_rate * nabla_l2_b
# check out accuracy
y = from_categorical(Y_validate)
x = X_validate
z1 = l1_w.dot(x.T) + l1_b
a1 = relu(z1)
z2 = l2_w.dot(a1) + l2_b
a2 = sigmoid(z2)
predictions = from_categorical(a2.T)
count_correct = 0
total = 0
for i in range(len(x)):
prediction = predictions[i]
expected = y[i][0]
if prediction == expected:
count_correct += 1
total += 1
print(count_correct/total)
``` |
{
"source": "JoshSkrzypczak/people",
"score": 3
} |
#### File: people/scrape/va.py
```python
import datetime
import re
import attr
from spatula import HtmlPage, HtmlListPage, XPath
from common import Person, PeopleWorkflow
PARTY_MAP = {"R": "Republican", "D": "Democratic", "I": "Independent"}
party_district_pattern = re.compile(r"\((R|D|I)\) - (?:House|Senate) District\s+(\d+)")
name_elect_pattern = re.compile(r"(- Elect)$")
def get_party_district(text):
return party_district_pattern.match(text).groups()
lis_id_patterns = {
"upper": re.compile(r"(S[0-9]+$)"),
"lower": re.compile(r"(H[0-9]+$)"),
}
def get_lis_id(chamber, url):
"""Retrieve LIS ID of legislator from URL."""
match = re.search(lis_id_patterns[chamber], url)
if match.groups:
return match.group(1)
def clean_name(name):
name = name_elect_pattern.sub("", name).strip()
action, date = (None, None)
match = re.search(r"-(Resigned|Member) (\d{1,2}/\d{1,2})?", name)
if match:
action, date = match.groups()
name = name.rsplit("-")[0]
return name, action, date
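# Added illustrative sketch with hypothetical values: clean_name() splits off a
# trailing "-Resigned mm/dd" (or "-Member mm/dd") marker, and get_lis_id() pulls
# the chamber-prefixed LIS identifier off the end of a member URL.
def _parse_helpers_demo():
    name, action, date = clean_name("Jane Doe-Resigned 1/15")  # ("Jane Doe", "Resigned", "1/15")
    lis_id = get_lis_id("upper", "http://lis.virginia.gov/cgi-bin/legp604.exe?211+mbr+S100")  # "S100"
    return name, action, date, lis_id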
def maybe_date(text):
try:
date = datetime.datetime.strptime(text, "%Y-%d-%m")
return date.strftime("%Y-%m-%d")
except ValueError:
return ""
# TODO: restore when we do committees again
# def get_committees(self, item):
# for com in item.xpath('//ul[@class="linkSect"][1]/li/a/text()'):
# key = (com, self.chamber)
# if key not in self.kwargs["committees"]:
# org = Organization(
# name=com, chamber=self.chamber, classification="committee"
# )
# org.add_source(self.url)
# self.kwargs["committees"][key] = org
# self.obj.add_membership(
# self.kwargs["committees"][key],
# start_date=maybe_date(self.kwargs["session"].get("start_date")),
# end_date=maybe_date(self.kwargs["session"].get("end_date", "")),
# )
@attr.s(auto_attribs=True)
class PartialMember:
name: str
url: str
image: str = None
class MemberList(HtmlListPage):
session_id = "211" # 2021
source = f"http://lis.virginia.gov/{session_id}/mbr/MBR.HTM"
def process_item(self, item):
name = item.text
lname = name.lower()
if "resigned" in lname or "vacated" in lname or "retired" in lname:
return
name, action, date = clean_name(name)
return self.next_page_cls(PartialMember(name=name, url=item.get("href")))
class MemberDetail(HtmlPage):
input_type = PartialMember
def process_page(self):
party_district_text = self.root.xpath("//h3/font/text()")[0]
party, district = get_party_district(party_district_text)
p = Person(
name=self.input.name, state="va", chamber=self.chamber, party=party, district=district,
)
if self.input.image:
p.image = self.input.image
p.add_link(self.source.url)
p.add_source(self.source.url)
self.get_offices(p)
return p
def get_offices(self, person):
for ul in self.root.xpath('//ul[@class="linkNon" and normalize-space()]'):
address = []
phone = None
email = None
for li in ul.getchildren():
text = li.text_content()
if re.match(r"\(\d{3}\)", text):
phone = text.strip()
elif text.startswith("email:"):
email = text.strip("email: ").strip()
else:
address.append(text.strip())
if "Capitol Square" in address:
office_obj = person.capitol_office
else:
office_obj = person.district_office
office_obj.address = "; ".join(address)
if phone:
office_obj.voice = phone
if email:
person.email = email
class SenateDetail(MemberDetail):
input_type = PartialMember
role = "Senator"
chamber = "upper"
class SenatePhotoDetail(HtmlPage):
input_type = PartialMember
def get_source_from_input(self):
lis_id = get_lis_id("upper", self.input.url)
return f"http://apps.senate.virginia.gov/Senator/memberpage.php?id={lis_id}"
def process_page(self):
src = self.root.xpath('.//img[@class="profile_pic"]/@src')
img = src[0] if src else None
if img and img.startswith("//"):
img = "https:" + img
self.input.image = img
return SenateDetail(self.input)
class DelegateDetail(MemberDetail):
role = "Delegate"
chamber = "lower"
def process_page(self):
p = super().process_page()
lis_id = get_lis_id(self.chamber, self.input.url)
if lis_id:
lis_id = "{}{:04d}".format(lis_id[0], int(lis_id[1:]))
p.image = f"http://memdata.virginiageneralassembly.gov/images/display_image/{lis_id}"
return p
class SenateList(MemberList):
chamber = "upper"
selector = XPath('//div[@class="lColRt"]/ul/li/a')
next_page_cls = SenatePhotoDetail
class DelegateList(MemberList):
chamber = "lower"
selector = XPath('//div[@class="lColLt"]/ul/li/a')
next_page_cls = DelegateDetail
senators = PeopleWorkflow(SenateList)
delegates = PeopleWorkflow(DelegateList)
```
#### File: scripts/one-off/migrate_people.py
```python
import sys
import os
import glob
import itertools
import json
from collections import defaultdict, OrderedDict
from utils import ocd_uuid, get_jurisdiction_id, get_data_dir, dump_obj, iter_objects
def load_new_files(state):
new_db_ids = set()
for data, _ in itertools.chain(iter_objects(state, "people"), iter_objects(state, "retired")):
for ids in data.get("other_identifiers", []):
if ids["scheme"] == "legacy_openstates":
new_db_ids.add(ids["identifier"])
return new_db_ids
def scan_old_files(state, old_dir, new_db_ids):
with open(os.path.join(old_dir, state, "metadata.json")) as f:
metadata = json.load(f)
all_old_files = glob.glob(os.path.join(old_dir, state, "legislators/*"))
already = 0
migrated = 0
for f in all_old_files:
data = json.load(open(f))
found = 0
for oid in data["_all_ids"]:
if oid in new_db_ids:
found += 1
if found == 0:
process_old_file(f, metadata)
migrated += 1
elif found == len(data["_all_ids"]):
already += 1
else:
print("!!! PARTIAL:", f)
raise Exception()
print(f"{already} already migrated. {migrated} migrated.")
def terms_to_roles(leg_terms, metadata_terms):
# term_id => (start, end)
term_ranges = {}
for mt in metadata_terms:
term_ranges[mt["name"]] = (mt["start_year"], mt["end_year"])
# (chamber, district) => [years]
years_for_position = defaultdict(list)
for lt in leg_terms:
# fix out of order term in MA
start, end = sorted(term_ranges[lt["term"]])
years_for_position[(lt["chamber"], lt["district"])].extend(list(range(start, end + 1)))
positions = []
for pos, years in years_for_position.items():
years = sorted(years)
start_year = None
prev_year = start_year = years[0]
for year in years[1:]:
if year != prev_year + 1:
positions.append((*pos, start_year, prev_year))
start_year = year
prev_year = year
positions.append((*pos, start_year, prev_year))
return positions
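# Added illustrative sketch (hypothetical data): consecutive terms held in the
# same seat collapse into a single (chamber, district, start_year, end_year) span.
def _terms_to_roles_demo():
    metadata_terms = [
        {"name": "2009-2010", "start_year": 2009, "end_year": 2010},
        {"name": "2011-2012", "start_year": 2011, "end_year": 2012},
    ]
    leg_terms = [
        {"term": "2009-2010", "chamber": "lower", "district": "5"},
        {"term": "2011-2012", "chamber": "lower", "district": "5"},
    ]
    return terms_to_roles(leg_terms, metadata_terms)  # [("lower", "5", 2009, 2012)]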
def process_old_file(filename, metadata):
data = json.load(open(filename))
if data["leg_id"] != data["_id"]:
raise Exception()
if data.get("active"):
print(data)
return
raise Exception()
if data.get("roles", []):
raise Exception()
# remove unused fields
for k in (
"_yearly_contributions",
"nimsp_candidate_id",
"votesmart_id",
"_contributions_start_year",
"_scraped_name",
"_total_contributions",
"transparencydata_id",
"_locked_fields",
"level",
"nimsp_id",
"_type",
"country",
"updated_at",
"_id",
"active",
"roles",
"offices",
"notice",
"nickname",
"district",
"party",
"chamber",
"csrfmiddlewaretoken",
"email",
"created_at",
"office_address",
"office_phone",
"occupation",
"_guid",
"_code",
"all_ids",
"2008-2011",
):
data.pop(k, None)
# remove plus fields
for k in [k for k in data.keys() if k.startswith("+")]:
data.pop(k)
leg_obj = OrderedDict({"id": ocd_uuid("person")})
leg_obj["name"] = data.pop("full_name")
first_name = data.pop("first_name")
middle_name = data.pop("middle_name")
last_name = data.pop("last_name")
suffixes = data.pop("suffixes", "")
suffix = data.pop("suffix", "")
if first_name:
leg_obj["given_name"] = first_name
if last_name:
leg_obj["family_name"] = last_name
if middle_name:
leg_obj["middle_name"] = middle_name
if suffix:
leg_obj["suffix"] = suffixes or suffix
state = data.pop("state")
jurisdiction_id = get_jurisdiction_id(state)
# pull useful fields
old_roles = data.pop("old_roles", {})
parties = set()
new_roles = []
for session, roles in old_roles.items():
for role in roles:
if role["type"] in (
"committee member",
"Minority Floor Leader",
"Majority Floor Leader",
"Majority Caucus Chair",
"Minority Caucus Chair",
"Speaker Pro Tem",
"President Pro Tem",
"Senate President",
"Speaker of the House",
"Minority Whip",
"Majority Whip",
"Lt. Governor",
) or role.get("committee"):
continue
parties.add(role["party"])
new_roles.append(
{"term": role["term"], "chamber": role["chamber"], "district": role["district"]}
)
leg_obj["party"] = [{"name": party} for party in parties]
# add these to leg_obj
roles = terms_to_roles(new_roles, metadata["terms"])
formatted_roles = []
for chamber, district, start, end in roles:
formatted_roles.append(
OrderedDict(
{
"district": district,
"jurisdiction": jurisdiction_id,
"type": chamber,
"start_date": f"{start}-01-01",
"end_date": f"{end}-12-31",
}
)
)
leg_obj["roles"] = formatted_roles
all_ids = data.pop("_all_ids")
leg_id = data.pop("leg_id")
if leg_id not in all_ids:
all_ids.append(leg_id)
image = data.pop("photo_url", "")
if image:
leg_obj["image"] = image
url = data.pop("url", "")
if url:
leg_obj["links"] = [{"url": url}]
leg_obj["sources"] = data.pop("sources")
leg_obj["other_identifiers"] = [
{"identifier": id_, "scheme": "legacy_openstates"} for id_ in all_ids
]
if data:
print(data)
raise Exception()
output_dir = get_data_dir(state)
dump_obj(leg_obj, output_dir=os.path.join(output_dir, "retired"))
def main():
old_data_dir = sys.argv[1]
for state in glob.glob("data/*"):
state = state.replace("data/", "")
print(state)
new_ids = load_new_files(state)
scan_old_files(state, old_data_dir, new_ids)
if __name__ == "__main__":
main()
```
#### File: scripts/tests/test_summarize.py
```python
from summarize import Summarizer
def test_person_summary():
s = Summarizer()
people = [
{
"gender": "F",
"image": "https://example.com/image1",
"party": [{"name": "Democratic"}, {"name": "Democratic", "end_date": "1990"}],
},
{
"gender": "F",
"image": "https://example.com/image2",
"party": [{"name": "Democratic"}, {"name": "Working Families"}],
"extras": {"religion": "Zoroastrian"},
"contact_details": [{"fax": "123-456-7890", "note": "Capitol Office"}],
"other_identifiers": [{"scheme": "fake", "identifier": "abc"}],
"ids": {"twitter": "fake"},
},
{
"gender": "M",
"image": "https://example.com/image3",
"party": [{"name": "Republican"}],
"contact_details": [{"phone": "123-456-7890", "note": "Capitol Office"}],
"other_identifiers": [{"scheme": "fake", "identifier": "123"}],
},
]
for p in people:
s.summarize(p)
assert s.parties == {"Republican": 1, "Democratic": 2, "Working Families": 1}
assert s.contact_counts == {"Capitol Office phone": 1, "Capitol Office fax": 1}
assert s.id_counts == {"fake": 2, "twitter": 1}
assert s.optional_fields == {"gender": 3, "image": 3}
assert s.extra_counts == {"religion": 1}
``` |
{
"source": "joshsnelling/HomeKit-Bridge",
"score": 3
} |
#### File: ifactory/include/calcs.py
```python
__version__ = "1.0.0"
__modname__ = "Useful Calculations"
__author__ = "ColoradoFourWheeler"
__copyright__ = "Copyright 2018, ColoradoFourWheeler & EPS"
__credits__ = ["ColoradoFourWheeler"]
__license__ = "GPL"
__maintainer__ = "ColoradoFourWheeler"
__email__ = "Indigo Forums"
__status__ = "Production"
# Python Modules
import sys
import logging
import linecache
import json
# Third Party Modules
import indigo
# Package Modules
import ex
# Enumerations
kExceptionOutputPrefix = "\n\n\t\t\t\t\t\t\t "
class CalcsException(Exception):
# Generic error
pass
class TypeConversionError(Exception):
# Error converting from one data type to another
pass
###
def convert_temperature (value, toCelsius = False, asInteger = False, precision = 1):
"""
Convert a temperature value to Celsius or Fahrenheit.
Arguments:
toCelsius: convert value to Celsius (default is Fahrenheit)
        asInteger: returns full float value when false and integer value when true
        precision: number of decimal places to round the converted value to
Returns:
Converted value as a float
"""
try:
if toCelsius:
# Convert value to celsius
value = float(value)
value = (value - 32) / 1.8000
value = round(value, precision)
if asInteger: return int(value)
return value
else:
# Default: convert value to fahrenheit
value = float(value)
value = (value * 1.8000) + 32
value = round(value, precision)
if asInteger: return int(value)
return value
except Exception as e:
e.args += (value,)
e.args += (u"to Celsius: {}".format(toCelsius),)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
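# Added illustrative usage (a minimal sketch, not part of the original module):
# 100 F converts to roughly 37.8 C, and 37.8 C converts back to roughly 100 F.
def _convert_temperature_demo():
    return (convert_temperature(100, toCelsius = True), convert_temperature(37.8))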
###
def filter_to_dict (filter):
"""
Reads a filter passed from Devices.xml into a dictionary and returns it.
"""
try:
args = {}
filter = filter.replace("[", "").replace("]","")
for f in filter.split(","):
f = f.strip() # Clean up spaces
valkey = f.split("=")
valname = valkey[0].lower().strip()
args[valname] = valkey[1].strip()
return args
except Exception as e:
e.args += (filter,)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
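# Added illustrative usage (a minimal sketch): a Devices.xml filter string such
# as "[type=dimmer, onState=true]" parses to {"type": "dimmer", "onstate": "true"}
# (keys are lower-cased, values keep their case).
def _filter_to_dict_demo():
    return filter_to_dict("[type=dimmer, onState=true]")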
###
def type_to_unicode_output (obj):
"""
Converts the type of the object to a string representation including the type (used for __str__ functions).
"""
try:
if obj is None: return "None"
return u"{} ({})".format(obj, unicode(type(obj)).replace("<type '", "").replace("'>","").replace("<class '", ""))
except Exception as e:
e.args += (unicode(type(obj)),)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
###
def generic_unicode_output (tabtitle, tabcontent, obj, title = None):
"""
Generic unicode output for custom classes (called by __str__ functions).
"""
try:
ret = ""
if title: ret += u"{}{} : {}\n".format(tabtitle, title, type_to_unicode_output(obj) )
for a in dir(obj):
if callable(getattr(obj, a)): continue
if a.startswith("_"): continue
if a == "factory": continue
if a == "logger": continue
if a == "tabtitle" or a == "tabcontent": continue
if type(getattr(obj, a)) == list:
ret += u"{}{} : (list) \n".format(tabcontent, a)
for l in getattr(obj, a):
ret += u"\t{}item :\n{}{}".format(tabcontent, tabcontent, l)
else:
ret += u"{}{} : {}\n".format(tabcontent, a, type_to_unicode_output(getattr(obj, a)) )
except Exception as e:
e.args += (unicode(type(obj)),)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
return ret
###
def generic_class_to_dict (obj):
"""
Using the same exclusions as generic_unicode_output, convert class data to a dictionary object.
"""
try:
data = {}
for a in dir(obj):
if callable(getattr(obj, a)): continue
if a.startswith("_"): continue
if a == "factory": continue
if a == "logger": continue
if a == "tabtitle" or a == "tabcontent": continue
if type(getattr(obj, a)) == list:
listitem = []
for l in getattr(obj, a):
if "'instance'" in unicode(type(l)):
listitem.append(generic_class_to_dict(l))
else:
listitem.append(l)
data[a] = listitem
else:
data[a] = getattr(obj, a)
except Exception as e:
e.args += (unicode(type(obj)),)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
return data
###
def convert_to_compared_datatype (source, destination):
"""
Converts the source value to the destination data type.
Arguments:
source: the source value who's data type needs to be changed
destination: the value that the data type will be derived from
Returns:
source: value of source converted to the data type of destination
"""
try:
converted = False # Assume failure
# Convert to string types for ease
stype = str(type(source)).replace("<type '", "").replace("'>", "")
dtype = str(type(destination)).replace("<type '", "").replace("'>", "")
# Convert from None
if stype == "NoneType":
if dtype == "float": source = 0.0
if dtype == "int": source = 0
if dtype == "bool": source = False
if dtype == "string": source = ""
converted = True
# Convert from Boolean
if stype == "bool":
# To integer
if dtype == "int":
if source: source = 1
if not source: source = 0
converted = True
# To float
elif dtype == "float":
if source: source = 1.0
if not source: source = 0.0
converted = True
# To string
elif dtype == "str":
if source: source = "true"
if not source: source = "false"
converted = True
# From string
if stype == "str":
# To unicode
if dtype == "unicode":
source = unicode(source)
converted = True
# To boolean
if dtype == "bool":
if source.lower() == "true":
source = True
else:
source = False # It's either absolutely true or it's always false
converted = True
# To integer
if dtype == "int":
try:
source = int(source)
converted = True
except:
raise TypeConversionError (u"{} value {} cannot be converted to {}".format(stype, source, dtype))
# To float
if dtype == "float":
try:
source = float(source)
converted = True
except:
raise TypeConversionError (u"{} value {} cannot be converted to {}".format(stype, source, dtype))
# From unicode to string
if stype == "unicode" and dtype == "str":
source = str(source)
converted = True
# From integer to float
if stype == "int" and dtype == "float":
source = float(source)
converted = True
# From float to integer
if stype == "float" and dtype == "int":
source = int(round(source))
converted = True
if not converted:
raise TypeConversionError (u"Unable to convert source {} to type {}".format(stype, dtype))
except Exception as e:
e.args += (u"{} to {}".format(stype, dtype),)
raise CalcsException (kExceptionOutputPrefix + ex.stack_trace(e))
return source
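# Added illustrative usage (a minimal sketch): the source value adopts the data
# type of the destination, e.g. the string "12" compared against the int 5
# becomes the int 12, and the int 1 compared against a float becomes 1.0.
def _convert_datatype_demo():
    return (convert_to_compared_datatype("12", 5), convert_to_compared_datatype(1, 2.5))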
```
#### File: ifactory/include/ui.py
```python
__version__ = "1.0.0"
__modname__ = "Indigo Plugin User Interface"
__author__ = "ColoradoFourWheeler"
__copyright__ = "Copyright 2018, ColoradoFourWheeler & EPS"
__credits__ = ["ColoradoFourWheeler"]
__license__ = "GPL"
__maintainer__ = "ColoradoFourWheeler"
__email__ = "Indigo Forums"
__status__ = "Production"
# Python Modules
import logging
import sys
import json
# Third Party Modules
import indigo
# Package Modules
import ex
# Enumerations
kDeviceVersion = "ipf_deviceVersion"
class UserInterface:
"""
Handle callbacks and dynmaic list generation.
"""
def __init__(self, factory):
try:
self.factory = factory # References the Indigo plugin
self.logger = logging.getLogger ("Plugin.ui")
self.logger.debug ("{} {} loaded".format(__modname__, __version__))
self.deviceFieldCache = {} # For retrieving defaults and knowing if a field changed
except Exception as e:
self.logger.error (ex.stack_trace(e))
###
def formFieldChanged (self, valuesDict, typeId, devId, setDefault):
"""
Called from the plugin whenever any form field is changed, then attempts to raise an event in the plugin or ifactory and returns the result.
Arguments:
valuesDict = form fields
typeId = device type Id
devId = device Id
setDefault = read last list retrieved for this field and default to the first value if the field is blank or its value doesn't exist on the list
"""
try:
errorsDict = indigo.Dict()
# If there's no version then add it, after this version changes can only happen elsewhere
if kDeviceVersion not in valuesDict: valuesDict[kDeviceVersion] = self.factory.PluginBase.pluginVersion
# Process through jstuff
(valuesDict, cbErrors) = self.factory.jstuff.onformFieldChanged (valuesDict, typeId, devId)
if cbErrors: return (valuesDict, cbErrors)
# Plugin callbacks
callback = self.factory._callback ([]) # Base callback
if callback:
(valuesDict, cbErrors) = callback
if cbErrors: return (valuesDict, cbErrors)
cleantypeId = ''.join(e for e in typeId if e.isalnum()) # Scrub type ID to be a valid function name
callback = self.factory._callback ([valuesDict, typeId, devId], None, cleantypeId + "_") # Device type prefix callback
if callback:
(valuesDict, cbErrors) = callback
if cbErrors: return (valuesDict, cbErrors)
callback = self.factory._callback ([valuesDict, typeId, devId], None, None, "_" + cleantypeId) # Device type suffix callback
if callback:
(valuesDict, cbErrors) = callback
if cbErrors: return (valuesDict, cbErrors)
except Exception as e:
self.logger.error (ex.stack_trace(e))
return (valuesDict, errorsDict)
```
#### File: Server Plugin/lib/ivoice.py
```python
import indigo
import logging
import linecache
import sys
class IndigoVoice:
# Enumerations
kHomeKitPlugin = u'com.eps.indigoplugin.homekit-bridge'
kAlexaPlugin = u'com.indigodomo.opensource.alexa-hue-bridge'
kVoiceAPIActionName = u'voiceAPI'
#
# Initialize the class
#
def __init__ (self):
self.logger = logging.getLogger ("Plugin.ivoice")
self.libversion = "1.0.0"
        self.HKB = indigo.server.getPlugin(self.kHomeKitPlugin)
        self.AHB = indigo.server.getPlugin(self.kAlexaPlugin)
        self.logger.debug ("Starting Indigo Voice plugin API version {0}".format(self.version()))
#
# Report back our version number (in case the calling plugin wants to include that in their support dump)
#
def version (self):
return self.libversion
#
# Check that props/valuesDict has the required fields
#
def checkFields (self, valuesDict):
try:
errorDict = indigo.Dict()
success = True
requiredFields = ["voiceIntegrated", "voiceHKBAvailable", "voiceAHBAvailable", "voiceHKBServer", "voiceAHBServer", "voiceHKBDeviceType"]
for r in requiredFields:
if "voiceIntegrated" not in valuesDict:
errorDict["showAlertText"] = "Indigo voice integration failure. Device is missing the voiceIntegrated field, integration is not possible."
return (False, valuesDict, errorDict)
except Exception as e:
success = False
errorDict["showAlertText"] = unicode(e)
self.logger.error (self.getException(e))
return (success, valuesDict, errorDict)
#
# Request that HBB add a device to a server
#
    def addDevice (self, devId, valuesDict, plug = kHomeKitPlugin):
try:
            errorDict = indigo.Dict()
            # Check for all required fields
(chSuccess, chValues, chErrors) = self.checkFields (valuesDict)
if not chSuccess:
return False
success = True
plugin = indigo.server.getPlugin(plug)
if plugin.isEnabled():
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "addDevice"
apiprops["params"] = (devId, valuesDict)
(success, data, errors) = plugin.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if not success:
self.logger.error (errors["message"])
return False
else:
self.logger.error ("Attempting to add a device to {} but neither is enabled.".format(plugin.name))
return False
except Exception as e:
success = False
self.logger.error (self.getException(e))
return success
#
# Request that HBB update a device
#
def updateDevice (self, devId, valuesDict, plug = kHomeKitPlugin):
try:
# Check for all required fields
(chSuccess, chValues, chErrors) = self.checkFields (valuesDict)
if not chSuccess:
return False
success = True
plugin = indigo.server.getPlugin(plug)
if plugin.isEnabled():
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "updateDevice"
apiprops["params"] = (devId, valuesDict)
(success, data, errors) = plugin.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if not success:
self.logger.error (errors["message"])
return False
else:
self.logger.error ("Attempting to update a device on {} but it is not enabled.".format(plugin.name))
return False
except Exception as e:
success = False
self.logger.error (self.getException(e))
return success
#
# An HBB Integration API form field changed
#
def integrationFieldChange (self, valuesDict, typeId, devId):
try:
errorDict = indigo.Dict()
hkb = indigo.server.getPlugin(self.kHomeKitPlugin)
ahb = indigo.server.getPlugin(self.kAlexaPlugin)
# Check for all required fields
(chSuccess, chValues, chErrors) = self.checkFields (valuesDict)
if not chSuccess:
return (chValues, chErrors)
if valuesDict["voiceIntegrated"]:
# Set fields based on integraton
valuesDict["voiceHKBAvailable"] = True
valuesDict["voiceAHBAvailable"] = False # Until we get Alexa integration keep this at false
if hkb.pluginDisplayName == "- plugin not installed -": valuesDict["voiceHKBAvailable"] = False
if ahb.pluginDisplayName == "- plugin not installed -": valuesDict["voiceAHBAvailable"] = False
# Make sure they have the required fields
if not valuesDict["voiceHKBAvailable"] and not valuesDict["voiceAHBAvailable"]:
valuesDict["voiceIntegrated"] = False
errorDict["voiceIntegrated"] = "Voice integration plugin not installed"
errorDict["showAlertText"] = "Please install HomeKit Bridge from the Indigo plugin store to enable this device for HomeKit."
#errorDict["showAlertText"] = "Please install HomeKit Bridge from the Indigo plugin store to enable this device for HomeKit and/or the Alexa-Hue Bridge from the Indigo plugin store to enable this device for Alexa."
return (valuesDict, errorDict)
# If the have HKB and it's disabled and AHB is not installed at all
if hkb.isEnabled() == False and not valuesDict["voiceAHBAvailable"]:
valuesDict["voiceIntegrated"] = False
errorDict["voiceIntegrated"] = "HomeKit Bridge not enabled"
errorDict["showAlertText"] = "HomeKit Bridge is currently disabled and this plugin cannot talk to it, please re-enable HomeKit Bridge before trying to add this device to HomeKit."
return (valuesDict, errorDict)
# If the have AHB and it's disabled and HKB is not installed at all
#if ahb.isEnabled() == False and not valuesDict["voiceHKBAvailable"]:
# valuesDict["voiceIntegrated"] = False
# errorDict["voiceIntegrated"] = "Alexa-Hue Bridge not enabled"
# errorDict["showAlertText"] = "Alexa-Hue Bridge is currently disabled and this plugin cannot talk to it, please re-enable Alexa-Hue Bridge before trying to add this device to Alexa."
# return (valuesDict, errorDict)
# If all voice integration is not enabled
#if hkb.isEnabled() == False and ahb.isEnabled() == False:
# valuesDict["voiceIntegrated"] = False
# errorDict["voiceIntegrated"] = "Voice integration plugins are not enabled"
# errorDict["showAlertText"] = "All voice integration plugins are currently disabled and this plugin cannot talk to them, please re-enable HomeKit Bridge and/or Alexa-Hue Bridge before trying to add this device to HomeKit or Alexa."
# return (valuesDict, errorDict)
# Just to be safe, do a blank call to the API to make sure our version is OK
success = False
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "none"
apiprops["params"] = "none"
if hkb.isEnabled():
hkbVer = int(hkb.pluginVersion.replace(".", ""))
if hkbVer < 130:
valuesDict["voiceIntegrated"] = False
errorDict["voiceIntegrated"] = "HomeKit Bridge needs upgraded"
errorDict["showAlertText"] = "You are running a version of HomeKit Bridge that does not support this feature, please upgrade to the latest version to enable this device for HomeKit."
return (valuesDict, errorDict)
(success, data, errors) = hkb.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
#if ahb.isEnabled():
# ahbVer = int(ahb.pluginVersion.replace(".", ""))
# if ahbVer < 130:
# valuesDict["voiceIntegrated"] = False
# errorDict["voiceIntegrated"] = "HomeKit Bridge needs upgraded"
# errorDict["showAlertText"] = "You are running a version of HomeKit Bridge that does not support this feature, please upgrade to the latest version to enable this device for HomeKit."
# return (valuesDict, errorDict)
# (success, data, errors) = ahb.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if not success:
self.logger.error (errors["message"])
valuesDict["voiceIntegrated"] = False
valuesDict["voiceHKBServer"] = "none"
valuesDict["voiceHKBDeviceType"] = "default"
errorDict["showAlertText"] = errors["message"]
return (valuesDict, errorDict)
if valuesDict["voiceHKBServer"] == "": valuesDict["voiceHKBServer"] = "none" # In case there is a problem
if valuesDict["voiceHKBDeviceType"] == "": valuesDict["voiceHKBDeviceType"] = "default"
except Exception as e:
success = False
errorDict["showAlertText"] = unicode(e)
self.logger.error (self.getException(e))
return (valuesDict, errorDict)
#
# Request a list of valid servers from HomeKit Bridge
#
def HKBIntegrationServerList (self, filter="", valuesDict=None, typeId="", targetId=0):
try:
ret = [("default", "No HomeKit Bridge servers found")]
if "voiceHKBAvailable" in valuesDict:
if valuesDict["voiceHKBAvailable"]:
hkb = indigo.server.getPlugin(self.kHomeKitPlugin)
if hkb.isEnabled():
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "getServerList"
apiprops["params"] = "server" # Cannot add devices to guests or customs for now since guest is an exclusion of a server and custom doesn't integrate into Indog
(success, data, errors) = hkb.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if success:
ret = []
for d in data:
ret.append ((d[0], d[1]))
else:
self.logger.error (errors["message"])
except Exception as e:
self.logger.error (self.getException(e))
return ret
#
# Request a list of valid servers from Alexa Hue Bridge
#
def AHBIntegrationServerList (self, filter="", valuesDict=None, typeId="", targetId=0):
try:
ret = [("default", "No Alexa-Hue Bridge servers found")]
if "voiceAHBAvailable" in valuesDict:
if valuesDict["voiceAHBAvailable"]:
ahb = indigo.server.getPlugin(self.kAlexaPlugin)
if ahb.isEnabled():
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "getServerList"
apiprops["params"] = "server" # Cannot add devices to guests or customs for now since guest is an exclusion of a server and custom doesn't integrate into Indog
(success, data, errors) = ahb.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if success:
ret = []
for d in data:
ret.append ((d[0], d[1]))
else:
self.logger.error (errors["message"])
except Exception as e:
self.logger.error (self.getException(e))
return ret
#
# Request a list of valid device types from HomeKit Bridge
#
def IntegrationHKBDeviceTypeList (self, filter="", valuesDict=None, typeId="", targetId=0):
try:
ret = [("default", "No Homebridge types found")]
if "voiceHKBAvailable" in valuesDict:
if valuesDict["voiceHKBAvailable"]:
hkb = indigo.server.getPlugin(self.kHomeKitPlugin)
if hkb.isEnabled():
apiprops = {}
apiprops["libversion"] = self.version()
apiprops["command"] = "getDeviceTypes"
apiprops["params"] = "allowNone"
(success, data, errors) = hkb.executeAction(self.kVoiceAPIActionName, deviceId=0, waitUntilDone=True, props=apiprops)
if success:
ret = []
for d in data:
ret.append ((d[0], d[1]))
else:
self.logger.error (errors["message"])
except Exception as e:
self.logger.error (self.getException(e))
return ret
#
# Validate device config
#
def validateDeviceConfigUi(self, valuesDict, typeId, devId):
try:
errorDict = indigo.Dict()
if "voiceIntegrated" in valuesDict:
if valuesDict["voiceIntegrated"]:
if valuesDict["voiceHKBAvailable"]:
if valuesDict["voiceHKBServer"] == "":
errorDict["voiceHKBServer"] = "Select a HomeKit Bridge server"
errorDict["showAlertText"] = "If you opt to integrate with HomeKit Bridge then you must select which server to attach this device to."
return (valuesDict, errorDict)
if valuesDict["voiceHKBDeviceType"] == "":
errorDict["voiceHKBDeviceType"] = "Select a HomeKit Bridge device type"
errorDict["showAlertText"] = "If you opt to integrate with HomeKit Bridge then you must select how you want this device treated."
return (valuesDict, errorDict)
except Exception as e:
self.logger.error (self.getException(e))
return (valuesDict, errorDict)
#
# Get exception details
#
def getException (self, e):
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filenameEx = f.f_code.co_filename
filename = filenameEx.split("/")
filename = filename[len(filename)-1]
filename = filename.replace(".py", "")
filename = filename.replace(".pyc","")
linecache.checkcache(filename)
line = linecache.getline(filenameEx, lineno, f.f_globals)
exceptionDetail = "Exception in %s.%s line %i: %s\n\t\t\t\t\t\t\t CODE: %s" % (filename, f.f_code.co_name, lineno, str(e), line.replace("\t",""))
return exceptionDetail
```
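A minimal sketch of how a host plugin might wire this library into its own callbacks. The property names come from `checkFields` above, but the surrounding plugin scaffolding and import path are assumptions for illustration, and this only runs inside an Indigo environment where the `indigo` module exists:
```python
# Illustrative caller only; the plugin scaffolding and import path are assumptions.
from lib.ivoice import IndigoVoice

class Plugin(indigo.PluginBase):
    def startup(self):
        self.voice = IndigoVoice()

    def voiceFieldChanged(self, valuesDict, typeId, devId):
        # ConfigUI callback: delegate the voice-integration fields to the library.
        return self.voice.integrationFieldChange(valuesDict, typeId, devId)

    def deviceStartComm(self, dev):
        props = dev.pluginProps
        if props.get("voiceIntegrated", False):
            # Push the device to HomeKit Bridge through its Voice API action.
            self.voice.addDevice(dev.id, props)
```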
#### File: lib/plugin/hkplcharacteristic.py
```python
__version__ = "1.0.0"
__modname__ = "Homebridge-Indigo2 Payload Data"
__author__ = "ColoradoFourWheeler"
__copyright__ = "Copyright 2018, ColoradoFourWheeler & EPS"
__credits__ = ["ColoradoFourWheeler"]
__license__ = "GPL"
__maintainer__ = "ColoradoFourWheeler"
__email__ = "Indigo Forums"
__status__ = "Production"
# Python Modules
import sys
import logging
import json
# Third Party Modules
import indigo
# Package Modules
from ..ifactory.include import ex
from ..ifactory.include import calcs
class HomebridgePayloadCharacteristic:
"""
This class does nothing other than allow the user to manually set the values that will output a JSON data stream for HomeKit.
"""
###
def __init__(self):
"""
An empty class that allows the following attributes to be set.
"""
try:
self.logger = logging.getLogger ("Plugin.HomebridgePayloadCharacteristic")
self.tabtitle = "" # Title indention on __str__
self.tabcontent = "" # Content indention on __str__
#self.factory = factory # References the Indigo plugin
self.name = "" # Characteristic name
self.maxValue = 0 # Maximum value
self.minValue = 0 # Minimum value
self.readonly = False # Characteristic is read only
self.notify = True # Characteristic can be used in notifications
self.value = None # Value of the characteristic
self.changeMinMax = False # True when min/max are different than HapNode.JS
self.logger.debug ("{} {} loaded".format(__modname__, __version__))
except Exception as e:
self.logger.error (ex.stack_trace(e))
###
def __str__ (self):
try:
ret = calcs.generic_unicode_output (self.tabtitle, self.tabcontent, self)
except Exception as e:
self.logger.error (ex.stack_trace(e))
return ret
###
def legacy_populate_from_service (self, obj, charName, charValue):
"""
Create a payload characteristic for the service object record passed. This is transitional until all legacy methods and classes are cut over.
Arguments:
obj: service_* legacy HomeKit service class object
Returns:
HomebridgePayloadCharacteristic object
"""
try:
characteristic = getattr (obj, charName)
self.name = charName
self.value = charValue
#if runningAction and charItem["name"] == "On": charItem["value"] = True
#if not characteristic is None and not value is None and charItem["name"] == characteristic: charItem["value"] = value # Force it to see what it expects to see so it doesn't beachball
self.readonly = characteristic.readonly
self.notify = characteristic.notify
if "changeMinMax" in dir(characteristic) and characteristic.changeMinMax:
self.changeMinMax = True
self.minValue = characteristic.minValue
self.maxValue = characteristic.maxValue
except Exception as e:
self.logger.error (ex.stack_trace(e))
```
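For reference, a tiny illustrative sketch of filling this container by hand; the characteristic name and numbers are made up, and the dict at the end only shows the kind of payload entry a caller might assemble, not the plugin's actual schema:
```python
# Illustrative only: populate a characteristic entry manually.
c = HomebridgePayloadCharacteristic()
c.name = "Brightness"
c.value = 75
c.minValue = 0
c.maxValue = 100
c.readonly = False
c.notify = True
c.changeMinMax = True   # min/max differ from the HAP defaults

entry = {
    "name": c.name, "value": c.value,
    "minValue": c.minValue, "maxValue": c.maxValue,
    "readonly": c.readonly, "notify": c.notify,
    "changeMinMax": c.changeMinMax,
}
```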
#### File: lib/plugin/hkplprocessor.py
```python
__version__ = "1.0.0"
__modname__ = "HomeKit Payload Processor"
__author__ = "ColoradoFourWheeler"
__copyright__ = "Copyright 2018, ColoradoFourWheeler & EPS"
__credits__ = ["ColoradoFourWheeler"]
__license__ = "GPL"
__maintainer__ = "ColoradoFourWheeler"
__email__ = "Indigo Forums"
__status__ = "Production"
# Python Modules
import sys
import logging
import json
import thread
# Third Party Modules
import indigo
# Package Modules
from ..ifactory.include import ex
from ..ifactory.include import calcs
import hkpldevice
class HomebridgePayloadProcessor:
"""
Process an incoming API request and deliver the payload for it.
"""
###
def __init__(self, factory):
"""
Set up the class.
"""
try:
self.logger = logging.getLogger ("Plugin.HomebridgePayloadDevice")
self.tabtitle = "" # Title indention on __str__
self.tabcontent = "" # Content indention on __str__
self.factory = factory # References the HomeKit factory
self.logger.debug ("{} {} loaded".format(__modname__, __version__))
except Exception as e:
self.logger.error (ex.stack_trace(e))
###
def __str__ (self):
try:
ret = calcs.generic_unicode_output (self.tabtitle, self.tabcontent, self)
except Exception as e:
self.logger.error (ex.stack_trace(e))
return ret
###
def process_incoming_api_call (self, request, query):
"""
Process incoming HTTP API request from Homebridge-Indigo2.
"""
try:
#indigo.server.log(unicode(query))
#indigo.server.log(unicode(request))
if not "/HomeKit" in request.path: return self.json_reply_error ("fail", "Invalid path")
if not "cmd" in query: return self.json_reply_error ("fail", "Invalid request")
if not "serverId" in query: return self.json_reply_error ("fail", "Invalid server")
devId = None
serverId = int(query["serverId"][0])
jkey = None
cmd = query["cmd"][0]
if not serverId in indigo.devices: return self.json_reply_error ("fail", "Server Id invalid")
if "objId" in query: devId = int(query["objId"][0])
if "jkey" in query: jkey = query["jkey"][0]
if cmd == "deviceList": return self.json_reply_command_devicelist (serverId)
if cmd == "getInfo": return self.json_reply_command_getinfo (devId, jkey, serverId)
if cmd == "setCharacteristic": return self.json_reply_command_set_characteristic (query, devId, jkey, serverId)
return self.json_reply_error ("fail", "Nothing to process")
except Exception as e:
self.logger.error (ex.stack_trace(e))
return self.json_reply_error ("fail", "A fatal exception was encountered while processing your request, check the Indigo log for details")
###
def legacy_get_homekit_object (self, devId, serverId, hkType):
"""
Call into the legacy factory in the plugin to retrieve the HomeKit service object for this device.
Arguments:
devId: Indigo device Id for the service
serverId: Indigo device Id for the server hosting the device
HkType: HomeKit service_* type to resolve
"""
try:
return self.factory.factory.PluginBase.epslibrary.homekit.getServiceObject (devId, serverId, hkType, False, True)
except Exception as e:
self.logger.error (ex.stack_trace(e))
###
def legacy_extract_json_objects (self, serverId):
"""
Find the JSON encoded includedDevices and includedActions in server properties and return them as dictionaries.
"""
try:
server = indigo.devices[serverId]
includedDevices = []
includedActions = []
includedVariables = []
if "includedDevices" in server.pluginProps: includedDevices = json.loads(server.pluginProps["includedDevices"])
if "includedActions" in server.pluginProps: includedActions = json.loads(server.pluginProps["includedActions"])
if "includedVariables" in server.pluginProps: includedVariables = json.loads(server.pluginProps["includedVariables"])
except Exception as e:
self.logger.error (ex.stack_trace(e))
return includedDevices, includedActions, includedVariables
###
def extract_json_object (self, jkey, serverId, includedDevices, includedActions, includedVariables, excludeObject = False):
"""
Read through all included objects to find an item matching the provided jkey.
"""
try:
foundIn = "includedDevices"
rec = None
obj = None
for r in includedDevices:
if r["jkey"] == jkey:
rec = r
if not excludeObject: obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
break
if not obj:
for r in includedActions:
if r["jkey"] == jkey:
rec = r
if not excludeObject: obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
break
if not obj:
for r in includedVariables:
if r["jkey"] == jkey:
rec = r
if not excludeObject: obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
break
except Exception as e:
self.logger.error (ex.stack_trace(e))
return obj, rec
###
def json_reply_command_set_characteristic (self, query, devId, jkey, serverId):
"""
Process incoming API command 'setCharacteristic'.
"""
try:
obj = None
rec = None
characteristic = None
value = None
for param, val in query.iteritems():
if param == "serverId": continue
if param == "objId": continue
if param == "jkey": continue
if param == "cmd": continue
characteristic = param
value = val[0]
includedDevices, includedActions, includedVariables = self.legacy_extract_json_objects (serverId)
if jkey in self.factory.HKDEFINITIONS:
obj = self.factory.HKDEFINITIONS[jkey]
toss, rec = self.extract_json_object (jkey, serverId, includedDevices, includedActions, includedVariables, True)
else:
obj, rec = self.extract_json_object (jkey, serverId, includedDevices, includedActions, includedVariables)
if obj:
data = {}
for a in obj.actions:
if a.characteristic == characteristic:
result = a.run (value, obj.objId, obj, False)
if result:
# Result will be true if it passes and runs
payload = hkpldevice.HomebridgePayloadDevice (self.factory)
newvalue = calcs.convert_to_compared_datatype (value, a.whenvalue)
data = payload.legacy_populate_from_service (obj, rec, serverId)
if devId in indigo.actionGroups:
# Action groups don't do anything, just return that it is on and call back in 2 seconds to toggle off
data = payload.legacy_populate_from_service (obj, rec, serverId, characteristic, newvalue)
#thread.start_new_thread(self.factory.factory.PluginBase.timedCallbackToURL, (serverId, jkey, 2, rec))
thread.start_new_thread(self.factory.queue_homebridge_refresh, (jkey, 2))
if obj.recurringUpdate:
# Timer based device where we need real-time updates
#thread.start_new_thread(self.factory.factory.PluginBase.timedCallbackToURL, (serverId, jkey, obj.recurringSeconds, rec))
thread.start_new_thread(self.factory.queue_homebridge_refresh, (jkey, obj.recurringSeconds))
break
if not data:
# No action found, return the cached payload
data = self.factory.HKCACHE[jkey]
#indigo.server.log(json.dumps(data, indent=4))
#thread.start_new_thread(self.factory.factory.PluginBase.timedCallbackToURL, (serverId, rec["jkey"], 0))
return "text/css", json.dumps(data, indent=4)
return self.json_reply_error ("fail", "Object not found")
except Exception as e:
self.logger.error (ex.stack_trace(e))
return self.json_reply_error ("fail", "A fatal exception was encountered while processing your request, check the Indigo log for details")
###
def json_reply_command_devicelist (self, serverId):
"""
Process incoming API command 'deviceList'.
"""
try:
includedDevices, includedActions, includedVariables = self.legacy_extract_json_objects (serverId)
deviceList = []
for r in includedDevices:
if r["hktype"] == "service_CameraRTPStreamManagement": continue
#indigo.server.log (u"{} {}".format(r["alias"], r["hktype"]))
if r["jkey"] in self.factory.HKCACHE:
deviceList.append(self.factory.HKCACHE[r["jkey"]])
else:
if r["jkey"] in self.factory.HKDEFINITIONS:
obj = self.factory.HKDEFINITIONS[r["jkey"]]
else:
obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
payload = hkpldevice.HomebridgePayloadDevice (self.factory)
deviceList.append(payload.legacy_populate_from_service (obj, r, serverId))
for r in includedActions:
if r["hktype"] == "service_CameraRTPStreamManagement": continue
if r["jkey"] in self.factory.HKCACHE:
deviceList.append(self.factory.HKCACHE[r["jkey"]])
else:
if r["jkey"] in self.factory.HKDEFINITIONS:
obj = self.factory.HKDEFINITIONS[r["jkey"]]
else:
obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
payload = hkpldevice.HomebridgePayloadDevice (self.factory)
deviceList.append(payload.legacy_populate_from_service (obj, r, serverId))
#obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
#payload = hkpldevice.HomebridgePayloadDevice (self.factory)
#deviceList.append(payload.legacy_populate_from_service (obj, r, serverId))
for r in includedVariables:
if r["hktype"] == "service_CameraRTPStreamManagement": continue
if r["jkey"] in self.factory.HKCACHE:
deviceList.append(self.factory.HKCACHE[r["jkey"]])
else:
if r["jkey"] in self.factory.HKDEFINITIONS:
obj = self.factory.HKDEFINITIONS[r["jkey"]]
else:
obj = self.legacy_get_homekit_object (r["id"], serverId, r["hktype"])
payload = hkpldevice.HomebridgePayloadDevice (self.factory)
deviceList.append(payload.legacy_populate_from_service (obj, r, serverId))
return "text/css", json.dumps(deviceList, indent=4)
except Exception as e:
self.logger.error (ex.stack_trace(e))
return self.json_reply_error ("fail", "A fatal exception was encountered while processing your request, check the Indigo log for details")
###
def json_reply_command_getinfo (self, devId, jkey, serverId):
"""
Process incoming API command 'getInfo'.
"""
try:
if jkey in self.factory.HKCACHE:
return "text/css", json.dumps(self.factory.HKCACHE[jkey], indent=4)
else:
includedDevices, includedActions, includedVariables = self.legacy_extract_json_objects (serverId)
obj, rec = self.extract_json_object (jkey, serverId, includedDevices, includedActions, includedVariables)
if obj:
payload = hkpldevice.HomebridgePayloadDevice (self.factory)
data = payload.legacy_populate_from_service (obj, rec, serverId)
return "text/css", json.dumps(data, indent=4)
return self.json_reply_error ("fail", "Object not found")
except Exception as e:
self.logger.error (ex.stack_trace(e))
return self.json_reply_error ("fail", "A fatal exception was encountered while processing your request, check the Indigo log for details")
###
def json_reply_error (self, result, message):
"""
Return generic error message to HTTP engine.
"""
try:
msg = {}
msg["result"] = result
msg["message"] = message
return "text/css", json.dumps(msg, indent=4)
except Exception as e:
self.logger.error (ex.stack_trace(e))
msg = {}
msg["result"] = "fail"
msg["message"] = "A fatal exception was encountered while processing your request, check the Indigo log for details"
return "text/css", json.dumps(msg, indent=4)
``` |
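The processor above expects `query` in the `parse_qs` shape, i.e. every value wrapped in a list. A rough sketch of the glue an HTTP handler might use to feed it; the handler itself is an assumption, only the call into `process_incoming_api_call` comes from the class above:
```python
# Assumed glue code around HomebridgePayloadProcessor.process_incoming_api_call.
try:
    from urllib.parse import urlparse, parse_qs   # Python 3
except ImportError:
    from urlparse import urlparse, parse_qs       # Python 2 (Indigo plugins)

def handle_homekit_request(processor, request):
    # e.g. request.path == "/HomeKit?cmd=getInfo&serverId=12345&jkey=abc123"
    query = parse_qs(urlparse(request.path).query)
    content_type, body = processor.process_incoming_api_call(request, query)
    return content_type, body
```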
{
"source": "joshsny/google-calendar-simple-api",
"score": 2
} |
#### File: google-calendar-simple-api/gcsa/event.py
```python
from functools import total_ordering
from beautiful_date import BeautifulDate
from tzlocal import get_localzone
from datetime import datetime, date, timedelta
from .attachment import Attachment
from .attendee import Attendee
from .reminders import PopupReminder, EmailReminder
from .util.date_time_util import insure_localisation
class Visibility:
""" Possible values of the event visibility.
* DEFAULT - Uses the default visibility for events on the calendar. This is the default value.
* PUBLIC - The event is public and event details are visible to all readers of the calendar.
* PRIVATE - The event is private and only event attendees may view event details.
"""
DEFAULT = "default"
PUBLIC = "public"
PRIVATE = "private"
@total_ordering
class Event:
def __init__(self,
summary,
start,
end=None,
*,
timezone=str(get_localzone()),
event_id=None,
description=None,
location=None,
recurrence=None,
color=None,
visibility=Visibility.DEFAULT,
attendees=None,
attachments=None,
conference_solution=None,
reminders=None,
default_reminders=False,
minutes_before_popup_reminder=None,
minutes_before_email_reminder=None,
guests_can_invite_others=True,
guests_can_modify=False,
guests_can_see_other_guests=True,
_created=None,
_updated=None,
_creator=None,
_recurring_event_id=None,
**other):
"""
:param summary:
Title of the event.
:param start:
Starting date/datetime.
:param end:
Ending date/datetime. If 'end' is not specified, event is considered as a 1-day or 1-hour event
if 'start' is date or datetime respectively.
:param timezone:
Timezone formatted as an IANA Time Zone Database name, e.g. "Europe/Zurich". By default,
the computer's local timezone is used if it is configured. UTC is used otherwise.
:param event_id:
Opaque identifier of the event. By default is generated by the server. You can specify id as a
5-1024 long string of characters used in base32hex ([a-vA-V0-9]). The ID must be unique per
calendar.
:param description:
Description of the event. Can contain HTML.
:param location:
Geographic location of the event as free-form text.
:param recurrence:
RRULE/RDATE/EXRULE/EXDATE string or list of such strings. See :py:mod:`~gcsa.recurrence`
:param color:
Color id referring to an entry from colors endpoint (list_event_colors)
:param visibility:
Visibility of the event. Default is default visibility for events on the calendar.
See :py:class:`~gcsa.event.Visibility`
:param attendees:
Attendee or list of attendees. See :py:class:`~gcsa.attendee.Attendee`.
Each attendee may be given as email string or :py:class:`~gcsa.attendee.Attendee` object.
:param attachments:
Attachment or list of attachments. See :py:class:`~gcsa.attachment.Attachment`
:param conference_solution:
:py:class:`~gcsa.conference.ConferenceSolutionCreateRequest` object to create a new conference
or :py:class:`~gcsa.conference.ConferenceSolution` object for existing conference.
:param reminders:
Reminder or list of reminder objects. See :py:mod:`~gcsa.reminders`
:param default_reminders:
Whether the default reminders of the calendar apply to the event.
:param minutes_before_popup_reminder:
Minutes before popup reminder or None if reminder is not needed.
:param minutes_before_email_reminder:
Minutes before email reminder or None if reminder is not needed.
:param guests_can_invite_others:
Whether attendees other than the organizer can invite others to the event.
:param guests_can_modify:
Whether attendees other than the organizer can modify the event.
:param guests_can_see_other_guests:
Whether attendees other than the organizer can see who the event's attendees are.
:param _created:
Creation time of the event. Read-only.
:param _updated:
Last modification time of the event. Read-only.
:param _recurring_event_id:
For an instance of a recurring event, this is the id of the recurring event to which
this instance belongs. Read-only.
:param other:
Other fields that should be included in request json. Will be included as they are.
"""
def assure_list(obj):
return [] if obj is None else obj if isinstance(obj, list) else [obj]
self.timezone = timezone
self.start = start
if end:
self.end = end
elif isinstance(start, datetime):
self.end = start + timedelta(hours=1)
elif isinstance(start, date):
self.end = start + timedelta(days=1)
if isinstance(self.start, datetime) and isinstance(self.end, datetime):
self.start = insure_localisation(self.start, timezone)
self.end = insure_localisation(self.end, timezone)
elif isinstance(self.start, datetime) or isinstance(self.end, datetime):
raise TypeError('Start and end must either both be date or both be datetime.')
def insure_date(d):
"""Converts d to date if it is of type BeautifulDate."""
if isinstance(d, BeautifulDate):
return date(year=d.year, month=d.month, day=d.day)
else:
return d
self.start = insure_date(self.start)
self.end = insure_date(self.end)
self.created = _created
self.updated = _updated
attendees = [self._ensure_attendee_from_email(a) for a in assure_list(attendees)]
reminders = assure_list(reminders)
if len(reminders) > 5:
raise ValueError('The maximum number of override reminders is 5.')
if default_reminders and reminders:
raise ValueError('Cannot specify both default reminders and overrides at the same time.')
self.event_id = event_id
self.summary = summary
self.description = description
self.location = location
self.recurrence = assure_list(recurrence)
self.color_id = color
self.visibility = visibility
self.attendees = attendees
self.attachments = assure_list(attachments)
self.conference_solution = conference_solution
self.reminders = reminders
self.default_reminders = default_reminders
self.recurring_event_id = _recurring_event_id
self.guests_can_invite_others = guests_can_invite_others
self.guests_can_modify = guests_can_modify
self.guests_can_see_other_guests = guests_can_see_other_guests
self.other = other
if minutes_before_popup_reminder is not None:
self.add_popup_reminder(minutes_before_popup_reminder)
if minutes_before_email_reminder is not None:
self.add_email_reminder(minutes_before_email_reminder)
@property
def id(self):
return self.event_id
def add_attendee(self, attendee):
"""Adds attendee to an event. See :py:class:`~gcsa.attendee.Attendee`.
Attendee may be given as email string or :py:class:`~gcsa.attendee.Attendee` object."""
self.attendees.append(self._ensure_attendee_from_email(attendee))
def add_attachment(self, file_url, title=None, mime_type=None):
"""Adds attachment to an event. See :py:class:`~gcsa.attachment.Attachment`"""
self.attachments.append(Attachment(file_url=file_url, title=title, mime_type=mime_type))
def add_email_reminder(self, minutes_before_start=60):
"""Adds email reminder to an event. See :py:class:`~gcsa.reminders.EmailReminder`"""
self.add_reminder(EmailReminder(minutes_before_start))
def add_popup_reminder(self, minutes_before_start=30):
"""Adds popup reminder to an event. See :py:class:`~gcsa.reminders.PopupReminder`"""
self.add_reminder(PopupReminder(minutes_before_start))
def add_reminder(self, reminder):
"""Adds reminder to an event. See :py:mod:`~gcsa.reminders`"""
if len(self.reminders) > 4:
raise ValueError('The maximum number of override reminders is 5.')
self.reminders.append(reminder)
@staticmethod
def _ensure_attendee_from_email(attendee_or_email):
"""If attendee_or_email is email string, returns created :py:class:`~gcsa.attendee.Attendee`
object with the given email."""
if isinstance(attendee_or_email, str):
return Attendee(email=attendee_or_email)
else:
return attendee_or_email
@property
def is_recurring_instance(self):
return self.recurring_event_id is not None
def __str__(self):
return '{} - {}'.format(self.start, self.summary)
def __repr__(self):
return '<Event {}>'.format(self.__str__())
def __lt__(self, other):
def insure_datetime(d, timezone):
if type(d) == date:
return insure_localisation(datetime(year=d.year, month=d.month, day=d.day), timezone)
else:
return d
start = insure_datetime(self.start, self.timezone)
end = insure_datetime(self.end, self.timezone)
other_start = insure_datetime(other.start, other.timezone)
other_end = insure_datetime(other.end, other.timezone)
return (start, end) < (other_start, other_end)
def __eq__(self, other):
return (
isinstance(other, Event)
and self.start == other.start
and self.end == other.end
and self.event_id == other.event_id
and self.summary == other.summary
and self.description == other.description
and self.location == other.location
and self.recurrence == other.recurrence
and self.color_id == other.color_id
and self.visibility == other.visibility
and self.attendees == other.attendees
and self.attachments == other.attachments
and self.reminders == other.reminders
and self.default_reminders == other.default_reminders
and self.created == other.created
and self.updated == other.updated
and self.recurring_event_id == other.recurring_event_id
and self.guests_can_invite_others == other.guests_can_invite_others
and self.guests_can_modify == other.guests_can_modify
and self.guests_can_see_other_guests == other.guests_can_see_other_guests
and self.other == other.other
)
``` |
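A short usage sketch for the class above; the date, location, and e-mail address are placeholders:
```python
from datetime import datetime

# Illustrative only: a one-hour event with an attendee and two reminders.
event = Event(
    'Team sync',
    start=datetime(2021, 6, 1, 14, 0),    # end defaults to start + 1 hour
    location='Room 4',
    attendees=['colleague@example.com'],  # bare e-mails become Attendee objects
    minutes_before_popup_reminder=15,
)
event.add_email_reminder(60)

print(event)                    # "<localized start datetime> - Team sync"
print(event.end - event.start)  # 1:00:00
```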
{
"source": "Joshsora/kipy",
"score": 2
} |
#### File: Joshsora/kipy/setup.py
```python
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, src_dir=''):
Extension.__init__(self, name, sources=[])
self.src_dir = os.path.abspath(src_dir)
class CMakeBuild(build_ext):
def run(self):
cmake_extensions = [ext for ext in self.extensions if isinstance(ext, CMakeExtension)]
if cmake_extensions:
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
'CMake must be installed to build the following extensions: ' +
', '.join(ext.name for ext in cmake_extensions))
if platform.system() == 'Windows':
cmake_version = LooseVersion(
re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError('CMake >= 3.1.0 is required on Windows')
build_ext.run(self)
def build_extensions(self):
for ext in self.extensions:
if isinstance(ext, CMakeExtension):
self.build_extension(ext)
else:
build_ext.build_extension(self, ext)
def build_extension(self, ext):
ext_dir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + ext_dir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == 'Windows':
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_%s=%s' % (cfg.upper(), ext_dir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '%s -DVERSION_INFO="%s"' % (
env.get('CXXFLAGS', ''), self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.src_dir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
print() # Add an empty line for cleaner output.
about = {}
cwd = os.path.abspath(os.path.dirname(__file__))
version_path = os.path.join(cwd, 'ki', '__version__.py')
with open(version_path, 'r', encoding='utf-8') as f:
exec(f.read(), about)
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
setup_requires = ['pytest-runner']
install_requires = ['ruamel.yaml>=0.15.35']
tests_require = ['pytest>=3.0.0']
if platform.system() != 'Windows':
install_requires.append('uvloop>=0.9.1')
setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
description=about['__description__'],
long_description=long_description,
packages=find_packages(),
ext_modules=[
CMakeExtension('ki.dml'),
CMakeExtension('ki.protocol')
],
cmdclass={
'build_ext': CMakeBuild
},
zip_safe=False,
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require
)
``` |
{
"source": "joshspeagle/brutus",
"score": 2
} |
#### File: brutus/brutus/los.py
```python
from __future__ import (print_function, division)
import warnings
import numpy as np
from scipy.stats import truncnorm
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
__all__ = ["LOS_clouds_priortransform", "LOS_clouds_loglike_samples",
"kernel_tophat", "kernel_gauss", "kernel_lorentz"]
def LOS_clouds_priortransform(u, rlims=(0., 6.), dlims=(4., 19.),
pb_params=(-3., 0.7, -np.inf, 0.),
s_params=(-3., 0.3, -np.inf, 0.),
dust_template=False, nlims=(0.2, 2)):
"""
The "prior transform" for the LOS fit that converts from draws on the
N-dimensional unit cube to samples from the prior. Used in nested sampling
methods. Assumes uniform priors for distance and reddening
and a (truncated) log-normal in outlier fraction.
Parameters
----------
u : `~numpy.ndarray` of shape `(Nparams)`
The `Nparams` values drawn from the unit cube.
Contains the portion of outliers `P_b`, followed by the
foreground smoothing `sfore` and background smoothing `sback`,
followed by the foreground reddening `fred`, followed by a series of
`(dist, red)` pairs for each "cloud" along the LOS.
rlims : 2-tuple, optional
The reddening bounds within which we'd like to sample. Default is
`(0., 6.)`, which also assumes reddening is in units of Av.
dlims : 2-tuple, optional
The distance bounds within which we'd like to sample. Default is
`(4., 19.)`, which also assumes distance is in units of distance
modulus.
pb_params : 4-tuple, optional
Mean, standard deviation, lower bound, and upper bound for a
truncated log-normal distribution used as a prior for the outlier
model. The default is `(-3., 0.7, -np.inf, 0.)`, which corresponds
to a mean of 0.05, a standard deviation of a factor of 2, a lower
bound of 0, and an upper bound of 1.
s_params : 4-tuple, optional
Mean, standard deviation, lower bound, and upper bound for a
truncated log-normal distribution used as a prior for the
smoothing along the reddening axis (in %). The default is
`(-3., 0.3, -np.inf, 0.)`, which corresponds to a mean of 0.05, a
standard deviation of a factor of 1.35, a lower bound of 0, and an
upper bound of 1.
dust_template : bool, optional
Whether or not to use a spatial distribution for the dust based on
a particular template. If true, dust along the line of sight
will be in terms of rescalings of the template rather than
Av. Default is `False`.
nlims : 2-tuple, optional
Lower and upper bounds for the uniform prior for the rescaling
applied to the Planck spatial reddening template.
Default is `(0.2, 2.)`.
Returns
-------
x : `~numpy.ndarray` of shape `(Nparams)`
The transformed parameters.
"""
# Initialize values.
x = np.array(u)
# pb (outlier fraction)
pb_mean, pb_std, pb_low, pb_high = pb_params
a = (pb_low - pb_mean) / pb_std # set normalized lower bound
b = (pb_high - pb_mean) / pb_std # set normalized upper bound
x[0] = np.exp(truncnorm.ppf(u[0], a, b, loc=pb_mean, scale=pb_std))
# s (fractional smoothing)
ns = 2 # 2 parameters for foreground + background smoothing
s_mean, s_std, s_low, s_high = s_params
a = (s_low - s_mean) / s_std # set normalized lower bound
b = (s_high - s_mean) / s_std # set normalized upper bound
x[1] = np.exp(truncnorm.ppf(u[1], a, b, loc=s_mean, scale=s_std))
x[2] = np.exp(truncnorm.ppf(u[2], a, b, loc=s_mean, scale=s_std))
# distances
x[ns + 2::2] = np.sort(u[ns + 2::2]) * (dlims[1] - dlims[0]) + dlims[0]
# foreground reddening
x[ns + 1] = u[ns + 1] * (rlims[1] - rlims[0]) + rlims[0]
# cloud reddenings
dsort = np.argsort(u[ns + 2::2]) # sort distances
x[ns + 3::2] = (u[ns + 3::2][dsort]) * (rlims[1] - rlims[0]) + rlims[0]
if dust_template:
# replace with rescalings for the template
x[ns + 3::2] = u[ns + 3::2][dsort] * (nlims[1] - nlims[0]) + nlims[0]
return x
def LOS_clouds_loglike_samples(theta, dsamps, rsamps, kernel='gauss',
rlims=(0., 6.), template_reds=None,
Ndraws=25, additive_foreground=False,
monotonic=True):
"""
Compute the log-likelihood for the cumulative reddening along the
line of sight (LOS) parameterized by `theta`, given a set of input
reddening and distance draws. Assumes a uniform outlier model in distance
and reddening across our binned posteriors.
Parameters
----------
theta : `~numpy.ndarray` of shape `(Nparams,)`
A collection of parameters that characterizes the cumulative
reddening along the LOS. Contains the fraction of outliers `P_b`
followed by the fractional reddening smoothing for the foreground `s0`
and background `s` followed by the foreground reddening `fred`
followed by a series of `(dist, red)` pairs for each
"cloud" along the LOS.
dsamps : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
Distance samples for each object. Follows the units used in `theta`.
rsamps : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
Reddening samples for each object. Follows the units in `theta`.
kernel : str or function, optional
The kernel used to weight the samples along the LOS. If a string is
passed, a pre-specified kernel will be used. Options include
`'lorentz'`, `'gauss'`, and `'tophat'`. Default is `'gauss'`.
rlims : 2-tuple, optional
The reddening bounds within which we'd like to sample. Default is
`(0., 6.)`, which also assumes reddening is in units of Av.
template_reds : `~numpy.ndarray` of shape `(Nobj)`, optional
Reddenings for each star based on a spatial dust template.
If not provided, the same reddening value in a given distance
bin will be fit to all stars. If provided, a rescaled version of the
individual reddenings will be fit instead.
Ndraws : int, optional
The number of draws to use for each star. Default is `25`.
additive_foreground : bool, optional
Whether the foreground is treated as just another value or added
to all background values. Default is `False`.
monotonic : bool, optional
Whether to enforce monotonicity in the fits so that the values
must get larger with distance. Default is `True`.
Returns
-------
loglike : float
The computed log-likelihood.
"""
# Check kernel
KERNELS = {'tophat': kernel_tophat, 'gauss': kernel_gauss,
'lorentz': kernel_lorentz}
if kernel in KERNELS:
kern = KERNELS[kernel]
elif callable(kernel):
kern = kernel
else:
raise ValueError("The kernel provided is not a valid function nor "
"one of the pre-defined options. Please provide a "
"valid kernel.")
# Grab parameters.
pb, s0, s = theta[0], theta[1], theta[2]
reds, dists = np.atleast_1d(theta[3::2]), np.atleast_1d(theta[4::2])
area = (rlims[1] - rlims[0])
rsmooth = s * area
rsmooth0 = s0 * area
# Check monotonicity.
if not np.all(np.sort(dists) == dists):
raise ValueError("Distances must be monotonically increasing.")
if monotonic:
if not np.all(np.sort(reds) == reds):
# If monotonicity is enforced, non-monotonic solutions disallowed.
return -np.inf
# Define cloud edges ("distance bounds").
xedges = np.concatenate(([0], dists, [1e10]))
# Sub-sample distance and reddening samples.
ds, rs = dsamps[:, :Ndraws], rsamps[:, :Ndraws]
Nobj, Nsamps = ds.shape
# Reshape sigmas to match samples.
rsmooth, rsmooth0 = np.full_like(rs, rsmooth), np.full_like(rs, rsmooth0)
# Get reddenings to each star in each distance slice (kernel mean).
reds = np.array([np.full_like(rs, r) for r in reds])
# Adjust reddenings after the foreground if a spatial template is used.
if template_reds is not None:
reds[1:] *= template_reds[None, :, None] # reds[1:] are rescalings
# Adjust reddenings after the foreground if needed.
if additive_foreground:
reds[1:] += reds[0] # add foreground to background
# Define kernel parameters (mean, sigma) per LOS chunk.
kparams = np.array([(r, rsmooth) for r in reds])
kparams[0][1] = rsmooth0
# Compute log-weights for samples along the LOS by evaluating reddening
# samples within each segment against the associated centered kernel.
with warnings.catch_warnings():
warnings.simplefilter("ignore") # ignore bad values
logw = np.array([kern(rs, kp) + np.log((ds >= xl) & (ds < xh))
for xl, xh, kp in zip(xedges[:-1], xedges[1:],
kparams)])
# Compute log-likelihoods across all samples and clouds.
logls = logsumexp(logw, axis=(0, 2)) - np.log(Nsamps)
# Add in outlier mixture model.
logls = logsumexp(a=np.c_[logls, np.full_like(logls, -np.log(area))],
b=[(1. - pb), pb], axis=1)
# Compute total log-likelihood.
loglike = np.sum(logls)
return loglike
def kernel_tophat(reds, kp):
"""
Compute a weighted sum of the provided reddening draws using a Top-Hat
kernel.
Parameters
----------
reds : `~numpy.ndarray` of shape `(Nsamps)`
Reddening samples for each object.
kp : 2-tuple
The kernel parameters `(mean, half-bin-width)`.
Returns
-------
logw : `~numpy.ndarray` of shape `(Nsamps)`
Log(weights).
"""
# Extract kernel parameters.
kmean, kwidth = kp[0], kp[1]
klow, khigh = kmean - kwidth, kmean + kwidth # tophat low/high edges
norm = 2. * kwidth
# Compute weights.
inbounds = (reds >= klow) & (reds < khigh)
# Compute log-sum.
logw = np.log(inbounds) - np.log(norm)
return logw
def kernel_gauss(reds, kp):
"""
Compute a weighted sum of the provided reddening draws using a Gaussian
kernel.
Parameters
----------
reds : `~numpy.ndarray` of shape `(Nsamps)`
Reddening samples for each object.
kp : 2-tuple
The kernel parameters `(mean, standard deviation)`.
Returns
-------
logw : `~numpy.ndarray` of shape `(Nsamps)`
Log(weights).
"""
# Extract kernel parameters.
kmean, kstd = kp[0], kp[1]
norm = np.sqrt(2 * np.pi) * kstd
# Compute log-weights.
logw = -0.5 * ((reds - kmean) / kstd)**2 - np.log(norm)
return logw
def kernel_lorentz(reds, kp):
"""
Compute a weighted sum of the provided reddening draws using a Lorentzian
kernel.
Parameters
----------
reds : `~numpy.ndarray` of shape `(Nsamps)`
Reddening samples for each object.
kp : 2-tuple
The kernel parameters `(mean, HWHM)`.
Returns
-------
logw : `~numpy.ndarray` of shape `(Nsamps)`
Log(weights).
"""
# Extract kernel parameters.
kmean, khwhm = kp[0], kp[1]
norm = np.pi * khwhm
# Compute log-weights.
logw = -np.log(1. + ((reds - kmean) / khwhm)**2) - np.log(norm)
return logw
``` |
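A quick sketch of exercising the two public functions above with fake posterior samples; the array shapes follow the docstrings and the numbers are arbitrary:
```python
import numpy as np

rng = np.random.RandomState(42)
Nobj, Nsamps = 50, 100

# Fake per-star distance-modulus and reddening posterior samples.
dsamps = rng.uniform(4., 19., size=(Nobj, Nsamps))
rsamps = rng.uniform(0., 3., size=(Nobj, Nsamps))

# Single-cloud model: parameters are (P_b, s_fore, s_back, fore_red, dist_1, red_1).
theta = LOS_clouds_priortransform(rng.uniform(size=6))

# Non-monotonic draws (cloud reddening below the foreground) return -inf
# when monotonic=True, which a nested sampler simply rejects.
logl = LOS_clouds_loglike_samples(theta, dsamps, rsamps, kernel='gauss')
```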
{
"source": "joshspicer/pvscc-logo",
"score": 3
} |
#### File: pvscc-logo/logo_app/views.py
```python
import pathlib
from flask import Flask, render_template
from . import generate_logo
from . import app
@app.route("/")
def index():
url = "https://code.visualstudio.com/docs/python/python-tutorial"
mask_path = (
pathlib.Path(__file__).parent / "static" / "images" / "python-colored-mask.png"
)
output = generate_logo.generate_fig(url, mask_path)
return render_template("index.html", image=output)
```
#### File: joshspicer/pvscc-logo/tests.py
```python
import pytest
import unittest
from logo_app import generate_logo
import io
import pathlib
class TestFigure(unittest.TestCase):
def test_generate_figure(self):
url = "https://code.visualstudio.com/docs/python/python-tutorial"
mask_path = (
pathlib.Path(__file__).parent / "logo_app" / "static" / "images" / "python-colored-mask.png"
)
# this tests fails on purpose. To fix it, replace io.BytesIO with str in the line below
self.assertTrue(isinstance(generate_logo.generate_fig(url,mask_path), io.BytesIO))
``` |
{
"source": "joshspicer/vscode-remote-try-python",
"score": 2
} |
#### File: joshspicer/vscode-remote-try-python/app.py
```python
from flask import Flask
import pandas as pd
app = Flask(__name__)
@app.route("/")
def hello():
data = {}
arr = pd.DataFrame(data)
return app.send_static_file("index.html")
``` |
{
"source": "JoshStegmaier/kabocha",
"score": 2
} |
#### File: kabocha/errors/base.py
```python
import json
from django.http import JsonResponse
from django.core.serializers.json import DjangoJSONEncoder
DEFAULT_ERROR_CODE = "no_code"
DEFAULT_DEVELOPER_MESSAGE = "No error message available."
DEFAULT_USER_MESSAGE = "An error has occurred."
DEFAULT_MORE_INFO = None
DEFAULT_STATUS_CODE = 400
DEFAULT_ADDITIONAL_ERRORS = None
class BaseJsonError(JsonResponse):
def __init__(self, error_code=DEFAULT_ERROR_CODE, developer_message=DEFAULT_DEVELOPER_MESSAGE, user_message=DEFAULT_USER_MESSAGE, more_info=DEFAULT_MORE_INFO, status_code=DEFAULT_STATUS_CODE, additional_errors=DEFAULT_ADDITIONAL_ERRORS, encoder=DjangoJSONEncoder, **kwargs):
data = {
"error_code" : error_code,
"developer_message" : developer_message,
"user_message" : user_message,
"more_info" : more_info,
"status_code" : status_code,
"additional_errors" : additional_errors,
}
self.encoder = encoder
self.data = data
super(BaseJsonError, self).__init__(data, status=status_code, encoder=encoder, **kwargs)
def update_content(self):
self.content = json.dumps(self.data, cls=self.encoder)
def error_code():
doc = "Short error code that uniquely identifies the error. Defaults to '%s'" % DEFAULT_ERROR_CODE
def fget(self):
return self.data['error_code']
def fset(self, value):
self.data['error_code'] = value
self.update_content()
def fdel(self):
self.data['error_code'] = DEFAULT_ERROR_CODE
self.update_content()
return locals()
error_code = property(**error_code())
def developer_message():
doc = "Verbose description of the error for the developer's benefit. Defaults to '%s'" % DEFAULT_DEVELOPER_MESSAGE
def fget(self):
return self.data['developer_message']
def fset(self, value):
self.data['developer_message'] = value
self.update_content()
def fdel(self):
self.data['developer_message'] = DEFAULT_DEVELOPER_MESSAGE
self.update_content()
return locals()
developer_message = property(**developer_message())
def user_message():
doc = "Verbose error message that can be passed on to the user if not otherwise handled. Defaults to '%s'" % DEFAULT_USER_MESSAGE
def fget(self):
return self.data['user_message']
def fset(self, value):
self.data['user_message'] = value
self.update_content()
def fdel(self):
self.data['user_message'] = DEFAULT_USER_MESSAGE
self.update_content()
return locals()
user_message = property(**user_message())
def more_info():
doc = "URL to a document that provides more information about the error. Defaults to '%s'" % DEFAULT_MORE_INFO
def fget(self):
return self.data['more_info']
def fset(self, value):
self.data['more_info'] = value
self.update_content()
def fdel(self):
self.data['more_info'] = DEFAULT_MORE_INFO
self.update_content()
return locals()
more_info = property(**more_info())
def status_code():
doc = "HTTP status code of the response. Defaults to '%s'" % DEFAULT_STATUS_CODE
def fget(self):
return self.data['status_code']
def fset(self, value):
self.data['status_code'] = value
self.status = value
self.update_content()
def fdel(self):
self.data['status_code'] = DEFAULT_STATUS_CODE
del self.status
self.update_content()
return locals()
status_code = property(**status_code())
def additional_errors():
doc = "Additional error information, to hold other information provided by the system that may not be handled otherwise. Defaults to '%s'" % DEFAULT_ADDITIONAL_ERRORS
def fget(self):
return self.data['additional_errors']
def fset(self, value):
self.data['additional_errors'] = value
self.update_content()
def fdel(self):
self.data['additional_errors'] = DEFAULT_ADDITIONAL_ERRORS
self.update_content()
return locals()
additional_errors = property(**additional_errors())
class JsonError(BaseJsonError):
DEFAULTS = {}
def __init__(self, **kwargs):
new_kwargs = self.DEFAULTS.copy()
new_kwargs.update(kwargs)
super(JsonError, self).__init__(**new_kwargs)
``` |
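A small sketch of specializing `JsonError` through the `DEFAULTS` hook; the error code, messages, URL, and `lookup` helper below are invented for illustration:
```python
from django.http import JsonResponse

# Illustrative subclass: per-instance kwargs still override these defaults.
class NotFoundJsonError(JsonError):
    DEFAULTS = {
        'error_code': 'not_found',
        'developer_message': 'The requested object does not exist.',
        'user_message': 'We could not find what you were looking for.',
        'status_code': 404,
    }

def object_detail(request, pk):
    obj = lookup(pk)  # hypothetical helper
    if obj is None:
        response = NotFoundJsonError()
        # Property setters re-serialize the JSON body automatically.
        response.more_info = 'https://example.com/docs/errors#not_found'
        return response
    return JsonResponse({'id': obj.pk})
```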
{
"source": "joshsteiner/szachy-si",
"score": 4
} |
#### File: joshsteiner/szachy-si/xo.py
```python
import generic_mcts
from random import choice, seed
def opposite_player(p):
if p == 'x':
return 'o'
else:
return 'x'
class XoStatus(generic_mcts.Status):
DRAW = 1
X_WIN = 2
O_WIN = 3
class XoMove(generic_mcts.Move):
def __init__(self, r, c, player):
self.r = r
self.c = c
self.player = player
def __eq__(self, other):
return (
self.r == other.r and
self.c == other.c and
self.player == other.player
)
class XoGameState(generic_mcts.GameState):
def __init__(self, board, player):
self.board = board
self.player = player
def copy(self):
board = [r[:] for r in self.board]
return XoGameState(board, self.player)
class XoGame(generic_mcts.Game):
@staticmethod
def show(game_state):
print(" ", end="")
for c in range(3):
print(chr(ord('a') + c), end=" ")
print()
for row_number, row in enumerate(game_state.board[::-1]):
print(3 - row_number, end=" ")
for player in row:
print(player, end=" ")
print()
@staticmethod
def status(game_state):
L = [
[(0, 0), (0, 1), (0, 2)],
[(1, 0), (1, 1), (1, 2)],
[(2, 0), (2, 1), (2, 2)],
[(0, 0), (1, 0), (2, 0)],
[(0, 1), (1, 1), (2, 1)],
[(0, 2), (1, 2), (2, 2)],
[(0, 0), (1, 1), (2, 2)],
[(0, 2), (1, 1), (2, 0)],
]
for l in L:
s = ''.join(game_state.board[r][c] for (r, c) in l)
if s == 'xxx':
return XoStatus.X_WIN
elif s == 'ooo':
return XoStatus.O_WIN
if len(XoGame.possible_moves(game_state)) == 0:
return XoStatus.DRAW
else:
return XoStatus.IN_PROGRESS
@staticmethod
def score(result, game_state):
assert result != XoStatus.IN_PROGRESS
if result == XoStatus.DRAW:
return 0.5
elif (result == XoStatus.X_WIN and
game_state.player == 'x'):
return 1
elif (result == XoStatus.O_WIN and
game_state.player == 'o'):
return 1
else:
return 0
@staticmethod
def apply_move(move, game_state):
game_state.board[move.r][move.c] = move.player
game_state.player = move.player
@staticmethod
def undo_move(move, game_state):
game_state.board[move.r][move.c] = ' '
game_state.player = opposite_player(move.player)
@staticmethod
def possible_moves(game_state):
moves = [
XoMove(r, c, opposite_player(game_state.player))
for r in range(3)
for c in range(3)
if game_state.board[r][c] == ' '
]
return moves
@staticmethod
def initial_state():
board = [
[' ', ' ', ' '],
[' ', ' ', ' '],
[' ', ' ', ' '],
]
return XoGameState(board, 'o')
@staticmethod
def parse_move(move_str):
c = ord(move_str[0]) - ord('a')
r = ord(move_str[1]) - ord('1')
return XoMove(r, c, None)
class XoUniformRandomPlayoutPolicy:
def playout(self, node):
game_state = node.game_state.copy()
st = XoGame.status(node.game_state)
if ((st == XoStatus.X_WIN and game_state.player == 'o')
or (st == XoStatus.O_WIN and game_state.player == 'x')):
node.win_count = -100
return
while XoGame.status(game_state) == XoStatus.IN_PROGRESS:
move = choice(XoGame.possible_moves(game_state))
XoGame.apply_move(move, game_state)
return XoGame.status(game_state)
if __name__ == '__main__':
seed()
game = XoGame
mct = generic_mcts.McTree(
game,
select_policy=generic_mcts.UctSelectPolicy(),
playout_policy=XoUniformRandomPlayoutPolicy(),
number_of_playouts=1000,
)
while True:
game.show(mct.root.game_state)
move = game.parse_move(input(": "))
move.player = opposite_player(mct.root.game_state.player)
mct.apply_move(move)
game.show(mct.root.game_state)
print("thinking...")
ai_move = mct.choose_best_move()
mct.apply_move(ai_move)
``` |
{
"source": "joshStillerman/hdf_data",
"score": 3
} |
#### File: hdf_data/python/display_hdf.py
```python
import h5py
import sys
def display_hdf(file_name) :
"""Prints the HDF5 file structure"""
file = h5py.File(file_name, 'r') # open read-only
item = file #["/Configure:0000/Run:0000"]
print_hdf5_item_attributes(item)
print_hdf5_item_structure(item)
file.close()
def print_hdf5_item_attributes(item, offset=''):
for attr in item.attrs.items():
print offset+str(attr)
def print_hdf5_item_structure(g, offset=' ') :
"""Prints the input file/group/dataset (g) name and begin iterations on its content"""
if isinstance(g,h5py.File) :
print g.file, '(File)', g.name
elif isinstance(g,h5py.Dataset) :
print '(Dataset)', g.name, ' len =', g.shape #, g.dtype
elif isinstance(g,h5py.Group) :
print '(Group)', g.name
else :
print 'WARNING: UNKNOWN ITEM IN HDF5 FILE', g.name
sys.exit ( "EXECUTION IS TERMINATED" )
print_hdf5_item_attributes(g, offset+' ')
if isinstance(g, h5py.File) or isinstance(g, h5py.Group) :
for key,val in dict(g).iteritems() :
subg = val
print offset, key, #," ", subg.name #, val, subg.len(), type(subg),
print_hdf5_item_structure(subg, offset + ' ')
if __name__ == "__main__" :
filename = sys.argv[1]
display_hdf(filename)
``` |
{
"source": "joshswe/simpleArticleManagementSystem",
"score": 3
} |
#### File: joshswe/simpleArticleManagementSystem/app.py
```python
from flask import Flask, render_template, flash, redirect, url_for, session, logging, request
#from data import Articles
from flask_mysqldb import MySQL
from passlib.hash import sha256_crypt
from functools import wraps
from formclass import RegisterForm, ArticleForm
#Create an instance of the flask class
app = Flask(__name__)
# Config MySQL
# Newer versions of Ubuntu (≥16.04): Removing the line bind-address 127.0.0.1 in /etc/mysql/mysql.conf.d/mysqld.cnf.
app.config['MYSQL_HOST']=''
app.config['MYSQL_USER']=''
app.config['MYSQL_PASSWORD']=''
app.config['MYSQL_DB']=''
app.config['MYSQL_CURSORCLASS']='DictCursor'
app.config['MYSQL_PORT'] = 3306
# Initialize MySQL
mysql = MySQL(app)
# Index
@app.route('/')
def index():
return render_template('home.html')
# About
@app.route('/about')
def about():
return render_template('about.html')
# Articles
@app.route('/articles')
def articles():
# Create cursor
cur = mysql.connection.cursor()
# Get articles
result = cur.execute("SELECT * FROM articles")
articles = cur.fetchall() # This will be fetched in dictionary form
#Close connection
cur.close()
if result > 0:
return render_template('articles.html',articles=articles)
else:
msg = 'No Articles Found'
return render_template('articles.html', msg=msg)
#Single Article
@app.route('/article/<string:id>/')
def article(id):
# Create cursor
cur = mysql.connection.cursor()
# Get article
result = cur.execute("SELECT * FROM articles WHERE id = %s",[id])
article = cur.fetchone()
return render_template('article.html', article=article)
# User Register
@app.route('/register',methods=['GET','POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
name = form.name.data
email = form.email.data
username = form.username.data
password = sha256_crypt.encrypt(str(form.password.data))
# Create cursor
cur = mysql.connection.cursor()
# Execute Query
cur.execute("INSERT INTO users(name,email,username,password) VALUES (%s,%s,%s,%s)",(name,email,username,password))
# Commit to DB
mysql.connection.commit()
# Close connection
cur.close()
flash('You are now registered and can log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', form=form)
# User login
@app.route('/login',methods=['GET','POST'])
def login():
if request.method == 'POST':
# Get Form Fields
username = request.form['username']
password_candidate = request.form['password']
# Create Cursor
cur = mysql.connection.cursor()
# Get user by username
result = cur.execute("SELECT * FROM users WHERE username = %s",[username])
app.logger.info(result)
if result > 0:
# Get stored hash
data = cur.fetchone()
app.logger.info(data)
password = data['password']
# Compare passwords
if sha256_crypt.verify(password_candidate,password):
# Valid Login Credentials
session['logged_in'] = True
session['username'] = username
flash('You are now logged in','success')
return redirect(url_for('dashboard'))
else:
msgInvalidLogin = 'Invalid Login'
                return render_template('login.html',error=msgInvalidLogin) # Error message can be found in includes/_messages.html
# Close connection
cur.close()
else:
msgInvalidUser = 'Username Not Found'
return render_template('login.html',error=msgInvalidUser)
return render_template('login.html')
# Check if user logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args,**kwargs):
if 'logged_in' in session:
return f(*args,**kwargs)
else:
flash('Unauthorized, Please login','danger')
return redirect(url_for('login'))
return wrap
# Logout
@app.route('/logout')
def logout():
session.clear()
flash('You are now logged out')
return redirect(url_for('login'))
# Dashboard
@app.route('/dashboard')
@is_logged_in
def dashboard():
# Create cursor
cur = mysql.connection.cursor()
# Get articles
result = cur.execute("SELECT * FROM articles")
articles = cur.fetchall() # This will be fetched in dictionary form
if result > 0:
return render_template('dashboard.html',articles=articles)
else:
msg = 'No Articles Found'
return render_template('dashboard.html', msg=msg)
#Close connection
cur.close()
# Add Article
@app.route('/add_article', methods=['GET','POST'])
@is_logged_in
def add_article():
form = ArticleForm(request.form)
if request.method == 'POST' and form.validate():
title = form.title.data
body = form.body.data
#Create cursor
cur = mysql.connection.cursor()
# Execute
cur.execute("INSERT INTO articles(title, body, author) VALUES(%s,%s,%s)",(title,body,session['username']))
# Commit to database
mysql.connection.commit()
# Close connection
cur.close()
flash('Your article was created!','success')
return redirect(url_for('dashboard'))
return render_template('add_article.html',form=form)
# Edit Article
@app.route('/edit_article/<string:id>', methods=['GET','POST'])
@is_logged_in
def edit_article(id):
# Create cursor
cur = mysql.connection.cursor()
# Get article by ID
result = cur.execute("SELECT * FROM articles WHERE id = %s", [id])
article = cur.fetchone()
# Get form
form = ArticleForm(request.form)
# Populate article form fields
form.title.data = article['title']
form.body.data = article['body']
if request.method == 'POST' and form.validate():
title = request.form['title']
body = request.form['body']
#Create cursor
cur = mysql.connection.cursor()
# Execute
cur.execute("UPDATE articles SET title=%s, body=%s WHERE id=%s",(title,body,id))
# Commit to database
mysql.connection.commit()
# Close connection
cur.close()
flash('Article Updated','success')
return redirect(url_for('dashboard'))
return render_template('edit_article.html',form=form)
# Delete Article
@app.route('/delete_article/<string:id>', methods=['POST'])
@is_logged_in
def delete_article(id):
#Create cursor
cur = mysql.connection.cursor()
# Execute
cur.execute("DELETE FROM articles WHERE id=%s",[id])
# Commit to database
mysql.connection.commit()
# Close connection
cur.close()
flash('Article Deleted','success')
return redirect(url_for('dashboard'))
if __name__ == '__main__': #That means the script is going to be executed
app.secret_key='secret123'
app.run(debug=True)
```
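app.py imports RegisterForm and ArticleForm from a formclass module that is not included in this dump. The sketch below is only a guess at what such a module might contain, assuming WTForms (the Form(request.form), form.validate() and form.<field>.data usage above matches that API); the field names and validators are assumptions, not the author's code.
```python
# Hypothetical sketch of formclass.py (not part of this dump); assumes WTForms.
from wtforms import Form, StringField, TextAreaField, PasswordField, validators

class RegisterForm(Form):
    name = StringField('Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=4, max=25)])
    email = StringField('Email', [validators.Length(min=6, max=50)])
    password = PasswordField('Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords do not match')
    ])
    confirm = PasswordField('Confirm Password')

class ArticleForm(Form):
    title = StringField('Title', [validators.Length(min=1, max=200)])
    body = TextAreaField('Body', [validators.Length(min=30)])
```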
#### File: joshswe/simpleArticleManagementSystem/data.py
```python
def Articles():
articles = [
{
'id': 1,
'title':'Article One',
'body': 'lorem ',
'author': '<NAME>',
'create_date':'08-26-2019'
},
{
'id': 2,
'title':'Article Two',
'body': 'lorem ',
'author': 'Joshua',
'create_date':'08-26-2019'
},
{
'id': 3,
'title':'Article Three',
'body': 'lorem ',
'author': 'Josh',
'create_date':'08-26-2019'
},
]
return articles
``` |
{
"source": "joshsziegler/dotfiles",
"score": 4
} |
#### File: dotfiles/python/html_find_body.py
```python
from bs4 import BeautifulSoup
import urllib.request
import argparse
def find_element_with_most_paragraphs(url):
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html, "html.parser")
parents = soup.find_all(['div', 'article', 'body'])
most_paras = ""
most_paras_num = 0
for el in parents:
num_paras = len(el.find_all('p', recursive=False))
if num_paras > most_paras_num:
most_paras = el
most_paras_num = num_paras
return most_paras
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('url', type=str)
args = parser.parse_args()
print(find_element_with_most_paragraphs(args.url))
``` |
{
"source": "Josh-Talks/Flood-Warning",
"score": 3
} |
#### File: Flood-Warning/floodsystem/analysis.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from datetime import datetime, timedelta
def polyfit(dates, levels, p):
dates = matplotlib.dates.date2num(dates)
if len(dates) > 1:
x = np.linspace(max(dates), min(dates), len(dates))
d0 = x[0]
p_coeff = np.polyfit(x-d0, levels, p)
poly = np.poly1d(p_coeff)
poly_deriv = poly.deriv(m=1)
print(poly_deriv)
return poly, x, poly_deriv
else:
return None
def current_gradient(poly, dates, levels, p):
poly, x, poly_deriv = polyfit(dates, levels, p)
x1 = np.linspace(x[0], x[-1], 30)
current_grad = poly_deriv(x1[-1] - x[0])
print("current gradient")
print(current_grad)
return current_grad
```
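A minimal usage sketch of the two helpers above, with synthetic dates and levels (the values are illustrative, not taken from the live flood-monitoring API).
```python
# Usage sketch with synthetic data; all values are illustrative only.
from datetime import datetime, timedelta
from floodsystem.analysis import polyfit, current_gradient

dates = [datetime(2021, 1, 1) + timedelta(hours=h) for h in range(12)]
levels = [0.50 + 0.01 * h for h in range(12)]

# Fit a degree-2 polynomial; polyfit converts the dates to matplotlib
# date numbers and shifts them by x[0] before fitting.
poly, x, poly_deriv = polyfit(dates, levels, p=2)

# current_gradient refits internally and evaluates the derivative of the fit.
grad = current_gradient(poly, dates, levels, p=2)
print(grad)
```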
#### File: Flood-Warning/floodsystem/sampledata.py
```python
from .station import MonitoringStation
def build_sample_data():
s_id = ['sid1', 'sid2', 'sid3', 'sid4', 'sid5']
m_id = ['id1', 'id2', 'id3', 'id4', 'id5']
label = ['label1', 'label2', 'label3', 'label4', 'label5']
coord = [(1,1),(2,2), (3,3), (4,4), (5,5)]
trange = [(0.1,0.2), (0.2,0.3), (0.3,0.4), (0.5,0.4), (None)]
river = ['river1', 'river2', 'river3', 'river4', 'river1']
town = ['town1', 'town2', 'town3', 'town4', 'town5']
current_level = [0.15, 0.27, 0.39, 0.51, None]
station_data = []
for n in range(len(town)):
s = MonitoringStation(s_id[n], m_id[n], label[n], coord[n], trange[n], river[n], town[n])
station_data.append(s)
i = 0
for station in station_data:
station.latest_level = current_level[i]
i += 1
return station_data
```
#### File: Flood-Warning/Task/Task1C.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import *
def run():
"""Requirements for Task 1C"""
# Build list of tuples of station names and distance
stations = build_station_list()
centre = (52.2053, 0.1218)
r = 10
stations_within_distance = stations_within_radius(stations, centre, r)
stations_within_distance.sort()
print(stations_within_distance)
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run()
``` |
{
"source": "joshtburdick/deepTools",
"score": 2
} |
#### File: deepTools/deeptools/plotCoverage.py
```python
import os
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
import deeptools.countReadsPerBin as countR
from deeptools import parserCommon
from deeptools.utilities import smartLabels
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
parent_parser = parserCommon.getParentArgParse(binSize=False)
read_options_parser = parserCommon.read_options()
parser = \
argparse.ArgumentParser(
parents=[required_args(), parent_parser, read_options_parser],
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False,
description="""
This tool is useful to assess the sequencing depth of a given sample.
It samples 1 million bp, counts the number of overlapping reads and can report
a histogram that tells you how many bases are covered how many times.
Multiple BAM files are accepted, but they all should correspond to the same genome assembly.
detailed usage help:
$ plotCoverage -h
""",
epilog='example usages:\nplotCoverage '
'--bamfiles file1.bam file2.bam -o results.png\n\n'
' \n\n',
conflict_handler='resolve')
parser.add_argument('--version', action='version',
version='plotCoverage {}'.format(__version__))
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
if not args.labels:
if args.smartLabels:
args.labels = smartLabels(args.bamfiles)
else:
args.labels = [os.path.basename(x) for x in args.bamfiles]
if args.labels and len(args.bamfiles) != len(args.labels):
sys.exit("The number of labels does not match the number of BAM files.")
return args
def required_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
required.add_argument('--bamfiles', '-b',
metavar='FILE1 FILE2',
help='List of indexed BAM files separated by spaces.',
nargs='+',
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument("--help", "-h", action="help",
help="show this help message and exit")
optional.add_argument('--plotFile', '-o',
type=parserCommon.writableFile,
help='File name to save the plot to.')
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--smartLabels',
action='store_true',
help='Instead of manually specifying labels for the input '
'BAM files, this causes deepTools to use the file name '
'after removing the path and extension.')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or nan values in _all_ samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--numberOfSamples', '-n',
help='Number of 1 bp regions to sample. (Default: %(default)s)',
required=False,
type=int,
default=1000000)
optional.add_argument('--BED',
help='Limits the coverage analysis to '
'the regions specified in these files. This overrides --numberOfSamples. '
                          'Due to memory requirements, it is not advisable to combine this with '
'--outRawCounts or many tens of thousands of regions, as per-base '
'coverage is used!',
metavar='FILE1.bed FILE2.bed',
nargs='+')
optional.add_argument('--outRawCounts',
help='Save raw counts (coverages) to file.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--outCoverageMetrics',
help='Save percentage of bins/regions above the specified thresholds to '
'the specified file. The coverage thresholds are specified by '
'--coverageThresholds. If no coverage thresholds are specified, the file '
'will be empty.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--coverageThresholds', '-ct',
type=int,
action="append",
help='The percentage of reported bins/regions with signal at least as '
'high as the given threshold. This can be specified multiple times.')
optional.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=5.0)
optional.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=15.0)
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf, svg and plotly.',
default=None,
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
return parser
def main(args=None):
args = process_args(args)
if not args.outRawCounts and not args.plotFile and not args.outCoverageMetrics:
sys.exit("At least one of --plotFile, --outRawCounts and --outCoverageMetrics are required.\n")
if 'BED' in args:
bed_regions = args.BED
else:
bed_regions = None
cr = countR.CountReadsPerBin(args.bamfiles,
binLength=1,
bedFile=bed_regions,
numberOfSamples=args.numberOfSamples,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose,
region=args.region,
blackListFileName=args.blackListFileName,
extendReads=args.extendReads,
minMappingQuality=args.minMappingQuality,
ignoreDuplicates=args.ignoreDuplicates,
center_read=args.centerReads,
samFlag_include=args.samFlagInclude,
samFlag_exclude=args.samFlagExclude,
minFragmentLength=args.minFragmentLength,
maxFragmentLength=args.maxFragmentLength,
bed_and_bin=True,
out_file_for_raw_data=args.outRawCounts)
num_reads_per_bin = cr.run()
if args.outCoverageMetrics and args.coverageThresholds:
args.coverageThresholds.sort() # Galaxy in particular tends to give things in a weird order
of = open(args.outCoverageMetrics, "w")
of.write("Sample\tThreshold\tPercent\n")
nbins = float(num_reads_per_bin.shape[0])
for thresh in args.coverageThresholds:
vals = np.sum(num_reads_per_bin >= thresh, axis=0)
for lab, val in zip(args.labels, vals):
of.write("{}\t{}\t{:6.3f}\n".format(lab, thresh, 100. * val / nbins))
of.close()
if args.outRawCounts:
# append to the generated file the
# labels
header = "#plotCoverage --outRawCounts\n#'chr'\t'start'\t'end'\t"
header += "'" + "'\t'".join(args.labels) + "'\n"
f = open(args.outRawCounts, 'r+')
content = f.read()
f.seek(0, 0)
f.write(header + content)
f.close()
if num_reads_per_bin.shape[0] < 2:
exit("ERROR: too few non-zero bins found.\n"
"If using --region please check that this "
"region is covered by reads.\n")
if args.skipZeros:
num_reads_per_bin = countR.remove_row_of_zeros(num_reads_per_bin)
if args.plotFile:
if args.plotFileFormat == 'plotly':
fig = go.Figure()
fig['layout']['xaxis1'] = {'domain': [0.0, 0.48], 'anchor': 'x1', 'title': 'coverage (#reads per base)'}
fig['layout']['xaxis2'] = {'domain': [0.52, 1.0], 'anchor': 'x2', 'title': 'coverage (#reads per base)'}
fig['layout']['yaxis1'] = {'domain': [0.0, 1.0], 'anchor': 'x1', 'title': 'fraction of bases sampled'}
fig['layout']['yaxis2'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'fraction of bases sampled >= coverage'}
fig['layout'].update(title=args.plotTitle)
else:
fig, axs = plt.subplots(1, 2, figsize=(args.plotWidth, args.plotHeight))
plt.suptitle(args.plotTitle)
# plot up to two std from mean
num_reads_per_bin = num_reads_per_bin.astype(int)
sample_mean = num_reads_per_bin.mean(axis=0)
sample_std = num_reads_per_bin.std(axis=0)
sample_max = num_reads_per_bin.max(axis=0)
sample_min = num_reads_per_bin.min(axis=0)
sample_25 = np.percentile(num_reads_per_bin, 25, axis=0)
sample_50 = np.percentile(num_reads_per_bin, 50, axis=0)
sample_75 = np.percentile(num_reads_per_bin, 75, axis=0)
# use the largest 99th percentile from all samples to set the x_max value
x_max = np.max(np.percentile(num_reads_per_bin, 99, axis=0))
# plot coverage
# print headers for text output
print("sample\tmean\tstd\tmin\t25%\t50%\t75%\tmax")
# the determination of a sensible value for y_max of the first plot (fraction of bases sampled vs.
# coverage) is important because, depending on the data,
    # it can become very difficult to see the lines in the plot. For example, if the coverage of a sample
    # is a nice gaussian curve with a large mean of 50, then a sensible range for the y axis (fraction of
    # bases having coverage=x) is (0, 0.02), which nicely shows the coverage curve. If instead the coverage
    # is very poor and centers close to 1, then a good y-axis range is (0, 1).
    # The current implementation finds the per-coverage fraction at the largest coverage x for which 50%
    # of the bases have coverage >= x, and sets that as the y-axis upper limit.
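    # Illustration (hypothetical numbers): if the per-coverage fractions for x = 0..7 are
    # [0.02, 0.05, 0.10, 0.20, 0.30, 0.20, 0.10, 0.03], then the fraction of bases with
    # coverage >= x is [1.00, 0.98, 0.93, 0.83, 0.63, 0.33, 0.13, 0.03]. The largest x with
    # more than 50% of bases at or above it is x = 4 (0.63), so the y-limit candidate for
    # this sample is the per-coverage fraction at x = 4, i.e. 0.30.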
y_max = []
data = []
# We need to manually set the line colors so they're shared between the two plots.
plotly_colors = ["#d73027", "#fc8d59", "#f33090", "#e0f3f8", "#91bfdb", "#4575b4"]
plotly_styles = sum([6 * ["solid"], 6 * ["dot"], 6 * ["dash"], 6 * ["longdash"], 6 * ["dashdot"], 6 * ["longdashdot"]], [])
for idx, col in enumerate(num_reads_per_bin.T):
if args.plotFile:
frac_reads_per_coverage = np.bincount(col.astype(int)).astype(float) / num_reads_per_bin.shape[0]
csum = np.bincount(col.astype(int))[::-1].cumsum()
csum_frac = csum.astype(float)[::-1] / csum.max()
if args.plotFileFormat == 'plotly':
color = plotly_colors[idx % len(plotly_colors)]
dash = plotly_styles[idx % len(plotly_styles)]
trace = go.Scatter(x=np.arange(0, int(x_max) - 1),
y=frac_reads_per_coverage[:int(x_max)],
mode='lines',
xaxis='x1',
yaxis='y1',
line=dict(color=color, dash=dash),
name="{}, mean={:.1f}".format(args.labels[idx], sample_mean[idx]),
legendgroup="{}".format(idx))
data.append(trace)
trace = go.Scatter(x=np.arange(0, int(x_max) - 1),
y=csum_frac[:int(x_max)],
mode='lines',
xaxis='x2',
yaxis='y2',
line=dict(color=color, dash=dash),
name=args.labels[idx],
showlegend=False,
legendgroup="{}".format(idx))
data.append(trace)
else:
axs[0].plot(frac_reads_per_coverage, label="{}, mean={:.1f}".format(args.labels[idx], sample_mean[idx]))
axs[1].plot(csum_frac, label=args.labels[idx])
# find the indexes (i.e. the x values) for which the cumulative distribution 'fraction of bases
# sampled >= coverage' where fraction of bases sampled = 50%: `np.flatnonzero(csum_frac>0.5)`
# then find the fraction of bases sampled that that have the largest x
y_max.append(frac_reads_per_coverage[max(np.flatnonzero(csum_frac > 0.5))])
print("{}\t{:0.2f}\t{:0.2f}\t{}\t{}\t{}\t{}\t{}\t".format(args.labels[idx],
sample_mean[idx],
sample_std[idx],
sample_min[idx],
sample_25[idx],
sample_50[idx],
sample_75[idx],
sample_max[idx],
))
if args.plotFile:
# Don't clip plots
y_max = max(y_max)
if args.plotFileFormat == "plotly":
fig.add_traces(data)
fig['layout']['yaxis1'].update(range=[0.0, min(1, y_max + (y_max * 0.10))])
fig['layout']['yaxis2'].update(range=[0.0, 1.0])
py.plot(fig, filename=args.plotFile, auto_open=False)
else:
axs[0].set_ylim(0, min(1, y_max + (y_max * 0.10)))
axs[0].set_xlim(0, x_max)
axs[0].set_xlabel('coverage (#reads per bp)')
axs[0].legend(fancybox=True, framealpha=0.5)
axs[0].set_ylabel('fraction of bases sampled')
# plot cumulative coverage
axs[1].set_xlim(0, x_max)
axs[1].set_xlabel('coverage (#reads per bp)')
axs[1].set_ylabel('fraction of bases sampled >= coverage')
axs[1].legend(fancybox=True, framealpha=0.5)
plt.savefig(args.plotFile, format=args.plotFileFormat)
plt.close()
if __name__ == "__main__":
main()
```
#### File: deepTools/deeptools/plotHeatmap.py
```python
from __future__ import division
import argparse
from collections import OrderedDict
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import matplotlib.gridspec as gridspec
from matplotlib import ticker
import copy
import sys
import plotly.offline as py
import plotly.graph_objs as go
# own modules
from deeptools import cm # noqa: F401
from deeptools import parserCommon
from deeptools import heatmapper
from deeptools.heatmapper_utilities import plot_single, plotly_single
from deeptools.utilities import convertCmap
from deeptools.computeMatrixOperations import filterHeatmapValues
debug = 0
old_settings = np.seterr(all='ignore')
plt.ioff()
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
parents=[parserCommon.heatmapperMatrixArgs(),
parserCommon.heatmapperOutputArgs(mode='heatmap'),
parserCommon.heatmapperOptionalArgs(mode='heatmap')],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='This tool creates a heatmap for '
'scores associated with genomic regions. '
'The program requires a matrix file '
'generated by the tool ``computeMatrix``.',
epilog='An example usage is: plotHeatmap -m <matrix file>',
add_help=False)
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
args.heatmapHeight = args.heatmapHeight if args.heatmapHeight > 3 and args.heatmapHeight <= 100 else 10
if not matplotlib.colors.is_color_like(args.missingDataColor):
exit("The value {0} for --missingDataColor is not valid".format(args.missingDataColor))
args.boxAroundHeatmaps = True if args.boxAroundHeatmaps == 'yes' else False
return args
def prepare_layout(hm_matrix, heatmapsize, showSummaryPlot, showColorbar, perGroup, colorbar_position):
"""
    prepare the plot layout
    as a grid having as many columns
    as samples (+1 for the colorbar)
    and as many rows as groups (or clusters) (+1 for the profile plot)
"""
heatmapwidth, heatmapheight = heatmapsize
numcols = hm_matrix.get_num_samples()
numrows = hm_matrix.get_num_groups()
if perGroup:
numcols, numrows = numrows, numcols
# the rows have different size depending
# on the number of regions contained in the
if perGroup:
# heatmap
height_ratio = np.array([np.amax(np.diff(hm_matrix.group_boundaries))] * numrows)
# scale ratio to sum = heatmapheight
height_ratio = heatmapheight * (height_ratio.astype(float) / height_ratio.sum())
else:
# heatmap
height_ratio = np.diff(hm_matrix.group_boundaries)
# scale ratio to sum = heatmapheight
height_ratio = heatmapheight * (height_ratio.astype(float) / height_ratio.sum())
# convert the height_ratio from numpy array back to list
height_ratio = height_ratio.tolist()
# the width ratio is equal for all heatmaps
width_ratio = [heatmapwidth] * numcols
if showColorbar:
if colorbar_position == 'below':
numrows += 2 # a spacer needs to be added to avoid overlaps
height_ratio += [4 / 2.54] # spacer
height_ratio += [1 / 2.54]
else:
numcols += 1
width_ratio += [1 / 2.54]
if showSummaryPlot:
numrows += 2 # plus 2 because a spacer is added
# make height of summary plot
# proportional to the width of heatmap
sumplot_height = heatmapwidth
spacer_height = heatmapwidth / 8
# scale height_ratios to convert from row
        # numbers to heatmapheight fractions
height_ratio = np.concatenate([[sumplot_height, spacer_height], height_ratio])
grids = gridspec.GridSpec(numrows, numcols, height_ratios=height_ratio, width_ratios=width_ratio)
return grids
def addProfilePlot(hm, plt, fig, grids, iterNum, iterNum2, perGroup, averageType, plot_type, yAxisLabel, color_list, yMin, yMax, wspace, hspace, colorbar_position, label_rotation=0.0):
"""
A function to add profile plots to the given figure, possibly in a custom grid subplot which mimics a tight layout (if wspace and hspace are not None)
"""
if wspace is not None and hspace is not None:
if colorbar_position == 'side':
gridsSub = gridspec.GridSpecFromSubplotSpec(1, iterNum, subplot_spec=grids[0, :-1], wspace=wspace, hspace=hspace)
else:
gridsSub = gridspec.GridSpecFromSubplotSpec(1, iterNum, subplot_spec=grids[0, :], wspace=wspace, hspace=hspace)
ax_list = []
globalYmin = np.inf
globalYmax = -np.inf
for sample_id in range(iterNum):
if perGroup:
title = hm.matrix.group_labels[sample_id]
tickIdx = sample_id % hm.matrix.get_num_samples()
else:
title = hm.matrix.sample_labels[sample_id]
tickIdx = sample_id
if sample_id > 0 and len(yMin) == 1 and len(yMax) == 1:
ax_profile = fig.add_subplot(grids[0, sample_id])
else:
if wspace is not None and hspace is not None:
ax_profile = fig.add_subplot(gridsSub[0, sample_id])
else:
ax_profile = fig.add_subplot(grids[0, sample_id])
ax_profile.set_title(title)
for group in range(iterNum2):
if perGroup:
sub_matrix = hm.matrix.get_matrix(sample_id, group)
line_label = sub_matrix['sample']
else:
sub_matrix = hm.matrix.get_matrix(group, sample_id)
line_label = sub_matrix['group']
plot_single(ax_profile, sub_matrix['matrix'],
averageType,
color_list[group],
line_label,
plot_type=plot_type)
if sample_id > 0 and len(yMin) == 1 and len(yMax) == 1:
plt.setp(ax_profile.get_yticklabels(), visible=False)
if sample_id == 0 and yAxisLabel != '':
ax_profile.set_ylabel(yAxisLabel)
xticks, xtickslabel = hm.getTicks(tickIdx)
if np.ceil(max(xticks)) != float(sub_matrix['matrix'].shape[1] - 1):
tickscale = float(sub_matrix['matrix'].shape[1] - 1) / max(xticks)
xticks_use = [x * tickscale for x in xticks]
ax_profile.axes.set_xticks(xticks_use)
else:
ax_profile.axes.set_xticks(xticks)
ax_profile.axes.set_xticklabels(xtickslabel, rotation=label_rotation)
ax_list.append(ax_profile)
# align the first and last label
# such that they don't fall off
# the heatmap sides
ticks = ax_profile.xaxis.get_major_ticks()
ticks[0].label1.set_horizontalalignment('left')
ticks[-1].label1.set_horizontalalignment('right')
globalYmin = min(np.float64(globalYmin), ax_profile.get_ylim()[0])
globalYmax = max(globalYmax, ax_profile.get_ylim()[1])
# It turns out that set_ylim only takes np.float64s
for sample_id, subplot in enumerate(ax_list):
localYMin = yMin[sample_id % len(yMin)]
localYMax = yMax[sample_id % len(yMax)]
lims = [globalYmin, globalYmax]
if localYMin:
if localYMax:
lims = (np.float64(localYMin), np.float64(localYMax))
else:
lims = (np.float64(localYMin), lims[1])
elif localYMax:
lims = (lims[0], np.float64(localYMax))
if lims[0] >= lims[1]:
lims = (lims[0], lims[0] + 1)
ax_list[sample_id].set_ylim(lims)
return ax_list
def plotlyMatrix(hm,
outFilename,
yMin=[None], yMax=[None],
zMin=[None], zMax=[None],
showSummaryPlot=False,
cmap=None, colorList=None, colorBarPosition='side',
perGroup=False,
averageType='median', yAxisLabel='', xAxisLabel='',
plotTitle='',
showColorbar=False,
label_rotation=0.0):
label_rotation *= -1.0
if colorBarPosition != 'side':
        sys.stderr.write("Warning: It is not currently possible to have multiple colorbars with plotly!\n")
nRows = hm.matrix.get_num_groups()
nCols = hm.matrix.get_num_samples()
if perGroup:
nRows, nCols = nCols, nRows
profileHeight = 0.0
profileBottomBuffer = 0.0
if showSummaryPlot:
profileHeight = 0.2
profileBottomBuffer = 0.05
profileSideBuffer = 0.
profileWidth = 1. / nCols
if nCols > 1:
profileSideBuffer = 0.1 / (nCols - 1)
profileWidth = 0.9 / nCols
dataSummary = []
annos = []
fig = go.Figure()
fig['layout'].update(title=plotTitle)
xAxisN = 1
yAxisN = 1
# Summary plots at the top (if appropriate)
if showSummaryPlot:
yMinLocal = np.inf
yMaxLocal = -np.inf
for i in range(nCols):
xanchor = 'x{}'.format(xAxisN)
yanchor = 'y{}'.format(yAxisN)
xBase = i * (profileSideBuffer + profileWidth)
yBase = 1 - profileHeight
xDomain = [xBase, xBase + profileWidth]
yDomain = [yBase, 1.0]
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
xTicks, xTicksLabels = hm.getTicks(i)
label = mat['sample']
else:
mat = hm.matrix.get_matrix(j, i)
xTicks, xTicksLabels = hm.getTicks(j)
label = mat['group']
if j == 0:
fig['layout']['xaxis{}'.format(xAxisN)] = dict(domain=xDomain, anchor=yanchor, range=[0, mat['matrix'].shape[1]], tickmode='array', tickvals=xTicks, ticktext=xTicksLabels, tickangle=label_rotation)
fig['layout']['yaxis{}'.format(yAxisN)] = dict(anchor=xanchor, domain=yDomain)
trace = plotly_single(mat['matrix'], averageType, colorList[j], label)[0]
trace.update(xaxis=xanchor, yaxis=yanchor, legendgroup=label)
if min(trace['y']) < yMinLocal:
yMinLocal = min(trace['y'])
if max(trace['y']) > yMaxLocal:
yMaxLocal = max(trace['y'])
if i == 0:
trace.update(showlegend=True)
dataSummary.append(trace)
# Add the column label
if perGroup:
title = hm.matrix.group_labels[i]
else:
title = hm.matrix.sample_labels[i]
titleX = xBase + 0.5 * profileWidth
annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': title, 'y': 1.0, 'x': titleX, 'font': {'size': 16}, 'showarrow': False})
xAxisN += 1
yAxisN += 1
# Adjust y-bounds as appropriate:
for i in range(1, yAxisN):
yMinUse = yMinLocal
if yMin[(i - 1) % len(yMin)] is not None:
yMinUse = yMin[(i - 1) % len(yMin)]
yMaxUse = yMaxLocal
if yMax[(i - 1) % len(yMax)] is not None:
yMaxUse = yMax[(i - 1) % len(yMax)]
fig['layout']['yaxis{}'.format(i)].update(range=[yMinUse, yMaxUse])
fig['layout']['yaxis1'].update(title=yAxisLabel)
# Add the heatmap
dataHeatmap = []
zMinLocal = np.inf
zMaxLocal = -np.inf
heatmapWidth = 1. / nCols
heatmapSideBuffer = 0.0
if nCols > 1:
heatmapWidth = .9 / nCols
heatmapSideBuffer = 0.1 / (nCols - 1)
heatmapHeight = 1.0 - profileHeight - profileBottomBuffer
for i in range(nCols):
xanchor = 'x{}'.format(xAxisN)
xBase = i * (heatmapSideBuffer + heatmapWidth)
# Determine the height of each heatmap, they have no buffer
lengths = [0.0]
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
else:
mat = hm.matrix.get_matrix(j, i)
lengths.append(mat['matrix'].shape[0])
fractionalHeights = heatmapHeight * np.cumsum(lengths).astype(float) / np.sum(lengths).astype(float)
xDomain = [xBase, xBase + heatmapWidth]
fig['layout']['xaxis{}'.format(xAxisN)] = dict(domain=xDomain, anchor='free', position=0.0, range=[0, mat['matrix'].shape[1]], tickmode='array', tickvals=xTicks, ticktext=xTicksLabels, title=xAxisLabel)
# Start adding the heatmaps
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
label = mat['sample']
start = hm.matrix.group_boundaries[i]
end = hm.matrix.group_boundaries[i + 1]
else:
mat = hm.matrix.get_matrix(j, i)
label = mat['group']
start = hm.matrix.group_boundaries[j]
end = hm.matrix.group_boundaries[j + 1]
regs = hm.matrix.regions[start:end]
regs = [x[2] for x in regs]
yanchor = 'y{}'.format(yAxisN)
yDomain = [heatmapHeight - fractionalHeights[j + 1], heatmapHeight - fractionalHeights[j]]
visible = False
if i == 0:
visible = True
fig['layout']['yaxis{}'.format(yAxisN)] = dict(domain=yDomain, anchor=xanchor, visible=visible, title=label, tickmode='array', tickvals=[], ticktext=[])
if np.min(mat['matrix']) < zMinLocal:
zMinLocal = np.min(mat['matrix'])
if np.max(mat['matrix']) < zMaxLocal:
zMaxLocal = np.max(mat['matrix'])
trace = go.Heatmap(z=np.flipud(mat['matrix']),
y=regs[::-1],
xaxis=xanchor,
yaxis=yanchor,
showlegend=False,
name=label,
showscale=False)
dataHeatmap.append(trace)
yAxisN += 1
xAxisN += 1
if showColorbar:
dataHeatmap[-1].update(showscale=True)
dataHeatmap[-1]['colorbar'].update(len=heatmapHeight, y=0, yanchor='bottom', ypad=0.0)
# Adjust z bounds and colorscale
for trace in dataHeatmap:
zMinUse = zMinLocal
zMaxUse = zMaxLocal
if zMin[0] is not None:
zMinUse = zMin[0]
if zMax[0] is not None:
zMaxUse = zMax[0]
trace.update(zmin=zMinUse, zmax=zMaxUse, colorscale=convertCmap(cmap[0], vmin=zMinUse, vmax=zMaxUse))
dataSummary.extend(dataHeatmap)
fig.add_traces(dataSummary)
fig['layout']['annotations'] = annos
py.plot(fig, filename=outFilename, auto_open=False)
def plotMatrix(hm, outFileName,
colorMapDict={'colorMap': ['binary'], 'missingDataColor': 'black', 'alpha': 1.0},
plotTitle='',
xAxisLabel='', yAxisLabel='', regionsLabel='',
zMin=None, zMax=None,
yMin=None, yMax=None,
averageType='median',
reference_point_label=None,
startLabel='TSS', endLabel="TES",
heatmapHeight=25,
heatmapWidth=7.5,
perGroup=False, whatToShow='plot, heatmap and colorbar',
plot_type='lines',
linesAtTickMarks=False,
image_format=None,
legend_location='upper-left',
box_around_heatmaps=True,
label_rotation=0.0,
dpi=200,
interpolation_method='auto'):
hm.reference_point_label = hm.parameters['ref point']
if reference_point_label is not None:
hm.reference_point_label = [reference_point_label] * hm.matrix.get_num_samples()
hm.startLabel = startLabel
hm.endLabel = endLabel
matrix_flatten = None
if zMin is None:
matrix_flatten = hm.matrix.flatten()
# try to avoid outliers by using np.percentile
zMin = np.percentile(matrix_flatten, 1.0)
if np.isnan(zMin):
zMin = [None]
else:
zMin = [zMin] # convert to list to support multiple entries
elif 'auto' in zMin:
matrix_flatten = hm.matrix.flatten()
auto_min = np.percentile(matrix_flatten, 1.0)
if np.isnan(auto_min):
auto_min = None
new_mins = [float(x) if x != 'auto' else auto_min for x in zMin]
zMin = new_mins
else:
new_mins = [float(x) for x in zMin]
zMin = new_mins
if zMax is None:
if matrix_flatten is None:
matrix_flatten = hm.matrix.flatten()
# try to avoid outliers by using np.percentile
zMax = np.percentile(matrix_flatten, 98.0)
if np.isnan(zMax) or zMax <= zMin[0]:
zMax = [None]
else:
zMax = [zMax]
elif 'auto' in zMax:
matrix_flatten = hm.matrix.flatten()
auto_max = np.percentile(matrix_flatten, 98.0)
if np.isnan(auto_max):
auto_max = None
new_maxs = [float(x) if x != 'auto' else auto_max for x in zMax]
zMax = new_maxs
else:
new_maxs = [float(x) for x in zMax]
zMax = new_maxs
if (len(zMin) > 1) & (len(zMax) > 1):
for index, value in enumerate(zMax):
if value <= zMin[index]:
sys.stderr.write("Warnirng: In bigwig {}, the given zmin ({}) is larger than "
"or equal to the given zmax ({}). Thus, it has been set "
"to None. \n".format(index + 1, zMin[index], value))
zMin[index] = None
if yMin is None:
yMin = [None]
if yMax is None:
yMax = [None]
if not isinstance(yMin, list):
yMin = [yMin]
if not isinstance(yMax, list):
yMax = [yMax]
plt.rcParams['font.size'] = 8.0
fontP = FontProperties()
showSummaryPlot = False
showColorbar = False
if whatToShow == 'plot and heatmap':
showSummaryPlot = True
elif whatToShow == 'heatmap and colorbar':
showColorbar = True
elif whatToShow == 'plot, heatmap and colorbar':
showSummaryPlot = True
showColorbar = True
# colormap for the heatmap
if colorMapDict['colorMap']:
cmap = []
for color_map in colorMapDict['colorMap']:
copy_cmp = copy.copy(plt.get_cmap(color_map))
cmap.append(copy_cmp)
cmap[-1].set_bad(colorMapDict['missingDataColor']) # nans are printed using this color
if colorMapDict['colorList'] and len(colorMapDict['colorList']) > 0:
# make a cmap for each color list given
cmap = []
for color_list in colorMapDict['colorList']:
cmap.append(matplotlib.colors.LinearSegmentedColormap.from_list(
'my_cmap', color_list.replace(' ', '').split(","), N=colorMapDict['colorNumber']))
cmap[-1].set_bad(colorMapDict['missingDataColor']) # nans are printed using this color
if len(cmap) > 1 or len(zMin) > 1 or len(zMax) > 1:
# position color bar below heatmap when more than one
# heatmap color is given
colorbar_position = 'below'
else:
colorbar_position = 'side'
grids = prepare_layout(hm.matrix, (heatmapWidth, heatmapHeight),
showSummaryPlot, showColorbar, perGroup, colorbar_position)
# figsize: w,h tuple in inches
figwidth = heatmapWidth / 2.54
figheight = heatmapHeight / 2.54
if showSummaryPlot:
        # the summary plot occupies a height
# equal to the fig width
figheight += figwidth
numsamples = hm.matrix.get_num_samples()
if perGroup:
num_cols = hm.matrix.get_num_groups()
else:
num_cols = numsamples
total_figwidth = figwidth * num_cols
if showColorbar:
if colorbar_position == 'below':
figheight += 1 / 2.54
else:
total_figwidth += 1 / 2.54
fig = plt.figure(figsize=(total_figwidth, figheight))
fig.suptitle(plotTitle, y=1 - (0.06 / figheight))
# color map for the summary plot (profile) on top of the heatmap
cmap_plot = plt.get_cmap('jet')
numgroups = hm.matrix.get_num_groups()
if perGroup:
color_list = cmap_plot(np.arange(hm.matrix.get_num_samples()) / hm.matrix.get_num_samples())
else:
color_list = cmap_plot(np.arange(numgroups) / numgroups)
alpha = colorMapDict['alpha']
if image_format == 'plotly':
return plotlyMatrix(hm,
outFileName,
yMin=yMin, yMax=yMax,
zMin=zMin, zMax=zMax,
showSummaryPlot=showSummaryPlot, showColorbar=showColorbar,
cmap=cmap, colorList=color_list, colorBarPosition=colorbar_position,
perGroup=perGroup,
averageType=averageType, plotTitle=plotTitle,
xAxisLabel=xAxisLabel, yAxisLabel=yAxisLabel,
label_rotation=label_rotation)
# check if matrix is reference-point based using the upstream >0 value
# and is sorted by region length. If this is
# the case, prepare the data to plot a border at the regions end
regions_length_in_bins = [None] * len(hm.parameters['upstream'])
if hm.matrix.sort_using == 'region_length' and hm.matrix.sort_method != 'no':
for idx in range(len(hm.parameters['upstream'])):
if hm.parameters['ref point'][idx] is None:
regions_length_in_bins[idx] = None
continue
_regions = hm.matrix.get_regions()
foo = []
for _group in _regions:
_reg_len = []
for ind_reg in _group:
if isinstance(ind_reg, dict):
_len = ind_reg['end'] - ind_reg['start']
else:
_len = sum([x[1] - x[0] for x in ind_reg[1]])
if hm.parameters['ref point'][idx] == 'TSS':
_reg_len.append((hm.parameters['upstream'][idx] + _len) / hm.parameters['bin size'][idx])
elif hm.parameters['ref point'][idx] == 'center':
_len *= 0.5
_reg_len.append((hm.parameters['upstream'][idx] + _len) / hm.parameters['bin size'][idx])
elif hm.parameters['ref point'][idx] == 'TES':
_reg_len.append((hm.parameters['upstream'][idx] - _len) / hm.parameters['bin size'][idx])
foo.append(_reg_len)
regions_length_in_bins[idx] = foo
# plot the profiles on top of the heatmaps
if showSummaryPlot:
if perGroup:
iterNum = numgroups
iterNum2 = hm.matrix.get_num_samples()
else:
iterNum = hm.matrix.get_num_samples()
iterNum2 = numgroups
ax_list = addProfilePlot(hm, plt, fig, grids, iterNum, iterNum2, perGroup, averageType, plot_type, yAxisLabel, color_list, yMin, yMax, None, None, colorbar_position, label_rotation)
if len(yMin) > 1 or len(yMax) > 1:
# replot with a tight layout
import matplotlib.tight_layout as tl
specList = tl.get_subplotspec_list(fig.axes, grid_spec=grids)
renderer = tl.get_renderer(fig)
kwargs = tl.get_tight_layout_figure(fig, fig.axes, specList, renderer, pad=1.08)
for ax in ax_list:
fig.delaxes(ax)
ax_list = addProfilePlot(hm, plt, fig, grids, iterNum, iterNum2, perGroup, averageType, plot_type, yAxisLabel, color_list, yMin, yMax, kwargs['wspace'], kwargs['hspace'], colorbar_position, label_rotation)
if legend_location != 'none':
ax_list[-1].legend(loc=legend_location.replace('-', ' '), ncol=1, prop=fontP,
frameon=False, markerscale=0.5)
first_group = 0 # helper variable to place the title per sample/group
for sample in range(hm.matrix.get_num_samples()):
sample_idx = sample
for group in range(numgroups):
group_idx = group
# add the respective profile to the
# summary plot
sub_matrix = hm.matrix.get_matrix(group, sample)
if showSummaryPlot:
if perGroup:
sample_idx = sample + 2 # plot + spacer
else:
group += 2 # plot + spacer
first_group = 1
if perGroup:
ax = fig.add_subplot(grids[sample_idx, group])
# the remainder (%) is used to iterate
# over the available color maps (cmap).
# if the user only provided, lets say two
# and there are 10 groups, colormaps they are reused every
# two groups.
cmap_idx = group_idx % len(cmap)
zmin_idx = group_idx % len(zMin)
zmax_idx = group_idx % len(zMax)
else:
ax = fig.add_subplot(grids[group, sample])
# see above for the use of '%'
cmap_idx = sample % len(cmap)
zmin_idx = sample % len(zMin)
zmax_idx = sample % len(zMax)
if group == first_group and not showSummaryPlot and not perGroup:
title = hm.matrix.sample_labels[sample]
ax.set_title(title)
if box_around_heatmaps is False:
# Turn off the boxes around the individual heatmaps
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
rows, cols = sub_matrix['matrix'].shape
# if the number of rows is too large, then the 'nearest' method simply
# drops rows. A better solution is to relate the threshold to the DPI of the image
if interpolation_method == 'auto':
if rows >= 1000:
interpolation_method = 'bilinear'
else:
interpolation_method = 'nearest'
# if np.clip is not used, then values of the matrix that exceed the zmax limit are
# highlighted. Usually, a significant amount of pixels are equal or above the zmax and
# the default behaviour produces images full of large highlighted dots.
# If interpolation='nearest' is used, this has no effect
sub_matrix['matrix'] = np.clip(sub_matrix['matrix'], zMin[zmin_idx], zMax[zmax_idx])
img = ax.imshow(sub_matrix['matrix'],
aspect='auto',
interpolation=interpolation_method,
origin='upper',
vmin=zMin[zmin_idx],
vmax=zMax[zmax_idx],
cmap=cmap[cmap_idx],
alpha=alpha,
extent=[0, cols, rows, 0])
img.set_rasterized(True)
# plot border at the end of the regions
# if ordered by length
if regions_length_in_bins[sample] is not None:
x_lim = ax.get_xlim()
y_lim = ax.get_ylim()
ax.plot(regions_length_in_bins[sample][group_idx],
np.arange(len(regions_length_in_bins[sample][group_idx])),
'--', color='black', linewidth=0.5, dashes=(3, 2))
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
if perGroup:
ax.axes.set_xlabel(sub_matrix['group'])
if sample < hm.matrix.get_num_samples() - 1:
ax.axes.get_xaxis().set_visible(False)
else:
ax.axes.get_xaxis().set_visible(False)
ax.axes.set_xlabel(xAxisLabel)
ax.axes.set_yticks([])
if perGroup and group == 0:
ax.axes.set_ylabel(sub_matrix['sample'])
elif not perGroup and sample == 0:
ax.axes.set_ylabel(sub_matrix['group'])
# Plot vertical lines at tick marks if desired
if linesAtTickMarks:
xticks_heat, xtickslabel_heat = hm.getTicks(sample)
xticks_heat = [x + 0.5 for x in xticks_heat] # There's an offset of 0.5 compared to the profile plot
if np.ceil(max(xticks_heat)) != float(sub_matrix['matrix'].shape[1]):
tickscale = float(sub_matrix['matrix'].shape[1]) / max(xticks_heat)
xticks_heat_use = [x * tickscale for x in xticks_heat]
else:
xticks_heat_use = xticks_heat
for x in xticks_heat_use:
ax.axvline(x=x, color='black', linewidth=0.5, dashes=(3, 2))
# add labels to last block in a column
if (perGroup and sample == numsamples - 1) or \
(not perGroup and group_idx == numgroups - 1):
# add xticks to the bottom heatmap (last group)
ax.axes.get_xaxis().set_visible(True)
xticks_heat, xtickslabel_heat = hm.getTicks(sample)
xticks_heat = [x + 0.5 for x in xticks_heat] # There's an offset of 0.5 compared to the profile plot
if np.ceil(max(xticks_heat)) != float(sub_matrix['matrix'].shape[1]):
tickscale = float(sub_matrix['matrix'].shape[1]) / max(xticks_heat)
xticks_heat_use = [x * tickscale for x in xticks_heat]
ax.axes.set_xticks(xticks_heat_use)
else:
ax.axes.set_xticks(xticks_heat)
ax.axes.set_xticklabels(xtickslabel_heat, size=8)
# align the first and last label
# such that they don't fall off
# the heatmap sides
ticks = ax.xaxis.get_major_ticks()
ticks[0].label1.set_horizontalalignment('left')
ticks[-1].label1.set_horizontalalignment('right')
ax.get_xaxis().set_tick_params(
which='both',
top=False,
direction='out')
if showColorbar and colorbar_position == 'below':
# draw a colormap per each heatmap below the last block
if perGroup:
col = group_idx
else:
col = sample
ax = fig.add_subplot(grids[-1, col])
tick_locator = ticker.MaxNLocator(nbins=3)
cbar = fig.colorbar(img, cax=ax, orientation='horizontal', ticks=tick_locator)
labels = cbar.ax.get_xticklabels()
ticks = cbar.ax.get_xticks()
if ticks[0] == 0:
                    # if the label is at the start of the colorbar
# move it a bit inside to avoid overlapping
# with other labels
labels[0].set_horizontalalignment('left')
if ticks[-1] == 1:
                    # if the label is at the end of the colorbar
# move it a bit inside to avoid overlapping
# with other labels
labels[-1].set_horizontalalignment('right')
# cbar.ax.set_xticklabels(labels, rotation=90)
if showColorbar and colorbar_position != 'below':
if showSummaryPlot:
# we don't want to colorbar to extend
# over the profiles and spacer top rows
grid_start = 2
else:
grid_start = 0
ax = fig.add_subplot(grids[grid_start:, -1])
fig.colorbar(img, cax=ax)
if box_around_heatmaps:
plt.subplots_adjust(wspace=0.10, hspace=0.025, top=0.85, bottom=0, left=0.04, right=0.96)
else:
# When no box is plotted the space between heatmaps is reduced
plt.subplots_adjust(wspace=0.05, hspace=0.01, top=0.85, bottom=0, left=0.04, right=0.96)
plt.savefig(outFileName, bbox_inches='tight', pad_inches=0.1, dpi=dpi, format=image_format)
plt.close()
def mergeSmallGroups(matrixDict):
group_lengths = [len(x) for x in matrixDict.values()]
min_group_length = sum(group_lengths) * 0.01
to_merge = []
i = 0
_mergedHeatMapDict = OrderedDict()
for label, ma in matrixDict.items():
# merge small groups together
# otherwise visualization is impaired
if group_lengths[i] > min_group_length:
if len(to_merge):
to_merge.append(label)
new_label = " ".join(to_merge)
new_ma = np.concatenate([matrixDict[item]
for item in to_merge], axis=0)
else:
new_label = label
new_ma = matrixDict[label]
_mergedHeatMapDict[new_label] = new_ma
to_merge = []
else:
to_merge.append(label)
i += 1
if len(to_merge) > 1:
new_label = " ".join(to_merge)
        new_ma = np.concatenate([matrixDict[item] for item in to_merge], axis=0)
_mergedHeatMapDict[new_label] = new_ma
return _mergedHeatMapDict
def main(args=None):
args = process_args(args)
hm = heatmapper.heatmapper()
matrix_file = args.matrixFile.name
args.matrixFile.close()
hm.read_matrix_file(matrix_file)
if hm.parameters['min threshold'] is not None or hm.parameters['max threshold'] is not None:
filterHeatmapValues(hm, hm.parameters['min threshold'], hm.parameters['max threshold'])
if args.sortRegions == 'keep':
args.sortRegions = 'no' # These are the same thing
if args.kmeans is not None:
hm.matrix.hmcluster(args.kmeans, method='kmeans', clustering_samples=args.clusterUsingSamples)
elif args.hclust is not None:
print("Performing hierarchical clustering."
"Please note that it might be very slow for large datasets.\n")
hm.matrix.hmcluster(args.hclust, method='hierarchical', clustering_samples=args.clusterUsingSamples)
group_len_ratio = np.diff(hm.matrix.group_boundaries) / len(hm.matrix.regions)
if np.any(group_len_ratio < 5.0 / 1000):
problem = np.flatnonzero(group_len_ratio < 5.0 / 1000)
sys.stderr.write("WARNING: Group '{}' is too small for plotting, you might want to remove it. "
"There will likely be an error message from matplotlib regarding this "
"below.\n".format(hm.matrix.group_labels[problem[0]]))
if args.regionsLabel:
hm.matrix.set_group_labels(args.regionsLabel)
if args.samplesLabel and len(args.samplesLabel):
hm.matrix.set_sample_labels(args.samplesLabel)
if args.sortRegions != 'no':
sortUsingSamples = []
if args.sortUsingSamples is not None:
for i in args.sortUsingSamples:
if (i > 0 and i <= hm.matrix.get_num_samples()):
sortUsingSamples.append(i - 1)
else:
exit("The value {0} for --sortSamples is not valid. Only values from 1 to {1} are allowed.".format(args.sortUsingSamples, hm.matrix.get_num_samples()))
print('Samples used for ordering within each group: ', sortUsingSamples)
hm.matrix.sort_groups(sort_using=args.sortUsing,
sort_method=args.sortRegions,
sample_list=sortUsingSamples)
if args.silhouette:
if args.kmeans is not None:
hm.matrix.computeSilhouette(args.kmeans)
elif args.hclust is not None:
            hm.matrix.computeSilhouette(args.hclust)
if args.outFileNameMatrix:
hm.save_matrix(args.outFileNameMatrix)
if args.outFileSortedRegions:
hm.save_BED(args.outFileSortedRegions)
colormap_dict = {'colorMap': args.colorMap,
'colorList': args.colorList,
'colorNumber': args.colorNumber,
'missingDataColor': args.missingDataColor,
'alpha': args.alpha}
plotMatrix(hm,
args.outFileName,
colormap_dict, args.plotTitle,
args.xAxisLabel, args.yAxisLabel, args.regionsLabel,
args.zMin, args.zMax,
args.yMin, args.yMax,
args.averageTypeSummaryPlot,
args.refPointLabel,
args.startLabel,
args.endLabel,
args.heatmapHeight,
args.heatmapWidth,
args.perGroup,
args.whatToShow,
linesAtTickMarks=args.linesAtTickMarks,
plot_type=args.plotType,
image_format=args.plotFileFormat,
legend_location=args.legendLocation,
box_around_heatmaps=args.boxAroundHeatmaps,
label_rotation=args.label_rotation,
dpi=args.dpi,
interpolation_method=args.interpolationMethod)
``` |
{
"source": "joshtburdick/misc",
"score": 4
} |
#### File: countingBound/py/lpRank1.py
```python
import numpy as np
import scipy.optimize
import scipy.special
class LpBound:
"""Computes a bound on the rank of finding cliques.
n: number of vertices
k: clique size
"""
    def __init__(self, n, k):
        self.n = n
self.k = k
# the total number of cliques
self.numCliques = scipy.special.comb(n, k)
# the number of sets of cliques (and functions) which
# include an arbitrary edge, (say, e_{12})
self.numHittingEdge = scipy.special.comb(n-2, k-2)
# the number of sets of cliques which don't include e_{12}
self.numNotHittingEdge = self.numCliques - self.numHittingEdge
# these store the constraints, as lists (for easy appending,
# since it's not clear how many there will be).
# ??? rename these?
# A is stored as a list of numpy matrices
self.A = []
# ... and b as a list of numbers
self.b = []
    def getConstraintMatrix(self):
"""Gets a matrix corresponding to one constraint.
The row index is the number of cliques in A (which
intersect an arbitrarily-chosen edge), while
the column index is the number of cliques in B.
"""
return np.zeros([self.numHittingEdge+1,
self.numNotHittingEdge+1])
    def addConstraint(self, A, b):
"""Adds one row to the constraints.
A: a numpy.array of constraint coefficients
b: the corresponding bound
Side effects: adds a row to the bound, of the form "Ax >= b"
"""
self.A.append(A)
self.b.append(b)
    def addBTotalConstraint(self):
"""Adds constraint on expected value of E[B].
(It's possible it will make more sense to constrain E[C].)
Side effects: adds the constraint.
"""
a = np.zeros([ self.numVarsInConstraint ])
# maximum number of cliques in B (if there are more,
# some cliques must be in A)
m = self.numFunctions - self.numHittingEdge
# constrain the weighted average of all of these
for i in range(m+1):
a[ self.B(i) ] = scipy.special.comb(m, i) / (2 ** m)
# what the weighted average should be
self.addConstraint(a, 0.5 * (2 ** m))
    def addZeroRestrictionConstraint(self):
"""Adds effect of restricting e_{12} to 0.
"""
pass
    def addPermutationConstraint(self):
"""Add constraint that "permuting vertices doesn't matter".
XXX FIXME it's not clear what this should look like
"""
pass
    def solve(self):
"""Solves the linear system.
Note that by default, the solver constrains all x >= 0.
Returns: a numpy array, of the minimum rank of each set
of functions.
"""
# convert A and b to np.array objects (note that both are
# negated, since the solver is solving Ax <= b).
A = - np.stack([a.reshape(FIXME) for a in self.A], axis=?)
# b is just converted into a column vector
b = - np.array(self.b)
# the objective function: how low can finding all the
# cliques go?
c = self.getConstraintMatrix()
c[ self.numHittingEdge(), self.numNotHittingEdge() ] = 1
c = c.reshape(FIXME)
```
#### File: countingBound/py/lpRank.py
```python
import numpy as np
import pdb
import scipy.optimize
# note that comb() returns a float by default;
# for loop bounds, it needs the "exact=True" option,
# so that it returns an int
from scipy.special import comb
class LpBound:
"""Computes a bound on the rank of finding cliques.
Here, we track the number of cliques, and the total number of
vertices used by all of the cliques (which hopefully will be
easier to deal with.)
"""
def __init__(self, n, k):
""" Constructor
n: number of vertices
k: clique size
"""
# problem size
self.n = n
self.k = k
# mapping from tuples (numVertices, numCliques) to
# variable index in the LP
self.varIndex = {}
index = 0
# set up the mapping of variable indices
for i in range(k, n+1):
for j in range(0, comb(i, k, exact=True)+1):
self.varIndex[(i,j)] = index
index += 1
# this is the total number of variables we're solving for
self.numVariables = index
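        # Illustration (small case): for n = 4, k = 3 the loop above creates
        # variables for (i, j) = (3, 0), (3, 1), (4, 0), ..., (4, 4), since
        # comb(3, 3) = 1 and comb(4, 3) = 4, giving numVariables = 2 + 5 = 7.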
# these store the constraints, as lists (for easy appending,
# since it's not clear how many there will be).
# A is stored as a list of triples
# (numVertices, numCliques, coefficient), which hopefully
# will be easier to understand...
self.A = []
# ... and b as a list of numbers
self.b = []
# the bounds matrices (initially undefined)
self.A_ub = None
self.b_ub = None
def addConstraint(self, A, b):
"""Adds one row to the constraints.
A: a list of (numVertices, numCliques, coefficient) triples
b: the corresponding lower bound
Side effects: adds a row to the bound, of the form "Ax >= b"
"""
self.A.append(A)
self.b.append(b)
def addVertexTotalConstraint(self):
"""Constraint on rank of sets with some number of vertices.
        Note that the sets with more vertices also include functions
with a smaller number of vertices.
Side effects: for each possible number of vertices, adds
a constraint on the total rank of the sets with that
many vertices.
"""
# i is the number of vertices
for i in range(self.k, self.n+1):
# the number of possible cliques with that many vertices
maxNumCliques = comb(i, self.k, exact=True)
# the number of functions with up to that many cliques
numFunctions = 2 ** maxNumCliques
# constraint on the "weighted average" of these
# (here, i is the number of cliques in the function)
a = [(i, j, comb(maxNumCliques, j) / numFunctions)
for j in range(0, maxNumCliques+1)]
# the weighted average should be at least
# half the number of functions
self.addConstraint(a, numFunctions / 2)
def addVertexZeroExpectedConstraint(self):
"""Adds constraint from restricting some vertex's edges to 0.
This constraint says that if you take a random graph with
i+1 vertices, and zero out all the edges from one vertex,
the rank of the resulting graph (with i vertices)
will be smaller.
Note that punctuating the possessive of a word ending in 'x'
is just problematic.
??? also add constraint that "zeroing out a vertex's edges
strictly reduces rank"?
Side effects: adds a constraint on expected rank.
"""
A = []
# i is the number of vertices _after_ a vertex is zeroed out
# (and thus ranges up to n-1)
for i in range(self.k, self.n):
# maximum number of cliques which might be made
# impossible, by zeroing out the edges connected to a vertex
maxNumCliquesZeroed = comb(i, self.k-1, exact=True)
# corresponding number of functions
numFunctionsZeroed = 2 ** maxNumCliquesZeroed
# j is the number of cliques _after_ a vertex is zeroed out
for j in range(0, comb(i, self.k, exact=True)+1):
# the rank, after a vertex is zeroed out
a = [(i, j, -1.0)]
# k is the number of cliques which were zeroed out
# (this shouldn't throw a KeyError)
a += [(i+1, j+k,
comb(maxNumCliquesZeroed, k) / numFunctionsZeroed)
for k in range(0, maxNumCliquesZeroed+1)]
# the constraint is that "the expected rank after
# zeroing out a clique is some amount higher than
# the rank of what remains"
b = 0
self.addConstraint(a, b)
def setBounds(self):
"""Sets the bounds matrices, A_ub and b_ub."""
# if these are already computed, skip this
        if self.A_ub is not None and self.b_ub is not None:
return
# converts from "list of numbers" to a row of A
def constraintRow(A_list):
row = np.zeros(self.numVariables)
for entry in A_list:
(numVertices, numCliques, a) = entry
row[ self.varIndex[(numVertices, numCliques)] ] = a
return row
# convert A and b to np.array objects (note that both are
# negated, since the solver is solving Ax <= b).
self.A_ub = - np.stack([constraintRow(a1) for a1 in self.A])
# b is just converted into a column vector
self.b_ub = - np.array(self.b)
def solve(self, numVertices):
"""Solves the linear system.
Note that by default, the solver constrains all x >= 0,
so we don't add that constraint.
numVertices: the number of vertices in the function to minimize.
Returns: a numpy array, of the minimum rank of finding
all the k-vertex cliques in an m-vertex graph,
for 0 <= m <= n. (Note that this is only minimized
for m==numVertices; for the others, I'm curious what bounds
it gives, but there's no guarantee that it's minimal.)
For m < k, this is 0, but those cases are included
for indexing convenience.
"""
# set A_ub and b_ub (if they haven't been set already)
self.setBounds()
# the objective function: how low can the rank of finding
# all the cliques (with that many vertices) be?
c = np.zeros(self.numVariables)
numCliques = comb(numVertices, self.k)
c[ self.varIndex[(numVertices, numCliques)] ] = 1
# solve
r = scipy.optimize.linprog(c, self.A_ub, self.b_ub)
return(r)
if __name__ == '__main__':
print('in main')
lp = LpBound(5,3)
# this probably won't do much
lp.addVertexTotalConstraint()
lp.addVertexZeroExpectedConstraint()
# pdb.set_trace()
r = lp.solve(5)
print(r)
``` |
{
"source": "JoshTDN03/knix",
"score": 2
} |
#### File: ManagementService/python/executeWorkflow.py
```python
import json
import os
import requests
import re
def execute_workflow(wfurl, wfinput):
result = None
if 'KUBERNETES_SERVICE_HOST' in os.environ:
namespace = ""
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
namespace = f.read()
# on k8s replace the wfurl with the local service url
wfurl = re.sub(r'^.*://([^.]+).*', r'http://\1', wfurl)+"."+namespace+".svc"
try:
result = requests.post(wfurl, params={}, json=wfinput)
except Exception as exc:
raise
return result
def handle(value, sapi):
assert isinstance(value, dict)
data = value
response = {}
response_data = {}
email = data["email"]
try:
workflows = sapi.get(email + "_list_workflows", True)
if workflows is None or workflows == "":
workflows = {}
else:
workflows = json.loads(workflows)
        # validate the workflow parameters
        if "workflow" not in data or "id" not in data["workflow"] or "wfurl" not in data["workflow"] or "wfinput" not in data["workflow"]:
raise Exception("Can't execute workflow; invalid parameters.")
wfid = data["workflow"]["id"]
if wfid not in workflows.values():
raise Exception("Can't execute workflow; no such workflow.")
wfurl = data["workflow"]["wfurl"]
wfinput = data["workflow"]["wfinput"]
# execute workflow here
result = execute_workflow(wfurl, wfinput)
response_data["result"] = result.json()
response_data["message"] = "Executed workflow " + wfid + "."
response["status"] = "success"
response["data"] = response_data
except Exception as exc:
response["status"] = "failure"
response_data["message"] = "Couldn't execute workflow: " + str(exc)
response["data"] = response_data
sapi.log(json.dumps(response))
return response
```
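For reference, a quick illustration of what the Kubernetes URL rewrite in `execute_workflow` above produces; the hostname and namespace here are hypothetical:

```python
import re

wfurl = "https://wf-abc123.mfn.example.com/execute"   # hypothetical external workflow endpoint
namespace = "default"                                  # normally read from the service-account namespace file
local_url = re.sub(r'^.*://([^.]+).*', r'http://\1', wfurl) + "." + namespace + ".svc"
print(local_url)  # -> http://wf-abc123.default.svc
```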
#### File: ManagementService/python/getTriggerDetails.py
```python
import json
import requests
MAP_AVAILABLE_FRONTENDS = "available_triggers_frontned_map"
MAP_TRIGGERS_TO_INFO = "triggers_to_info_map"
### Utility functions ###
def get_available_frontends(context):
tf_hosts = context.getMapKeys(MAP_AVAILABLE_FRONTENDS, True)
return tf_hosts
def is_frontend_registered(context, frontend_ip_port):
return context.containsMapKey(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
def get_frontend_info(context, frontend_ip_port):
ret = context.getMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
print("get_frontend_info: data: " + str(ret))
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def is_trigger_registered(context, trigger_id):
return context.containsMapKey(MAP_TRIGGERS_TO_INFO, trigger_id, True)
def get_trigger_info(context, trigger_id):
ret = context.getMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True)
print("get_trigger_info: data: " + str(ret))
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def get_user_trigger_list(context, email):
user_triggers_list = context.get(email + "_list_triggers", True)
if user_triggers_list is not None and user_triggers_list != "":
user_triggers_list = json.loads(user_triggers_list)
else:
user_triggers_list = {}
return user_triggers_list
def handle(value, context):
assert isinstance(value, dict)
data = value
print("[GetTriggerDetails] input data: " + str(data))
status_msg = ""
try:
if "email" not in data or "trigger_names" not in data or type(data["trigger_names"]) is not type([]):
raise Exception(
"Couldn't get details of triggers; either user email or trigger_names array is missing")
email = data["email"]
trigger_names = data["trigger_names"]
storage_userid = data["storage_userid"]
if "custom_storage_userid" in data and "custom_email" in data:
if type(data["custom_storage_userid"]) is not type("") or data["custom_storage_userid"] == "" or len(data["custom_storage_userid"]) == 0 or type(data["custom_email"]) is not type("") or data["custom_email"] == "" or len(data["custom_email"]) == 0:
raise Exception("Couldn't get details of triggers; Invalid custom_storage_userid: " + str(data["custom_storage_userid"]) + ", or custom_email: " + str(data["custom_email"]))
else:
storage_userid = data["custom_storage_userid"]
email = data["custom_email"]
if len(trigger_names) == 0:
user_triggers_list = get_user_trigger_list(context, email)
for trigger_name in user_triggers_list:
trigger_names.append(trigger_name)
all_details = {}
for trigger_name in trigger_names:
details = get_details_for_trigger_name(trigger_name, storage_userid, context)
all_details[trigger_name] = details
response_data = {}
response_data["trigger_details"] = all_details
status_msg = "Number of details included: " + str(len(all_details))
except Exception as e:
response = {}
response_data = {}
response["status"] = "failure"
response_data["message"] = "Couldn't get details for triggers; " + str(e)
response["data"] = response_data
print("[GetTriggerDetails] Error: " + str(response))
return response
# finish successfully
response = {}
response["status"] = "success"
response_data["message"] = status_msg
response["data"] = response_data
print("[GetTriggerDetails] response: " + str(response))
return response
def get_details_for_trigger_name(trigger_name, storage_userid, context):
details = {}
trigger_id = storage_userid + "_" + trigger_name
details["trigger_status"] = "non_existent"
details["trigger_id"] = trigger_id
details["status_msg"] = "Trigger not found"
# check the global list for the trigger
global_trigger_info = get_trigger_info(context, trigger_id)
if global_trigger_info is not None:
try:
tf_ip_port = global_trigger_info["frontend_ip_port"]
if not is_frontend_active(tf_ip_port):
raise Exception("Frontend: " + tf_ip_port + " not active")
# send the request and wait for response
url = "http://" + tf_ip_port + "/trigger_details"
res_obj = {}
status_msg = ""
try:
res = requests.post(url, json={"trigger_id": trigger_id})
if res.status_code != 200:
raise Exception("[get_details_for_trigger_name] status code: " + str(res.status_code) + " returned")
res_obj = res.json()
except Exception as e:
status_msg = "[get_details_for_trigger_name] Error: trigger_id" + trigger_id + "," + str(e)
if "status" in res_obj and "message" in res_obj:
details = json.loads(res_obj["message"])
else:
raise Exception(status_msg)
except Exception as e:
msg = "[GetTriggerDetails] Exception: " + str(e)
print(msg)
details["status_msg"] = msg
return details
def is_frontend_active(tf_ip_port):
    if tf_ip_port is None or tf_ip_port == "":
return False
url = "http://" + tf_ip_port + "/"
print("[is_frontend_active] Contacting: " + url + ", to check if it is alive")
try:
res = requests.get(url)
if res.status_code != 200:
raise Exception("[is_frontend_active] status code: " + str(res.status_code) + " returned")
if res.text is None or res.text != 'ok':
raise Exception("[is_frontend_active] response body: " + str(res.text) + " returned")
if res.text == 'ok':
print("[is_frontend_active] " + url + " is alive")
return True
except Exception as e:
status_msg = "[is_frontend_active] Error: " + str(e)
print(status_msg)
return False
```
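A sketch of the input payload `handle` above expects; the field names are taken from the checks in the function, while the values are made up. Passing an empty `trigger_names` list makes the handler return details for every trigger the user owns:

```python
value = {
    "email": "user@example.com",
    "storage_userid": "user_example_com",
    "trigger_names": [],   # empty list -> details for all of the user's triggers
}
```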
#### File: ManagementService/python/triggersFrontendStatus.py
```python
import json
import time
import requests
import random
MAP_AVAILABLE_FRONTENDS = "available_triggers_frontned_map"
MAP_TRIGGERS_TO_INFO = "triggers_to_info_map"
SET_PENDING_TRIGGERS = "pending_triggers_set"
def handle(value, context):
assert isinstance(value, dict)
data = value
action = data["action"].lower()
frontend_ip_port = data["self_ip_port"]
trigger_status_map = data["trigger_status_map"]
trigger_error_map = data["trigger_error_map"]
response = {}
response_data = {}
errmsg = ""
if action == "start":
handle_start(frontend_ip_port, trigger_status_map, trigger_error_map, context)
success = True
response_data["message"] = "Triggers Frontend registered with Management service."
elif action == "status":
handle_status(frontend_ip_port, trigger_status_map, trigger_error_map, context)
success = True
response_data["message"] = "Triggers Frontend updated successfully."
elif action == "stop":
handle_stop(frontend_ip_port, trigger_status_map, trigger_error_map, context)
success = True
response_data["message"] = "Triggers Frontend stopped successfully."
else:
success = False
errmsg = "Unknown action: " + str(action)
if success:
response["status"] = "success"
else:
response["status"] = "failure"
response_data["message"] = errmsg
response["data"] = response_data
return response
def get_available_frontends(context):
tf_hosts = context.getMapKeys(MAP_AVAILABLE_FRONTENDS, True)
return tf_hosts
def is_frontend_registered(context, frontend_ip_port):
return context.containsMapKey(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
def get_frontend_info(context, frontend_ip_port):
ret = context.getMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
print("get_frontend_info: data: " + str(ret))
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def remove_frontend_info(context, frontend_ip_port):
print("remove_frontend_info: " + frontend_ip_port)
context.deleteMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, True)
def add_frontend_info(context, frontend_ip_port, entry):
print("add_frontend_info: " + frontend_ip_port + ", data: " + entry)
context.putMapEntry(MAP_AVAILABLE_FRONTENDS, frontend_ip_port, entry, True)
def is_trigger_registered(context, trigger_id):
return context.containsMapKey(MAP_TRIGGERS_TO_INFO, trigger_id, True)
def get_trigger_info(context, trigger_id):
ret = context.getMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True)
    if ret == "" or ret is None:
return None
else:
return json.loads(ret)
def add_trigger_info(context, trigger_id, data):
print("add_trigger_info: " + trigger_id + ", data: " + data)
context.putMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, data, True)
def remove_trigger_info(context, trigger_id):
print("remove_trigger_info: " + trigger_id)
context.deleteMapEntry(MAP_TRIGGERS_TO_INFO, trigger_id, True)
def get_user_trigger_list(context, email):
user_triggers_list = context.get(email + "_list_triggers", True)
if user_triggers_list is not None and user_triggers_list != "":
user_triggers_list = json.loads(user_triggers_list)
else:
user_triggers_list = {}
return user_triggers_list
def update_user_trigger_list(context, email, user_trigger_list):
print("User: " + email + ", Trigger list updates to: " + str(user_trigger_list))
context.put(email + "_list_triggers", user_trigger_list, True)
def add_to_global_pending_trigger_set(context, entry):
print("add_to_global_pending_trigger_set: data: " + str(entry))
context.addSetEntry(SET_PENDING_TRIGGERS, entry, True)
def remove_from_global_pending_trigger_set(context, entry):
print("remove_from_global_pending_trigger_set: data: " + str(entry))
context.removeSetEntry(SET_PENDING_TRIGGERS, entry, True)
def get_global_pending_trigger_set(context):
items = []
items_ret = context.retrieveSet(SET_PENDING_TRIGGERS, True)
if items_ret is not None:
items = list(items_ret)
print("get_global_pending_trigger_set: data: " + str(items))
else:
print("get_global_pending_trigger_set: data: None")
return items
def clear_global_pending_trigger_set(context):
context.clearSet(SET_PENDING_TRIGGERS, True)
print("clear_global_pending_trigger_set")
# called when a frontend starts
def handle_start(frontend_ip_port, trigger_status_map, trigger_error_map, context):
print("[TriggersFrontend] [START] frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map))
assert(len(trigger_status_map) == 0) # frontend should not be running anything yet
assert(len(trigger_error_map) == 0) # frontend should not be running anything yet
frontend_available = is_frontend_registered(context, frontend_ip_port)
triggers_to_recreate = []
triggers_to_inform_and_remove = []
if frontend_available:
print("Frontend already registered, but it is reporting that it is starting!!")
# we have the frontend already registered with us. Why is it starting again,
# without telling us that it stopped? Maybe because the stop message did not reach us?
# check if we have any triggers that we think should be active,
# and were earlier assigned to this frontend which has just started up
# such triggers will have to be re-assigned
print("[handle_start] First removing information about the old frontend with same ip: " + frontend_ip_port)
frontend_info = get_frontend_info(context, frontend_ip_port)
remove_frontend_info(context, frontend_ip_port)
for trigger_id in frontend_info:
trigger_info = get_trigger_info(context, trigger_id)
if trigger_info is not None and trigger_info["frontend_ip_port"] == frontend_ip_port:
if trigger_info["status"].lower() == "ready":
print("[handle_start] queuing trigger to be re-created, since status is ready: " + str(trigger_info))
triggers_to_recreate.append((trigger_info, ""))
else:
print("[handle_start] queuing trigger to be removed, since status is not ready: " + str(trigger_info))
triggers_to_inform_and_remove.append((trigger_info, "Associated Triggers Frontend not active"))
else:
# this trigger is now associated with a different frontend, simply remove information
pass
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False)
new_frontend_entry = {}
add_frontend_info(context, frontend_ip_port, json.dumps(new_frontend_entry))
# pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context)
# triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends
# pending_global_triggers = get_info_for_global_pending_triggers(context)
# triggers_to_recreate = triggers_to_recreate + pending_global_triggers
# recreate_pending_triggers(triggers_to_recreate, context)
for (trigger_info, error_msg) in triggers_to_recreate:
print("[handle_start] Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info))
add_to_global_pending_trigger_set(context, trigger_info["trigger_id"])
def handle_status(frontend_ip_port, trigger_status_map, trigger_error_map, context):
print("[TriggersFrontend] [STATUS], frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map))
triggers_to_inform_and_remove = []
triggers_to_recreate = []
frontend_available = is_frontend_registered(context, frontend_ip_port)
if frontend_available:
# we know about this frontend
frontend_info = get_frontend_info(context, frontend_ip_port)
assert(frontend_info is not None)
print("Known frontend with data: " + str(frontend_info))
# first check if any trigger has stopped unexpectedly, and check if we had this trigger registered with us
# if so, then remove this trigger from our known list and put them in pending list
for error_trigger_id in trigger_error_map:
error_trigger_info = get_trigger_info(context, error_trigger_id)
if error_trigger_id in frontend_info and error_trigger_info is not None:
if error_trigger_info["status"].lower() == "ready":
print("[handle_status] queuing trigger to be removed, since it stopped unexpectedly: " + str(error_trigger_info) + ", error message: " + str(trigger_error_map[error_trigger_id]))
triggers_to_inform_and_remove.append((error_trigger_info, trigger_error_map[error_trigger_id]))
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context)
else:
# we don't know about this frontend. Ideally it should not have any triggers
print("Unknown frontend sending a status update!!")
new_frontend_entry = {}
add_frontend_info(context, frontend_ip_port, json.dumps(new_frontend_entry))
pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context)
triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends
pending_global_triggers = get_info_for_global_pending_triggers(context)
triggers_to_recreate = triggers_to_recreate + pending_global_triggers
recreate_pending_triggers(triggers_to_recreate, context)
def handle_stop(frontend_ip_port, trigger_status_map, trigger_error_map, context):
print("[TriggersFrontend] [STOP], frontend_ip_port: " + frontend_ip_port + ", trigger_status_map: " + str(trigger_status_map) + ", trigger_error_map: " + str(trigger_error_map))
assert(len(trigger_status_map) == 0)
frontend_info = get_frontend_info(context, frontend_ip_port)
assert(frontend_info is not None)
remove_frontend_info(context, frontend_ip_port)
triggers_to_recreate = []
triggers_to_inform_and_remove = []
for error_trigger_id in trigger_error_map:
error_trigger_info = get_trigger_info(context, error_trigger_id)
if error_trigger_id in frontend_info and error_trigger_info is not None:
#if error_trigger_info["status"].lower() == "ready" and "ready trigger shutdown!" in trigger_error_map[error_trigger_id].lower():
if error_trigger_info["status"].lower() == "ready":
triggers_to_recreate.append((error_trigger_info, trigger_error_map[error_trigger_id]))
else:
print("[handle_stop] queuing trigger to be removed, since status is not ready: " + str(error_trigger_info))
triggers_to_inform_and_remove.append((error_trigger_info, trigger_error_map[error_trigger_id]))
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False)
#pending_triggers_from_other_inactive_frontends = health_check_registered_frontends(context)
#triggers_to_recreate = triggers_to_recreate + pending_triggers_from_other_inactive_frontends
#pending_global_triggers = get_info_for_global_pending_triggers(context)
#triggers_to_recreate = triggers_to_recreate + pending_global_triggers
#recreate_pending_triggers(triggers_to_recreate, context)
for (trigger_info, error_msg) in triggers_to_recreate:
print("[handle_stop] Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info))
add_to_global_pending_trigger_set(context, trigger_info["trigger_id"])
def get_info_for_global_pending_triggers(context):
global_pending_triggers = get_global_pending_trigger_set(context)
clear_global_pending_trigger_set(context)
triggers_to_recreate = []
for trigger_id in global_pending_triggers:
pending_trigger_info = get_trigger_info(context, trigger_id)
if pending_trigger_info is not None:
print("[get_info_for_global_pending_triggers] Queuing trigger to be re-created: pending_trigger_info = " + str(pending_trigger_info))
triggers_to_recreate.append((pending_trigger_info, ""))
return triggers_to_recreate
def get_active_frontend(context):
tf_hosts = get_available_frontends(context)
if len(tf_hosts) == 0:
print("No available TriggersFrontend found")
return ""
tf_hosts = list(tf_hosts)
tf_ip_port = select_random_active_frontend(tf_hosts)
    if tf_ip_port is None or tf_ip_port == "":
print("No active TriggersFrontend found")
return ""
return tf_ip_port
def recreate_pending_triggers(triggers_to_recreate, context):
print("[recreate_pending_triggers] called with number of triggers: " + str(len(triggers_to_recreate)))
triggers_to_inform_and_remove = []
for (trigger_info, error_msg) in triggers_to_recreate:
print("[recreate_pending_triggers] Attempting to recreate trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info))
active_frontend = get_active_frontend(context)
        if active_frontend != "":
# there is an active frontend available, try to re-create the trigger
try:
status, updated_info = attempt_to_recreate_single_trigger(trigger_info, active_frontend, context)
if status:
# trigger created, attempt to add workflow associations
associated_workflows = updated_info["associated_workflows"].copy()
print("[recreate_pending_triggers] Attempting to attach trigger to associated_workflows = " + str(associated_workflows))
for workflow_name in associated_workflows:
association_status = attempt_to_associate_trigger_with_workflows(updated_info["trigger_id"], workflow_name, context)
if association_status == False:
del updated_info["associated_workflows"][workflow_name]
add_trigger_info(context, updated_info["trigger_id"], json.dumps(updated_info))
print("[recreate_pending_triggers] Removed workflow: " + str(workflow_name) + ", from associated_workflows of trigger_info: " + str(updated_info))
else:
# need to add this to the list of inform list and then remove it
if updated_info is not None:
print("[recreate_pending_triggers] Unable to recreate trigger. Queuing to be removed, trigger_id: " + updated_info["trigger_id"] + ", trigger_info: " + str(updated_info))
triggers_to_inform_and_remove.append((updated_info, "Unable to recreate trigger"))
except Exception as e:
print("[recreate_pending_triggers] Exception in attempt_to_recreate_single_trigger, Queuing trigger to be removed: " + str(e) + ", trigger_info: " + str(trigger_info))
triggers_to_inform_and_remove.append((trigger_info, "Unable to recreate trigger"))
else:
# no active triggers frontend, add to the pending set again
print("[recreate_pending_triggers] No active frontend, Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info))
add_to_global_pending_trigger_set(context, trigger_info["trigger_id"])
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context)
def attempt_to_recreate_single_trigger(trigger_info, tf_ip_port, context):
print("[attempt_to_recreate_single_trigger] selected frontend: " + tf_ip_port + ", trigger_info: " + str(trigger_info))
status_msg = ""
# create the global trigger info all the information, and status set of starting, and not workflow associated
trigger_id = trigger_info["trigger_id"]
email = trigger_info["email"]
trigger_name = trigger_info["trigger_name"]
global_trigger_info = trigger_info.copy()
global_trigger_info["status"] = "starting"
global_trigger_info["frontend_ip_port"] = tf_ip_port
# add the global_trigger_info to global map
add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
url = "http://" + tf_ip_port + "/create_trigger"
# send the request and wait for response
print("[attempt_to_recreate_single_trigger] Contacting: " + url + ", with data: " + str(global_trigger_info["frontend_command_info"]))
res_obj = {}
try:
res = requests.post(url, json=global_trigger_info["frontend_command_info"])
if res.status_code != 200:
raise Exception("status code: " + str(res.status_code) + " returned")
res_obj = res.json()
except Exception as e:
status_msg = "POST Error: trigger_id: " + trigger_id + "," + str(e)
#print("[AddTrigger] " + status_msg)
if "status" in res_obj and res_obj["status"].lower() == "success":
# add the trigger_id to frontend map
print("[attempt_to_recreate_single_trigger] Success response from frontend")
frontend_info = get_frontend_info(context, tf_ip_port)
#print("get_frontend_info: " + str(frontend_info))
assert(frontend_info is not None)
frontend_info[trigger_id] = ''
add_frontend_info(context, tf_ip_port, json.dumps(frontend_info))
global_trigger_info["status"] = "ready"
add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
# add the trigger_name to user's list of triggers
user_triggers_list = get_user_trigger_list(context, email)
user_triggers_list[trigger_name] = ''
update_user_trigger_list(context, email, json.dumps(user_triggers_list))
# write the user's list
status_msg = "Trigger created successfully. Message: " + res_obj["message"] + ", details: " + str(global_trigger_info)
print("[attempt_to_recreate_single_trigger] " + status_msg)
return True, global_trigger_info
else:
if "message" in res_obj:
status_msg = status_msg + ", message: " + res_obj["message"]
status_msg = "Error: " + status_msg + ", response: " + str(res_obj)
print("[attempt_to_recreate_single_trigger] " + status_msg)
return False, global_trigger_info
def attempt_to_associate_trigger_with_workflows(trigger_id, workflow_name, context):
print("[attempt_to_associate_trigger_with_workflows] called with: trigger_id: " + str(trigger_id) + ", workflow_name: " + str(workflow_name))
trigger_info = get_trigger_info(context, trigger_id)
trigger_id = trigger_info["trigger_id"]
email = trigger_info["email"]
trigger_name = trigger_info["trigger_name"]
tf_ip_port = trigger_info["frontend_ip_port"]
workflow_info = trigger_info["associated_workflows"][workflow_name]
workflow_state = workflow_info["workflow_state"]
isWorkflowPresent, isWorkflowDeployed, workflow_details = isWorkflowPresentAndDeployed(email, workflow_name, context)
if isWorkflowPresent == False:
print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " not found.")
return False
if isWorkflowPresent == True:
# add the trigger name in workflow's metadata
print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is present.")
addTriggerToWorkflowMetadata(email, trigger_name, workflow_name, workflow_state, workflow_details["id"], context)
addWorkflowToTriggerMetadata(workflow_name, workflow_state, trigger_id, context)
if isWorkflowDeployed == True:
# add the workflow to the trigger
print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is deployed.")
addWorkflowToTrigger(email, workflow_name, workflow_state, workflow_details, trigger_id, trigger_name, context)
else:
print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is not deployed. Keeping workflow to trigger association intact.")
return True
# TODO: write updated trigger info
def health_check_registered_frontends(context):
print("[health_check_registered_frontends] called")
triggers_to_recreate = []
tf_hosts = get_available_frontends(context)
if len(tf_hosts) == 0:
print("[health_check_registered_frontends] No available TriggersFrontend found")
return triggers_to_recreate
tf_hosts = list(tf_hosts)
for tf_ip_port in tf_hosts:
if not is_frontend_active(tf_ip_port):
# frontend is not active but is still registered with management
print("[health_check_registered_frontends] Removing inactive frontend: " + tf_ip_port)
triggers_to_inform_and_remove = []
frontend_info = get_frontend_info(context, tf_ip_port)
if frontend_info is None:
continue
remove_frontend_info(context, tf_ip_port)
print("[health_check_registered_frontends] Removing inactive frontend: frontend_info = " + str(frontend_info))
for trigger_id in frontend_info:
trigger_info = get_trigger_info(context, trigger_id)
if trigger_info is not None and trigger_info["frontend_ip_port"] == tf_ip_port:
if trigger_info["status"] == "ready":
# this ready trigger is still associated with an inactive frontend
print("[health_check_registered_frontends] Queuing up to be recreated, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
triggers_to_recreate.append((trigger_info, "READY trigger frontend not active"))
else:
print("[health_check_registered_frontends] Queuing up to be removed, since status is not ready, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
triggers_to_inform_and_remove.append((trigger_info, "Triggers frontend not active"))
else:
print("[health_check_registered_frontends] Ignoring trigger, since it belongs to a different frontend or does not exist, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
# this trigger is now associated with a different frontend, simply remove frontend information
pass
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False)
return triggers_to_recreate
def is_frontend_active(tf_ip_port):
    if tf_ip_port is None or tf_ip_port == "":
return False
url = "http://" + tf_ip_port + "/"
print("[is_frontend_active] Contacting: " + url + ", to check if it is alive")
try:
res = requests.get(url)
if res.status_code != 200:
raise Exception("[is_frontend_active] status code: " + str(res.status_code) + " returned")
if res.text is None or res.text != 'ok':
raise Exception("[is_frontend_active] response body: " + str(res.text) + " returned")
if res.text == 'ok':
print("[is_frontend_active] " + url + " is alive")
return True
except Exception as e:
status_msg = "[is_frontend_active] Error: " + str(e)
print(status_msg)
return False
def inform_workflows_for_triggers(pending_triggers, context):
for (trigger_info, error_msg) in pending_triggers:
print("[inform_workflows_for_triggers] for trigger: " + str(trigger_info))
frontend_command_info = trigger_info["frontend_command_info"]
associated_workflows = trigger_info["associated_workflows"]
for workflow_name in associated_workflows:
workflow_info = associated_workflows[workflow_name]
request_obj = { \
"trigger_status": "error",
"trigger_type": frontend_command_info["trigger_type"],
"trigger_name": frontend_command_info["trigger_name"],
"workflow_name": workflow_name,
"source": "",
"data": error_msg
}
url = workflow_info["workflow_url"]
workflow_state = workflow_info["workflow_state"]
execute_workflow(url, request_obj, workflow_state)
def removeTriggerAndWorkflowAssociations(pending_triggers, context, update_frontend_info=True):
for (trigger_info, error_msg) in pending_triggers:
if update_frontend_info == True:
removeTriggerFromFrontend(trigger_info, context)
try:
removeTriggerFromWorkflow(trigger_info, context)
except Exception as e:
print("Exception in removeTriggerFromWorkflow: " + str(e))
remove_trigger_info(context, trigger_info["trigger_id"])
def removeTriggerFromFrontend(trigger_info, context):
print("[removeTriggerFromFrontend] for trigger: " + str(trigger_info))
trigger_id = trigger_info["trigger_id"]
frontend_ip_port = trigger_info["frontend_ip_port"]
# remove the trigger_id from frontend map
frontend_info = get_frontend_info(context, frontend_ip_port)
if frontend_info is not None and trigger_id in frontend_info:
del frontend_info[trigger_id]
add_frontend_info(context, frontend_ip_port, json.dumps(frontend_info))
def removeTriggerFromWorkflow(trigger_info,context):
print("[removeTriggerFromWorkflow] for trigger: " + str(trigger_info))
associated_workflows = trigger_info["associated_workflows"].copy()
email = trigger_info["email"]
trigger_name = trigger_info["trigger_name"]
storage_userid = trigger_info["storage_userid"]
trigger_id = trigger_info["trigger_id"]
status_msg = ""
# do the delete trigger processing
for associated_workflow_name in associated_workflows:
del trigger_info["associated_workflows"][associated_workflow_name]
add_trigger_info(context, trigger_id, json.dumps(trigger_info))
isWorkflowPresent, isWorkflowDeployed, workflow_details = isWorkflowPresentAndDeployed(email, associated_workflow_name, context)
print("associated_workflow_name: " + associated_workflow_name + ", isWorkflowPresent: " + str(isWorkflowPresent) + ", details: " + str(workflow_details))
try:
if isWorkflowPresent == True:
# add the trigger name in workflow's metadata
deleteTriggerFromWorkflowMetadata(email, trigger_name, associated_workflow_name, workflow_details["id"], context)
except Exception as e:
status_msg = str(e)
print("[removeTriggerFromWorkflow] exeception: " + status_msg)
# check the user's storage area for the trigger name
user_triggers_list = get_user_trigger_list(context, email)
print("user_triggers_list = " + str(user_triggers_list))
if trigger_name in user_triggers_list:
del user_triggers_list[trigger_name]
update_user_trigger_list(context, email, json.dumps(user_triggers_list))
return status_msg
def select_random_active_frontend(tf_hosts):
random.seed(time.time())
selected_tf = ""
while len(tf_hosts) > 0:
tf_ip_port = tf_hosts[random.randint(0,len(tf_hosts)-1)]
if is_frontend_active(tf_ip_port):
selected_tf = tf_ip_port
break
else:
tf_hosts.remove(tf_ip_port)
return selected_tf
def isWorkflowPresentAndDeployed(email, workflowname, sapi):
workflows = sapi.get(email + "_list_workflows", True)
if workflows is not None and workflows != "":
workflows = json.loads(workflows)
else:
workflows = {}
isWorkflowPresent = False
isWorkflowDeployed = False
details = {}
if workflowname in workflows:
wf_id = workflows[workflowname]
wf = sapi.get(email + "_workflow_" + wf_id, True)
if wf is not None and wf != "":
isWorkflowPresent = True
wf = json.loads(wf)
details["email"] = email
details["name"] = workflowname
details["id"] = wf_id
wf_status = sapi.get("workflow_status_" + wf_id, True)
details["status"] = wf_status
details["endpoints"] = list(sapi.retrieveSet(wf_id + "_workflow_endpoints", is_private=True))
if "modified" in wf:
details["modified"] = wf["modified"]
if "associatedTriggerableTables" in wf:
details["associatedTriggerableTables"] = wf["associatedTriggerableTables"]
if "associatedTriggers" in wf:
details["associatedTriggers"] = wf["associatedTriggers"]
if wf["status"] == "deployed" or wf["status"] == "deploying":
isWorkflowDeployed = True
return isWorkflowPresent, isWorkflowDeployed, details
def deleteTriggerFromWorkflowMetadata(email, trigger_name, workflow_name, workflow_id, context):
wf = context.get(email + "_workflow_" + workflow_id, True)
if wf is None or wf == "":
print("[deleteTriggerFromWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": couldn't retrieve workflow metadata.")
raise Exception("[deleteTriggerFromWorkflowMetadata] User: " + email +
", Workflow: " + workflow_name + ": couldn't retrieve workflow metadata.")
wf = json.loads(wf)
print("[deleteTriggerFromWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": Current workflow metadata: " + str(wf))
if 'associatedTriggers' not in wf:
wf['associatedTriggers'] = {}
associatedTriggers = wf['associatedTriggers']
if trigger_name in associatedTriggers:
del associatedTriggers[trigger_name]
wf['associatedTriggers'] = associatedTriggers
wf = context.put(email + "_workflow_" + workflow_id, json.dumps(wf), True)
print("[deleteTriggerFromWorkflowMetadata] User: " + email +
", Trigger: " + trigger_name + " removed from Workflow: " + workflow_name)
else:
print("[deleteTriggerFromWorkflowMetadata] User: " + email + ", Trigger: " +
trigger_name + " not present in Workflow: " + workflow_name)
def addTriggerToWorkflowMetadata(email, trigger_name, workflow_name, workflow_state, workflow_id, context):
print("[addTriggerToWorkflowMetadata] called with: trigger_name: " + str(trigger_name) + ", workflow_name: " + str(workflow_name) + ", workflow_state: " + str(workflow_state) + ", workflow_id: " + str(workflow_id))
wf = context.get(email + "_workflow_" + workflow_id, True)
if wf is None or wf == "":
print("[addTriggerToWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": couldn't retrieve workflow metadata.")
return
wf = json.loads(wf)
print("[addTriggerToWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": Current workflow metadata: " + str(wf))
if 'associatedTriggers' not in wf:
wf['associatedTriggers'] = {}
associatedTriggers = wf['associatedTriggers']
if trigger_name not in associatedTriggers:
associatedTriggers[trigger_name] = workflow_state
wf['associatedTriggers'] = associatedTriggers
print("[addTriggerToWorkflowMetadata] updated workflow metadata: " + str(wf))
context.put(email + "_workflow_" + workflow_id, json.dumps(wf), True)
print("[addTriggerToWorkflowMetadata] User: " + email +
", Trigger: " + trigger_name + " added to Workflow: " + workflow_name)
else:
print("[addTriggerToWorkflowMetadata] User: " + email + ", Trigger: " +
trigger_name + " already present in Workflow: " + workflow_name)
def addWorkflowToTriggerMetadata(workflow_name, workflow_state, trigger_id, context):
print("[addWorkflowToTriggerMetadata] called with: workflow_name" + str(workflow_name) + ", workflow_state: " + str(workflow_state) + ", trigger_id: " + str(trigger_id))
workflow_to_add = \
{
"workflow_url": "",
"workflow_name": workflow_name,
"workflow_state": workflow_state
}
global_trigger_info = get_trigger_info(context, trigger_id)
global_trigger_info["associated_workflows"][workflow_name] = workflow_to_add
add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
def addWorkflowToTrigger(email, workflow_name, workflow_state, workflow_details, trigger_id, trigger_name, context):
print("[addWorkflowToTrigger] called with: trigger_id: " + str(trigger_id) + ", trigger_name: " + str(trigger_name) + ", workflow_name: " + str(workflow_name) + ", workflow_state: " + str(workflow_state) + ", workflow_details: " + str(workflow_details))
status_msg = ""
try:
workflow_endpoints = workflow_details["endpoints"]
if len(workflow_endpoints) == 0:
print("[addTriggerForWorkflow] No workflow endpoint available")
raise Exception("[addTriggerForWorkflow] No workflow endpoint available")
# TODO: [For bare metal clusters] send all workflow endpoints to frontend to let is load balance between wf endpoints. For k8s there will only be one name
selected_workflow_endpoint = workflow_endpoints[random.randint(0,len(workflow_endpoints)-1)]
print("[addTriggerForWorkflow] selected workflow endpoint: " + selected_workflow_endpoint)
workflow_to_add = \
{
"workflow_url": selected_workflow_endpoint,
"workflow_name": workflow_name,
"workflow_state": workflow_state
}
# if the frontend with the trigger is available
global_trigger_info = get_trigger_info(context, trigger_id)
tf_ip_port = global_trigger_info["frontend_ip_port"]
tryRemovingFirst(tf_ip_port, trigger_id, workflow_to_add)
url = "http://" + tf_ip_port + "/add_workflows"
# send the request and wait for response
req_obj = {"trigger_id": trigger_id, "workflows": [workflow_to_add]}
print("[addTriggerForWorkflow] Contacting: " + url + ", with data: " + str(req_obj))
res_obj = {}
try:
res = requests.post(url, json=req_obj)
if res.status_code != 200:
raise Exception("status code: " + str(res.status_code) + " returned")
res_obj = res.json()
except Exception as e:
status_msg = "Error: trigger_id" + trigger_id + "," + str(e)
if "status" in res_obj and res_obj["status"].lower() == "success":
# if success then update the global trigger table to add a new workflow.
print("[addTriggerForWorkflow] Success response from " + url)
global_trigger_info["associated_workflows"][workflow_name] = workflow_to_add
add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
status_msg = "[addTriggerForWorkflow] Trigger " + trigger_name + " added successfully to workflow:" + workflow_name + ". Message: " + res_obj["message"]
print(status_msg)
return True
else:
if "message" in res_obj:
status_msg = status_msg + ", message: " + res_obj["message"]
status_msg = "[addTriggerForWorkflow] Error: " + status_msg + ", response: " + str(res_obj)
raise Exception(status_msg)
except Exception as e:
print("[addWorkflowToTrigger] exception: " + str(e))
#deleteTriggerFromWorkflowMetadata(email, trigger_name, workflow_name, workflow_details["id"], context)
return False
def tryRemovingFirst(tf_ip_port, trigger_id, workflow_to_remove):
print("[tryRemovingFirst] called with: tf_ip_port: " + str(tf_ip_port) + ", trigger_id: " + str(trigger_id) + ", workflow_to_remove: " + str(workflow_to_remove))
url = "http://" + tf_ip_port + "/remove_workflows"
# send the request and wait for response
req_obj = {"trigger_id": trigger_id, "workflows": [workflow_to_remove]}
print("Contacting: " + url + ", with data: " + str(req_obj))
res_obj = {}
try:
res = requests.post(url, json=req_obj)
if res.status_code != 200:
raise Exception("status code: " + str(res.status_code) + " returned")
res_obj = res.json()
print("[tryRemovingFirst] Response from " + url + ", response: " + str(res_obj))
except Exception as e:
status_msg = "Error: trigger_id" + trigger_id + "," + str(e)
print("[tryRemovingFirst] exception: " + status_msg)
def execute_workflow(wfurl, wfinput, wfstate):
result = None
headers = {"x-mfn-action": "trigger-event", "x-mfn-action-data": wfstate}
res = None
try:
if wfstate == "":
print("[execute_workflow] url: " + str(wfurl) + ", data: " + str(wfinput))
res = requests.post(wfurl, params={}, json=wfinput)
else:
print("[execute_workflow] url: " + str(wfurl) + ", headers: " + str(headers) + ", data: " + str(wfinput))
res = requests.post(wfurl, params={}, json=wfinput, headers=headers)
print("[execute_workflow] status: " + str(res.status_code))
except Exception as exc:
print("Execute workflow error: " + str(exc))
```
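The shape of the status report this handler consumes, reconstructed from the keys read at the top of `handle`; the address and trigger id below are invented:

```python
status_update = {
    "action": "status",                                    # "start", "status", or "stop"
    "self_ip_port": "10.0.0.12:4997",
    "trigger_status_map": {"user123_my_trigger": "ready"},  # trigger_id -> status
    "trigger_error_map": {},                                # trigger_id -> error message
}
```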
#### File: mfn_sdk/examples/trigger.py
```python
import base64
import time
from mfn_sdk import MfnClient
c = MfnClient()
function = c.add_function("react")
function.code = """
def handle(event, context):
context.log("Triggered "+str(event))
return None
"""
workflow = c.add_workflow("eventdriven_workflow")
workflow.json = """{
"name": "eventdriven_workflow",
"entry": "react",
"functions": [
{
"name": "react",
"next": ["end"]
}
]
}"""
workflow.deploy(60)
### Create Trigger
trigger = c.add_trigger("amqptrigger",{
"trigger_type": "amqp",
"amqp_addr": "amqp://<user>:<pass>@<host>:5672//test",
"routing_key": "my_topic",
"exchange": "my_exchange",
"with_ack": False,
"durable": False,
})
trigger.associate_workflow(workflow)
time.sleep(3)
trigger.disassociate_workflow(workflow)
print(workflow.logs()['log'])
```
#### File: asl_State_Names_with_Special_Chars/python/CloseCaseFunction.py
```python
import json
def handle(event, context):
myCaseStatus = event['Status']
myCaseID = event['Case']
myMessage = event['Message'] + "closed."
result = {'Case': myCaseID, 'Status' : myCaseStatus, 'Message': myMessage}
return result
```
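A quick illustration of the handler above with made-up values; note the incoming message is expected to end with a trailing space, since the function concatenates "closed." directly:

```python
event = {"Case": "001", "Status": "Closed", "Message": "Case 001 is being "}
# handle(event, None) returns:
# {'Case': '001', 'Status': 'Closed', 'Message': 'Case 001 is being closed.'}
```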
#### File: dynamic_parallel_execution_groups/python/reducer_blocking.py
```python
def init(event, context):
# 1. get reducer's info and obtain its corresponding key
# do this first, so that we don't do processing if there is an error
if "reducer_id_key" not in event:
raise "Could not find corresponding reducer info."
reducer_id_key = event["reducer_id_key"]
rfid = context.get_session_function_id()
context.put(reducer_id_key, rfid, is_private=True)
def reduce_results_wordcount(mapper_results):
final_result = {}
for mapper in mapper_results:
mapper_result = mapper_results[mapper]
for word in mapper_result:
if word not in final_result:
final_result[word] = 0
final_result[word] += mapper_result[word]
return final_result
def reduce_results_mergesort(mapper_results):
assert len(mapper_results) == 2
data1 = mapper_results[0]
data2 = mapper_results[1]
ret = []
i = 0
j = 0
size1 = len(data1)
size2 = len(data2)
while i < size1 and j < size2:
if data1[i] < data2[j]:
ret.append(data1[i])
i += 1
else:
ret.append(data2[j])
j += 1
while i < size1:
ret.append(data1[i])
i += 1
while j < size2:
ret.append(data2[j])
j += 1
return ret
def handle(event, context):
# 1. get reducer key and write running function id to it
# 2.1. wait for messages in a blocking mode
# 2.2. process messages
# 2.3. update and check conditions (e.g., k/n, k-list/n)
# 3. post processing for 1) next and/or 2) further reducer
# 4. send appropriate messages to 'next' and/or reducer
# it can be the case that this reducer is for a dynamic parallel execution group (PEG),
# which was generated inside a mapper (i.e., in another dynamic PEG).
# in that case, this reducer might need to send a message to another reducer.
# similar to what a regular mapper does:
# 1. obtain the storage key for that reducer from the stack we get from the mapper
# that generated us
# 2. get its running function id
# 3. send the result to it
# it is also possible that the reducer might need to send a regular message
# to some other function when exiting or executing (i.e., next)
# this information needs to be available in the event
# 1. get reducer key and write our running function id to it
init(event, context)
num_mappers_to_expect = event["num_mappers"]
# initialize any other privately used data structures
# for processing the incoming messages from other functions
mapper_results = {}
#print("reducer: " + event["peg_id"] + ", result status: " + str(len(mapper_results)) + "/" + str(num_mappers_to_expect) + "; still looping...")
# 2.1. get messages in a blocking mode
msgs = context.get_session_update_messages(count=num_mappers_to_expect, block=True)
print("All mapper results received; continuing... " + event["peg_id"])
for msg in msgs:
if msg != None:
print("New message from mapper: " + str(msg)[:100] + " ...")
# 2.2. process the message
mapper_id = msg["mapper_id"]
result = msg["mapper_result"]
mapper_results[mapper_id] = result
# 3. after the loop processing
my_job = event["job"]
if my_job["type"] == "wordcount":
final_result = reduce_results_wordcount(mapper_results)
elif my_job["type"] == "mergesort":
final_result = reduce_results_mergesort(mapper_results)
# 4. send the appropriate message
if "next_reducer_id" in event and "mapper_id" in event:
# get next reducer's info and send a message to it
next_reducer_id = event["next_reducer_id"]
my_id = event["mapper_id"]
message = {}
message["mapper_id"] = my_id
message["mapper_result"] = final_result
context.send_to_running_function_in_session(next_reducer_id, message, send_now=False)
else:
next_receiver = event["final_next"]
context.add_workflow_next(next_receiver, final_result)
```
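The merge step in `reduce_results_mergesort` above is the classic two-way merge of sorted lists; for comparison, the standard library produces the same result (the example lists are made up):

```python
import heapq

left, right = [1, 4, 7], [2, 3, 9]
print(list(heapq.merge(left, right)))  # [1, 2, 3, 4, 7, 9]
# reduce_results_mergesort({0: left, 1: right}) from the file above returns the same list.
```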
#### File: tests/triggers_timer_based_trigger_control/test.py
```python
import json
import random
import sys
import time
import unittest
import socket
import os
import subprocess
sys.path.append("../")
from mfn_test_utils import MFNTest
print("Starting rabbitmq")
rabbit = subprocess.Popen(["scripts/run_local_rabbitmq.sh"])
time.sleep(20)
print("Starting publisher")
pub = subprocess.Popen(["scripts/run_local_publisher.sh"])
time.sleep(10)
os.system("scripts/run_local_subscriber.sh")
print("Publisher is ready")
class TriggersAmqpTest(unittest.TestCase):
# @unittest.skip("")
def test_triggers_timer_based_trigger_control(self):
test = MFNTest(test_name='triggers_timer_based_trigger_control',
workflow_filename='wf_triggers_timer_based_trigger_control.json')
time.sleep(5)
print("Executing test")
# ["wf_triggers_timer_based_trigger_control", "trigger_amqp_to_be_controlled_nonce", "amqp://rabbituser:rabbitpass@paarijaat-debian-vm:5672/%2frabbitvhost", "rabbit.*.*", "egress_exchange", "trigger_timer_controller_nonce", 20000]
nonce = str(int(time.time() * 1000))
curr_hostname = socket.gethostname()
input_data = []
workflowname = "wf_triggers_timer_based_trigger_control"
trigger_name_amqp = "trigger_amqp_to_be_controlled_" + nonce
amqp_addr = "amqp://rabbituser:rabbitpass@" + curr_hostname + ":5672/%2frabbitvhost"
routingkey = "rabbit.*.*"
routingkey_to_expect = "rabbit.routing.key"
exchange = "egress_exchange"
trigger_name_timer = "trigger_timer_controller_" + nonce
ttl = 20000
input_data.append(workflowname)
input_data.append(trigger_name_amqp)
input_data.append(amqp_addr)
input_data.append(routingkey)
input_data.append(exchange)
input_data.append(trigger_name_timer)
input_data.append(ttl)
response = test.execute(input_data)
time.sleep((float(ttl)/1000.0) + 10)
print("Shutting down rabbitmq and publisher")
pub.terminate()
rabbit.terminate()
subprocess.Popen(["scripts/stop_local_rabbitmq.sh"])
time.sleep(5)
counter_state_1 = 0
counter_state_2 = 0
counter_state_1_error = 0
counter_state_2_error = 0
logs = test.get_workflow_logs()
wflog = logs["log"]
log_lines = wflog.split("\n")
for line in log_lines:
if "_!_TRIGGER_START_" + trigger_name_amqp + ";timer_based_trigger_control;" + workflowname + ";" + routingkey_to_expect + ";" in line.strip():
counter_state_1 = counter_state_1 + 1
print(line.strip())
if "_!_TRIGGER_ERROR_" + trigger_name_amqp + ";timer_based_trigger_control;" + workflowname + ";;" in line.strip():
counter_state_1_error = counter_state_1_error + 1
print(line.strip())
if "_!_TRIGGER_START_" + trigger_name_timer + ";timer_based_trigger_control_state2;" + workflowname + ";;" in line.strip():
counter_state_2 = counter_state_2 + 1
print(line.strip())
if "_!_TRIGGER_ERROR_" + trigger_name_timer + ";timer_based_trigger_control_state2;" + workflowname + ";;" in line.strip():
counter_state_2_error = counter_state_2_error + 1
print(line.strip())
if counter_state_1 >=20 and counter_state_2 == 1 and counter_state_1_error == 0 and counter_state_2_error == 0:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
print("Number of state1 error triggers: " + str(counter_state_1_error))
print("Number of state1 error triggers: " + str(counter_state_2_error))
test.report(True, str(input_data), input_data, response)
else:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
print("Number of state1 error triggers: " + str(counter_state_1_error))
print("Number of state1 error triggers: " + str(counter_state_2_error))
test.report(False, str(input_data), input_data, response)
for line in log_lines:
print(line.strip())
test.undeploy_workflow()
test.cleanup()
```
#### File: triggers_timer/python/triggers_timer.py
```python
import time
import json
import base64
workflow_other_json = '''{
"Comment": "other Workflow",
"StartAt": "test",
"States": {
"test": {
"Type": "Task",
"Resource": "triggers_timer",
"End": true
}
}
}'''
# sample input
# name of this wf, nonce,
# [ "wf_triggers_timer", "23049823"]
def handle(event, context):
if type(event) == type([]):
nonce = event[1]
print(f"_!_EXPLICIT_START_{nonce}")
workflowname = event[0]
trigger_name = nonce
try:
# creating an timer trigger
trigger_info = \
{
"trigger_type": "timer",
"timer_interval_ms": 1000,
}
addTrigger(trigger_name, trigger_info, context)
time.sleep(5)
# associating main wf with the trigger
addTriggerForWorkflow(trigger_name, workflowname, "triggers_timer_state2", context)
time.sleep(10)
# associating main wf with the trigger
deleteTriggerForWorkflow(trigger_name, workflowname, context)
time.sleep(5)
# associating main wf with the trigger
addTriggerForWorkflow(trigger_name, workflowname, "", context)
time.sleep(10)
# associating main wf with the trigger
deleteTriggerForWorkflow(trigger_name, workflowname, context)
deleteTrigger(trigger_name, context)
time.sleep(3)
except Exception as e:
print("Exception: " + str(e))
deleteTrigger(trigger_name, context)
time.sleep(3)
return event
else:
if type(event) == type({}) \
and 'trigger_status' in event \
and 'trigger_type' in event \
and 'trigger_name' in event \
and 'workflow_name' in event \
and 'source' in event \
and 'data' in event:
assert(event["trigger_type"] == "timer")
assert(event["trigger_status"] == "ready" or event["trigger_status"] == "error")
print("_!_TRIGGER_START_" + event['trigger_name'] + ";triggers_timer;" + event['workflow_name'] + ";" + event['source'] + ";" + event['data'])
time.sleep(1)
else:
print("ERROR: received event: " + str(event))
assert(0)
return {}
def addTrigger(trigger_name, trigger_info, context):
message = f"addTrigger Trigger: {trigger_name}"
status, status_msg = context.addTrigger(trigger_name, trigger_info)
if status == None or status == False:
message = f"{message}, Error: response: {status}, message: {status_msg}"
print(message)
raise Exception(message)
else:
message = f"{message}, Success: response: {status}, message: {status_msg}"
print(message)
time.sleep(1)
def addTriggerForWorkflow(trigger_name, workflowname, workflow_state, context):
message = f"addTriggerForWorkflow Trigger: {trigger_name}"
status, status_msg = context.addTriggerForWorkflow(trigger_name, workflowname, workflow_state)
if status == None or status == False:
message = f"{message}, Error: response: {status}, message: {status_msg}"
print(message)
raise Exception(message)
else:
message = f"{message}, Success: response: {status}, message: {status_msg}"
print(message)
time.sleep(1)
def deleteTriggerForWorkflow(trigger_name, workflowname, context):
message = f"deleteTriggerForWorkflow Trigger: {trigger_name}"
status, status_msg = context.deleteTriggerForWorkflow(trigger_name, workflowname)
if status == None or status == False:
message = f"{message}, Error: response: {status}, message: {status_msg}"
print(message)
raise Exception(message)
else:
message = f"{message}, Success: response: {status}, message: {status_msg}"
print(message)
time.sleep(1)
def deleteTrigger(trigger_name, context):
message = f"deleteTrigger Trigger: {trigger_name}"
status, status_msg = context.deleteTrigger(trigger_name)
if status == None or status == False:
message = f"{message}, Error: response: {status}, message: {status_msg}"
print(message)
raise Exception(message)
else:
message = f"{message}, Success: response: {status}, message: {status_msg}"
print(message)
time.sleep(1)
def addWorkflow(workflowname, context):
message = f"addWorkflow: workflow: {workflowname}"
request = \
{
"action": "addWorkflow",
"data": {
"workflow": {"name": workflowname}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Error: Status message: {status_message}, Response: {response}"
print(message)
raise Exception(message)
workflow_id = response['data']['workflow']['id']
message = f"{message}, Success, workflow_id = {workflow_id}"
print(message)
return workflow_id
def deleteWorkflow(workflowname, workflow_id, context):
message = f"deleteWorkflow: workflow: {workflowname}, workflow_id: {workflow_id}"
request = \
{
"action": "deleteWorkflow",
"data": {
"workflow": {"id": workflow_id}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Error: Status message: {status_message}, Response: {response}"
print(message)
raise Exception(message)
message = f"{message}, Success"
print(message)
def uploadWorkflowJSON(workflowname, workflow_id, workflow_json, context):
message = f"uploadWorkflowJSON: workflow: {workflowname}, workflow_id: {workflow_id}"
request = \
{
"action": "uploadWorkflowJSON",
"data": {
"workflow": {"id": workflow_id, "json": base64.b64encode(workflow_json.encode()).decode()}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Error: Status message: {status_message}, Response: {response}"
print(message)
raise Exception(message)
message = f"{message}, Success."
print(message)
def deployWorkflow(workflowname, workflow_id, context):
message = f"deployWorkflow: workflow: {workflowname}, workflow_id: {workflow_id}"
request = \
{
"action": "deployWorkflow",
"data": {
"workflow": {"id": workflow_id}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Error: Status message: {status_message}, Response: {response}"
print(message)
raise Exception(message)
message = f"{message}, Success."
print(message)
def undeployWorkflow(workflowname, workflow_id, context):
message = f"undeployWorkflow: workflow: {workflowname}, workflow_id: {workflow_id}"
request = \
{
"action": "undeployWorkflow",
"data": {
"workflow": {"id": workflow_id}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Status message: {status_message}, Error: Response: {response}"
print(message)
raise Exception(message)
message = f"{message}, Success."
print(message)
def retrieveAllWorkflowLogs(workflowname, workflow_id, context):
message = f"retrieveAllWorkflowLogs: workflow: {workflowname}, workflow_id: {workflow_id}"
request = \
{
"action": "retrieveAllWorkflowLogs",
"data": {
"workflow": {"id": workflow_id}
}
}
status, status_message, response = context._invoke_management_api(request)
if status != True or response['status'] != 'success':
message = f"{message}, Error: Status message: {status_message}, Response: {response}"
print(message)
raise Exception(message)
message = f"{message}, Success."
print(message)
workflow_log = response["data"]["workflow"]["log"]
workflow_log = base64.b64decode(workflow_log).decode()
workflow_log_lines = workflow_log.split("\n")
return workflow_log_lines
```
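A sketch of the event the deployed workflow receives each time the timer fires, based on the keys checked in `handle` above; the nonce-style trigger name and empty `data` are illustrative:

```python
timer_event = {
    "trigger_status": "ready",        # or "error"
    "trigger_type": "timer",
    "trigger_name": "1617181920000",  # the nonce used as the trigger name
    "workflow_name": "wf_triggers_timer",
    "source": "",
    "data": "",
}
```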
#### File: tests/triggers_timer/test.py
```python
import json
import random
import sys
import time
import unittest
sys.path.append("../")
from mfn_test_utils import MFNTest
class TriggersTimerTest(unittest.TestCase):
# @unittest.skip("")
def test_triggers_storage(self):
test = MFNTest(test_name='triggers_timer',
workflow_filename='wf_triggers_timer.json')
nonce = str(int(time.time() * 1000))
input_data = []
workflowname = "wf_triggers_timer"
input_data.append(workflowname)
input_data.append(nonce)
response = test.execute(input_data)
logs = test.get_workflow_logs()
wflog = logs["log"]
log_lines = wflog.split("\n")
counter_state_1 = 0
counter_state_2 = 0
for line in log_lines:
if "_!_TRIGGER_START_" + nonce + ";triggers_timer;" + workflowname in line.strip():
counter_state_1 = counter_state_1 + 1
if "_!_TRIGGER_START_" + nonce + ";triggers_timer_state2;" + workflowname in line.strip():
counter_state_2 = counter_state_2 + 1
if counter_state_1 >=9 and counter_state_2 >=9:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
test.report(True, str(input_data), input_data, response)
else:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
test.report(False, str(input_data), input_data, response)
for line in log_lines:
print(line.strip())
test.undeploy_workflow()
test.cleanup()
``` |
{
"source": "joshtemple/lkml",
"score": 3
} |
#### File: lkml/lkml/simple.py
```python
import logging
from typing import Any, Dict, List, Optional, Sequence, Type, Union, cast
from lkml.keys import (
EXPR_BLOCK_KEYS,
KEYS_WITH_NAME_FIELDS,
PLURAL_KEYS,
QUOTED_LITERAL_KEYS,
pluralize,
singularize,
)
from lkml.tree import (
BlockNode,
Comma,
ContainerNode,
DocumentNode,
ExpressionSyntaxToken,
LeftBracket,
LeftCurlyBrace,
ListNode,
PairNode,
QuotedSyntaxToken,
RightBracket,
RightCurlyBrace,
SyntaxNode,
SyntaxToken,
)
from lkml.visitors import Visitor
logger = logging.getLogger(__name__)
def flatten(sequence: list) -> list:
"""Flattens a singly-nested list of lists into a list of items."""
result = []
for each in sequence:
if isinstance(each, list):
result.extend(each)
else:
result.append(each)
return result
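# Illustrative examples of flatten (not part of the original module); note that it
# only flattens one level of nesting:
#   flatten([1, [2, 3], 4]) -> [1, 2, 3, 4]
#   flatten([[1], [2]]) -> [1, 2]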
class DictVisitor(Visitor):
"""Creates a primitive representation of the parse tree.
Traverses the parse tree and transforms each node type into a dict. Each dict is
combined into one nested dict. Also handles the grouping of fields with plural keys
like ``dimension`` or ``view`` into lists.
Attributes:
depth: Tracks the level of nesting.
"""
def __init__(self):
self.depth: int = -1 # Tracks the level of nesting
def update_tree(self, target: Dict, update: Dict) -> None:
"""Add one dictionary to an existing dictionary, handling certain repeated keys.
This method is primarily responsible for handling repeated keys in LookML like
`dimension` or `set`, which can exist more than once in LookML but cannot be
repeated in a Python dictionary.
This method checks the list of valid repeated keys and combines the values of
that key in `target` and/or `update` into a list and assigns a plural key (e.g.
`dimensions` instead of `dimension`).
Args:
target: Existing dictionary of parsed LookML
update: New dictionary to be added to target
Raises:
KeyError: If `update` has more than one key
KeyError: If the key in `update` already exists and would overwrite existing
"""
keys = tuple(update.keys())
if len(keys) > 1:
raise KeyError("Dictionary to update with cannot have multiple keys.")
key = keys[0]
if key in PLURAL_KEYS:
plural_key = pluralize(key)
if plural_key in target.keys():
target[plural_key].append(update[key])
else:
target[plural_key] = [update[key]]
elif key in target.keys():
if self.depth == 0:
logger.warning(
'Multiple declarations of top-level key "%s" found. '
"Using the last-declared value.",
key,
)
target[key] = update[key]
else:
raise KeyError(
f'Key "{key}" already exists in tree '
"and would overwrite the existing value."
)
else:
target[key] = update[key]
def visit(self, document: DocumentNode) -> Dict[str, Any]:
return self.visit_container(document.container)
def visit_container(self, node: ContainerNode) -> Dict[str, Any]:
"""Creates a dict from a container node by visiting its children."""
container: Dict[str, Any] = {}
if len(node.items) > 0:
self.depth += 1
for item in node.items:
self.update_tree(container, item.accept(self))
self.depth -= 1
return container
def visit_block(self, node: BlockNode) -> Dict[str, Dict]:
"""Creates a dict from a block node by visiting its children."""
container_dict = node.container.accept(self) if node.container else {}
if node.name is not None:
container_dict["name"] = node.name.accept(self)
return {node.type.accept(self): container_dict}
def visit_list(self, node: ListNode) -> Dict[str, List]:
"""Creates a dict from a list node by visiting its children."""
return {node.type.accept(self): [item.accept(self) for item in node.items]}
def visit_pair(self, node: PairNode) -> Dict[str, str]:
"""Creates a dict from pair node by visiting its type and value tokens."""
return {node.type.accept(self): node.value.accept(self)}
def visit_token(self, token: SyntaxToken) -> str:
"""Creates a string from a syntax token."""
return str(token.value)
class DictParser:
"""Parses a Python dictionary into a parse tree.
Review the grammar specified for the Parser class to understand how LookML
is represented. The grammar details the differences between blocks, pairs, keys,
and values.
Attributes:
parent_key: The name of the key at the previous level in a LookML block.
level: The number of indentations appropriate for the current position.
base_indent: Whitespace representing one tab.
latest_node: The type of the last node to be parsed.
"""
def __init__(self):
self.parent_key: Optional[str] = None
self.level: int = 0
self.base_indent: str = " " * 2
self.latest_node: Optional[Type[SyntaxNode]] = DocumentNode
def increase_level(self) -> None:
"""Increases the indent level of the current line by one tab.
This also resets the latest node, mainly for formatting reasons.
"""
self.latest_node = None
self.level += 1
def decrease_level(self) -> None:
"""Decreases the indent level of the current line by one tab."""
self.level -= 1
@property
def indent(self) -> str:
"""Returns the level-adjusted indent."""
if self.level > 0:
return self.base_indent * self.level
else:
return ""
@property
def newline_indent(self) -> str:
"""Returns a newline plus the current indent."""
return "\n" + self.indent
@property
def prefix(self) -> str:
"""Returns the currently appropriate, preceding whitespace."""
if self.latest_node == DocumentNode:
return ""
elif self.latest_node is None:
return self.newline_indent
elif self.latest_node == BlockNode:
return "\n" + self.newline_indent
else:
return self.newline_indent
def is_plural_key(self, key: str) -> bool:
"""Returns True if the key is a repeatable key.
For example, `dimension` can be repeated, but `sql` cannot be.
The key `allowed_value` is a special case and changes behavior depending on its
parent key. If its parent key is `access_grant`, it is a list and cannot be
repeated. Otherwise, it can be repeated.
The parent key `query` is also a special case, where children are kept as lists.
See issue #53.
Args:
key: The name of the key to test.
"""
singular_key = singularize(key)
return (
singular_key in PLURAL_KEYS
and not (
singular_key == "allowed_value"
and self.parent_key.rstrip("s") == "access_grant"
)
and not (self.parent_key == "query")
)
def resolve_filters(self, values: List[dict]) -> Union[List[BlockNode], ListNode]:
"""Parse the key ``filters`` according to the context.
In LookML, the ``filters`` key is wildly inconsistent and can have three
different syntaxes. This method determines the syntax that should be used based
on the context and parses the appropriate node.
Args:
values: The contents of the ``filters`` block. Provides context to resolve.
Returns:
A block or list node depending on the resolution.
"""
if "name" in values[0]:
# This is one or more filter-only field(s), e.g.
# filter: order_region { type: string }
blocks = []
for value in values:
name = value.pop("name")
block = self.parse_block(key="filter", items=value, name=name)
blocks.append(block)
return blocks
elif "field" in values[0] and "value" in values[0]:
# This is the legacy filter syntax, e.g.
# filters: { field: dimension_name, value: "filter expression" }
return [self.parse_block(key="filters", items=value) for value in values]
else:
# This is the new filter syntax, e.g.
# filters: [ dimension_name: "filter expression", ... ]
return self.parse_list(key="filters", values=values)
def parse(self, obj: Dict[str, Any]) -> DocumentNode:
"""Parses a primitive representation of LookML into a parse tree."""
nodes = [self.parse_any(key, value) for key, value in obj.items()]
container = ContainerNode(items=tuple(flatten(nodes)))
return DocumentNode(container)
def expand_list(
self, key: str, values: Sequence
) -> List[Union[BlockNode, ListNode, PairNode]]:
"""Expands and parses a list of values for a repeatable key.
Args:
key: A repeatable LookML field type (e.g. "views" or "dimension_groups")
values: A sequence of objects to be parsed
Returns:
A list of block, list, or pair nodes, depending on the list's contents.
"""
# A dictionary with a key "filters" can correspond to multiple syntaxes, so
# must be handled in a context-aware manner
if key == "filters":
values = cast(List[dict], values)
return flatten([self.resolve_filters(values)])
else:
singular_key = singularize(key)
return flatten([self.parse_any(singular_key, value) for value in values])
def parse_any(
self, key: str, value: Union[str, list, tuple, dict]
) -> Union[
List[Union[BlockNode, ListNode, PairNode]], BlockNode, ListNode, PairNode
]:
"""Dynamically serializes a Python object based on its type.
Args:
key: A LookML field type (e.g. "suggestions" or "hidden")
value: A string, tuple, or list to serialize
Raises:
TypeError: If input value is not of a valid type
Returns:
A generator of serialized string chunks
"""
if isinstance(value, str):
return self.parse_pair(key, value)
elif isinstance(value, (list, tuple)):
if self.is_plural_key(key):
return self.expand_list(key, value)
else:
return self.parse_list(key, value)
elif isinstance(value, dict):
if key in KEYS_WITH_NAME_FIELDS or "name" not in value.keys():
name = None
else:
name = value.pop("name")
return self.parse_block(key, value, name)
else:
raise TypeError("Value must be a string, list, tuple, or dict.")
def parse_block(
self, key: str, items: Dict[str, Any], name: Optional[str] = None
) -> BlockNode:
"""Serializes a dictionary to a LookML block.
Args:
key: A LookML field type (e.g. "dimension")
fields: A dictionary to serialize (e.g. {"sql": "${TABLE}.order_id"})
name: An optional name of the block (e.g. "order_id")
Returns:
A generator of serialized string chunks
"""
prev_parent_key = self.parent_key
self.parent_key = key
latest_node_at_this_level = self.latest_node
self.increase_level()
nodes = [self.parse_any(key, value) for key, value in items.items()]
self.decrease_level()
self.latest_node = latest_node_at_this_level
self.parent_key = prev_parent_key
container = ContainerNode(items=tuple(flatten(nodes)))
if self.latest_node and self.latest_node != DocumentNode:
prefix = "\n" + self.newline_indent
else:
prefix = self.prefix
node = BlockNode(
type=SyntaxToken(key, prefix=prefix),
left_brace=LeftCurlyBrace(prefix=" " if name else ""),
right_brace=RightCurlyBrace(
prefix=self.newline_indent if container.items else ""
),
name=SyntaxToken(name) if name else None,
container=container,
)
self.latest_node = BlockNode
return node
def parse_list(self, key: str, values: Sequence[Union[str, Dict]]) -> ListNode:
"""Serializes a sequence to a LookML block.
Args:
key: A LookML field type (e.g. "fields")
values: A sequence to serialize (e.g. ["orders.order_id", "orders.item"])
Returns:
A generator of serialized string chunks
"""
# `suggestions` is only quoted when it's a list, so override the default
force_quote = True if key == "suggestions" else False
prev_parent_key = self.parent_key
self.parent_key = key
type_token = SyntaxToken(key, prefix=self.prefix)
right_bracket = RightBracket()
items: list = []
pair_mode = False
# Check the first element to see if it's a single value or a pair
if values and not isinstance(values[0], (str, int)):
pair_mode = True
# Coerce type depending on pair mode value
if pair_mode:
items = cast(List[PairNode], items)
else:
items = cast(List[SyntaxToken], items)
# Choose newline delimiting or space delimiting based on contents
if len(values) >= 5 or pair_mode:
trailing_comma: Optional[Comma] = Comma()
self.increase_level()
for value in values:
if pair_mode:
value = cast(dict, value)
# Extract key and value from dictionary with only one key
[(key, val)] = value.items()
pair: PairNode = self.parse_pair(key, val)
items.append(pair)
else:
value = cast(str, value)
token: SyntaxToken = self.parse_token(
key, value, force_quote, prefix=self.newline_indent
)
items.append(token)
self.decrease_level()
right_bracket = RightBracket(prefix=self.newline_indent)
else:
trailing_comma = None
for i, value in enumerate(values):
value = cast(str, value)
if i == 0:
token = self.parse_token(key, value, force_quote)
else:
token = self.parse_token(key, value, force_quote, prefix=" ")
items.append(token)
self.parent_key = prev_parent_key
node = ListNode(
type=type_token,
left_bracket=LeftBracket(),
items=tuple(items),
right_bracket=right_bracket,
trailing_comma=trailing_comma,
)
self.latest_node = ListNode
return node
def parse_pair(self, key: str, value: str) -> PairNode:
"""Serializes a key and value to a LookML pair.
Args:
key: A LookML field type (e.g. "hidden")
value: The value string (e.g. "yes")
Returns:
A generator of serialized string chunks
"""
force_quote = True if self.parent_key == "filters" and key != "field" else False
value_syntax_token: SyntaxToken = self.parse_token(key, value, force_quote)
node = PairNode(
type=SyntaxToken(key, prefix=self.prefix), value=value_syntax_token
)
self.latest_node = PairNode
return node
@staticmethod
def parse_token(
key: str,
value: str,
force_quote: bool = False,
prefix: str = "",
suffix: str = "",
) -> SyntaxToken:
"""Parses a value into a token, quoting it if required by the key or forced.
Args:
key: A LookML field type (e.g. "hidden")
value: The value string (e.g. "yes")
force_quote: True if value should always be quoted
Returns:
A SyntaxToken (a QuotedSyntaxToken or ExpressionSyntaxToken when appropriate) for the value
"""
if force_quote or key in QUOTED_LITERAL_KEYS:
return QuotedSyntaxToken(value, prefix, suffix)
elif key in EXPR_BLOCK_KEYS:
return ExpressionSyntaxToken(value.strip(), prefix, suffix)
else:
return SyntaxToken(value, prefix, suffix)
``` |
{
"source": "joshterrill/tbsm",
"score": 2
} |
#### File: joshterrill/tbsm/tbsm_data_pytorch.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
# pytorch
import torch
# dataset (either synthetic or Taobao)
class TBSMDataset():
def __init__(
self,
datatype,
mode,
ts_length=20,
points_per_user=4,
numpy_rand_seed=7,
raw_path="",
pro_data="",
spa_fea_sizes="",
num_pts=1, # pts to train or test
):
# save arguments
if mode == "train":
self.numpy_rand_seed = numpy_rand_seed
else:
self.numpy_rand_seed = numpy_rand_seed + 31
self.mode = mode
# save dataset parameters
self.total = num_pts # number of lines in txt to process
self.ts_length = ts_length
self.points_per_user = points_per_user # pos and neg points per user
self.spa_fea_sizes = spa_fea_sizes
self.M = 200 # max history length
# split the datafile into path and filename
lstr = raw_path.split("/")
self.d_path = "/".join(lstr[0:-1]) + "/"
self.d_file = lstr[-1]
# preprocess data if needed
if path.exists(str(pro_data)):
print("Reading pre-processed data=%s" % (str(pro_data)))
file = str(pro_data)
else:
file = str(pro_data)
levels = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
if datatype == "taobao":
self.Unum = levels[0] # 987994 num of users
self.Inum = levels[1] # 4162024 num of items
self.Cnum = levels[2] # 9439 num of categories
print("Reading raw data=%s" % (str(raw_path)))
if self.mode == "test":
self.build_taobao_test(
raw_path,
file,
)
else:
self.build_taobao_train_or_val(
raw_path,
file,
)
elif datatype == "synthetic":
self.build_synthetic_train_or_val(
file,
)
# load data
with np.load(file) as data:
self.X_cat = data["X_cat"]
self.X_int = data["X_int"]
self.y = data["y"]
# common part between train/val and test generation
# truncates (if needed) and shuffles data points
def truncate_and_save(self, out_file, do_shuffle, t, users, items, cats, times, y):
# truncate. If for some users we didn't generate had too short history
# we truncate the unused portion of the pre-allocated matrix.
if t < self.total_out:
users = users[:t, :]
items = items[:t, :]
cats = cats[:t, :]
times = times[:t, :]
y = y[:t]
# shuffle
if do_shuffle:
indices = np.arange(len(y))
indices = np.random.permutation(indices)
users = users[indices]
items = items[indices]
cats = cats[indices]
times = times[indices]
y = y[indices]
N = len(y)
X_cat = np.zeros((3, N, self.ts_length + 1), dtype="i4") # 4 byte int
X_int = np.zeros((1, N, self.ts_length + 1), dtype=float)
X_cat[0, :, :] = users
X_cat[1, :, :] = items
X_cat[2, :, :] = cats
X_int[0, :, :] = times
# saving to compressed numpy file
if not path.exists(out_file):
np.savez_compressed(
out_file,
X_cat=X_cat,
X_int=X_int,
y=y,
)
return
# processes the raw train or validation datafile into the npz format required by training.
# for train data, each line in the raw datafile yields several randomly chosen
# datapoints (at most points_per_user per user); for validation data it yields
# one datapoint per user.
def build_taobao_train_or_val(self, raw_path, out_file):
with open(str(raw_path)) as f:
for i, _ in enumerate(f):
if i % 50000 == 0:
print("pre-processing line: ", i)
self.total = min(self.total, i + 1)
print("total lines: ", self.total)
self.total_out = self.total * self.points_per_user * 2 # pos + neg points
print("Total number of points in raw datafile: ", self.total)
print("Total number of points in output will be at most: ", self.total_out)
np.random.seed(self.numpy_rand_seed)
r_target = np.arange(0, self.M - 1)
time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
# time = np.ones(self.ts_length + 1, dtype=np.int32)
users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
times = np.zeros((self.total_out, self.ts_length + 1), dtype=float)
y = np.zeros(self.total_out, dtype="i4") # 4 byte int
# determine how many datapoints to take from each user based on the length of
# user behavior sequence
# ind=0, 1, 2, 3,... t < 10, 20, 30, 40, 50, 60, ...
k = 20
regime = np.zeros(k, dtype=int)
regime[1], regime[2], regime[3] = 1, 3, 6
for j in range(4, k):
regime[j] = self.points_per_user
if self.mode == "val":
self.points_per_user = 1
for j in range(k):
regime[j] = np.min([regime[j], self.points_per_user])
last = self.M - 1 # max index of last item
# try to generate the desired number of points (time series) per each user.
# if history is short it may not succeed to generate sufficiently different
# time series for a particular user.
t, t_pos, t_neg, t_short = 0, 0, 0, 0
with open(str(raw_path)) as f:
for i, line in enumerate(f):
if i % 1000 == 0:
print("processing line: ", i, t, t_pos, t_neg, t_short)
if i >= self.total:
break
units = line.strip().split("\t")
item_hist_list = units[4].split(",")
cate_hist_list = units[5].split(",")
neg_item_hist_list = units[6].split(",")
neg_cate_hist_list = units[7].split(",")
user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
dtype=np.int32)
# y[i] = np.int32(units[3])
items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
dtype=np.int32
)
cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), cate_hist_list)), dtype=np.int32
)
neg_items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), neg_item_hist_list)),
dtype=np.int32
)
neg_cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), neg_cate_hist_list)),
dtype=np.int32
)
# select datapoints
first = np.argmax(items_ > 0)
ind = int((last - first) // 10) # index into regime array
# pos
for _ in range(regime[ind]):
a1 = min(first + self.ts_length, last - 1)
end = np.random.randint(a1, last)
indices = np.arange(end - self.ts_length, end + 1)
if items_[indices[0]] == 0:
t_short += 1
items[t] = items_[indices]
cats[t] = cats_[indices]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
y[t] = 1
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
t += 1
t_pos += 1
# neg
for _ in range(regime[ind]):
a1 = min(first + self.ts_length - 1, last - 1)
end = np.random.randint(a1, last)
indices = np.arange(end - self.ts_length + 1, end + 1)
if items_[indices[0]] == 0:
t_short += 1
items[t, :-1] = items_[indices]
cats[t, :-1] = cats_[indices]
neg_indices = np.random.choice(r_target, 1,
replace=False) # random final item
items[t, -1] = neg_items_[neg_indices]
cats[t, -1] = neg_cats_[neg_indices]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
y[t] = 0
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
t += 1
t_neg += 1
print("total points, pos points, neg points: ", t, t_pos, t_neg)
self.truncate_and_save(out_file, True, t, users, items, cats, times, y)
return
# processes raw test datafile into npz format required to be used by
# inference step, produces one datapoint per user by taking last ts-length items
def build_taobao_test(self, raw_path, out_file):
with open(str(raw_path)) as f:
for i, _ in enumerate(f):
if i % 50000 == 0:
print("pre-processing line: ", i)
self.total = i + 1
self.total_out = self.total # pos + neg points
print("ts_length: ", self.ts_length)
print("Total number of points in raw datafile: ", self.total)
print("Total number of points in output will be at most: ", self.total_out)
time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
times = np.zeros((self.total_out, self.ts_length + 1), dtype=float)
y = np.zeros(self.total_out, dtype="i4") # 4 byte int
# generate one point (time series) per user, taking the last ts_length + 1 items
# of the user's behavior sequence.
t, t_pos, t_neg = 0, 0, 0
with open(str(raw_path)) as f:
for i, line in enumerate(f):
if i % 1000 == 0:
print("processing line: ", i, t, t_pos, t_neg)
if i >= self.total:
break
units = line.strip().split("\t")
item_hist_list = units[4].split(",")
cate_hist_list = units[5].split(",")
user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
dtype=np.int32)
y[t] = np.int32(units[3])
items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
dtype=np.int32
)
cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), cate_hist_list)), dtype=np.int32
)
# get pts
items[t] = items_[-(self.ts_length + 1):]
cats[t] = cats_[-(self.ts_length + 1):]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
if y[t] == 1:
t_pos += 1
else:
t_neg += 1
t += 1
print("total points, pos points, neg points: ", t, t_pos, t_neg)
self.truncate_and_save(out_file, False, t, users, items, cats, times, y)
return
# builds small synthetic data mimicking the structure of taobao data
def build_synthetic_train_or_val(self, out_file):
np.random.seed(123)
fea_sizes = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
maxval = np.min(fea_sizes)
num_s = len(fea_sizes)
X_cat = np.random.randint(maxval, size=(num_s, self.total, self.ts_length + 1),
dtype="i4") # 4 byte int
X_int = np.random.uniform(0, 1, size=(1, self.total, self.ts_length + 1))
y = np.random.randint(0, 2, self.total, dtype="i4") # 4 byte int
# saving to compressed numpy file
if not path.exists(out_file):
np.savez_compressed(
out_file,
X_cat=X_cat,
X_int=X_int,
y=y,
)
return
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
return self.X_cat[:, index, :], self.X_int[:, index, :], self.y[index]
def __len__(self):
return len(self.y)
# defines transform to be performed during each call to batch,
# used by loader
def collate_wrapper_tbsm(list_of_tuples):
# turns tuple into X, S_o, S_i, take last ts_length items
data = list(zip(*list_of_tuples))
all_cat = torch.tensor(data[0], dtype=torch.long)
all_int = torch.tensor(data[1], dtype=torch.float)
# print("shapes:", all_cat.shape, all_int.shape)
num_den_fea = all_int.shape[1]
num_cat_fea = all_cat.shape[1]
batchSize = all_cat.shape[0]
ts_len = all_cat.shape[2]
all_int = torch.reshape(all_int, (batchSize, num_den_fea * ts_len))
X = []
lS_i = []
lS_o = []
# transform data into the form used in dlrm nn
for j in range(ts_len):
lS_i_h = []
for i in range(num_cat_fea):
lS_i_h.append(all_cat[:, i, j])
lS_o_h = [torch.tensor(range(batchSize)) for _ in range(len(lS_i_h))]
lS_i.append(lS_i_h)
lS_o.append(lS_o_h)
X.append(all_int[:, j].view(-1, 1))
T = torch.tensor(data[2], dtype=torch.float32).view(-1, 1)
return X, lS_o, lS_i, T
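# Illustrative output shapes (derived from the code above) for a batch of size B
# with num_cat_fea categorical features and ts_len time steps:
#   X: list of ts_len dense-feature tensors, each of shape (B, 1)
#   lS_i / lS_o: lists of ts_len lists, each holding num_cat_fea index/offset tensors of length B
#   T: target tensor of shape (B, 1), dtype float32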
# creates a loader (train, val or test data) to be used in the main training loop
# or during inference step
def make_tbsm_data_and_loader(args, mode):
if mode == "train":
raw = args.raw_train_file
proc = args.pro_train_file
numpts = args.num_train_pts
batchsize = args.mini_batch_size
doshuffle = True
elif mode == "val":
raw = args.raw_train_file
proc = args.pro_val_file
numpts = args.num_val_pts
batchsize = 25000
doshuffle = True
else:
raw = args.raw_test_file
proc = args.pro_test_file
numpts = 1
batchsize = 25000
doshuffle = False
data = TBSMDataset(
args.datatype,
mode,
args.ts_length,
args.points_per_user,
args.numpy_rand_seed,
raw,
proc,
args.arch_embedding_size,
numpts,
)
loader = torch.utils.data.DataLoader(
data,
batch_size=batchsize,
num_workers=0,
collate_fn=collate_wrapper_tbsm,
shuffle=doshuffle,
)
return loader, len(data)
``` |
{
"source": "joshthecoder/mousetrap",
"score": 4
} |
#### File: joshthecoder/mousetrap/cheese.py
```python
import sys
class Mouse:
LEFT_BUTTON = 1
MIDDLE_BUTTON = 2
RIGHT_BUTTON = 3
def move(self, x, y):
"""Move mouse pointer to specified location"""
raise NotImplementedError
def press(self, button):
"""Trigger mouse press of the specified button"""
raise NotImplementedError
def release(self, button):
"""Trigger mouse release of the specified button"""
raise NotImplementedError
class X11Mouse(Mouse):
def __init__(self):
self.display = Xlib.display.Display()
self.screen = self.display.screen()
def move(self, x, y):
self.screen.root.warp_pointer(x, y)
self.display.sync()
def press(self, button):
Xlib.ext.xtest.fake_input(self.display, Xlib.X.ButtonPress, button)
self.display.sync()
def release(self, button):
Xlib.ext.xtest.fake_input(self.display, Xlib.X.ButtonRelease, button)
self.display.sync()
class Win32Mouse(Mouse):
def move(self, x, y):
# TODO: implement
return
def press(self, button):
# TODO: implement
return
def release(self, button):
# TODO: implement
return
class Keyboard:
def press(self, key):
"""Trigger key press"""
raise NotImplementedError
def release(self, key):
"""Trigger key release"""
raise NotImplementedError
class X11Keyboard(Keyboard):
js_to_x11_keycode = {
8:22, 9:23, 13:36, 16:50, 17:37, 18:64, 19:127, 20:66, 27:9, 32:65, 33:112,
34:117, 35:115, 36:110, 37:113, 38:111, 39:114, 40:116, 45:118, 46:119,
48:19, 49:10, 50:11, 51:12, 52:13, 53:14, 54:15, 55:16, 56:17, 57:18,
65:38, 66:56, 67:54, 68:40, 69:26, 70:41, 71:42, 72:43, 73:31, 74:44,
75:45, 76:46, 77:58, 78:57, 79:32, 80:33, 81:24, 82:27, 83:39, 84:28,
85:30, 86:55, 87:25, 88:53, 89:29, 90:52, 96:90, 97:87, 98:88, 99:89,
100:83, 101:84, 102:85, 103:79, 104:80, 105:81, 106:63, 107:86,
109:82, 110:91, 111:106, 112:67, 113:68, 114:69, 115:70, 116:71, 117:72,
118:73, 119:74, 120:75, 121:76, 122:95, 123:96, 144:77, 145:78, 186:47,
187:21, 188:59, 189:20, 190:60, 191:61, 192:49, 219:34, 220:51,
221:35, 222:48
}
def __init__(self):
self.display = Xlib.display.Display()
def _lookup_keycode(self, js_keycode):
try:
return self.js_to_x11_keycode[js_keycode]
except:
raise RuntimeError('Invalid keycode: %s' % js_keycode)
def press(self, js_keycode):
x11_keycode = self._lookup_keycode(js_keycode)
Xlib.ext.xtest.fake_input(self.display, Xlib.X.KeyPress, x11_keycode)
self.display.sync()
def release(self, js_keycode):
x11_keycode = self._lookup_keycode(js_keycode)
Xlib.ext.xtest.fake_input(self.display, Xlib.X.KeyRelease, x11_keycode)
self.display.sync()
class Win32Keyboard(Keyboard):
def press(self, key):
return
def release(self, key):
return
if sys.platform.startswith('linux'):
import Xlib.display
import Xlib.X
import Xlib.ext.xtest
mouse = X11Mouse()
keyboard = X11Keyboard()
elif sys.platform == 'win32':
import win32api
mouse = Win32Mouse()
keyboard = Win32Keyboard()
else:
raise ImportError("Unsupported platform")
``` |
{
"source": "joshthecoder/pystache",
"score": 3
} |
#### File: pystache/tests/test_view.py
```python
import unittest
import pystache
from examples.simple import Simple
from examples.complex_view import ComplexView
class TestView(unittest.TestCase):
def test_basic(self):
view = Simple("Hi {{thing}}!", { 'thing': 'world' })
self.assertEquals(view.render(), "Hi world!")
def test_kwargs(self):
view = Simple("Hi {{thing}}!", thing='world')
self.assertEquals(view.render(), "Hi world!")
def test_template_load(self):
view = Simple(thing='world')
self.assertEquals(view.render(), "Hi world!")
def test_basic_method_calls(self):
view = Simple()
self.assertEquals(view.render(), "Hi pizza!")
def test_non_callable_attributes(self):
view = Simple()
view.thing = 'Chris'
self.assertEquals(view.render(), "Hi Chris!")
def test_view_instances_as_attributes(self):
other = Simple(name='chris')
other.template = '{{name}}'
view = Simple()
view.thing = other
self.assertEquals(view.render(), "Hi chris!")
def test_complex(self):
self.assertEquals(ComplexView().render(), """<h1>Colors</h1>
<ul>
<li><strong>red</strong></li>\n \n <li><a href="#Green">green</a></li>
<li><a href="#Blue">blue</a></li>
</ul>
""")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshthecoder/shorty-python",
"score": 3
} |
#### File: shorty-python/services/budurl.py
```python
class Budurl(Service):
def __init__(self, apikey=None):
self.apikey = apikey
def _test(self):
#prompt for apikey
self.apikey = raw_input('budurl apikey: ')
Service._test(self)
def shrink(self, bigurl, notes=None):
if self.apikey is None:
raise ShortyError('Must set an apikey')
parameters = {'long_url': bigurl, 'api_key': self.apikey}
if notes:
parameters['notes'] = notes
resp = request('http://budurl.com/api/v1/budurls/shrink', parameters)
jdata = json.loads(resp.read())
if jdata['success'] != 1:
raise ShortyError(jdata['error_message'])
else:
return str(jdata['budurl'])
def expand(self, tinyurl):
resp = request('http://budurl.com/api/v1/budurls/expand', {'budurl': tinyurl})
jdata = json.loads(resp.read())
if jdata['success'] != 1:
raise ShortyError(jdata['error_message'])
else:
return str(jdata['long_url'])
```
#### File: shorty-python/services/burnurl.py
```python
class Burnurl(Service):
def _test(self):
# all we can test is shrink
turl = self.shrink('http://test.com')
if turl.startswith('http://burnurl.com'):
return True
else:
return False
def shrink(self, bigurl):
resp = request('http://burnurl.com/', {'url': bigurl, 'output': 'plain'})
return resp.read()
def expand(self, tinyurl):
# burnurl uses iframes for displaying original url
# so we cannot expand them using the 301 redirect :(
return None
```
#### File: shorty-python/services/shortto.py
```python
class Shortto(Service):
def shrink(self, bigurl):
resp = request('http://short.to/s.txt', {'url': bigurl})
return resp.read()
def expand(self, tinyurl):
resp = request('http://long.to/do.txt', {'url': tinyurl})
return resp.read()
```
#### File: shorty-python/services/tweetburner.py
```python
class Tweetburner(Service):
def shrink(self, bigurl):
resp = request('http://tweetburner.com/links', post_data='link[url]=%s' % bigurl)
return resp.read()
```
#### File: shorty-python/services/ur1ca.py
```python
class Ur1ca(Service):
def shrink(self, bigurl):
resp = request('http://ur1.ca/',
post_data = urlencode({'longurl': bigurl, 'submit' : 'Make it an ur1!'}))
returned_data = resp.read()
matched_re = re.search('Your ur1 is: <a href="(http://ur1.ca/[^"]+)">\\1', returned_data)
if matched_re:
return matched_re.group(1)
else:
raise ShortyError('Failed to shrink url')
```
#### File: shorty-python/services/urlborg.py
```python
class Urlborg(Service):
def __init__(self, apikey=None):
self.apikey = apikey
def _test(self):
# prompt tester for apikey
self.apikey = raw_input('urlborg apikey: ').strip()
Service._test(self)
def shrink(self, bigurl):
if not self.apikey:
raise ShortyError('Must set an apikey')
url = 'http://urlborg.com/api/%s/create/%s' % (self.apikey, quote(bigurl))
resp = request(url)
turl = resp.read()
if not turl.startswith('http://'):
raise ShortyError(turl)
return turl
def expand(self, tinyurl):
if not self.apikey:
return get_redirect(get_redirect(tinyurl))
turl = urlparse(tinyurl)
url = 'http://urlborg.com/api/%s/url/info.json%s' % (self.apikey, turl[2])
resp = request(url)
jdata = json.loads(resp.read())
if jdata.has_key('error'):
raise ShortyError('Invalid tiny url or apikey')
return str(jdata['o_url'])
```
#### File: shorty-python/services/xr.py
```python
class Xr(Service):
def __init__(self, account_name=None):
self.account_name = account_name
def shrink(self, bigurl, custom=None, domain=None, direct=True):
parameters = {'link': bigurl}
if custom:
parameters['custom'] = custom
if domain:
parameters['domain'] = domain
if direct:
parameters['direct'] = 'yes'
if self.account_name:
parameters['pid'] = self.account_name
resp = request('http://api.xr.com/api', parameters)
url = resp.read()
if url.startswith('http'):
return url
else:
raise ShortyError(url)
``` |
{
"source": "JoshTheGent/TerminalCommands_Python",
"score": 3
} |
#### File: JoshTheGent/TerminalCommands_Python/create.py
```python
import sys
def createCProgram(params):
extention = ".c"
fileHan = open(params[1]+extention, 'w')
if( len(params) > 2 ):
if(params[2] == "school" and len(params) > 3):
school = ["/*\n",
"*\tStudent ID: 1001296598\n",
"*\tName: <NAME>\n",
"*\tClass: ",
params[3] + "\n",
"*/\n\n"]
fileHan.writelines(school)
else:
if(params[2] == "school"):
school = ["/*\n",
"*\tStudent ID: 1001296598\n",
"*\tName: <NAME>\n",
"*/\n\n"]
fileHan.writelines(school)
includes = ["#include <stdio.h>\n",
"#include <stdlib.h>\n",
"#include <math.h>\n",
"#include <string.h>\n\n"]
fileHan.writelines(includes)
body = ["int main()\n",
"{\n",
"\tprintf(\"hello world.\");\n",
"\treturn 0;\n",
"}\n"]
fileHan.writelines(body)
params = sys.argv
if(len(params) > 1):
createCProgram(params)
print("your program has been set up with parameters:")
print(params)
else:
print("Not Enough Parameters.")
quit()
``` |
{
"source": "Josh-Thompson/artifact_py",
"score": 2
} |
#### File: artifact_py/artifact_py/code.py
```python
from __future__ import unicode_literals
import re
import os
import six
from . import name
from .name import Name
from .name import SubPart
from . import utils
RE_NAME_KEY = "name"
RE_SUBPART_KEY = "subpart"
NAME_FULL_STR = r"(?P<name>{})(:?\.(?P<subpart>{}))?".format(
name.NAME_VALID_STR, name.SUB_PART_VALID_STR)
NAME_FULL_RE = re.compile(NAME_FULL_STR, re.I)
NAME_TAG_STR = "#" + NAME_FULL_STR
NAME_TAG_RE = re.compile(NAME_TAG_STR, re.I)
NAME_TAG_VALID_RE = re.compile("${}^".format(NAME_TAG_STR), re.I)
class ImplCode:
"""Implemented in code.
primary: list of CodeLoc
secondary: dict[SubPart, list[CodeLoc]]
"""
def __init__(self, primary, secondary):
self.primary = primary
self.secondary = secondary
@classmethod
def new(cls):
return cls([], {})
def insert_primary(self, codeloc):
assert isinstance(codeloc, CodeLoc)
self.primary.append(codeloc)
def insert_secondary(self, subpart, codeloc):
assert isinstance(subpart, name.SubPart)
assert isinstance(codeloc, CodeLoc)
if subpart not in self.secondary:
self.secondary[subpart] = []
self.secondary[subpart].append(codeloc)
def serialize(self, settings):
return {
"primary": settings.serialize_list(self.primary),
"secondary": {
n.serialize(settings): settings.serialize_list(c)
for n, c in six.iteritems(self.secondary)
},
}
class CodeLoc:
def __init__(self, file_, line):
self.file = file_
self.line = line
def serialize(self, settings):
return {
"file": settings.relpath(self.file),
"line": self.line,
}
def find_impls(settings):
invalid = []
impls = {}
find_impls_recursive(
invalid=invalid,
impls=impls,
code_paths=settings.code_paths,
exclude_code_paths=settings.exclude_code_paths,
)
if invalid:
raise ValueError("Paths do not exist: {}".format(invalid))
return impls
def find_impls_recursive(invalid, impls, code_paths, exclude_code_paths):
for code_path in code_paths:
if is_excluded(code_path, exclude_code_paths):
continue
if not os.path.exists(code_path):
invalid.append(code_path)
elif os.path.isdir(code_path):
for entry in os.listdir(code_path):
find_impls_recursive(
invalid=invalid,
impls=impls,
code_paths=[os.path.join(code_path, entry)],
exclude_code_paths=exclude_code_paths,
)
else:
update_impls_file(impls, code_path)
def is_excluded(path, exclude_code_paths):
for exclude in exclude_code_paths:
if path.startswith(exclude):
return True
return False
def update_impls_file(impls, code_file):
with open(code_file) as fd:
for linenum, line in enumerate(fd):
update_impls_line(code_file, impls, linenum, line)
def update_impls_line(code_file, impls, linenum, line):
for match in NAME_TAG_RE.finditer(line):
codeloc = CodeLoc(code_file, line=linenum)
groups = match.groupdict()
name = Name.from_str(groups[RE_NAME_KEY])
if name not in impls:
impls[name] = ImplCode.new()
subpart = groups.get(RE_SUBPART_KEY)
if subpart:
subpart = SubPart.from_str(subpart)
impls[name].insert_secondary(subpart, codeloc)
else:
impls[name].insert_primary(codeloc)
```
#### File: artifact_py/artifact_py/completion.py
```python
"""
For types and methods associated with the completion ratio of artifacts.
"""
from __future__ import unicode_literals, division
import re
from . import utils
from . import name
from . import code
class Completion(utils.KeyCmp):
def __init__(self, spc, tst):
super(Completion, self).__init__(key=(spc, tst))
self.spc = spc
self.tst = tst
def serialize(self, _settings):
return {
"spc": self.spc,
"tst": self.tst,
}
class ImplDone:
def __init__(self, raw):
self.raw = raw
def serialize(self, _settings):
return self.raw
def impl_to_statistics(impl, subparts):
""""
Return the `(count, value, secondary_count, secondary_value)` that this
impl should contribute to the "specified" and "tested" statistics.
"secondary" is used because the Done field actually does contribute to
both spc AND tst for REQ and SPC types.
`subparts` should contain the subparts the artifact defines.
"""
if impl is None:
if subparts:
# If subparts are defined not being implemented
# in code means that you get counts against you
return (1 + len(subparts), 0.0, 0, 0.0)
else:
return (0, 0.0, 0, 0.0)
if isinstance(impl, ImplDone):
return (1, 1.0, 1, 1.0)
if isinstance(impl, code.ImplCode):
return _implcode_to_statistics(impl, subparts)
else:
raise TypeError(impl)
def _implcode_to_statistics(impl, subparts):
count = 1
value = int(bool(impl.primary))
sec_count = 0
sec_value = 0.0
for sub in subparts:
count += 1
# track if the subname is implemented
contains_key = int(sub in impl.secondary)
value += contains_key
if sub.is_tst():
sec_count += 1
sec_value += contains_key
return (count, value, sec_count, sec_value)
```
#### File: artifact_py/tests/test_code.py
```python
import unittest
from artifact_py import code
class TestCode(unittest.TestCase):
def setUp(self):
self.invalid = []
self.impls = {}
def test_re_single(self):
text = "#SPC-single"
result = [m.group(0) for m in code.NAME_TAG_RE.finditer(text)]
assert ["#SPC-single"] == result
def test_re_outside(self):
text = "stuff #SPC-single. other"
result = [m.group(0) for m in code.NAME_TAG_RE.finditer(text)]
assert ["#SPC-single"] == result
``` |
{
"source": "joshthoward/amundsenmetadatalibrary",
"score": 2
} |
#### File: metadata_service/entity/resource_type.py
```python
from enum import Enum, auto
class ResourceType(Enum):
Table = auto()
Dashboard = auto()
User = auto()
def to_resource_type(*, label: str) -> ResourceType:
return ResourceType[label.title()]
```
#### File: proxy/roundtrip/test_janus_graph_proxy.py
```python
from typing import Any, Mapping
import unittest
from .abstract_gremlin_proxy_tests import abstract_gremlin_proxy_test_class
from .roundtrip_janusgraph_proxy import RoundtripJanusGraphProxy
class JanusGraphGremlinProxyTest(
abstract_gremlin_proxy_test_class(), unittest.TestCase): # type: ignore
def _create_gremlin_proxy(self, config: Mapping[str, Any]) -> RoundtripJanusGraphProxy:
# Don't use PROXY_HOST, PROXY_PORT, PROXY_PASSWORD. They might not be JanusGraph
return RoundtripJanusGraphProxy(host=config['JANUS_GRAPH_URL'])
``` |
{
"source": "joshthoward/onnx-cli",
"score": 3
} |
#### File: onnx-cli/onnx_cli/__main__.py
```python
import argparse
import sys
from .commands import convert
from . import __version__
cmd_lkp = {
"convert": convert.handler
}
def main():
"""Entrypoint for the ONNX CLI
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
prog="onnx",
description="A Command Line Interface for interacting with ONNX models",
epilog="test\n")
parser.add_argument("-v", "--version", action="store_true",
help="Print version information and quit")
# Subcommands
subparsers = parser.add_subparsers(dest="subcommand")
convert_parser = subparsers.add_parser("convert",
help="Convert a model from an external format to the ONNX format")
convert_parser.add_argument("-f", "--framework", type=str,
choices=convert.framework_lkp.keys(),
help="The source model framework")
convert_parser.add_argument("path", type=str,
help="The path to the source model")
args = parser.parse_args()
if args.version:
print(__version__)
return 0
try:
cmd = cmd_lkp[args.subcommand]
except KeyError:
print("Subcommand required")
return 1
cmd(args)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "josh-thurston/data-driven-web-apps-with-flask",
"score": 3
} |
#### File: final/tests/account_tests.py
```python
from flask import Response
from pypi_org.data.users import User
from pypi_org.viewmodels.account.register_viewmodel import RegisterViewModel
from tests.test_client import flask_app
import unittest.mock
def test_example():
print("Test example...")
assert 1 + 2 == 3
def test_vm_register_validation_when_valid():
# 3 A's of test: Arrange, Act, then Assert
# Arrange
form_data = {
'name': 'Michael',
'email': '<EMAIL>',
'password': 'a' * 6
}
with flask_app.test_request_context(path='/account/register', data=form_data):
vm = RegisterViewModel()
# Act
target = 'pypi_org.services.user_service.find_user_by_email'
with unittest.mock.patch(target, return_value=None):
vm.validate()
# Assert
assert vm.error is None
def test_vm_register_validation_for_existing_user():
# 3 A's of test: Arrange, Act, then Assert
# Arrange
form_data = {
'name': 'Michael',
'email': '<EMAIL>',
'password': 'a' * 6
}
with flask_app.test_request_context(path='/account/register', data=form_data):
vm = RegisterViewModel()
# Act
target = 'pypi_org.services.user_service.find_user_by_email'
test_user = User(email=form_data.get('email'))
with unittest.mock.patch(target, return_value=test_user):
vm.validate()
# Assert
assert vm.error is not None
assert 'already exists' in vm.error
def test_v_register_view_new_user():
# 3 A's of test: Arrange, Act, then Assert
# Arrange
from pypi_org.views.account_views import register_post
form_data = {
'name': 'Michael',
'email': '<EMAIL>',
'password': 'a' * 6
}
target = 'pypi_org.services.user_service.find_user_by_email'
find_user = unittest.mock.patch(target, return_value=None)
target = 'pypi_org.services.user_service.create_user'
create_user = unittest.mock.patch(target, return_value=User())
request = flask_app.test_request_context(path='/account/register', data=form_data)
with find_user, create_user, request:
# Act
resp: Response = register_post()
# Assert
assert resp.location == '/account'
def test_int_account_home_no_login(client):
target = 'pypi_org.services.user_service.find_user_by_id'
with unittest.mock.patch(target, return_value=None):
resp: Response = client.get('/account')
assert resp.status_code == 302
assert resp.location == 'http://localhost/account/login'
def test_int_account_home_with_login(client):
target = 'pypi_org.services.user_service.find_user_by_id'
test_user = User(name='Michael', email='<EMAIL>')
with unittest.mock.patch(target, return_value=test_user):
resp: Response = client.get('/account')
assert resp.status_code == 200
assert b'Michael' in resp.data
``` |
{
"source": "joshtice/fullstack_final_project",
"score": 2
} |
#### File: fullstack_final_project/app/views.py
```python
from app import app
from .models import Contact, Instrument, Error
from .auth import AuthError, requires_auth
from flask import abort, jsonify, redirect, request
from datetime import datetime
def generate_login_uri():
"""
Construct the appropriate endpoint URI to direct the user to a login
page
"""
login_uri = (
f"https://{app.config['AUTH0_DOMAIN']}/authorize"
f"?audience={app.config['API_AUDIENCE']}"
f"&response_type=token"
f"&client_id={app.config['CLIENT_ID']}"
f"&redirect_uri={app.config['REDIRECT_URI']}"
)
return login_uri
@app.route('/', methods=['GET'])
def index():
return jsonify(
{
'status': 'healthy',
}
), 200
@app.route('/login', methods=['GET'])
def login():
return redirect(generate_login_uri())
########################################################################
# Contacts
########################################################################
@app.route('/contacts', methods=['GET'])
@requires_auth('read:contacts')
def get_all_contacts():
page = request.args.get('page', default=1, type=int)
query = {
key: request.args[key] for key in request.args if key != 'page'
}
if request.args:
contacts = Contact.query.filter_by(**query).paginate(
page=page,
per_page=app.config['RECORDS_PER_PAGE'],
error_out=False
)
else:
contacts = Contact.query.paginate(
page=page, per_page=app.config['RECORDS_PER_PAGE'], error_out=False
)
return jsonify(
{
'page': page,
'contacts': [contact.format() for contact in contacts.items],
}
), 200
@app.route('/contacts/<int:id>', methods=['GET'])
@requires_auth('read:contacts')
def get_contact(id):
contact = Contact.query.get_or_404(id)
return jsonify(contact.format()), 200
@app.route('/contacts/<int:id>/errors', methods=['GET'])
@requires_auth('read:contacts')
@requires_auth('read:errors')
def get_contact_errors(id):
contact = Contact.query.get_or_404(id)
return jsonify(
{
'errors': [error.format() for error in contact.errors],
}
), 200
@app.route('/contacts', methods=['POST'])
@requires_auth('create:contacts')
def post_contact():
try:
contact = Contact(**request.get_json())
contact.insert()
return jsonify(contact.format()), 200
except Exception:
abort(400)
@app.route('/contacts/<int:id>', methods=['PATCH'])
@requires_auth('update:contacts')
def patch_contact(id):
try:
contact = Contact.query.get_or_404(id)
for key in request.get_json():
setattr(contact, key, request.get_json()[key])
contact.update()
return jsonify(contact.format()), 200
except Exception:
abort(400)
@app.route('/contacts/<int:id>', methods=['DELETE'])
@requires_auth('delete:contacts')
def delete_contact(id):
contact = Contact.query.get_or_404(id)
contact.delete()
return jsonify(contact.format()), 200
########################################################################
# Instruments
########################################################################
@app.route('/instruments', methods=['GET'])
@requires_auth('read:instruments')
def get_all_instruments():
page = request.args.get('page', default=1, type=int)
query = {
key: request.args[key] for key in request.args if key != 'page'
}
if request.args:
instruments = Instrument.query.filter_by(**query).paginate(
page=page,
per_page=app.config['RECORDS_PER_PAGE'],
error_out=False
)
else:
instruments = Instrument.query.paginate(
page=page, per_page=app.config['RECORDS_PER_PAGE'], error_out=False
)
return jsonify(
{
'page': page,
'instruments': [
instrument.format() for instrument in instruments.items
],
}
), 200
@app.route('/instruments/<int:id>', methods=['GET'])
@requires_auth('read:instruments')
def get_instrument(id):
instrument = Instrument.query.get_or_404(id)
return jsonify(instrument.format()), 200
@app.route('/instruments/<int:id>/errors', methods=['GET'])
@requires_auth('read:instruments')
@requires_auth('read:errors')
def get_instrument_errors(id):
instrument = Instrument.query.get_or_404(id)
return jsonify({
'errors': [error.format() for error in instrument.errors],
}), 200
@app.route('/instruments', methods=['POST'])
@requires_auth('create:instruments')
def post_instrument():
try:
instrument = Instrument(**request.get_json())
instrument.insert()
return jsonify(instrument.format()), 200
except Exception:
abort(400)
@app.route('/instruments/<int:id>', methods=['PATCH'])
@requires_auth('update:instruments')
def patch_instrument(id):
try:
instrument = Instrument.query.get_or_404(id)
for key in request.get_json():
setattr(instrument, key, request.get_json()[key])
instrument.update()
return jsonify(instrument.format()), 200
except Exception:
abort(400)
@app.route('/instruments/<int:id>', methods=['DELETE'])
@requires_auth('delete:instruments')
def delete_instrument(id):
instrument = Instrument.query.get_or_404(id)
instrument.delete()
return jsonify(instrument.format()), 200
########################################################################
# Errors
########################################################################
@app.route('/errors', methods=['GET'])
@requires_auth('read:errors')
def get_all_errors():
page = request.args.get('page', default=1, type=int)
query = {
key: request.args[key] for key in request.args if key != 'page'
}
if request.args:
errors = Error.query.filter_by(**query).paginate(
page=page,
per_page=app.config['RECORDS_PER_PAGE'],
error_out=False
)
else:
errors = Error.query.paginate(
page=page, per_page=app.config['RECORDS_PER_PAGE'], error_out=False
)
return jsonify(
{
'page': page,
'errors': [
error.format() for error in errors.items
],
}
), 200
@app.route('/errors/<int:id>', methods=['GET'])
@requires_auth('read:errors')
def get_error(id):
error = Error.query.get_or_404(id)
return jsonify(error.format()), 200
@app.route('/errors', methods=['POST'])
@requires_auth('create:errors')
def post_error():
try:
data = request.get_json()
contact = Contact.query.get_or_404(data['contact'])
instrument = Instrument.query.get_or_404(data['instrument'])
date = (
datetime.strptime(data['date'], app.config['DATE_FORMAT'])
if 'date' in data
else datetime.utcnow()
)
error = Error(
description=data['description'],
contact=contact,
instrument=instrument,
date=date,
is_resolved=data.get('is_resolved', False)
)
error.insert()
return jsonify(error.format()), 200
except Exception:
abort(400)
@app.route('/errors/<int:id>', methods=['PATCH'])
@requires_auth('update:errors')
def patch_error(id):
try:
error = Error.query.get_or_404(id)
data = request.get_json()
if 'contact' in data:
error.contact = Contact.query.get_or_404(data['contact'])
if 'instrument' in data:
error.instrument = Instrument.query.get_or_404(data['instrument'])
if 'description' in data:
error.description = data['description']
if 'date' in data:
error.date = datetime.strptime(data['date'], app.config['DATE_FORMAT'])
if 'is_resolved' in data:
error.is_resolved = data['is_resolved']
error.update()
return jsonify(error.format()), 200
except Exception:
abort(400)
@app.route('/errors/<int:id>', methods=['DELETE'])
@requires_auth('delete:errors')
def delete_error(id):
error = Error.query.get_or_404(id)
deleted_record = error.format()
error.delete()
return jsonify(deleted_record), 200
########################################################################
# Error Handlers
########################################################################
@app.errorhandler(400)
def bad_request_error(error):
return jsonify(
{
'status_code': 400,
'message': 'The request was not formed correctly',
}
), 400
@app.errorhandler(404)
def not_found_error(error):
return jsonify(
{
'status_code': 404,
'message': 'The record or resource was not found',
}
), 404
@app.errorhandler(500)
def server_error(error):
return jsonify(
{
'status_code': 500,
'message': 'A server error occurred',
}
), 500
@app.errorhandler(AuthError)
def authorization_error(error):
return jsonify(
{
'status_code': error.error_code,
'message': error.error_message,
}
), error.error_code
```
#### File: migrations/versions/61fb62f47d4f_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '61fb62f47d4f'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('contacts_instruments',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('instrument_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.ForeignKeyConstraint(['instrument_id'], ['instrument.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'instrument_id')
)
op.alter_column('contact', 'first_name',
existing_type=sa.VARCHAR(length=32),
nullable=False)
op.alter_column('contact', 'last_name',
existing_type=sa.VARCHAR(length=32),
nullable=False)
op.alter_column('error', 'description',
existing_type=sa.VARCHAR(length=256),
nullable=False)
op.alter_column('instrument', 'serial_number',
existing_type=sa.VARCHAR(length=32),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('instrument', 'serial_number',
existing_type=sa.VARCHAR(length=32),
nullable=True)
op.alter_column('error', 'description',
existing_type=sa.VARCHAR(length=256),
nullable=True)
op.alter_column('contact', 'last_name',
existing_type=sa.VARCHAR(length=32),
nullable=True)
op.alter_column('contact', 'first_name',
existing_type=sa.VARCHAR(length=32),
nullable=True)
op.drop_table('contacts_instruments')
# ### end Alembic commands ###
``` |
{
"source": "joshtice/image_classifier",
"score": 3
} |
#### File: joshtice/image_classifier/predict.py
```python
import argparse
import json
import numpy as np
import os
from PIL import Image
import skimage
import skimage.io
import skimage.transform
import skimage.util
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, models, transforms
import utils
def parse_args():
"""
Parses arguments from the command line
Returns
-------
argparse.Namespace object
Container with parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('image_path',
help="path to image to be categorized by model")
parser.add_argument('checkpoint',
help="model checkpoint to use for prediction")
parser.add_argument('--top_k', type=int,
help="number of top predictions to show")
parser.add_argument('--category_names',
help="file for interpreting output")
parser.add_argument('--gpu', action='store_true',
help="run inference with gpu")
args = parser.parse_args()
if not os.path.isfile(args.image_path):
raise ValueError("image_path does not exist")
if not os.path.isfile(args.checkpoint):
raise ValueError("checkpoint does not exist")
if args.top_k is not None:
if (args.top_k < 1) or (args.top_k > 102):
raise ValueError("top_k must be between 1 and 102")
else:
args.top_k = 1
if ((args.category_names is not None) and
(not os.path.isfile(args.category_names))):
raise ValueError("category_names does not exist")
return args
def predict(image_path, model, topk=1, device='cpu'):
'''
Predict the class (or classes) of an image using a trained
deep learning model
Parameters
----------
image_path : str
Path to the image to be classified
model : torch object
Neural network used for prediction
top_k : int, optional
Number of top categories to print for prediction
device : str, optional
Indicates whether to run inference using cpu or gpu.
Allowed values: {'cpu', 'gpu'}
Returns
-------
tuple of lists
list of top_k classes and a list of the classes' associated
probabilities
'''
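# Illustrative call (file name and device are placeholders):
#   probs, classes = predict("flower.jpg", model, topk=5, device="cuda:0")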
model.to(device)
model.eval()
with torch.no_grad():
image = utils.preprocess_image(image_path)
image = np.expand_dims(image, axis=0)
image = torch.tensor(image).float()
image = image.to(device)
pred = model.forward(image)
probs, classes = pred.topk(topk)
probs = torch.exp(probs)
probs = probs.to('cpu').numpy().tolist()[0]
classes = classes.to('cpu').numpy().tolist()[0]
index_to_class = {v: int(k) for k, v in model.class_to_index.items()}
classes = [index_to_class[c] for c in classes]
return probs, classes
def translate_classes(classes, json_file):
"""
Convert torch model outputs to human-readable categories
Parameters
----------
classes : array-like
Numerical class output of the neural network
json_file : str
Path to json file with category/class mapping
Returns
-------
list
List of strings with human-readable predicted classes
"""
with open(json_file, 'r') as f:
category_mapper = json.load(f)
classes = list(map(str, classes))
names = list(map(category_mapper.get, classes))
names = [x if x is not None else 'name not available' for x in names]
return names
def main():
# Parse argments from command line
print("parsing arguments...")
args = parse_args()
# Load the model and send it to the desired processor
if args.gpu:
device = "cuda:0"
else:
device = "cpu"
print("loading model on device {}...".format(device))
model = utils.load_checkpoint(args.checkpoint, device)
# Generate prediction
print("running prediction...")
probs, classes = predict(args.image_path, model,
topk=args.top_k, device=device)
# Translate classes to human-readable form
if args.category_names is not None:
print("translating results...")
classes = translate_classes(classes, args.category_names)
# Print top predictions
print("Top predictions:")
print("Class Probability")
print("----- -----------")
for name, prob in zip(classes, probs):
print("{:<25}{:<11.3f}".format(name, prob))
if __name__ == '__main__':
main()
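# Example command-line invocation (the image path, checkpoint file, and category
# mapping file below are hypothetical; adjust them to your own files):
#   python predict.py flowers/test/1/image_06743.jpg checkpoint.pth \
#       --top_k 5 --category_names cat_to_name.json --gpu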
``` |
{
"source": "joshtingey/thetrains",
"score": 4
} |
#### File: src/common/mongo.py
```python
from pymongo import MongoClient
class Mongo(object):
"""Class to handle MongoDB data flow."""
def __init__(self, log, client):
"""Initialise Mongo.
Args:
log (logging.logger): logger to use
client (pymongo.MongoClient): pymongo client
"""
self.log = log # We take the logger from the application
self.client = client # Mongo database
@classmethod
def connect(cls, log, uri):
"""Connect to database, return None if not possible."""
try:
client = MongoClient(uri)
client = client.thetrains # Using thetrains database
log.info("Connected to mongo at {}".format(uri))
return cls(log, client)
except Exception:
log.warning("Mongo connection error: {}".format(uri))
return None
def collections(self):
"""Return list of all database collections."""
collections = None
try:
collections = self.client.list_collection_names()
except Exception as e:
self.log.warning("Mongo collections error ({})".format(e))
return collections
def drop(self, name):
"""Drop a named collection.
Args:
name (str): collection name
"""
try:
self.client.drop_collection(name)
except Exception as e:
self.log.warning("Mongo drop error ({})".format(e))
def add(self, collection, doc):
"""Add a document to a collection.
Args:
collection (str): collection name
doc (dict): document in dict format
"""
try:
self.client[collection].insert_one(doc)
except Exception as e:
self.log.warning("Mongo add error ({})".format(e))
def update(self, collection, selection, update, many=False):
"""Update document in collection by selection.
Args:
collection (str): collection name
selection (dict): document selection
update (dict): document update
"""
try:
if many:
self.client[collection].update_many(selection, update, upsert=True)
else:
self.client[collection].update_one(selection, update, upsert=True)
except Exception as e:
self.log.warning("Mongo update error ({})".format(e))
def get(self, collection):
"""Get all documents from a collection.
Args:
collection (str): collection name
"""
try:
return self.client[collection].find()
except Exception as e:
self.log.warning("Mongo get error ({})".format(e))
return None
```
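A minimal usage sketch of the wrapper above; the logger setup, connection URI, collection name, and document contents are illustrative assumptions, not values taken from the project:
```python
import logging

from common.mongo import Mongo

log = logging.getLogger(__name__)
# Hypothetical local URI; Mongo.connect returns None if the connection fails
mongo = Mongo.connect(log, "mongodb://localhost:27017")
if mongo is not None:
    mongo.add("berths", {"berth": "0660", "train": "1A23"})  # illustrative document
    for doc in mongo.get("berths"):
        print(doc)
```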
#### File: src/dash/app.py
```python
from flask import Flask
from dash import Dash
import dash_bootstrap_components as dbc
from common.config import Config
from common.mongo import Mongo
def create_flask():
"""Create the Flask instance for this application.
Returns:
flask.Flask: flask application
"""
server = Flask(__package__)
# load default settings
server.config.from_object(Config)
return server
def create_dash(server):
"""Create the Dash instance for this application.
Args:
server (flask.Flask): flask application
Returns:
dash.Dash: dash application
"""
app = Dash(
name=__package__,
server=server,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.LUX],
)
# Initialise logging
Config.init_logging(app.logger)
server.logger.removeHandler(app.logger.handlers[0])
# Initialise the mongo database
app.mongo = Mongo.connect(app.logger, app.server.config["MONGO_URI"])
    # Seed the Flask config with a default "TITLE" so that Dash-related settings
    # can be accessed from anywhere in the project through Flask's 'current_app'
server.config.setdefault("TITLE", "Dash")
# Set the app name
app.title = "thetrains"
return app
# Create the Flask instance
server = create_flask()
# Create the Dash instance
app = create_dash(server)
``` |
{
"source": "joshtombs/EPL_Parser",
"score": 3
} |
#### File: joshtombs/EPL_Parser/test_epl_parser.py
```python
import unittest
import main
from main import (ASSIGN_OR_RAISE, get_match_filename, match_is_valid,
print_run_statistics, extract_one_match_team)
class TestAssignOrRaise(unittest.TestCase):
def test_assign_or_raise_with_none(self):
"""
Test to ensure ASSIGN_OR_RAISE raises when an expression evaluates
to none
"""
self.assertRaises(ValueError, lambda: ASSIGN_OR_RAISE(None))
def test_assign_or_raise_with_string(self):
"""
Test to ensure ASSIGN_OR_RAISE returns a string when passed a string
"""
self.assertEqual(ASSIGN_OR_RAISE('6'), '6')
def test_assign_or_raise_with_object(self):
"""
Test to ensure ASSIGN_OR_RAISE returns an object when passed the object
"""
obj = type('obj', (object,), {'propertyName' : 'propertyValue'})
self.assertEqual(ASSIGN_OR_RAISE(obj), obj)
self.assertEqual(ASSIGN_OR_RAISE(obj.propertyName), 'propertyValue')
class TestGetMatchFilename(unittest.TestCase):
def test_get_match_filename_no_date(self):
"""
Test that get_match_filename handles None for a date
"""
expected_name = None
self.assertEqual(get_match_filename(None, 'TeamA', 'TeamB'), expected_name)
def test_get_match_filename_no_teams(self):
"""
Test that get_match_filename handles None for a team
"""
expected_name = None
self.assertEqual(get_match_filename('Sunday January 03, 2021', None, None), expected_name)
def test_get_match_filename_simple(self):
"""
Test that match filename generated correctly in base case
"""
expected_name = '03Jan2021_TeamA_vs_TeamB.json'
self.assertEqual(get_match_filename('Sunday January 03, 2021', 'TeamA', 'TeamB'), expected_name)
def test_get_match_filename_with_spaces(self):
"""
Test that match filename with spaces is generated correctly
"""
expected_name = '05Jan2021_Team_with_Spaces_vs_Team_with_&_symbol.json'
self.assertEqual(get_match_filename('Tuesday January 05, 2021', 'Team with Spaces', 'Team with & symbol'), expected_name)
def test_get_match_filename_with_bad_date(self):
"""
Test that match filename returns None for an invalid date
"""
expected_name = None
self.assertEqual(get_match_filename('Monday February 32, 2021', 'TeamA', 'TeamB'), expected_name)
self.assertEqual(get_match_filename('Something January 5, 2021', 'TeamA', 'TeamB'), expected_name)
self.assertEqual(get_match_filename('Tuesday January 5, -5', 'TeamA', 'TeamB'), expected_name)
def test_get_match_filename_with_wrong_weekday(self):
"""
Test that match filename ignores the weekday being wrong
"""
        expected_name = '05Jan2021_TeamA_vs_TeamB.json'
self.assertEqual(get_match_filename('Tuesday January 05, 2021', 'TeamA', 'TeamB'), expected_name)
self.assertEqual(get_match_filename('Thursday January 05, 2021', 'TeamA', 'TeamB'), expected_name)
class TestMatchIsValid(unittest.TestCase):
def setUp(self):
# Instead of actually populating stats, it is enough just to have array elements
self.test_match = {
'Date': 'Tuesday January 05, 2021',
'Result': 'Draw',
'HomeStats': {
'Team': 'TeamA',
'Goals': 5
},
'AwayStats': {
'Team': 'TeamB',
'Goals': 5
},
'HomePlayers': [0,1,2,3,4,5,6,7,8,9,10],
'AwayPlayers': [0,1,2,3,4,5,6,7,8,9,10],
'HomeKeepers': [0],
'AwayKeepers': [0]
}
def test_match_is_valid_with_none(self):
"""
Test that match_is_valid handles None for an input
"""
self.assertEqual(match_is_valid(None), False)
def test_match_is_valid_with_empty(self):
"""
Test that match_is_valid handles an empty object as input
"""
self.assertEqual(match_is_valid({}), False)
def test_match_is_valid_with_good_match(self):
"""
Test that match_is_valid asserts true when the input criteria is satisfied
"""
self.assertEqual(match_is_valid(self.test_match), True)
def test_match_is_valid_with_too_few_home_players(self):
"""
Test that match_is_valid asserts false when too few home players
"""
self.test_match['HomePlayers'] = [0,1,2,3]
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_too_few_away_players(self):
"""
Test that match_is_valid asserts false when too few away players
"""
self.test_match['AwayPlayers'] = []
self.assertEqual(match_is_valid(self.test_match), False)
    def test_match_is_valid_with_too_few_home_keepers(self):
"""
Test that match_is_valid asserts false when too few home keepers
"""
self.test_match['HomeKeepers'] = []
self.assertEqual(match_is_valid(self.test_match), False)
    def test_match_is_valid_with_too_few_away_keepers(self):
"""
Test that match_is_valid asserts false when too few away keepers
"""
self.test_match['AwayKeepers'] = []
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_bad_input(self):
"""
Test that match_is_valid asserts false when a field is the wrong type
"""
self.test_match = 'string'
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_no_date(self):
"""
Test that match_is_valid asserts false when the date field is missing
"""
self.test_match.pop('Date', None)
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_bad_date(self):
"""
Test that match_is_valid asserts false when the date field is invalid
"""
self.test_match['Date'] = 'Tuesday May 35, 2010'
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_no_result(self):
"""
Test that match_is_valid asserts false when the result field is missing
"""
self.test_match.pop('Result', None)
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_wrong_result(self):
"""
Test that match_is_valid asserts false when the result field is incorrect
"""
self.test_match['Result'] = 'Away'
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_no_home_score(self):
"""
Test that match_is_valid asserts false when the home goal field is missing
"""
self.test_match['HomeStats'].pop('Goals', None)
self.assertEqual(match_is_valid(self.test_match), False)
def test_match_is_valid_with_no_away_score(self):
"""
Test that match_is_valid asserts false when the away goal field is missing
"""
self.test_match['AwayStats'].pop('Goals', None)
self.assertEqual(match_is_valid(self.test_match), False)
class TestPrintRunStatistics(unittest.TestCase):
def setUp(self):
self.stats = {
'total': 100,
'new': 60,
'old': 40,
'skipped': 5,
'bucket': 95,
}
def test_print_run_statistics_with_valid(self):
"""
Test that print_run_statistics succeeds with valid input
"""
try:
print_run_statistics(self.stats)
except:
self.fail('print_run_statistics failed unexpectedly')
def test_print_run_statistics_with_none(self):
"""
Test that print_run_statistics succeeds with None as an input
"""
self.stats = None
try:
print_run_statistics(self.stats)
except:
self.fail('print_run_statistics failed unexpectedly')
def test_print_run_statistics_with_invalid(self):
"""
        Test that print_run_statistics succeeds with invalid input
"""
self.stats = None
try:
print_run_statistics(self.stats)
except:
self.fail('print_run_statistics failed unexpectedly')
class TestExtractOneMatchTeam(unittest.TestCase):
def setUp(self):
self.match = {
'Date': 'Saturday January 02, 2021',
'Result': 'Away',
'HomeStats': {
'Team': 'TeamA',
'Record': '0-4-1',
'Goals': 2,
'Possession': '51%'
},
'AwayStats': {
'Team': 'TeamB & Spaces',
'Record': '17-4-0',
'Goals': 4,
'Possession': '49%'
},
'HomeKeepers': [
{
'Name': 'KeeperA'
}
],
'AwayKeepers': [
{
'Name': 'KeeperB'
}
],
'HomePlayers': [0,1,2,3,4,5,6,7,8,9,10,11,12,13],
'AwayPlayers': [0,1,2,3,4,5,6,7,8,9,10,11]
}
def test_extract_one_match_with_home(self):
new_json = extract_one_match_team(self.match, self.match['HomeStats']['Team'])
self.assertEqual(new_json['Team'], self.match['HomeStats']['Team'])
self.assertEqual(new_json['Date'], self.match['Date'])
self.assertEqual(new_json['Result'], 'Loss')
self.assertEqual(new_json['Opponent'], self.match['AwayStats']['Team'])
self.assertEqual(new_json['OppRecord'], self.match['AwayStats']['Record'])
self.assertEqual(new_json['GlsFor'], self.match['HomeStats']['Goals'])
self.assertEqual(new_json['GlsAgainst'], self.match['AwayStats']['Goals'])
self.assertEqual(new_json['Possession'], self.match['HomeStats']['Possession'])
self.assertEqual(new_json['Keepers'], self.match['HomeKeepers'])
self.assertEqual(new_json['Players'], self.match['HomePlayers'])
def test_extract_one_match_with_away(self):
new_json = extract_one_match_team(self.match, self.match['AwayStats']['Team'])
self.assertEqual(new_json['Team'], self.match['AwayStats']['Team'])
self.assertEqual(new_json['Date'], self.match['Date'])
self.assertEqual(new_json['Result'], 'Win')
self.assertEqual(new_json['Opponent'], self.match['HomeStats']['Team'])
self.assertEqual(new_json['OppRecord'], self.match['HomeStats']['Record'])
self.assertEqual(new_json['GlsFor'], self.match['AwayStats']['Goals'])
self.assertEqual(new_json['GlsAgainst'], self.match['HomeStats']['Goals'])
self.assertEqual(new_json['Possession'], self.match['AwayStats']['Possession'])
self.assertEqual(new_json['Keepers'], self.match['AwayKeepers'])
self.assertEqual(new_json['Players'], self.match['AwayPlayers'])
def test_extract_one_match_with_bad_team(self):
new_json = extract_one_match_team(self.match, 'TeamC')
self.assertIsNone(new_json)
def test_extract_one_match_with_none_match(self):
new_json = extract_one_match_team(None, self.match['AwayStats']['Team'])
self.assertIsNone(new_json)
def test_extract_one_match_with_missing_team(self):
self.match['HomeStats'].pop('Team', None)
new_json = extract_one_match_team(self.match, self.match['AwayStats']['Team'])
self.assertIsNone(new_json)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshtombs/Pyppeteer-Flask-Docker",
"score": 3
} |
#### File: joshtombs/Pyppeteer-Flask-Docker/main.py
```python
import os
from flask import Flask
# Imports for web scraping
from requests_html import AsyncHTMLSession
import asyncio
import pyppeteer
app = Flask(__name__)
async def get_page(url):
new_loop=asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
session = AsyncHTMLSession()
print("Launching browser...")
browser = await pyppeteer.launch({
# 'executablePath': 'google-chrome-stable',
'executablePath': 'google-chrome-unstable',
'ignoreHTTPSErrors':True,
'dumpio':True,
'headless':True,
'handleSIGINT':False,
'handleSIGTERM':False,
'handleSIGHUP':False
})
print("Launched browser...")
session._browser = browser
resp_page = await session.get(url)
print("Got response from page...")
await resp_page.html.arender()
print("Rendered page...")
return resp_page.html # note, changed from content
@app.route("/test/<path:url>")
def get_page_name(url):
print("Got request to collect ", url)
try:
page_html = asyncio.run(get_page(url))
except:
return "Error retrieving match content from URL"
return page_html.find('title')[0].text
@app.route("/")
def get_toscrape_name():
return get_page_name("http://toscrape.com")
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
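# Example request once the server is running (illustrative; port 8080 is the
# default above and the target can be any reachable page):
#   curl http://localhost:8080/test/http://toscrape.com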
``` |
{
"source": "joshtrivedi/face-detection-tflite",
"score": 2
} |
#### File: face-detection-tflite/fdlite/face_detection.py
```python
u"""BlazeFace face detection.
Ported from Google® MediaPipe (https://google.github.io/mediapipe/).
Model card:
https://mediapipe.page.link/blazeface-mc
Reference:
<NAME> et al. BlazeFace: Sub-millisecond
Neural Face Detection on Mobile GPUs. CVPR
Workshop on Computer Vision for Augmented and
Virtual Reality, Long Beach, CA, USA, 2019.
"""
import numpy as np
import os
import tensorflow as tf
from enum import IntEnum
from PIL.Image import Image
from typing import List, Optional, Union
from fdlite import InvalidEnumError
from fdlite.nms import non_maximum_suppression
from fdlite.transform import detection_letterbox_removal, image_to_tensor
from fdlite.transform import sigmoid
from fdlite.types import Detection, Rect
MODEL_NAME_BACK = 'face_detection_back.tflite'
MODEL_NAME_FRONT = 'face_detection_front.tflite'
MODEL_NAME_SHORT = 'face_detection_short_range.tflite'
MODEL_NAME_FULL = 'face_detection_full_range.tflite'
MODEL_NAME_FULL_SPARSE = 'face_detection_full_range_sparse.tflite'
# score limit is 100 in mediapipe and leads to overflows with IEEE 754 floats
# this lower limit is safe for use with the sigmoid functions and float32
RAW_SCORE_LIMIT = 80
# threshold for confidence scores
MIN_SCORE = 0.5
# NMS similarity threshold
MIN_SUPPRESSION_THRESHOLD = 0.3
# from mediapipe module; irrelevant parts removed
# (reference: mediapipe/modules/face_detection/face_detection_front_cpu.pbtxt)
SSD_OPTIONS_FRONT = {
'num_layers': 4,
'input_size_height': 128,
'input_size_width': 128,
'anchor_offset_x': 0.5,
'anchor_offset_y': 0.5,
'strides': [8, 16, 16, 16],
'interpolated_scale_aspect_ratio': 1.0
}
# (reference: modules/face_detection/face_detection_back_desktop_live.pbtxt)
SSD_OPTIONS_BACK = {
'num_layers': 4,
'input_size_height': 256,
'input_size_width': 256,
'anchor_offset_x': 0.5,
'anchor_offset_y': 0.5,
'strides': [16, 32, 32, 32],
'interpolated_scale_aspect_ratio': 1.0
}
# (reference: modules/face_detection/face_detection_short_range_common.pbtxt)
SSD_OPTIONS_SHORT = {
'num_layers': 4,
'input_size_height': 128,
'input_size_width': 128,
'anchor_offset_x': 0.5,
'anchor_offset_y': 0.5,
'strides': [8, 16, 16, 16],
'interpolated_scale_aspect_ratio': 1.0
}
# (reference: modules/face_detection/face_detection_full_range_common.pbtxt)
SSD_OPTIONS_FULL = {
'num_layers': 1,
'input_size_height': 192,
'input_size_width': 192,
'anchor_offset_x': 0.5,
'anchor_offset_y': 0.5,
'strides': [4],
'interpolated_scale_aspect_ratio': 0.0
}
class FaceIndex(IntEnum):
"""Indexes of keypoints returned by the face detection model.
Use these with detection results (by indexing the result):
```
def get_left_eye_position(detection):
x, y = detection[FaceIndex.LEFT_EYE]
return x, y
```
"""
LEFT_EYE = 0
RIGHT_EYE = 1
NOSE_TIP = 2
MOUTH = 3
LEFT_EYE_TRAGION = 4
RIGHT_EYE_TRAGION = 5
class FaceDetectionModel(IntEnum):
"""Face detection model option:
FRONT_CAMERA - 128x128 image, assumed to be mirrored
BACK_CAMERA - 256x256 image, not mirrored
SHORT - 128x128 image, assumed to be mirrored; best for short range images
(i.e. faces within 2 metres from the camera)
FULL - 192x192 image, assumed to be mirrored; dense; best for mid-ranges
(i.e. faces within 5 metres from the camera)
FULL_SPARSE - 192x192 image, assumed to be mirrored; sparse; best for
mid-ranges (i.e. faces within 5 metres from the camera)
this model is up ~30% faster than `FULL` when run on the CPU
"""
FRONT_CAMERA = 0
BACK_CAMERA = 1
SHORT = 2
FULL = 3
FULL_SPARSE = 4
class FaceDetection:
"""BlazeFace face detection model as used by Google MediaPipe.
This model can detect multiple faces and returns a list of detections.
Each detection contains the normalised [0,1] position and size of the
detected face, as well as a number of keypoints (also normalised to
[0,1]).
The model is callable and accepts a PIL image instance, image file name,
and Numpy array of shape (height, width, channels) as input. There is no
size restriction, but smaller images are processed faster.
Example:
```
detect_faces = FaceDetection(model_path='/var/mediapipe/models')
detections = detect_faces('/home/user/pictures/group_photo.jpg')
print(f'num. faces found: {len(detections)}')
# convert normalised coordinates to pixels (assuming 3kx2k image):
if len(detections):
rect = detections[0].bbox.scale(3000, 2000)
print(f'first face rect.: {rect}')
else:
print('no faces found')
```
Raises:
InvalidEnumError: `model_type` contains an unsupported value
"""
def __init__(
self,
model_type: FaceDetectionModel = FaceDetectionModel.FRONT_CAMERA,
model_path: Optional[str] = None
) -> None:
ssd_opts = {}
if model_path is None:
my_path = os.path.abspath(__file__)
model_path = os.path.join(os.path.dirname(my_path), 'data')
if model_type == FaceDetectionModel.FRONT_CAMERA:
self.model_path = os.path.join(model_path, MODEL_NAME_FRONT)
ssd_opts = SSD_OPTIONS_FRONT
elif model_type == FaceDetectionModel.BACK_CAMERA:
self.model_path = os.path.join(model_path, MODEL_NAME_BACK)
ssd_opts = SSD_OPTIONS_BACK
elif model_type == FaceDetectionModel.SHORT:
self.model_path = os.path.join(model_path, MODEL_NAME_SHORT)
ssd_opts = SSD_OPTIONS_SHORT
elif model_type == FaceDetectionModel.FULL:
self.model_path = os.path.join(model_path, MODEL_NAME_FULL)
ssd_opts = SSD_OPTIONS_FULL
elif model_type == FaceDetectionModel.FULL_SPARSE:
self.model_path = os.path.join(model_path, MODEL_NAME_FULL_SPARSE)
ssd_opts = SSD_OPTIONS_FULL
else:
raise InvalidEnumError(f'unsupported model_type "{model_type}"')
self.interpreter = tf.lite.Interpreter(model_path=self.model_path)
self.interpreter.allocate_tensors()
self.input_index = self.interpreter.get_input_details()[0]['index']
self.input_shape = self.interpreter.get_input_details()[0]['shape']
self.bbox_index = self.interpreter.get_output_details()[0]['index']
self.score_index = self.interpreter.get_output_details()[1]['index']
self.anchors = _ssd_generate_anchors(ssd_opts)
def __call__(
self,
image: Union[Image, np.ndarray, str],
roi: Optional[Rect] = None
) -> List[Detection]:
"""Run inference and return detections from a given image
Args:
image (Image|ndarray|str): Numpy array of shape
`(height, width, 3)`, PIL Image instance or file name.
roi (Rect|None): Optional region within the image that may
contain faces.
Returns:
(list) List of detection results with relative coordinates.
"""
height, width = self.input_shape[1:3]
image_data = image_to_tensor(
image,
roi,
output_size=(width, height),
keep_aspect_ratio=True,
output_range=(-1, 1))
input_data = image_data.tensor_data[np.newaxis]
self.interpreter.set_tensor(self.input_index, input_data)
self.interpreter.invoke()
raw_boxes = self.interpreter.get_tensor(self.bbox_index)
raw_scores = self.interpreter.get_tensor(self.score_index)
boxes = self._decode_boxes(raw_boxes)
scores = self._get_sigmoid_scores(raw_scores)
detections = FaceDetection._convert_to_detections(boxes, scores)
pruned_detections = non_maximum_suppression(
detections,
MIN_SUPPRESSION_THRESHOLD, MIN_SCORE,
weighted=True)
detections = detection_letterbox_removal(
pruned_detections, image_data.padding)
return detections
def _decode_boxes(self, raw_boxes: np.ndarray) -> np.ndarray:
"""Simplified version of
mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
"""
# width == height so scale is the same across the board
scale = self.input_shape[1]
num_points = raw_boxes.shape[-1] // 2
# scale all values (applies to positions, width, and height alike)
boxes = raw_boxes.reshape(-1, num_points, 2) / scale
# adjust center coordinates and key points to anchor positions
boxes[:, 0] += self.anchors
for i in range(2, num_points):
boxes[:, i] += self.anchors
# convert x_center, y_center, w, h to xmin, ymin, xmax, ymax
center = np.array(boxes[:, 0])
half_size = boxes[:, 1] / 2
boxes[:, 0] = center - half_size
boxes[:, 1] = center + half_size
return boxes
def _get_sigmoid_scores(self, raw_scores: np.ndarray) -> np.ndarray:
"""Extracted loop from ProcessCPU (line 327) in
mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
"""
# just a single class ("face"), which simplifies this a lot
# 1) thresholding; adjusted from 100 to 80, since sigmoid of [-]100
# causes overflow with IEEE single precision floats (max ~10e38)
raw_scores[raw_scores < -RAW_SCORE_LIMIT] = -RAW_SCORE_LIMIT
raw_scores[raw_scores > RAW_SCORE_LIMIT] = RAW_SCORE_LIMIT
# 2) apply sigmoid function on clipped confidence scores
return sigmoid(raw_scores)
@staticmethod
def _convert_to_detections(
boxes: np.ndarray,
scores: np.ndarray
) -> List[Detection]:
"""Apply detection threshold, filter invalid boxes and return
detection instance.
"""
# return whether width and height are positive
def is_valid(box: np.ndarray) -> bool:
return np.all(box[1] > box[0])
score_above_threshold = scores > MIN_SCORE
filtered_boxes = boxes[np.argwhere(score_above_threshold)[:, 1], :]
filtered_scores = scores[score_above_threshold]
return [Detection(box, score)
for box, score in zip(filtered_boxes, filtered_scores)
if is_valid(box)]
def _ssd_generate_anchors(opts: dict) -> np.ndarray:
"""This is a trimmed down version of the C++ code; all irrelevant parts
have been removed.
(reference: mediapipe/calculators/tflite/ssd_anchors_calculator.cc)
"""
layer_id = 0
num_layers = opts['num_layers']
strides = opts['strides']
assert len(strides) == num_layers
input_height = opts['input_size_height']
input_width = opts['input_size_width']
anchor_offset_x = opts['anchor_offset_x']
anchor_offset_y = opts['anchor_offset_y']
interpolated_scale_aspect_ratio = opts['interpolated_scale_aspect_ratio']
anchors = []
while layer_id < num_layers:
last_same_stride_layer = layer_id
repeats = 0
while (last_same_stride_layer < num_layers and
strides[last_same_stride_layer] == strides[layer_id]):
last_same_stride_layer += 1
# aspect_ratios are added twice per iteration
repeats += 2 if interpolated_scale_aspect_ratio == 1.0 else 1
stride = strides[layer_id]
feature_map_height = input_height // stride
feature_map_width = input_width // stride
for y in range(feature_map_height):
y_center = (y + anchor_offset_y) / feature_map_height
for x in range(feature_map_width):
x_center = (x + anchor_offset_x) / feature_map_width
for _ in range(repeats):
anchors.append((x_center, y_center))
layer_id = last_same_stride_layer
return np.array(anchors, dtype=np.float32)
``` |
{
"source": "joshtronic/tmpufw",
"score": 3
} |
#### File: joshtronic/tmpufw/tmpufw.py
```python
__author__ = '<NAME>'
__file__ = 'tmpufw'
__license__ = 'MIT'
__status__ = 'Production'
__version__ = '1.0.0'
from argparse import ArgumentParser
from datetime import datetime
from os import getpid, makedirs, path, remove
from parsedatetime import Calendar
from shutil import move
from subprocess import CalledProcessError, check_output, STDOUT
from sys import exit
from time import mktime, time
class tmpufw(object):
parser = ArgumentParser(description = 'Temporarily apply `ufw` rules')
def __init__(self):
self.parser.add_argument('-s', '--status', action = 'store_true', help = 'show rule list with expirations')
self.parser.add_argument('-c', '--clean', action = 'store_true', help = 'clean up expired rules')
self.parser.add_argument('-r', '--rule', help = 'rule to be added to `ufw`')
self.parser.add_argument('-p', '--position', default = 1, help = 'position to add the rule')
self.parser.add_argument('-t', '--ttl', default = '30 days', help = 'time to live for the rule')
args = self.parser.parse_args()
# Our file names
pid_file = '/var/run/' + __file__ + '.pid'
rules_file = '/usr/local/share/' + __file__ + '/rules'
tmp_rules_file = '/tmp/' + __file__ + '-rules'
if args.status:
if path.exists(rules_file):
try:
print("Expiration\t\tRule")
print('=' * 80)
# Loops through the rules lines
for line in open(rules_file, 'r'):
# Breaks apart line into expiration timestamp and rule
timestamp, rule = line.strip("\n").split(' ', 1)
print(str(datetime.fromtimestamp(float(timestamp))) + "\t" + rule)
except IOError:
self.error('unable to read from the rules file: ' + rules_file)
else:
self.error('there are no rules to display')
elif args.clean:
# Checks for PID file
if path.exists(pid_file):
self.error(__file__ + ' is already running')
else:
# Creates the PID file
try:
handle = open(pid_file, 'w')
handle.write(str(getpid()))
handle.close()
except IOError:
self.error('unable to create PID file: ' + pid_file)
# Checks for the rules file
if path.exists(rules_file):
# Opens the temporary rules file
try:
handle = open(tmp_rules_file, 'a')
except IOError:
self.error('unable to write to the tmp rules file: ' + tmp_rules_file)
try:
current_time = time()
# Loops through the rules lines
for line in open(rules_file, 'r'):
# Breaks apart line into expiration timestamp and rule
timestamp, rule = line.strip("\n").split(' ', 1)
# Checks if rule has expired
if current_time < float(timestamp):
handle.write(line)
print(str(datetime.fromtimestamp(time())) + "\tskipped rule\t" + rule)
else:
try:
self.ufw_execute('delete ' + rule)
print(str(datetime.fromtimestamp(time())) + "\tdeleted rule\t" + rule)
except CalledProcessError as error:
self.ufw_error(error)
handle.close()
# Moves the tmp file to the rules file
move(tmp_rules_file, rules_file)
except IOError:
                        self.error('unable to read from the rules file: ' + rules_file)
# Removes the PID
remove(pid_file)
elif args.rule:
rules_path = path.dirname(rules_file)
if not path.exists(rules_path):
makedirs(rules_path)
# Converts the TTL to a timestamp
cal = Calendar()
timestamp = mktime(cal.parse(args.ttl)[0])
# Writes the rule to the rules file
try:
# TODO Check if rule already exists and update it instead of adding it again
handle = open(rules_file, 'a')
handle.write(str(timestamp) + ' ' + args.rule)
handle.write("\n")
handle.close()
except IOError:
self.error('unable to write to the rules file: ' + rules_file)
# Attempts to add the rule to `ufw`
try:
self.ufw_execute('insert ' + str(args.position) + ' ' + args.rule)
except CalledProcessError as error:
# Catches an error when attempting to add a rule to an empty database
if error.output == b"ERROR: Invalid position '1'\n":
try:
self.ufw_execute(args.rule)
except CalledProcessError as error:
self.ufw_error(error)
else:
self.ufw_error(error)
else:
self.error('no arguments specified')
def error(self, message):
self.parser.print_usage()
print(__file__ + ': error: ' + message)
exit(2)
def ufw_execute(self, rule):
for arg in [' --dry-run ', ' ']:
command = 'ufw' + arg + rule
check_output(command, stderr = STDOUT, shell = True)
def ufw_error(self, error):
self.error('ufw: ' + error.output.decode(encoding = 'UTF-8'))
if __name__ == '__main__':
tmpufw()
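# Example command-line usage (requires privileges to run `ufw`; the rule text and
# TTL values below are illustrative):
#   tmpufw --rule 'allow from 203.0.113.7 to any port 22' --ttl '2 hours'
#   tmpufw --status   # list rules with their expiration timestamps
#   tmpufw --clean    # remove expired rules, e.g. from a cron job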
``` |
{
"source": "joshtsch/elasticsearch_py2",
"score": 3
} |
#### File: tests/unit/tests.py
```python
import httplib
from unittest import TestCase
import elasticsearch_ss.elasticsearch as elasticsearch
class testElasticsearchPackage(TestCase):
"""Test class for Elasticsearch class
"""
def test_elasticsearch_http(self):
"""Test Elasticsearch class with HTTP host
"""
es = elasticsearch.Elasticsearch("http://localhost")
self.assertEqual("localhost", es.host)
self.assertEqual(False, es.https)
connection = es.connection()
self.assertTrue(isinstance(connection, httplib.HTTPConnection))
connection.close()
    def test_elasticsearch_https(self):
"""Test Elasticsearch class with HTTPS host
"""
es = elasticsearch.Elasticsearch("https://localhost")
self.assertTrue(isinstance(es, elasticsearch.Elasticsearch))
self.assertEqual("localhost", es.host)
self.assertEqual(True, es.https)
connection = es.connection()
self.assertTrue(isinstance(connection, httplib.HTTPSConnection))
connection.close()
``` |
{
"source": "joshturge/BinConvert",
"score": 4
} |
#### File: BinConvert/prototypes/binaryconv_prototype.py
```python
from os import system as syscmd
from platform import system
CharList = []
BinList = []
def BinToDec(Bin):
# The binary string is split into a list of string bits so they can be converted from string to integer.
BinaryStr = list(Bin)
BinaryStr = [int(bit) for bit in BinaryStr]
Dec = 0
Pow = 7
    ''' Every bit is run through this simple formula; summing the contributions of all the bits
    gives the ASCII decimal. The matching character is then added to a list so we can combine it later on to form a string. '''
for bit in BinaryStr:
Dec += bit*2**Pow
Pow -= 1
CharList.append(chr(Dec))
def DecToBin(Dec):
Dec = int(Dec)
BinaryStr = []
    ''' Here I run the decimal through a modulus check 8 times (once for every bit in an 8-bit binary code);
    depending on whether the decimal leaves a remainder or not, I add '1' or '0' to a list respectively.
    This list of bits is then reversed, and the list is joined
    together to form an 8-bit binary string (e.g. '01000001') and added to another list so it can
    later be joined (separated by a whitespace) and printed on screen.'''
for bit in range(8):
if Dec % 2 == 0:
            Dec //= 2
BinaryStr.append('0')
else:
Dec //= 2
BinaryStr.append('1')
BinaryStr.reverse()
BinList.append(''.join(BinaryStr))
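# Illustrative check: DecToBin(65) appends '01000001' to BinList, the 8-bit code for ASCII 'A'.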
def StrToDec(String):
    ''' The string is split into a list so I can run it through a for loop; the characters
    in the list are converted into their decimal counterparts so we can run them through a function.'''
String = list(String)
for char in String:
Dec = ord(char)
DecToBin(Dec)
def BinSnipper(BinStr):
BinStr = BinStr.split(" ")
for Bin in BinStr:
BinToDec(Bin)
def ClearTerminal():
if system() == 'Windows':
syscmd('cls')
else: syscmd('clear')
def main():
while True:
print("1) Convert Text to Binary\n2) Convert Binary to Text\nq) Quit\n")
UserInput = input("Enter: ")
if UserInput == '1':
ClearTerminal()
print("Enter the text you would like to encode.\n")
StrToDec(input("Enter: "))
print('\n', ' '.join(BinList), '\n')
BinList.clear()
elif UserInput == '2':
ClearTerminal()
print("Enter the binary code you would like to decode\n")
BinSnipper(input("Enter: "))
print('\n', ''.join(CharList), '\n')
CharList.clear()
elif UserInput == 'q':
print("Exiting...")
break
else:
print("{Uin} is not a valid option.".format(Uin=UserInput))
if __name__ == '__main__':
main()
``` |
{
"source": "joshturge/year12",
"score": 4
} |
#### File: Maths/src/mmmr.py
```python
from math import ceil, floor
from collections import Counter
def mean(numLS):
"""
Finds the sum of a list of numbers and divided by the
length of the list leaving the mean.
"""
return sum(numLS) / float(len(numLS))
def median(numLS):
"""
The middle value of a set of ordered data.
"""
def medFormula(numLS):
return (len(numLS) + 1) / 2 - 1
numLS.sort()
if (len(numLS) % 2) == 0:
belMed = numLS[int(floor(medFormula(numLS)))]
aboMed = numLS[int(ceil(medFormula(numLS)))]
return (belMed + aboMed) / 2
else:
return numLS[int(medFormula(numLS))]
def mode(numLS):
"""
Finds the most occurring number
"""
moCom = Counter(numLS).most_common(1)
mode = moCom[0]
return mode[0]
def sRange(numLS):
numLS.sort()
return numLS[len(numLS) - 1] - numLS[0]
numlist = [34, 34, 132, 45, 576, -67, 67, 67, 67, 67]
print(sRange(numlist))
```
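A quick illustrative run of the helpers above (the sample list is an assumption added for demonstration, not part of the original script):
```python
from mmmr import mean, median, mode, sRange

data = [3, 7, 7, 2, 9]
print(mean(data))    # 5.6
print(median(data))  # 7 (middle value of the sorted list [2, 3, 7, 7, 9])
print(mode(data))    # 7 (most frequent value)
print(sRange(data))  # 7 (statistical range: 9 - 2)
```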
#### File: Maths/src/sndDev.py
```python
from mmmr import mean
from math import sqrt
def sndDev(numLS):
"""
The Standard Deviation is a measure of how spread out numbers are.
"""
sqrdifls = []
totalMean = mean(numLS)
for number in numLS:
sqrdif = (number - totalMean)**2
sqrdifls.append(sqrdif)
return sqrt(mean(sqrdifls))
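# Illustrative check (sample values added for demonstration): the population
# standard deviation of [2, 4, 4, 4, 5, 5, 7, 9] is exactly 2.0.
if __name__ == '__main__':
    print(sndDev([2, 4, 4, 4, 5, 5, 7, 9]))  # expected output: 2.0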
``` |
{
"source": "joshtyler/PyC8",
"score": 3
} |
#### File: PyC8/src/CPU.py
```python
from array import *
from threading import Timer
from screen import *
from keyboard import *
import logging
import random
class CPU:
# Constants
__font = array('B', [
0xF0, 0x90, 0x90, 0x90, 0xF0,
0x20, 0x60, 0x20, 0x20, 0x70,
0xF0, 0x10, 0xF0, 0x80, 0xF0,
0xF0, 0x10, 0xF0, 0x10, 0xF0,
0x90, 0x90, 0xF0, 0x10, 0x10,
0xF0, 0x80, 0xF0, 0x10, 0xF0,
0xF0, 0x80, 0xF0, 0x90, 0xF0,
0xF0, 0x10, 0x20, 0x40, 0x40,
0xF0, 0x90, 0xF0, 0x90, 0xF0,
0xF0, 0x90, 0xF0, 0x10, 0xF0,
0xF0, 0x90, 0xF0, 0x90, 0x90,
0xE0, 0x90, 0xE0, 0x90, 0xE0,
0xF0, 0x80, 0x80, 0x80, 0xF0,
0xE0, 0x90, 0x90, 0x90, 0xE0,
0xF0, 0x80, 0xF0, 0x80, 0xF0,
0xF0, 0x80, 0xF0, 0x80, 0x80
])
__PROG_START = 0x200 # Address of start of program
__STACK_DEPTH = 16
# Variables
__mem = array('B') # General memory, 'B' is Unsigned byte
__V = array('B') # Gen purpose registers
__I = "0" # 16 bit General purpose register
__PC = "0" # Program counter
__SP = "0" # Stack pointer
__stack = array('H') # Unsigned short (2 bytes)
__DT = "0" # delay timer
__ST = "0" # sound timer
__instruction = "0" # Variable to store current instruction
__disp = "0" # Display
__keyb = "0" # Keyboard
# Methods
def __init__(self, file, display, keyboard):
self.__mem.extend(self.__font) # Load font to start of memory
padMemory(self.__mem, 0, self.__PROG_START - 1) # Pad until program start
assert(self.__mem.buffer_info()[1] * self.__mem.itemsize -1 == 0x1FF) # Assert if font is not correct size
self.__mem.fromfile(file, getFileSize(file)) # Load memory from file
self.__V = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # 16 general purpose registers
self.__I = 0
self.__PC = self.__PROG_START
self.__SP = 0
self.__stack = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # 16 layer stack
self.__disp = display
self.__keyb = keyboard
# Call timer functions to setup timer callbacks
logging.warning("Add sound output")
self.__ST = 0 # sound timer
self.__soundTimer()
self.__DT = 0 # delay timer
self.__delayTimer()
# Jump table for most significant nibble of instruction
self.__GenOpDecode = {
0x0: self.__op0ClearReturn,
0x1: self.__op1Jump,
0x2: self.__op2Call,
0x3: self.__op3SkipEqualConst,
0x4: self.__op4SkipNotEqualConst,
0x5: self.__op5SkipEqualRegister,
0x6: self.__op6LoadConstantV,
0x7: self.__op7AddConstantV,
0x8: self.__op8MathematicalLogical,
0x9: self.__op9SkipNotEqualRegister,
0xA: self.__opALoadConstantI,
0xB: self.__opBJumpAdd,
0xC: self.__opCRandAND,
0xD: self.__opDSprite,
0xE: self.__opEKeyboard,
0xF: self.__opFMisc
}
# Jump table for least significant nibble of instruction, if most significant is 8
self.__MathOpDecode = {
0x0: self.__op8_0LdVxVy,
0x1: self.__op8_1ORVxVy,
0x2: self.__op8_2ANDVxVy,
0x3: self.__op8_3XORVxVy,
0x4: self.__op8_4AddVxVy,
0x5: self.__op8_5SubVxVy,
0x6: self.__op8_6ShrVx,
0x7: self.__op8_7SubNVxVy,
0xE: self.__op8_EShlVx
}
# Jump table for least significant byte of instruction, if most significant is F
self.__MiscOpDecode = {
0x07: self.__opF_07LoadVxDT,
0x0A: self.__opF_0ALoadVxK,
0x15: self.__opF_15LoadDTVx,
0x18: self.__opF_18LoadSTVx,
0x1E: self.__opF_1EAddIVx,
0x29: self.__opF_29LoadFVx,
0x33: self.__opF_33LoadBVx,
0x55: self.__opF_55LoadIVx,
            0x65: self.__opF_65LoadVxI,
}
def __soundTimer(self):
if self.__ST > 0:
self.__ST -= 1
#Activate sound here
Timer(1.0/60.0, self.__soundTimer).start() # Callback at 60Hz
def __delayTimer(self):
if self.__DT > 0:
self.__DT -= 1
Timer(1.0/60.0, self.__delayTimer).start() # Callback at 60Hz
def start(self):
while self.__PC < ( self.__mem.buffer_info()[1] * self.__mem.itemsize - 1): # Only loop while inside program range
self.__instruction = self.__mem[self.__PC] << 8 | self.__mem[self.__PC + 1] # load two byte instruction
logging.debug("Instruction: %s", hex(self.__instruction))
self.__PC += 2 # Increment here to avoid messing up jumps
self.__GenOpDecode[(self.__instruction & 0xF000) >> 12]()
# Base instruction functions
def __op0ClearReturn(self):
logging.debug("0x0 opcode")
if self.__instruction == 0x00E0:
logging.debug("Clear Screen")
self.__disp.clearScreen()
elif self.__instruction == 0x00EE:
logging.debug("Return from subroutine")
assert(self.__SP > 0)
self.__PC = self.__stack[self.__SP]
self.__SP -= 1
else :
logging.warning("Redundant opcode: %s", hex(self.__instruction))
def __op1Jump(self):
logging.debug("0x1 opcode \n Jump")
self.__PC = self.__instruction & 0x0FFF
def __op2Call(self):
logging.debug("0x2 opcode \n Call subroutine")
assert(self.__SP +1 < self.__STACK_DEPTH)
self.__SP += 1
self.__stack[self.__SP] = self.__PC
self.__PC = self.__instruction & 0x0FFF
def __op3SkipEqualConst(self):
logging.debug("0x3 opcode \n Skip if Vx = constant")
if self.__V[(self.__instruction & 0x0F00) >> 8] == self.__instruction & 0x00FF:
self.__PC += 2
def __op4SkipNotEqualConst(self):
logging.debug("0x4 opcode \n Skip if Vx != constant")
if self.__V[(self.__instruction & 0x0F00) >> 8] != self.__instruction & 0x00FF:
self.__PC += 2
def __op5SkipEqualRegister(self):
logging.debug("0x3 opcode \n Skip if Vx = Vy")
if self.__V[(self.__instruction & 0x0F00) >> 8] == self.__V[(self.__instruction & 0x00F0) >> 4]:
self.__PC += 2
def __op6LoadConstantV(self):
logging.debug("0x6 opcode \n Load constant to Vx")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__instruction & 0x00FF
def __op7AddConstantV(self):
logging.debug("0x7 opcode \n Add constant to Vx")
        self.__V[(self.__instruction & 0x0F00) >> 8] = (self.__V[(self.__instruction & 0x0F00) >> 8] + (self.__instruction & 0x00FF)) & 0xFF  # wrap to 8 bits; carry flag is not affected
def __op8MathematicalLogical(self):
logging.debug("0x8 opcode")
self.__MathOpDecode[self.__instruction & 0x000F]()
def __op9SkipNotEqualRegister(self):
logging.debug("0x9 opcode \n Skip if Vx != Vy")
if self.__V[(self.__instruction & 0x0F00) >> 8] != self.__V[(self.__instruction & 0x00F0) >> 4]:
self.__PC += 2
def __opALoadConstantI(self):
logging.debug("0xA opcode \n Set I to constant")
self.__I = self.__instruction & 0x0FFF
def __opBJumpAdd(self):
logging.debug("0xB opcode \n Jump to I plus V0")
self.__op1Jump()
self.__PC += self.__V[0]
def __opCRandAND(self):
logging.debug("0xC opcode \n Store random int ANDed with constant in Vx")
randNo = random.randint(0, 255)
constant = self.__instruction & 0x00FF
self.__V[(self.__instruction & 0x0F00) >> 8] = randNo & constant
def __opDSprite(self):
logging.debug("0xD opcode \n Read and draw sprite")
noBytes = self.__instruction & 0x000F
x = self.__V[(self.__instruction & 0x0F00) >> 8]
y = self.__V[(self.__instruction & 0x00F0) >> 4]
logging.debug("Loading %d byte sprite from %s. Drawing to x: %d, y: %d", noBytes, hex(self.__I), x, y)
# Ensure we are not trying to read outside of program memory
assert(self.__mem.buffer_info()[1] * self.__mem.itemsize -1 >= self.__I + (noBytes -1) )
self.__V[0xF] = 0 # Clear erased flag
for i in range(0, noBytes):
temp = self.__mem[self.__I + i]
erased = self.__disp.XORByte(temp, x, y + i)
if erased:
self.__V[0xF] = 1
def __opEKeyboard(self):
logging.debug("0xE opcode")
if self.__instruction & 0x00FF == 0x009E:
logging.debug("Skip if key in Vx is pressed")
if self.__keyb.checkIfPressed(self.__V[(self.__instruction & 0x0F00) >> 8]):
self.__PC += 2
elif self.__instruction & 0x00FF == 0x00A1:
logging.debug("Skip if key in Vx is not pressed")
            if not self.__keyb.checkIfPressed(self.__V[(self.__instruction & 0x0F00) >> 8]):
self.__PC += 2
else :
logging.warning("Invalid opcode: %s", hex(self.__instruction))
def __opFMisc(self):
logging.debug("0xF opcode")
self.__MiscOpDecode[self.__instruction & 0x00FF]()
# 0x8XXX instructions
def __op8_0LdVxVy(self):
logging.debug("Set Vx = Vy")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__V[(self.__instruction & 0x00F0) >> 4]
def __op8_1ORVxVy(self):
logging.debug("Set Vx = Vx OR Vy")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__V[(self.__instruction & 0x00F0) >> 4] | self.__V[(self.__instruction & 0x0F00) >> 8]
def __op8_2ANDVxVy(self):
logging.debug("Set Vx = Vx AND Vy")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__V[(self.__instruction & 0x00F0) >> 4] & self.__V[(self.__instruction & 0x0F00) >> 8]
def __op8_3XORVxVy(self):
logging.debug("Set Vx = Vx XOR Vy")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__V[(self.__instruction & 0x00F0) >> 4] ^ self.__V[(self.__instruction & 0x0F00) >> 8]
def __op8_4AddVxVy(self):
logging.debug("Set Vx = Vx + Vy. Vf is carry")
result = self.__V[(self.__instruction & 0x0F00) >> 8] + self.__V[(self.__instruction & 0x00F0) >> 4]
        if result > 0xFF:  # carry when the 8-bit sum overflows
self.__V[0xF] = 1
else:
self.__V[0xF] = 0
self.__V[(self.__instruction & 0x0F00) >> 8] = result & 0xFF
def __op8_5SubVxVy(self):
logging.debug("Set Vx = Vx - Vy. Vf is NOT Borrwo")
result = self.__V[(self.__instruction & 0x0F00) >> 8] - self.__V[(self.__instruction & 0x00F0) >> 4]
if result > 0:
self.__V[0xF] = 1
else:
self.__V[0xF] = 0
self.__V[(self.__instruction & 0x0F00) >> 8] = result & 0xFF
def __op8_6ShrVx(self):
logging.debug("Shift Vx right. Vf is carry")
self.__V[0xF] = self.__V[(self.__instruction & 0x0F00) >> 8] & 0b00000001
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__V[(self.__instruction & 0x0F00) >> 8] >> 1
def __op8_7SubNVxVy(self):
logging.debug("Set Vx = Vy - Vx. Vf is NOT Borrow")
result = self.__V[(self.__instruction & 0x00F0) >> 4] - self.__V[(self.__instruction & 0x0F00) >> 8]
if result > 0:
self.__V[0xF] = 1
else:
self.__V[0xF] = 0
self.__V[(self.__instruction & 0x0F00) >> 8] = result & 0xFF
def __op8_EShlVx(self):
logging.debug("Shift Vx left. Vf is carry")
        self.__V[0xF] = (self.__V[(self.__instruction & 0x0F00) >> 8] & 0b10000000) >> 7
        self.__V[(self.__instruction & 0x0F00) >> 8] = (self.__V[(self.__instruction & 0x0F00) >> 8] << 1) & 0xFF
def __opF_07LoadVxDT(self):
logging.debug("Vx = DT")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__DT
def __opF_0ALoadVxK(self):
logging.debug("Wait for keypress Store in Vx")
self.__V[(self.__instruction & 0x0F00) >> 8] = self.__keyb.waitForKeypress()
def __opF_15LoadDTVx(self):
logging.debug("DT = Vx")
self.__DT = self.__V[(self.__instruction & 0x0F00) >> 8]
def __opF_18LoadSTVx(self):
logging.debug("ST = Vx")
self.__ST = self.__V[(self.__instruction & 0x0F00) >> 8]
def __opF_1EAddIVx(self):
logging.debug("I = I + Vx")
self.__I += self.__V[(self.__instruction & 0x0F00) >> 8]
def __opF_29LoadFVx(self):
logging.debug("Set I to address of font sprite for value stored in Vx")
self.__I = self.__V[(self.__instruction & 0x0F00) >> 8] * 5 # 5 bytes per sprite, starting at 0x00
def __opF_33LoadBVx(self):
logging.debug("Set data at address I, I+1, I+2 to BCD representation of Vx")
number = self.__V[(self.__instruction & 0x0F00) >> 8]
hundreds = int(number /100)
tens = int((number - hundreds*100) /10)
units = number - hundreds*100 -tens*10
self.__mem[self.__I] = hundreds
self.__mem[self.__I + 1] = tens
self.__mem[self.__I + 2] = units
def __opF_55LoadIVx(self):
logging.debug("Store registers V0...Vx at mem with address I..I+15")
max = self.__V[(self.__instruction & 0x0F00) >> 8] +1
for i in range(0,max):
self.__mem[self.__I + i] = self.__V[i]
self.__I += max # Customary from original implementation
    def __opF_65LoadVxI(self):
logging.debug("Read registers V0...Vx from mem with address I..I+15")
max = ((self.__instruction & 0x0F00) >> 8) + 1
for i in range(0,max):
self.__V[i] = self.__mem[self.__I + i]
self.__I += max # Customary from original implementation
def getFileSize(file):
oldPos = file.tell()
file.seek(0, 2) # Seek end of file
size = file.tell()
logging.debug("File size: ", size," bytes")
file.seek(oldPos, 0) # Return to old position
return size
def padMemory(arr, value, newSize):
while arr.buffer_info()[1] * arr.itemsize <= newSize:
arr.append(value)
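# Illustrative wiring of the emulator (the Screen/Keyboard class names and the ROM
# path are assumptions, since screen.py and keyboard.py are not shown here):
#   with open('roms/PONG.ch8', 'rb') as rom:
#       chip8 = CPU(rom, Screen(), Keyboard())
#       chip8.start()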
``` |
{
"source": "Joshua0128/ArticutAPI",
"score": 2
} |
#### File: Demos/NovelAnalysis/novel_analysis.py
```python
import os
import re
try:
# Installed via pip install
try:
from .ArticutAPI import Articut
except:
from ArticutAPI import Articut
except:
# Installed via git clone
import sys
sys.path.append("../..")
from ArticutAPI import Articut
# If username and apikey are left blank, only the shared quota of 2,000 characters per hour is available.
username = ""
apikey = ""
articut = Articut(username=username, apikey=apikey)
userDefinedDictFILE = "./KNOWLEDGE_三國人物.json"
if os.path.exists(userDefinedDictFILE):
pass
else:
raise IOError("請到 https://github.com/Droidtown/ArticutAPI/blob/master/Public_UserDefinedDict/ 下載 KNOWLEDGE_三國人物.json 字典檔配合使用。")
# Pattern for extracting every string tagged as a person name by the user-defined dictionary
UserDefinedPat = re.compile("<UserDefined>[^<]*?</UserDefined>")
# POS tags whose single-character tokens may be aliases of a person name
possibleAliasPosTUPL = ("ENTITY_nounHead", "ENTITY_nouny", "ENTITY_noun", "ENTITY_oov", "ACTION_verb", "MODIFIER")
def main(inputSTR):
articutResultDICT = articut.parse(inputSTR, userDefinedDictFILE="./KNOWLEDGE_三國人物.json")
print(articutResultDICT)
resultDICT = {}
    # Extract every person name in this text that is listed in KNOWLEDGE_三國人物.json and add it to resultDICT as a key
possibleAliasDICT = {}
for posSentenceDICT in articutResultDICT["result_obj"]:
if posSentenceDICT[0]["pos"] == "PUNCTUATION":
pass
else:
for ud in posSentenceDICT:
if ud["pos"] == "UserDefined":
resultDICT[ud["text"]] = []
possibleAliasDICT[ud["text"][-1]] = ud["text"]
if ud["pos"] in possibleAliasPosTUPL and len(ud["text"]) == 1 and ud["text"] in possibleAliasDICT.keys():
ud["pos"] = "UserDefined"
ud["text"] = possibleAliasDICT[ud["text"]] #這行示範把「雲」取代成「趙雲」
                    # To map 「夫人」 or 「妾」 to 「糜夫人」, handle it here in the same way.
    # The actual information-extraction loop starts here
for i in range(len(articutResultDICT["result_obj"])):
if articutResultDICT["result_obj"][i][0]["pos"] == "PUNCTUATION":
pass
else:
for ud in articutResultDICT["result_obj"][i]:
if ud["pos"] == "UserDefined":
focusPerson = ud["text"]
if ud["pos"] in ("ACTION_verb", "ACTION_quantifiedVerb", "VerbP"):
if focusPerson != None:
resultDICT[focusPerson].append(ud["text"])
else:
pass
elif "ENTITY" in ud["pos"]:
focusPerson = None
else:
pass
    return resultDICT # Example return value: {"趙雲": ["伏地", "拍馬"], "糜夫人": ["投井"]}
if __name__== "__main__":
inputSTR = """趙雲聽了,連忙追尋。只見一個人家,被火燒壞土牆,糜夫人抱著阿斗,坐於牆下枯井之傍啼哭。
雲急下馬伏地而拜。夫人曰:「妾得見將軍,阿斗有命矣。望將軍可憐他父親飄蕩半世,只有這點骨血。
將軍可護持此子,教他得見父面,妾死無恨!」"""
resultDICT = main(inputSTR)
print(resultDICT)
```
#### File: Demos/StockNews/HeadlineReader.py
```python
try:
# Installed via pip install
try:
from .ArticutAPI import Articut
except:
from ArticutAPI import Articut
except:
# Installed via git clone
import sys
sys.path.append("../..")
from ArticutAPI import Articut
from pprint import pprint
import json
if __name__ == "__main__":
try:
        # Use your own word-segmentation quota.
with open("../../account.info", "r") as f:
userDICT = json.loads(f.read())
username = userDICT["email"]
apikey = userDICT["apikey"]
atc = Articut(username=userDICT["email"], apikey=userDICT["apikey"])
except:
        # Use the shared word-segmentation quota (2,000 characters per hour).
atc = Articut()
downSample = '''台指選擇權盤後-年線保衛戰失利 外資續賣台股
台股期權盤後-華為拖累電子 台股摔破支撐
華為風暴持續擴大 三王領跌重挫148點 創波段新低
華為風暴 大跌退守10300點
美商禁令聲聲催,台股電子三王大跌,指數再破年線
跌148.85點
華為風暴擴大 一度破10300點
外資狂撤,台股欲振乏力
利空燒不盡,台股挫百點失年線
華為風暴!台股摜破前低,將往半年線測試
電子股恐慌殺盤出籠,可成慘破200元大關
貿易戰演變成科技戰 電子權值股成壓力蓋
華為風暴嚇趴!台積電狂殺3%,台股重挫逾140點
台股開盤10404點,跌53.22點
續跌危機未除 恐尋求半年線支撐
美中貿易戰延燒 牽動台股後市
不要亂猜底,小心被連環套
電子雙王獨撐 台股上檔遇壓收黑小跌(
年線持續下彎 台股技術面仍看空
新台幣貶不停 外資猛落跑 小心年線岌岌可危
中華電不賣華為新機;新台幣貶破31.5元
美擬封殺海康,安控股飆;聯茂、華通5G題材發酵
晶片事業重傷!安謀加入封殺華為
22日自營商、外資賣超4.32億元、111.21億元
外資賣壓不止,台股走勢偏空
上遇年線壓力 台股量縮整理
台股欲振乏力,三大法人賣超107.39億元
5/22集中市場三大法人合計賣超107.40億元
台積電獨木難撐,台股隨陸股衰尾
台積電帶著大立光小反彈 無助大盤光彩反攻
美國接連對中國出招,台股追價力不強,量縮收黑
短線反彈後,恐再下殺
今年高點已過
上市櫃企業Q1獲利年減近2成,4公司財報續難產
宏達電Q3營收 估衰退27~35%'''
riseSample = '''KD低檔黃金交叉,有利戰季線
台股量縮回穩,反彈契機不遠
台積賣超停止日,台股止跌時
台商回台投資將達5,500億,千載難逢的機會
貿易談判空窗,台股有望醞釀反彈
台積電強漲 大盤一度突破10500點
台積回神,反攻10500點
美給華為寬限期90天,台系華為供應鏈鬆口氣
台股開盤10481.1點,漲16.6點
金融傳產發威 台股破低後強彈收復年線
澳洲央行6月考慮降息;新興市場短空長多
上季GDP成長3.11% 全年有機會2.42%
毛利率攀升 聯發科Q3每股賺3.26元
聯亞Q3營收季增5成 矽光訂單看到明年2月
台達電工業自動化收成 電動車起飛
半月來新高 台幣升3.5分
神達車聯網收割 獲美政府車隊訂單
今年製造業景氣 上修至黃藍燈
蘋果iPhone 8明年推出 上看1.5億支
第四季SSD價漲10% 創見、威剛利多
華航聯手漢翔、空巴 搶A350維修商機
昇恆昌預購網站 年底業績衝億元
電源步入旺季 台達電、群電營收獲利增
觀光帶動成長 Q2 GDP3.84%
台股強彈 大漲214點
華映Q2虧轉盈 下半年挑戰百億
BDI逼近新高 散裝航運股紅通通
台股反彈 挑戰萬點動能仍強
全球生技類股 行情飆漲
晶華 今年EPS可望破4元
製藥一軍亮麗 百略獲利增近倍
掌握關鍵零組件 三星迅速崛起
外資券商加持 LED亮晶晶
月線連四紅 台股看好上萬點
11月份基金表現亞股滿堂彩
亞泰影像擴產後銷售動能強 Q2單季營收將超越去年同期'''
def signalMaker():
    # Tip: store the computed rise/fall signals (downSignal/riseSignal) somewhere,
    # so they only need to be recomputed when downSample/riseSample changes.
downSignal = []
riseSignal = []
result = atc.parse(downSample, level="lv2")
#確認仍有字數可使用
if result["status"] == False:
print(result["msg"])
return None
else:
        # Collect every verb from the falling-market headlines as fall signals for downSignal.
verbLIST = atc.getVerbStemLIST(result)
for v in verbLIST:
if len(v) == 0:
pass
else:
for i in v:
if "negation" in result["result_pos"][0][i[0]-22:i[0]]: #再檢查動詞前是否為「否定」表示。e.g., 不/看好。那麼「看好」應該被歸類到 riseSignal。
riseSignal.append(i[-1])
else:
downSignal.append(i[-1])
downSignal = set(downSignal)
result = atc.parse(riseSample, level="lv2")
if result["status"] == False:
print(result["msg"])
return None
else:
#將下跌新聞的標題中,每一則動詞收集起來,做為 riseSignal 的上漲訊號。
verbLIST = atc.getVerbStemLIST(result)
for v in verbLIST:
if len(v) == 0:
pass
else:
for i in v:
if "negation" in result["result_pos"][0][i[0]-22:i[0]]: #再檢查動詞前是否為「否定」表示。e.g., 不會/下跌。那麼「下跌」應該被歸類到 downSignal。
downSignal.append(i[-1])
else:
riseSignal.append(i[-1])
riseSignal = set(riseSignal)
    # Remove verbs that appear in both downSignal and riseSignal; they are likely
    # neutral or unrelated to price movement.
    commonSignal = downSignal.intersection(riseSignal)
    downSignal = downSignal - commonSignal
    riseSignal = riseSignal - commonSignal
return (downSignal, riseSignal)
if __name__== "__main__":
downSignal, riseSignal = signalMaker()
if None in (downSignal, riseSignal):
print("Cannot proceed!")
else:
testSTR = "產業供應鏈分散效應看好東協布局" #測試用句。注意到這一句並沒有在前述學習的 downSample/riseSample 中。
testResult = atc.parse(testSTR, level="lv2")
testVerbLIST = atc.getVerbStemLIST(testResult)
resultLIST = []
for tv in testVerbLIST:
if len(tv) == 0:
pass
else:
for v in tv:
if v[-1] in downSignal:
if "negation" in testResult["result_pos"][0][v[0]-22:v[0]]: #確認是否有「否定詞」出現在 downSignal 中。如果有的話,那就是上漲囉!
resultLIST.append("這句新聞標題…應該是看漲↗")
else:
resultLIST.append("這句新聞標題…應該是看跌↘")
elif v[-1] in riseSignal:
if "negation" in testResult["result_pos"][0][v[0]-22:v[0]]: #確認是否有「否定詞」出現在 riseSignal 中。如果有的話,那就是下跌囉!
resultLIST.append("這句新聞標題…應該是看跌↘")
else:
resultLIST.append("這句新聞標題…應該是看漲↗")
else:
pass
resultSET = set(resultLIST)
if len(resultSET) == 1:
print(list(resultSET)[0])
else:
print("這句新聞標題…應該是看跌↘")
``` |
{
"source": "joshua19881228/Joshua-s-Blog",
"score": 2
} |
#### File: Joshua-s-Blog/app_accounts/models.py
```python
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
class Profile(models.Model):
profile_user = models.OneToOneField(User, verbose_name='User')
profile_level = models.IntegerField('Level', default=0)
profile_real_name = models.CharField(max_length=32, blank=True, null=True)
def __unicode__(self):
return self.profile_user.username
class Meta:
verbose_name = 'Profile'
```
#### File: Joshua-s-Blog/joshua_blog/middleware.py
```python
class WebFactionFixes(object):
"""Sets 'REMOTE_ADDR' based on 'HTTP_X_FORWARDED_FOR', if the latter is
set.
Based on http://djangosnippets.org/snippets/1706/
"""
def process_request(self, request):
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR'].split(",")[0].strip()
request.META['REMOTE_ADDR'] = ip
``` |
{
"source": "Joshua1989/Mikan_Project_RSS",
"score": 2
} |
#### File: Mikan_Project_RSS/src/mikan.py
```python
import sys
import json
from workflow import Workflow3, ICON_SYNC, ICON_EJECT, ICON_WARNING
from workflow.background import run_in_background
def show_RSS_items(posts):
settings = json.loads(open('mikan_settings.json', 'r').read())
filters = settings.get('filters', {})
history = wf.stored_data('downloaded') or set()
for info in posts:
if not all(x in info['title'] for x in filters.get(info['name'], [])):
continue
item = wf.add_item(
title=info['title'],
subtitle='publish date: {0}, size: {1}'.format(info['pubdate'], info['size']),
largetext=info['title'],
icon=ICON_EJECT if info['key'] not in history else ICON_SYNC,
quicklookurl=info['cover']
)
item.add_modifier(
key='cmd',
subtitle='Download by magnet link',
arg=info['title'] + u'#' + info['key'] + u'#' + info['name'] + u'#' + info['magnet'],
valid=True
)
item.add_modifier(
key='alt',
subtitle='Download by torrent',
arg=info['title'] + u'#' + info['key'] + u'#' + info['name'] + u'#' + info['torrent'],
valid=True
)
item.add_modifier(
key='fn',
subtitle='Copy meta-data to clipboard',
arg=u'\n'.join([u'{}: {}'.format(k, v) for k, v in info.iteritems()]),
valid=True
)
item.add_modifier(
key='ctrl',
subtitle='Go to series home page',
arg=info['series_page'],
valid=True
)
item.add_modifier(
key='shift',
subtitle=info['title'],
valid=False
)
def main(wf):
query = wf.args[0]
# Get posts from cache. Set `data_func` to None, as we don't want to
# update the cache in this script and `max_age` to 0 because we want
# the cached data regardless of age
posts = wf.cached_data('posts', None, max_age=0)
# Start update script if cached data are too old (or doesn't exist)
if not wf.cached_data_fresh('posts', max_age=1200):
cmd = ['/usr/bin/python', wf.workflowfile('update.py')]
run_in_background('update', cmd)
# If script was passed a query, use it to filter posts if we have some
if query and posts:
posts = wf.filter(query, posts, key=lambda x: x['title'])
# If we have no data to show, so show a warning and stop
if not posts:
wf.add_item('No posts found', icon=ICON_WARNING)
wf.send_feedback()
return 0
show_RSS_items(posts)
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow3()
sys.exit(wf.run(main))
``` |
{
"source": "joshua2285-cmis/joshua2285-cmis-cs2",
"score": 4
} |
#### File: joshua2285-cmis/joshua2285-cmis-cs2/oneguess.py
```python
import random
#Main
def main():
minnumber = int(raw_input("What is the minimum number? "))
maxnumber = int(raw_input("What is the maximum number? "))
thenumber = int(random.randint(minnumber, maxnumber))
print """
I am thinking of a number from {} to {}.
""" .format(minnumber, maxnumber)
guessnumber = int(raw_input("What do you think it is?: "))
wrongby = abs(thenumber - guessnumber)
if thenumber == guessnumber:
print """
The target was {}.
Your guess was {}.
That's correct! You must be psychic!
""" .format(thenumber, guessnumber)
elif thenumber > guessnumber :
print """
The target was {}.
Your guess was {}.
That's under by {}.
""" .format(thenumber,guessnumber, wrongby)
else:
print """
The target was {}.
Your guess was {}.
That's over by {}.
""" .format(thenumber,guessnumber, wrongby)
main()
``` |