{
"source": "joshi-bharat/slamutils-python",
"score": 3
} |
#### File: joshi-bharat/slamutils-python/stereo_calibration_test.py
```python
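# Sanity-check a Kalibr stereo calibration: build rectification maps from the YAML,
# remap a left/right image pair, and draw horizontal lines to verify epipolar alignment.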
import cv2
import yaml
import numpy as np
def read_kalibr_file(filename):
with open(filename, 'r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
K1 = data['cam0']['intrinsics']
K1 = np.array([[K1[0], 0.0, K1[2]],
[0.0, K1[1], K1[3]],
[0.0, 0.0, 1.0]])
K2 = data['cam1']['intrinsics']
K2 = np.array([[K2[0], 0.0, K2[2]],
[0.0, K2[1], K2[3]],
[0.0, 0.0, 1.0]])
d1 = np.array(data['cam0']['distortion_coeffs'])
d2 = np.array(data['cam1']['distortion_coeffs'])
size = data['cam0']['resolution']
trans_c1_c0 = np.array(data['cam1']['T_cn_cnm1'])
R_c1_c0 = np.array(trans_c1_c0[0:3, 0:3])
T_c1_c0 = np.array(trans_c1_c0[0:3, 3])
distortion_type = data['cam0']['distortion_model']
# print('K1: ', K1)
# print('K2: ', K2)
# print('d1: ', d1)
# print('d2: ', d2)
# print('size: ', size)
# print('T_c1_c0: ', T_c1_c0)
# print('R_c1_c0: ', R_c1_c0)
return K1, K2, d1, d2, distortion_type, size, T_c1_c0, R_c1_c0
calib_file = './stereo_rig_calib.yaml'
left = cv2.imread('./left_new.png')
right = cv2.imread('./right_new.png')
K1, K2, d1, d2, distortion_type, size, T_c1_c0, R_c1_c0 = read_kalibr_file(
calib_file)
result = None
size = (1600, 1200)
if distortion_type == 'radtan':
result = cv2.stereoRectify(cameraMatrix1=K1, cameraMatrix2=K2, distCoeffs1=d1,
distCoeffs2=d2, imageSize=size, R=R_c1_c0, T=T_c1_c0,
flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)
else:
result = cv2.fisheye.stereoRectify(cameraMatrix1=K1, cameraMatrix2=K2, distCoeffs1=d1,
distCoeffs2=d2, imageSize=size, R=R_c1_c0, T=T_c1_c0,
flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)
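    # note: cv2.fisheye.stereoRectify takes K1/D1-style arguments and balance/fov_scale
    # rather than alpha, so the keywords in this branch may need adjusting for fisheye models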
R1 = result[0]
R2 = result[1]
P1 = result[2]
P2 = result[3]
Q = result[4]
print('R1: ', R1)
print('R2: ', R2)
print('P1: ', P1)
print('P2: ', P2)
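# OpenCV's disparity-to-depth matrix Q stores -1/Tx at Q[3, 2], so its reciprocal gives the (signed) stereo baseline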
baseline = 1.0 / Q[3, 2]
fx = P1[0, 0]
bf = baseline * fx
print('baseline: ', baseline)
mapx_1, mapy_1 = cv2.initUndistortRectifyMap(
cameraMatrix=K1, distCoeffs=d1, R=R1, newCameraMatrix=P1, size=size, m1type=cv2.CV_32FC1)
mapx_2, mapy_2 = cv2.initUndistortRectifyMap(
cameraMatrix=K2, distCoeffs=d2, R=R2, newCameraMatrix=P2, size=size, m1type=cv2.CV_32FC1)
left_rectified = cv2.remap(left, mapx_1, mapy_1, cv2.INTER_LINEAR)
right_rectified = cv2.remap(right, mapx_2, mapy_2, cv2.INTER_LINEAR)
left_rectified = cv2.resize(left_rectified, (800, 600),
interpolation=cv2.INTER_LINEAR)
right_rectified = cv2.resize(right_rectified, (800, 600),
interpolation=cv2.INTER_LINEAR)
# left = cv2.resize(left, (800, 600),
# interpolation=cv2.INTER_LINEAR)
# right = cv2.resize(right, (800, 600),
# interpolation=cv2.INTER_LINEAR)
concat = cv2.hconcat([left_rectified, right_rectified])
for i in range(120):
start = (0, i * 20)
end = (3200, i * 20)
cv2.line(concat, start, end, (0, 0, 255), 1)
cv2.imshow('concat', concat)
cv2.waitKey(0)
# cv2.imshow('right', right_rectified)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
``` |
{
"source": "joshibhaumik/Notes",
"score": 2
} |
#### File: backend/API/models.py
```python
from django.db import models
from django.contrib.auth.models import User
class Note(models.Model):
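    # A drawn note: canvas size, serialized stroke points, public/private status,
    # the owning user, and the users it has been shared with.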
STATUS = [
("public", "public"),
("private", "private")
]
# you can add your own default profile picture uri
DEFAULT_URL = "http://res.cloudinary.com/dqaiapvgm/image/upload/v1602228372/profile_picture_swxnoc.webp"
name = models.CharField(max_length=100)
width = models.FloatField()
height = models.FloatField()
points = models.TextField()
status = models.CharField(max_length=10, choices=STATUS, default="public")
creater = models.ForeignKey(
User, related_name="notes", on_delete=models.CASCADE)
shared_to = models.ManyToManyField(
User, blank=True, related_name="shared_to_me")
profile = models.CharField(max_length=250, default=DEFAULT_URL)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{self.name} by {self.creater}"
``` |
{
"source": "joshichaitanya3/morpho",
"score": 3
} |
#### File: morpho/examples/examples.py
```python
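# Run every .morpho example under the current tree with morpho5, scan the captured
# output for @error markers, and report pass/fail counts (failures go to FailedExamples.txt).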
import os, glob, sys
from queue import Empty
import regex as rx
from functools import reduce
import operator
import colored
from colored import stylize
sys.path.append('../test')
ext = "morpho"
command = 'morpho5'
stk = '@stacktrace'
err = '@error'
def finderror(str):
#return rx.findall(r'\/\/ expect ?(.*) error', str)
#return rx.findall(r'.*[E|e]rror[ :].*?(.*)', str)
return rx.findall(r'@error.*', str)
def simplify_errors(str):
    # this monster regex extracts NAME from error messages of the form error ... 'NAME'
return rx.sub('.*[E|e]rror[ :]*\'([A-z;a-z]*)\'.*', err+'[\\1]', str.rstrip())
# Simplify stacktrace
def simplify_stacktrace(str):
return rx.sub(r'.*at line.*', stk, str.rstrip())
def iserror(str):
#return rx.findall(r'\/\/ expect ?(.*) error', str)
test=rx.findall(r'@error.*', str)
return len(test)>0
def remove_control_characters(str):
return rx.sub(r'\x1b[^m]*m', '', str.rstrip())
def isin(str):
#return rx.findall(r'\/\/ expect ?(.*) error', str)
test=rx.findall(r'.*in .*', str)
return len(test)>0
def getoutput(filepath):
# Load the file
file_object = open(filepath, 'r')
lines = file_object.readlines()
file_object.close()
# remove all control characters
lines = list(map(remove_control_characters, lines))
# Convert errors to our universal error code
lines = list(map(simplify_errors, lines))
# Identify stack trace lines
lines = list(map(simplify_stacktrace, lines))
for i in range(len(lines)-1):
if (iserror(lines[i])):
if (isin(lines[i+1])):
lines[i+1]=stk
# and remove them
return list(filter(lambda x: x!=stk, lines))
def run(file,testLog,CI):
ret = 1
print(file+":", end=" ")
# Create a temporary file in the same directory
tmp = file + '.out'
# Run the test
os.system(command + ' ' +file + ' > ' + tmp)
# If we produced output
if os.path.exists(tmp):
# Get the output
out = getoutput(tmp)
        # look for errors
for line in out:
err = finderror(line)
# Was it expected?
if (iserror(line)):
if not CI:
print(stylize("Failed",colored.fg("red")))
else:
print("::error file = {",file,"}::{",file," Failed}")
#also print to the test log
print(file+":", end=" ", file = testLog)
print("Failed", end=" ", file = testLog)
print("with error "+ err[0], file = testLog)
print("\n",file = testLog)
ret = 0
break
if (ret ==1):
if not CI:
print(file+":", end=" ")
print(stylize("Passed",colored.fg("green")))
# Delete the temporary file
os.system('rm ' + tmp)
return ret
print('--Building Examples---------------------')
# open a test log
# write failures to log
success=0 # number of successful examples
total=0 # total number of examples
# look for a command line argument that says
# this is being run for continuous integration
CI = '-c' in sys.argv
files=glob.glob('**/**.'+ext, recursive=True)
with open("FailedExamples.txt",'w') as testLog:
for f in files:
success+=run(f,testLog,CI)
total+=1
print('--End testing-----------------------')
print(success, 'out of', total, 'tests passed.')
if CI and success<total:
exit(-1)
``` |
{
"source": "joshicola/fw-gear-building-gui",
"score": 2
} |
#### File: joshicola/fw-gear-building-gui/GearBuilderGUI.py
```python
import json
import os
import os.path as op
import sys
from pathlib import Path
from PyQt5 import QtGui, QtWidgets, uic
from gear_builder_gui.dockerfile import Dockerfile
from gear_builder_gui.manifest import Manifest
from gear_builder_gui.menus import Gear_Builder_Menus
from gear_builder_gui.template_management import Template_Management
class GearBuilderGUI(QtWidgets.QMainWindow):
def __init__(self):
super(GearBuilderGUI, self).__init__()
self.root_dir = Path(op.dirname(__file__))
# set gear definition to an empty default
self.gear_def = {"manifest": {}, "dockerfile": {}, "template": {}}
script_dir = op.dirname(os.path.realpath(__file__))
icon_path = op.join(script_dir, "gear_builder_gui/resources/flywheel.png")
self.setWindowIcon(QtGui.QIcon(icon_path))
self.setWindowIconText("Flywheel Gear Builder")
# This section is to load directly from the form object
Form, _ = uic.loadUiType(
op.join(script_dir, "gear_builder_gui/pyqt5_ui/gear_builder_gui.ui")
)
self.ui = Form()
# This will load from a python file. Useful for direct debugging
# self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Separating out functionality of the three components to facilitate
# individual development
self.manifest = Manifest(self)
self.dockerfile = Dockerfile(self)
self.templates = Template_Management(self)
self.menus = Gear_Builder_Menus(self)
self.ui.btn_export_gear.clicked.connect(self.export_gear)
def save_gear_def(self, directory):
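        # persist the combined manifest/dockerfile/template definition as <gear name>.gear.json alongside the exported gear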
gear_name = self.gear_def["manifest"]["name"]
output_file = Path(directory) / (gear_name + ".gear.json")
with open(output_file, "w") as fp:
json.dump(self.gear_def, fp)
def export_gear(self):
directory = str(
QtWidgets.QFileDialog.getExistingDirectory(
self, "Select Folder to export gear template"
)
)
if op.exists(directory):
self.manifest._update_manifest_from_form()
gear_path = Path(directory) / self.gear_def["manifest"]["name"]
gear_path.mkdir(parents=True, exist_ok=True)
self.manifest.save(gear_path)
self.templates.render_and_copy_templates(gear_path)
self.save_gear_def(gear_path)
if __name__ == "__main__":
source_dir = Path(os.path.dirname(os.path.realpath(__file__)))
app = QtWidgets.QApplication([])
app.setWindowIcon(
QtGui.QIcon(str(source_dir / "gear_builder_gui/resources/flywheel.png"))
)
application = GearBuilderGUI()
application.show()
sys.exit(app.exec())
```
#### File: fw-gear-building-gui/gear_builder_gui/template_management.py
```python
import copy
import glob
import json
import os
import shutil
from collections import OrderedDict
from pathlib import Path
import pystache
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QCheckBox,
QFileDialog,
QFormLayout,
QLabel,
QLineEdit,
QMessageBox,
QWidget,
)
# TODO: This should be named "Gear Template Management"
"""
Provide the base run.py and utils package.
Creating build/validate/execute functional modules around specific command-line
programs.
Add a command-line "switch-detector" to populate the manifest config with values
to loop through.
Provide a library of code-blocks that facilitate certain functionality
module-based log reporting
bids functionality
verbose config validation against manifest
compress working directory to a file in output
notify on pep8 violations(??)
"""
class Template_Management:
"""
Class for template management.
"""
def __init__(self, main_window):
"""
Initialize templates management tab form elements with defaults.
Args:
main_window (GearBuilderGUI): The instantiated main window.
"""
self.main_window = main_window
self.ui = main_window.ui
self.template_def = main_window.gear_def["template"]
self.reload_template = True
self.init_template_options()
# Initialize gear-template combo with available/valid gear-templates
self.init_gear_templates()
self.ui.cbo_gear_template.currentIndexChanged.connect(
self._update_template_options
)
self.ui.cbo_gear_template.setCurrentIndex(0)
self._update_template_options()
self.ui.btn_Imp_Temp.clicked.connect(self.import_gear_template)
def import_gear_template(self):
"""
Import a gear template from a local directory.
"""
directory = str(
QFileDialog.getExistingDirectory(
self.main_window,
(
"Select Folder to import gear template. "
"Must have .template directory"
),
)
)
# Look for necessary directory and files
template_dir = Path(directory) / ".template"
if not template_dir.exists():
raise Exception("Invalid template directory")
gear_template_directives = template_dir / "gear_template_directives.json"
if not gear_template_directives.exists():
raise Exception("Missing gear_template_directives.json")
template_directives = json.loads(gear_template_directives.read_text())
# make it point to the template_dir
template_directives["base_dir"] = str(template_dir)
# save a copy of template directives in ~/.gearbuilder/gear_library/
gear_library = Path(os.path.expanduser("~") + "/.gearbuilder/gear_library/")
if not gear_library.exists():
gear_library.mkdir(parents=True)
gear_library_template_dir = gear_library / template_directives["template_name"]
gear_library_template_dir.mkdir(parents=True, exist_ok=True)
gear_library_template_dir /= "gear_template_directives.json"
gear_library_template_dir.write_text(json.dumps(template_directives))
self.init_gear_templates()
def init_gear_templates(self):
"""
Initialize gear template combo from gear_library sub-directories.
Only valid templates are shown.
"""
self.ui.cbo_gear_template.clear()
default_gear_templates = []
template_dirs = glob.glob(str(self.main_window.root_dir / "gear_library/*"))
# check for imported gear templates
gear_library = Path(os.path.expanduser("~") + "/.gearbuilder/gear_library/")
if gear_library.exists():
gear_library_template_dirs = glob.glob(str(gear_library / "*"))
template_dirs.extend(gear_library_template_dirs)
for template_dir in template_dirs:
template_dir = Path(template_dir)
if template_dir.is_dir():
directive_path = template_dir / "gear_template_directives.json"
if directive_path.exists():
template_directive = json.load(
open(directive_path, "r"), object_pairs_hook=OrderedDict,
)
# if this template is in the user gear library
if gear_library in template_dir.parents:
base_dir = Path(template_directive["base_dir"])
# if the origin of template exists
if base_dir.exists():
update_directive_path = (
base_dir / "gear_template_directives.json"
)
if update_directive_path.exists():
template_directive_update = json.load(
open(update_directive_path, "r"),
object_pairs_hook=OrderedDict,
)
template_directive.update(template_directive_update)
directive_path.write_text(
json.dumps(template_directive)
)
else:
# if the origin of the template does not exist, remove it
shutil.rmtree(template_dir)
continue
# TODO: Add template-manifest validator....
default_gear_templates.append(template_directive)
self.ui.cbo_gear_template.clear()
# Set the gear template data
for template in default_gear_templates:
self.ui.cbo_gear_template.addItem(
template["template_description"], userData=template
)
def init_template_options(self):
"""
Initialize script options ScrollArea
"""
self.widget = QWidget()
self.ui.fbox = QFormLayout()
self.widget.setLayout(self.ui.fbox)
# Scroll Area Properties
self.ui.scrOptions.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.ui.scrOptions.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.ui.scrOptions.setWidgetResizable(True)
self.ui.scrOptions.setWidget(self.widget)
def _update_template_options(self):
"""
Update subform script options on update of named script-template
"""
# clear QFormLayout()
while self.ui.fbox.rowCount() > 0:
self.ui.fbox.removeRow(0)
# iterate through tags of current gear_template
data = self.ui.cbo_gear_template.currentData()
self.base_dir = Path(data["base_dir"])
if (self.base_dir / ".template.gear.json").exists() and self.reload_template:
qm = QMessageBox()
ret = qm.question(
self.main_window,
"",
"Would you like to load default values for this template?",
qm.Yes | qm.No,
)
if ret == qm.Yes:
self.main_window.gear_def.clear()
with open(self.base_dir / ".template.gear.json", "r") as fp:
self.main_window.gear_def.update(json.load(fp))
self.main_window.menus.update_forms_from_gear_def()
for k, v in data["tags"].items():
Label = QLabel(k + ":")
if isinstance(v, bool):
object = QCheckBox()
object.setChecked(v)
else:
object = QLineEdit()
object.setText(v)
object.setObjectName(k)
self.ui.fbox.addRow(Label, object)
def _update_form_from_template_def(self):
"""
Select and populate the template-specific form values from the template_def.
"""
for index in range(self.ui.cbo_gear_template.count()):
if (
self.ui.cbo_gear_template.itemData(index)["template_name"]
== self.template_def["template_name"]
and index != self.ui.cbo_gear_template.currentIndex()
):
self.reload_template = False
self.ui.cbo_gear_template.setCurrentIndex(index)
self.reload_template = True
# self._update_template_options(reload_template)
break
for i in range(self.ui.fbox.rowCount()):
item = self.ui.fbox.itemAt(i * 2 + 1).widget()
if isinstance(item, QLineEdit):
if self.template_def.get(item.objectName()):
item.setText(self.template_def[item.objectName()])
elif isinstance(item, QCheckBox):
if self.template_def.get(item.objectName()):
item.setChecked(self.template_def[item.objectName()])
def _update_template_def_from_form(self):
"""
Clear and repopulate template_def from template-specific form values.
"""
# unlike the manifest and dockerfile definitions, we need to clear the script
# definition and repopulate.
self.template_def.clear()
template_def = {}
data = self.ui.cbo_gear_template.currentData()
template_def["template_name"] = data["template_name"]
for i in range(self.ui.fbox.rowCount()):
item = self.ui.fbox.itemAt(i * 2 + 1).widget()
if isinstance(item, QLineEdit):
template_def[item.objectName()] = item.text()
elif isinstance(item, QCheckBox):
template_def[item.objectName()] = item.isChecked()
self.template_def.update(template_def)
def save(self, directory):
"""
Saves the script hierarchy to provided directory.
Args:
directory (str): Path to output directory.
"""
self._update_template_def_from_form()
cbo_template_data = self.ui.cbo_gear_template.currentData()
for fl in cbo_template_data["templates"]:
# Mustache Render
gear_template = (
self.main_window.root_dir / cbo_template_data["base_dir"] / fl
)
if gear_template.exists():
output_filename = Path(directory) / fl.replace(".mu", "").replace(
".mustache", ""
)
renderer = pystache.Renderer()
template_output = renderer.render_path(
gear_template, self.main_window.gear_def
)
# Ensure the path to write rendered template exists
if not output_filename.parent.exists():
output_filename.parent.mkdir(parents=True)
with open(output_filename, "w") as fp:
fp.write(template_output)
else:
# TODO: Alert user with PopUp
# TODO: This should be considered an invalid script-template.
print("template does not exist.")
for fl in cbo_template_data["copy"]:
source_path = self.main_window.root_dir / cbo_template_data["base_dir"] / fl
destination_path = Path(directory) / fl
if source_path.exists():
if source_path.is_dir():
# shutil.copytree must have an empty destination
if destination_path.exists():
shutil.rmtree(destination_path)
shutil.copytree(source_path, destination_path)
elif source_path.is_file():
shutil.copy(source_path, destination_path)
else:
print("Unrecognized Path object.")
else:
                # TODO: Alert user with PopUp... or cumulative errors in a single popup
print("File or path does not exist.")
def _prep_gear_def(self, gear_def):
"""
Give manifest some fields to assist in rendering the README.
Args:
gear_def (dict): Dictionary of manifest, docker, and other attributes.
"""
local_manifest = gear_def["manifest"]
# Check for non-zero number of inputs
if len(local_manifest["inputs"].keys()) > 0:
local_manifest["has_inputs"] = True
local_manifest["inputs_list"] = []
for inp, val in local_manifest["inputs"].items():
val["name"] = inp
local_manifest["inputs_list"].append(val)
# Check for a non-zero number of configs
if len(local_manifest["config"].keys()) > 0:
local_manifest["has_configs"] = True
local_manifest["config_list"] = []
for conf, val in local_manifest["config"].items():
val["name"] = conf
if "default" in val.keys():
val["default_val"] = {"val": val["default"]}
local_manifest["config_list"].append(val)
# Prep Dockerfile_def for rendering
local_dockerfile = gear_def["dockerfile"]
# Check for non-zero number of inputs
if len(local_dockerfile["apt_get"]) > 0:
local_dockerfile["has_apt"] = True
# Check for a non-zero number of configs
if len(local_dockerfile["pip"]) > 0:
local_dockerfile["has_pip"] = True
# Check for a non-zero number of configs
if len(local_dockerfile["ENV"]) > 0:
local_dockerfile["has_env"] = True
def _render_templates(self, gear_def, gear_directives, output_dir):
"""
Iterate through and render all file and folder templates in the gear.
Args:
gear_def (dict): Dictionary of manifest, docker, and other attributes.
gear_directives (dict): Dictionary containing the templates to render.
output_dir (Pathlike): Top level directory to write gear to.
"""
renderer = pystache.Renderer()
# Iterate through the templates and render them
for template_file in gear_directives["templates"]:
# there may be multiple templates for a template directive
# this is where we initialize these flags
template_files = None
template_file_in, template_file_out = None, None
# ":" is the delimiter between "source" and "destination"
if ":" in template_file:
template_file_in, template_file_out = template_file.split(":")
# the "destination" may be a mustache template to render to
# a specified path
template_file_out = renderer.render(template_file_out, gear_def)
# iterate through directories with wildcard
if "*" in template_file:
if not template_file_in:
template_file_in = template_file
template_file_out = template_file_in.replace("*", "")
template_files = glob.glob(str(self.base_dir / template_file_in))
# iterating through the wildcards
if template_files:
for template_fl in template_files:
with open(self.base_dir / template_fl, "r") as fp:
template = fp.read()
rendered_template = renderer.render(template, gear_def)
if template_file_out:
output_path = output_dir / template_file_out
output_fl = output_path / Path(template_fl).name
else:
                        output_fl = Path(output_dir) / template_fl
                        # defensive: make sure output_path is defined even when no explicit destination was given
                        output_path = output_fl.parent
                    output_path.mkdir(parents=True, exist_ok=True)
with open(output_fl, "w") as fp:
fp.write(rendered_template)
# or rendering the file-level template
else:
with open(self.base_dir / template_file, "r") as fp:
template = fp.read()
rendered_template = renderer.render(template, gear_def)
with open(output_dir / template_file.replace(".mu", ""), "w") as fp:
fp.write(rendered_template)
def _copy_files(self, gear_directives, output_dir):
"""
Copy files from the template to the gear.
Args:
            gear_directives (dict): Dictionary of files to copy.
output_dir (Pathlike): Top level directory to write gear to.
"""
for template_file in gear_directives["copy"]:
template_file_path = self.base_dir / template_file
if template_file_path.is_file():
shutil.copy(template_file_path, output_dir)
elif template_file_path.is_dir():
shutil.copytree(
template_file_path,
output_dir / template_file_path.name,
dirs_exist_ok=True,
)
def render_and_copy_templates(self, directory):
"""
Renders or copies all templates in the script-template directory.
Args:
directory (str): Path to output directory.
"""
self.main_window.dockerfile._update_dockerfile_def_from_form()
self._update_template_def_from_form()
cbo_template_data = self.ui.cbo_gear_template.currentData()
if cbo_template_data["base_dir"].startswith("/"):
self.base_dir = Path(cbo_template_data["base_dir"])
else:
self.base_dir = self.main_window.root_dir / cbo_template_data["base_dir"]
gear_def = copy.deepcopy(self.main_window.gear_def)
self._prep_gear_def(gear_def)
self._render_templates(gear_def, cbo_template_data, Path(directory))
self._copy_files(cbo_template_data, Path(directory))
if "README.md.mu" not in cbo_template_data["templates"]:
self.main_window.manifest.save_draft_readme(Path(directory))
if "Dockerfile.mu" not in cbo_template_data["templates"]:
self.main_window.dockerfile.save(Path(directory))
```
#### File: build_validate_execute/utils/args.py
```python
import os.path as op
import sys
from collections import OrderedDict
import flywheel
from gear_toolkit.command_line import build_command_list, exec_command
def build(context):
# use Ordered Dictionary to keep the order created.
# Default in Python 3.6 onward
params = OrderedDict()
config = context.config
inputs = context._inputs
# Default behavior here is to have gear-configuration parameters
# that are the same as the command-line parameters
# e.g. --<name>=<value>
for key in config.keys():
params[key] = config[key]
# inputs (file inputs) have a path
# e.g. --<name>=<input file path>
for key in inputs.keys():
params[key] = context.get_input_path(key)
return params
def validate(params):
"""
validate the given parameters against potential conflicts
Some command parameters can be mutually exclusive, that is, they cannot
both be set to particular values and guarantee an error-free execution.
This will be custom to each command and is initialized to `pass` for ease
of representation.
Args:
params (dict): dictionary of pydeface parameters
Raises:
Exception: if we have parameter conflicts, we raise an exception and
conclude our execution instead of attempting a command execution
that is guaranteed to fail.
"""
pass
def execute(command, params, dry_run=False, environ=None):
# Get Params
# Build command-line parameters
command = build_command_list(command, params)
# Extend with positional arguments
# command.append(context.get_input_path('infile'))
exec_command(command, environ=environ)
```
#### File: compound_script/utils/custom_logger.py
```python
import logging
import sys
import time
def log_config(context):
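    # log every gear input (masking the api-key) and every configuration parameter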
config = context.config
inputs = context._invocation['inputs']
context.log.info('\n\nThe following inputs are used:')
for key in inputs.keys():
if key == 'api-key':
context.log.info('{}: *********'.format(key))
else:
context.log.info(
'{}: {}'.format(key, context.get_input_path(key))
)
context.log.info('\n\nThe following configuration parameters are set:')
for key in config.keys():
context.log.info(
'{}: {}'.format(key, context.config[key])
)
context.log.info('\n')
def get_custom_logger(log_name):
# Initialize Custom Logging
# Timestamps with logging assist debugging algorithms
# With long execution times
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(
fmt='%(levelname)s - %(name)-8s - %(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger = logging.getLogger(log_name)
logger.propagate = False
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
``` |
{
"source": "joshidhaivat/QAlign",
"score": 3
} |
#### File: QAlign/qalign/convert_test.py
```python
import unittest
class ConvertTest(unittest.TestCase):
def test_dummy(self):
"""
usage: python -m unittest qalign.convert_test.ConvertTest.test_dummy
"""
print('test_dummy')
return
def test_get_kmermap(self):
from qalign.convert import get_kmermap
path = None
kmermap = get_kmermap(path)
print(kmermap)
return
def test_get_qlevels(self):
from qalign.convert import get_kmermap, get_qlevels
kmermap = get_kmermap()
num_level_list = [2, 3, 4]
for num_level in num_level_list:
qlevels = get_qlevels(kmermap, num_level)
print('num_level=%d'%num_level)
print(qlevels)
return
def test_convert_reads(self):
import os
from qalign.convert import convert
pa_dir = os.path.dirname(os.path.abspath(__file__))
read_path = os.path.join(pa_dir, 'data', 'dummy_reads.fasta')
test_dir = os.path.join(pa_dir, 'test')
convert(read_path, test_dir, 2, rc=True, kmermap_path=None)
return
if __name__ == '__main__':
unittest.main()
```
#### File: QAlign/qalign/main.py
```python
from pathlib import Path
print('Running' if __name__ == '__main__' else 'Importing', Path(__file__).resolve())
from argparse import ArgumentParser
import sys
from qalign.convert import convert
import pdb
def main():
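    # only the 'convert' sub-command is handled: it converts the input FASTA reads using the
    # chosen quantization level (Q2/Q3), optional reverse-complement, and the given kmer model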
parser = ArgumentParser()
subs = parser.add_subparsers()
#pdb.set_trace()
if sys.argv[1]=='convert':
s_parser = subs.add_parser('convert')
s_parser.add_argument('--input_fasta')
s_parser.add_argument('--outdir')
s_parser.add_argument('--qlevel', type=int, default=2, help="2 for Q2, 3 for Q3")
s_parser.add_argument('--rc', type=int, default=1, help="1 for rev-complement, 0 to disable")
s_parser.add_argument('--kmerpath', help="absolute path for kmer model")
args = parser.parse_args()
        if args.input_fasta is None:
            print('Please provide input reads in FASTA format')
            exit(1)
        if args.outdir is None:
            print('Please provide an output directory')
            exit(1)
        if args.kmerpath is None:
            print('Please provide the path to the kmer model (should be a text file)')
            exit(1)
#pdb.set_trace()
convert(
args.input_fasta,
args.outdir,
args.qlevel,
True if args.rc==1 else False,
args.kmerpath
)
print('Conversion of the sequences is completed successfully!')
else:
print('Not a valid option!')
exit(1)
pass
if __name__ == "__main__":
main()
```
#### File: qalign/raw/analyse_overlap_v2.py
```python
from multiprocessing import Pool
import sys
sys.path.append('/home/djjoshi')
import all_functions as all_func
import numpy as np
import scipy.io as sio
import time
import multiprocessing as mp
#import edit_distance as ed
import os
tol = []#int(sys.argv[2])
ed_score_thd = float(sys.argv[4])
mapping_thd = 0
overlap = []
reads_overlap = []
overlap_ed = []
thd = []#[int(sys.argv[3])]
n = []
reads = []
percent = 0
MAX_PROCESS = 80#int(mp.cpu_count())
MAX_TPP = 1000
total_loops = []
reverse = 0
overlap_factor = 0.9
def func(i):
#print(f'Process {os.getpid()} started')
global overlap,overlap_ed,n,thd,reads,percent,MAX_PROCESS,tol,reverse,reads_overlap,ed_score_thd,overlap_factor,mapping_thd
percent += 1
t1 = time.time()
ovp_set = np.zeros([4,1],dtype='f')
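    # ovp_set rows: [accepted-overlap flag, pair index into the ground-truth table, edit-distance score, overlap score]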
first_index = int(overlap[i,0])
read_length1 = int(overlap[i,1])
match_start1 = int(overlap[i,2])
if(match_start1<1):
match_start1 = 1
match_end1 = int(overlap[i,3])
strand = int(overlap[i,4])
second_index = int(overlap[i,5])
read_length2 = int(overlap[i,6])
match_start2 = int(overlap[i,7])
if(match_start2<1):
match_start2 = 1
match_end2 = int(overlap[i,8])
matched_base = int(overlap[i,9])
matched_total = int(overlap[i,10])
ed_score = overlap_ed[i]
mapping_quality = int(overlap[i,11])
pair = np.array([first_index,second_index])
pair.sort(axis=0)
temp_index = 0
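    # map the unordered read pair to its linear row index in the upper-triangular ground-truth table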
for j in range(0,pair[0]-1):
temp_index += n-j
temp_index += pair[1]-pair[0]+1
#print('tmp_idx='+str(temp_index))
g1 = match_end1-match_start1
g2 = match_end2-match_start2
l = reads_overlap[temp_index-1,2]
write = 0
if((matched_total/min([read_length1,read_length2])) >= 0.85):
tol1=0
tol2=0
diff1 = max(0,g1+tol1-l)
diff2 = max(0,g2+tol2-l)
r_l = min(read_length1,read_length2)
if(g1 >= overlap_factor*r_l and g2 >= overlap_factor*r_l and (ed_score <= ed_score_thd or mapping_quality >= mapping_thd)):
write=1
else:
tol1 = min(match_start1,read_length1-match_end1)
tol2 = min(match_start2,read_length2-match_end2)
diff1 = max(0,g1+tol1-l)
diff2 = max(0,g2+tol2-l)
if((tol1+tol2 <= (1-overlap_factor)*g1) and (tol1+tol2 <= (1-overlap_factor)*g2) and ed_score <= ed_score_thd):
write = 1
if(l!=0):
score = (g1+g2-diff1-diff2)/(l+l+diff1+diff2)
else:
score = 0
if(write==1):
ovp_set[0] = 1
ovp_set[1] = temp_index
ovp_set[2] = ed_score
ovp_set[3] = score
#if(i%1000 == 0):
#print('i='+str(percent)+'/'+str(MAX_TPP)+'; score='+str(score)+'; t='+str(time.time()-t1))
return ovp_set
if __name__ == "__main__":
t1 = time.time()
foldername = str(sys.argv[1])#str(input("Enter the number : "))
filename = str(sys.argv[2])
out_filename = str(sys.argv[3])
#matfile = sio.loadmat('chosen_reads.mat')
#reads_list = matfile['chosen_reads']
#for i in range(reads_list.shape[1]):
# reads += [reads_list[0,i][0]]
reads,_,reads_name = all_func.get_reads_from_fasta(foldername+'reads.fasta')
overlap,overlap_ed,cigar = all_func.extract_from_paf(foldername+filename,1)
reads_overlap = sio.loadmat(foldername+'ground_truth.mat')
reads_overlap = reads_overlap['reads_overlap']
#reads_overlap = reads_overlap[:,:,1]
n = int(len(reads))
print('n='+str(n))
print(len(thd))
if 'q' in filename:
loops = 2
else:
loops = 1
for ll in range(0,loops):
if(ll==0):
print('Loading file : {}'.format(filename))
else:
overlap,overlap_ed,cigar = all_func.extract_from_paf(foldername+'rc_'+filename,1)
print('Loading file : {}'.format('rc_'+filename))
reverse = 1
#ovp_set = np.zeros([overlap.shape[0],2,len(thd)],dtype='float')
print('Loading complete')
ovp_set = []
print('# of CPUs = '+str(MAX_PROCESS))
time.sleep(1)
total_loops = int(overlap.shape[0]/(MAX_PROCESS*MAX_TPP))+1
for i in range(0,total_loops):
print('Starting loop '+str(i)+' of '+str(total_loops))
p = Pool(processes = MAX_PROCESS, maxtasksperchild = 10)
i_start = i*MAX_PROCESS*MAX_TPP
if(i != total_loops-1):
i_end = (i+1)*MAX_PROCESS*MAX_TPP
else:
i_end = overlap.shape[0]
ovp_set += p.map(func,range(i_start,i_end))
p.close()
p.join()
ovp_set = np.array(ovp_set)
#print(ovp_set)
#print(ovp_set.shape)
#sio.savemat('ovlp_'+name+'.mat',{'ovp_set':ovp_set})
if(ll==0):
overlap_set = np.concatenate((reads_overlap, np.zeros([reads_overlap.shape[0],3],dtype='f')),axis=1)
for i in range(0,ovp_set.shape[0]):
if(ovp_set[i,0]!=0):
write = 0
index = int(ovp_set[i,1])
score = ovp_set[i,2]
temp_score = overlap_set[index-1,-2]
if(temp_score!=0):
if(score<=temp_score):
write = 1
else:
write = 1
if(write):
overlap_set[index-1,-2] = score
overlap_set[index-1,-3] = ovp_set[i,0]
overlap_set[index-1,-1] = ovp_set[i,3]
#print('k='+str(k)+' i='+str(i))
ovp = overlap_set
a1 = ovp[:,3]==1
a = np.sum(a1)
print('Ground truth = '+str(a))
a2 = ovp[:,-3]==1
a = (a1 & a2)
a = np.sum(a)
print('True positives = '+str(a))
a1 = ovp[:,3]==0
a2 = ovp[:,-3]==1
a = (a1 & a2)
a = np.sum(a)
print('False positives = '+str(a))
sio.savemat(foldername+'overlap_analysis_'+out_filename+'.mat',{'overlap_set':overlap_set})
print('Done! '+foldername+filename+'_'+str(tol)+' Time taken = '+str(time.time()-t1))
``` |
{
"source": "joshidhawal/Google-Foobar-Challenge",
"score": 4
} |
#### File: joshidhawal/Google-Foobar-Challenge/foobar1.py
```python
def solution(plaintext):
outputcode=''
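    # each letter maps to a 6-character binary string describing its Braille cell;
    # 'capital' is the capitalization-indicator cell emitted before an upper-case letter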
brailledots = { 'a':'100000','b':'110000', 'c': '100100','d':'100110','e':'100010','f':'110100','g':'110110','h':'110010','i':'010100','j':'010110',
'k':'101000','l':'111000','m':'101100','n':'101110','o':'101010','p':'111100','q':'111110','r':'111010','s':'011100','t':'011110','u':'101001',
'v':'111001','w':'010111','x':'101101','y':'101111','z':'101011',' ':'000000' ,'capital':'000001'}
for i in plaintext:
if(i==' '):
outputcode = outputcode+brailledots[i]
elif(i.isupper()):
a=i.lower()
outputcode=outputcode+brailledots['capital']+brailledots[a]
else:
outputcode=outputcode+brailledots[i]
return(outputcode)
plaintext='The quick brown fox jumps over the lazy dog'
output = solution(plaintext)
print(output)
print(type(output))
test='000001011110110010100010000000111110101001010100100100101000000000110000111010101010010111101110000000110100101010101101000000010110101001101100111100011100000000101010111001100010111010000000011110110010100010000000111000100000101011101111000000100110101010110110'
if(test==output):
    print('success')
``` |
{
"source": "joshidipesh12/Information-Security-Assignment-1-DipeshJoshi",
"score": 4
} |
#### File: Information-Security-Assignment-1-DipeshJoshi/modules/affine_cipher.py
```python
from constants import ENGLISH_ALPHABETS
from modules.utilities import inverse_Z26
def encrypt(message, a, b):
"""Method Defined for ENCRYPTION of Simple String \
message into a Cipher Text Using Affine Cipher
\nPARAMETERS\n
message: string to be encrypted
a: integer coefficient of x
b: integer additive value
\nRETURNS\n
Cipher_Text: encrypted Message string
"""
if inverse_Z26(a) == None:
print("Please Try Again!")
return ""
Cipher_Text = ""
message_chars = message.upper().split(" ")
for char in message_chars:
for i in char:
if i in ENGLISH_ALPHABETS:
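                # affine encryption: E(x) = (a * x + b) mod 26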
index = (
(
a * ENGLISH_ALPHABETS.index(i)
)
+ b) % 26
Cipher_Text += ENGLISH_ALPHABETS[index]
else:
Cipher_Text += i
Cipher_Text += " "
return Cipher_Text[:-1]
def decrypt(Cipher_Text, a, b):
"""Method Defined to DECRYPTION of a Cipher Text \
String into the original Message Using Affine \
Cipher Technique
\nPARAMETERS\n
CipherText: string to be decrypted
a: integer coefficient of x
b: integer additive value
\nRETURNS\n
message: decrypted string of Original Message
"""
a_inverse = inverse_Z26(a)
if a_inverse == None:
print("Please Try Again")
return ""
message = ""
Cipher_Text_chars = Cipher_Text.upper().split(" ")
for char in Cipher_Text_chars:
for i in char:
if i in ENGLISH_ALPHABETS:
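                # affine decryption: D(y) = a_inverse * (y - b) mod 26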
index = (
a_inverse*(
ENGLISH_ALPHABETS.index(i)-b
)
) % 26
message += ENGLISH_ALPHABETS[index]
else:
message += i
message += " "
return message[:-1]
```
#### File: Information-Security-Assignment-1-DipeshJoshi/modules/hill_cipher.py
```python
from modules.utilities import (
matrix_inverse_Z26,
string_to_Matrix_Z26
)
from constants import ENGLISH_ALPHABETS
import math
def encrypt(message_text, key):
"""Method Defined for ENCRYPTION of a Simple \
String message into a Cipher Text Using \
2x2 Hill Cipher Technique
\nPARAMETERS\n
message_text: string to be encrypted
key: string key for encryption with length <= 4
\nRETURNS\n
cipher_text: encrypted Message string
"""
# for 2x2 Hill Cipher length of key must be <= 4
# print("Warning: All Spaces with be lost!")
cipher_text = ""
key_matrix = None
if len(key) <= 4:
key_matrix = string_to_Matrix_Z26(key, 2, 2)
else:
print("Key Length must be <= 4 in 2x2 Hill Cipher")
return
pairs = math.ceil((len(message_text)/2))
matrix = string_to_Matrix_Z26(message_text, 2, pairs)
key_inverse = matrix_inverse_Z26(key_matrix)
if type(key_inverse) == type(None):
print("NOTE: The provided Key is NOT Invertible,")
print("To avoid failure while decryption,")
print("Try again with an invertible Key")
return None
for i in range(pairs):
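        # Hill cipher: each ciphertext column is key_matrix * plaintext_column (mod 26)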
result_char = (key_matrix*matrix[:, i]) % 26
cipher_text += ENGLISH_ALPHABETS[
result_char[0, 0]
]
cipher_text += ENGLISH_ALPHABETS[
result_char[1, 0]
]
return cipher_text
def decrypt(cipher_text, key):
"""Method Defined for DECRYPTION of a \
String Cipher Text into the original \
message Using 2x2 Hill Cipher Technique
\nPARAMETERS\n
cipher_text: string to be decrypted
key: string key for encryption, length <= 4
\nRETURNS\n
message: decrypted Message string
"""
message_text = ""
key_matrix = None
if len(key) <= 4:
key_matrix = string_to_Matrix_Z26(key, 2, 2)
key_matrix = matrix_inverse_Z26(key_matrix)
else:
print("Key Length must be <= 4 in 2x2 Hill Cipher")
return
pairs = math.ceil((len(cipher_text)/2))
matrix = string_to_Matrix_Z26(cipher_text, 2, pairs)
if type(key_matrix) != type(None):
for i in range(pairs):
result_char = (key_matrix*matrix[:, i]) % 26
message_text += ENGLISH_ALPHABETS[
result_char[0, 0]
]
message_text += ENGLISH_ALPHABETS[
result_char[1, 0]
]
else:
print("Unable to decrypt Cipher")
print("The key is Non-Invertible")
return message_text
``` |
{
"source": "JoshieKun/Flask-React-SocketIO",
"score": 2
} |
#### File: Flask-React-SocketIO/app/__init__.py
```python
from .events import socketio
from .config import DevelopmentConfig
from .views import app as application
from flask import Flask
def create_app():
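    # application-factory pattern: build the Flask app, bind the shared SocketIO instance, and register the views blueprint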
app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
socketio.init_app(app)
app.register_blueprint(application)
return app
``` |
{
"source": "joshiggins/django-view-composer",
"score": 2
} |
#### File: tests/basic/tests.py
```python
from django.test import TestCase
from django.template import Context, Template
from django.test.client import RequestFactory
import jsonpickle
class ViewTagTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def get_with_context(self, template, context):
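        # render the template string against a fresh GET request plus any extra context entries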
request = self.factory.get("/")
t = Template(template)
c = Context({"request": request, **context})
return t.render(c)
def test_single_view(self):
res = self.get_with_context(
"{% load view_composer %}{% view 'basic.views.IndexView' %}", {}
)
self.assertEqual(res, "<h1>Test</h1>")
def test_all_context(self):
res = self.get_with_context(
"{% load view_composer %}{% view 'basic.views.ContextTestView' %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "spam")
def test_no_context(self):
res = self.get_with_context(
"{% load view_composer %}{% view 'basic.views.ContextTestView' with only %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertFalse("food" in test_ctx)
def test_extra_context(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% view 'basic.views.ContextTestView' with food='spam' %}",
{},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "spam")
def test_extra_context_override(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% view 'basic.views.ContextTestView' with food='eggs' %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "eggs")
def test_extra_context_only(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% view 'basic.views.ContextTestView' with ham=1 only %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["ham"], 1)
self.assertFalse("food" in test_ctx)
def test_extra_context_resolved(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% view 'basic.views.ContextTestView' with food=spam %}",
{"spam": "eggs"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "eggs")
def test_block_view(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% viewblock 'basic.views.BlockTestView' %}"
" {% view 'basic.views.ContextTestView' %}"
"{% endviewblock %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "spam")
def test_nested_block_views(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% viewblock 'basic.views.BlockTestView' %}"
" {% viewblock 'basic.views.BlockTestView' %}"
" {% viewblock 'basic.views.BlockTestView' %}"
" {% view 'basic.views.ContextTestView' %}"
" {% endviewblock %}"
" {% endviewblock %}"
"{% endviewblock %}",
{"food": "spam"},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food"], "spam")
def test_view_kwargs(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% view 'basic.views.KwargsTestView' food='spam' with ham='eggs' %}",
{},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food_kwarg"], "spam")
self.assertEqual(test_ctx["ham"], "eggs")
def test_viewblock_kwargs(self):
res = self.get_with_context(
"{% load view_composer %}"
"{% viewblock 'basic.views.KwargsTestView' food='spam' with ham='eggs' %}"
"{% endviewblock %}",
{},
)
test_ctx = jsonpickle.decode(res)
self.assertEqual(test_ctx["food_kwarg"], "spam")
self.assertEqual(test_ctx["ham"], "eggs")
``` |
{
"source": "Joshi-Hiroshi/Covaware_Website--Flask",
"score": 3
} |
#### File: Joshi-Hiroshi/Covaware_Website--Flask/data_render.py
```python
import requests
def daywise_data_world():
day_wise = requests.get("https://corona.lmao.ninja/v2/all")
global_data = day_wise.json()
#print(day_wise_data)
new_confirmed = global_data['todayCases']
print(new_confirmed)
total_confirmed = global_data['cases']
new_deaths = global_data['todayDeaths']
total_deaths = global_data['deaths']
new_recovered = global_data['todayRecovered']
total_recovered = global_data['recovered']
c_per_mil = global_data["casesPerOneMillion"]
d_per_mil = global_data["deathsPerOneMillion"]
tests_per_mil = global_data["testsPerOneMillion"]
total_tests = global_data["tests"]
#i = 1
'''for item in day_wise_data:
day.append(i)
new_confirmed.append(item['Confirmed'])
new_deaths.append(item['Deaths'])
i += 1'''
return {"NewC" : f'{new_confirmed:,}' , "TotalC": f'{total_confirmed:,}' , "NewD" :f'{new_deaths:,}' , "TotalD": f'{total_deaths:,}', "NewR" : f'{new_recovered:,}' , "TotalR": f'{total_recovered:,}' , 'casesPerOneMillion': f'{c_per_mil:,}' , 'deathsPerOneMillion': d_per_mil , "testsPerOneMillion": f'{tests_per_mil:,}' ,"tests": f'{total_tests:,}' }
def daywise_data_india():
day_wise = requests.get("https://api.covid19api.com/dayone/country/india")
day_wise_data = day_wise.json()
day = []
confirmed = []
deaths = []
i = 1
for item in day_wise_data:
day.append(i)
confirmed.append(item['Confirmed'])
deaths.append(item['Deaths'])
i += 1
daywise_multi_list = [day[:-1], confirmed[:-1], deaths[:-1]]
del day
del confirmed
del deaths
return daywise_multi_list
def statewise_analysis():
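    # aggregate district-level counts into per-state totals, then sort states by (confirmed, active, recovered, deaths) in descending order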
state_data = requests.get("https://api.covid19india.org/state_district_wise.json")
state_data = state_data.json()
state_names = []
statewise_confirmed = []
statewise_deaths = []
statewise_recovered = []
statewise_active = []
i = 0
for states in state_data:
state_names.append(states)
statewise_confirmed.append(0)
statewise_deaths.append(0)
statewise_recovered.append(0)
statewise_active.append(0)
for districts in state_data[states]['districtData']:
statewise_confirmed[i] += state_data[states]['districtData'][districts]['confirmed']
statewise_deaths[i] += state_data[states]['districtData'][districts]['deceased']
statewise_recovered[i] += state_data[states]['districtData'][districts]['recovered']
statewise_active[i] += state_data[states]['districtData'][districts]['active']
i += 1
states_dict = {}
for i in range(len(state_names)):
states_dict.update({state_names[i]:[statewise_confirmed[i], statewise_active[i], statewise_recovered[i], statewise_deaths[i]]})
states_sorted_list = sorted(states_dict.items(), key = lambda kv:[kv[1], kv[0]], reverse = True)
final_state_names = []
final_statewise_confirmed = []
final_statewise_active = []
final_statewise_recovered = []
final_statewise_deaths = []
for item in states_sorted_list:
final_state_names.append(item[0])
final_statewise_confirmed.append(item[1][0])
final_statewise_active.append(item[1][1])
final_statewise_recovered.append(item[1][2])
final_statewise_deaths.append(item[1][3])
state_report_list = [final_state_names, final_statewise_confirmed, final_statewise_active, final_statewise_recovered, final_statewise_deaths]
del state_names
del statewise_confirmed
del statewise_deaths
del statewise_recovered
del statewise_active
del final_state_names
del final_statewise_confirmed
del final_statewise_active
del final_statewise_recovered
del final_statewise_deaths
del states_sorted_list
return state_report_list
def district_zone_analysis():
import requests
district_data = requests.get("https://api.covid19india.org/zones.json").json()
states_district = {}
states_district_zone = {}
for item in district_data['zones']:
if item['state'] not in states_district:
states_district[item['state']] = [item['district']]
states_district_zone[item['state']] = {item['district']:[item['lastupdated'],item['zone']]}
else:
states_district[item['state']].append(item['district'])
states_district_zone[item['state']].update({item['district']:[item['lastupdated'],item['zone']]})
return [states_district, states_district_zone]
```
#### File: Joshi-Hiroshi/Covaware_Website--Flask/main.py
```python
from flask import Flask, render_template, request, redirect , flash , url_for,json,jsonify
import data_file
import requests
from flask_sitemap import Sitemap
app=Flask(__name__)
app.config['SECRET_KEY'] = 'babaBlackSheep$123'
# instantiate the sitemap extension so the @ext.register_generator decorator below has a target (ext was referenced but never created)
ext = Sitemap(app=app)
#Main Home Route
@app.route("/")
def home():
return render_template("index.html")
def data_in(Country, State=None):
index_in, data_in_c = data_file.data_Confirmed(Country, State)
data_in_r = data_file.data_Recovered(Country, State)
data_in_d = data_file.data_Deaths(Country, State)
return [index_in, data_in_c, data_in_r, data_in_d]
@app.route('/world_map', methods=['GET'])
def world_wide():
list_of_data = data_file.data_of_world_wide()
index = [['Country', 'Confirmed', 'Recovered']]
for i in list_of_data:
index.append(i)
data_json = json.dumps(index)
# print(index[0:2], type(index))
return render_template('worldwide.html', list_data=data_json)
@app.route("/covid/cases")
def covid_cases():
from data_render import daywise_data_world
data = daywise_data_world()
return render_template("covid_cases.html" , data = data )
@app.route("/covid/india", methods = ["POST", "GET"])
def india():
from data_render import daywise_data_india
india_plot_daywise_data = daywise_data_india()
day = india_plot_daywise_data[0]
confirmed = india_plot_daywise_data[1]
deaths = india_plot_daywise_data[2]
from data_render import statewise_analysis
statewise_data = statewise_analysis()
india_data_url = "https://disease.sh/v2/countries/India?yesterday=true&strict=true"
india_content = requests.get(india_data_url)
india_data = india_content.json()
#from data_render import district_zone_analysis
#states_district_zone_data = district_zone_analysis()
#district_data = states_district_zone_data
return render_template("india.html", data = india_data, day = day, confirmed = confirmed, deaths = deaths, states = statewise_data)
@app.route("/about")
def about_page():
return render_template("about.html")
@app.route("/about-covid")
def abt_covid():
return render_template("about_covid.html")
@app.route("/symptoms")
def symptoms():
return render_template("symptoms.html")
@app.route("/precautions")
def precautions():
return render_template("precautions.html")
@app.route('/myths')
def myths():
return render_template("myths.html")
@app.route("/vaccines")
def vaccines():
return render_template("vaccineinfo.html")
@app.route("/stuff-to-do-during-lockdown")
def stuff():
return render_template("stuff_to_do_during_lockdown.html")
@ext.register_generator
def sitemap():
yield 'home' , {}
yield 'india' , {}
yield 'covid_cases', {}
yield 'abt_covid',{}
yield 'symptoms',{}
yield 'precautions',{}
yield 'myths',{}
yield 'vaccines',{}
yield 'stuff',{}
yield 'about_page' ,{}
yield 'world_wide',{}
#app.run(debug=True)
``` |
{
"source": "joshijayesh/conplyent",
"score": 3
} |
#### File: conplyent/conplyent/_msg.py
```python
from enum import Enum
class MSGType(Enum):
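    # note: the trailing commas make every value except SYNC a 1-tuple (e.g. (0,)) rather than a plain int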
ACKNOWLEDGE = 0,
HEARTBEAT = 1,
COMMAND = 2,
DETAILS = 3,
COMPLETE = 4,
SYNC = 5
class MSG(object):
def __init__(self, type, request=True, **kwargs):
self._type = type
self._request = request
self._kwargs = kwargs
def __str__(self):
string = "<MSG Type: {}> <Request?: {}>".format(self._type, self._request)
if(self._kwargs):
string += " <Kwargs: {}>".format(self._kwargs)
return string
@property
def type(self):
return self._type
@property
def request(self):
return self._request
@property
def cmd_id(self):
return self._kwargs["cmd_id"]
@property
def args(self):
return self._kwargs["args"]
@property
def kwargs(self):
return self._kwargs["keywargs"]
@property
def details(self):
return self._kwargs["details"]
@property
def request_id(self):
return self._kwargs["request_id"]
@property
def exit_code(self):
return self._kwargs["exit_code"]
@property
def msg_num(self):
return self._kwargs["msg_num"]
def has_request_id(self):
return "request_id" in self._kwargs
```
#### File: conplyent/tests/test_server.py
```python
import os
import time
from unittest import TestCase, main
import conplyent
os.chdir(os.path.dirname(os.path.realpath(__file__)))
class TestThorough(TestCase):
    # setUp/tearDown run once per test as instance methods, so @classmethod is not appropriate here
    def setUp(self):
self._client = conplyent.ConsoleExecutor("python thorough_client.py")
    def tearDown(self):
self._client.close()
def test_server(self):
result = conplyent.server.start()
self.assertEqual(result, 0)
time.sleep(2)
if(__name__ == '__main__'):
main()
``` |
{
"source": "joshim5/CRISPR-Library-Designer",
"score": 2
} |
#### File: data/functional_tests/on_target_contribution.py
```python
import json
import random
from random_guides import random_guides
import time
# CLD code from parent directories
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(currentdir)))
os.sys.path.insert(0,rootdir)
import computations
import seq_generator
### Constant ranker parameters
genome = {
"human" : seq_generator.FastGenome()
}
tissues = ['Thyroid', 'Testis', 'Cervix Uteri', 'Adipose Tissue', 'Breast', 'Vagina', 'Nerve', 'Pituitary', 'Stomach', 'Fallopian Tube', 'Bone Marrow', 'Bladder', 'Blood', 'Colon', 'Prostate', 'Pancreas', 'Blood Vessel', 'Liver', 'Spleen', 'Small Intestine', 'Uterus', 'Ovary', 'Muscle', 'Heart', 'Adrenal Gland', 'Brain', 'Salivary Gland', 'Lung', 'Skin', 'Esophagus', 'Kidney']
### Experimental parameters
library_size = 5
num_runs = 10
num_genes = 500
scoring_alg = "Azimuth"
def compute_average_effeciency_with_on_target(genes, library_size):
# Call into computations.py, using GTEx_enabled = False, and take results.
ranker = computations.Ranker(genome["human"], "human", tissues, False, False, False, scoring_alg = scoring_alg)
for gene in genes:
ranker.rank(gene['ensembl_id'], gene['name'], library_size)
guides_by_exon = ranker.get_guides_by_exon()
total_score = 0.0
for gene in guides_by_exon:
for idx, exon in enumerate(gene['exons']):
guide_count = 0
for guide in exon['gRNAs']:
if guide['selected']:
total_score += guide['score']
average_score = total_score / (len(genes) * library_size)
return average_score
def compute_average_effeciency_without_on_target(genes, library_size):
# Redo the code in precompute_guides_msgpack.py but don't do any ranking by on-target.
# Basically, just regex and take from some random order.
total_score = 0.0
for gene in genes:
guides = random_guides(gene, library_size)
for guide in guides:
total_score += guide['score']
average_score = total_score / (len(genes) * library_size)
return average_score
if __name__ == "__main__":
print "Selecting guides with and without on-target effeciency. Computing average guide effeciency across these sets. Analyzing {0} genes, with {1} guides/gene. {2} runs will be performed.".format(num_genes, library_size, num_runs)
t0 = time.time()
# Stores average results
results_Y = []
results_N = []
# open the list of genes
with open('../pre_processed/genes_list.json', 'r') as genes_list_file:
genes_list = json.load(genes_list_file)
for i in xrange(num_runs):
print "run", i
genes = random.sample(genes_list, num_genes)
result_Y = compute_average_effeciency_with_on_target(genes, library_size)
result_N = compute_average_effeciency_without_on_target(genes, library_size)
results_Y.append(result_Y)
results_N.append(result_N)
filename = 'on_target_contribution.' + str(num_runs) + '.' + scoring_alg + '.results'
with open(filename, 'w') as results:
results.write("Average guide effeciency with and without on-target scores considered. Analyzing {0} genes, with {1} guides/gene.\nData from {2} runs is summarized below.\n\n".format(num_genes, library_size, num_runs))
results.write("Guide selection with on-target considered:\n")
for result in results_Y:
results.write(str(result) + "\n")
results.write("\n\nRandom guide selection:\n")
for result in results_N:
results.write(str(result) + "\n")
print "Completed job in", time.time() - t0, "seconds."
```
#### File: additional_scripts/generation/remove_guides_with_off_target_2.py
```python
import os
import os.path
import msgpack
APP_STATIC = "/home/joshm/GUIDES/CRISPR-Library-Designer/static"
path1 = os.path.join(APP_STATIC, 'data/GRCh37_guides_msgpack_Azimuth/')
path2 = os.path.join(APP_STATIC, 'data/GRCm38_guides_msgpack_Azimuth/')
exome_path_hum = os.path.join(APP_STATIC, 'data', 'exome_hum.txt')
exome_path_mus = os.path.join(APP_STATIC, 'data', 'exome_mus.txt')
mer_len = 13
exome_mers = None
def valid(guide):
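    # keep a guide only if its PAM-proximal 13-mer followed by any NGG PAM occurs at most once across the exome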
hits = 0
for middle in ["A", "G", "T", "C"]:
guide_seq = guide['seq'] + middle + "GG"
if guide_seq in exome_mers:
hits += exome_mers[guide_seq]
return hits < 2
print "preparing hum kmers"
with open(exome_path_hum, 'r') as input:
exome = input.read()
exome_mers = {}
for i in range(len(exome)):
s = exome[i:i + mer_len]
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
print "pruning dup hum guides"
for file in os.listdir(path1):
with open(os.path.join(path1, file), 'r') as datafile:
gRNAs = msgpack.load(datafile)
gRNAs = [i for i in gRNAs if valid(i)]
with open(os.path.join(path1, file), 'w+') as datafile:
msgpack.dump(gRNAs, datafile)
print "preparing mus kmers"
with open(exome_path_mus, 'r') as input:
exome = input.read()
exome_mers = {}
for i in range(len(exome)):
s = exome[i:i + mer_len]
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
print "pruning dup mus guides"
for file in os.listdir(path2):
with open(os.path.join(path2, file), 'r') as datafile:
gRNAs = msgpack.load(datafile)
gRNAs = [i for i in gRNAs if valid(i)]
with open(os.path.join(path2, file), 'w+') as datafile:
msgpack.dump(gRNAs, datafile)
```
#### File: data/pre_processed/precompute_guides_msgpack_CFD.py
```python
import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
from multiprocessing import Process
import os
import time
start_time = time.time()
#Reverse complements a given string
def revcom(s):
basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A','U':'A', 'N':'N'}
letters = list(s[::-1])
letters = [basecomp[base] for base in letters]
return ''.join(letters)
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, seq_before, seq_after, chrom, cut_pos, score, exon_ranking, ensembl_gene, gene_name, functional_domain, has_exome_repeat, off_target_score):
self.start = start
self.seq = seq
self.PAM = PAM
self.seq_before = seq_before # 10bp before the sgRNA
self.seq_after = seq_after # 10bp after the sgRNA
self.chrom = chrom
self.cut_pos = cut_pos
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
if functional_domain:
self.has_functional_domain = True
else:
self.has_functional_domain = False
self.has_exome_repeat = has_exome_repeat
self.off_target_score = off_target_score
if off_target_score == 'inf':
self.off_target_score = 10000
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"seq_before": self.seq_before,
"seq_after": self.seq_after,
"chrom": self.chrom,
"cut_pos": self.cut_pos,
"selected": self.selected,
"has_exome_repeat": self.has_exome_repeat,
"off_target_score": self.off_target_score,
"has_functional_domain": self.has_functional_domain
}
if self.functional_domain != None:
serialization["functional_domain"] = self.functional_domain
return serialization
def cmp_scheme(self, g):
return (-g.off_target_score, g.has_functional_domain, g.score)
def __cmp__(self, other):
return cmp(self.cmp_scheme(self), self.cmp_scheme(other))
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": False,
"mer_len": 20
}
# azimuth model
print "loading azimuth models", time.time() - start_time
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
azimuth_scores_file = 'azimuth_scores.p'
with open(azimuth_scores_file, 'rb') as inp:
azimuth_scores = pickle.load(inp)
def get_azimuth_score(mer30):
if mer30 in azimuth_scores:
return azimuth_scores[mer30]
else:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
print "generating Azimuth", mer30, score
azimuth_scores[mer30] = score
return score
# load in exome
APP_STATIC = "/home/joshm/GUIDES/CRISPR-Library-Designer/static"
exome_seq_path = os.path.join(APP_STATIC, 'data', 'GRCh37_exons')
mer_len = params['mer_len']
# process kmers
# consider all kmers which are followed by NGG
print "preparing hum kmers", time.time() - start_time
exome_mers = {}
for file in os.listdir(exome_seq_path):
file_loc = os.path.join(exome_seq_path, file)
with open(file_loc, 'r') as file_data:
fwdseq = file_data.read()
revseq = revcom(fwdseq)
for seq in [fwdseq, revseq]:
for i in range(len(seq) - mer_len - 2):
s = seq[i: i + mer_len]
if seq[i + mer_len + 1 : i + mer_len + 3] != "GG": # only PAMs
continue
if 'N' in s:
continue
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
print 'len(exome_mers) = ', len(exome_mers), time.time() - start_time
# takes in guide OBJECT
# returns whether there is a duplicate in exome
def hasExomeRepeat(protospacer):
guide_seq = protospacer[-mer_len:] # get PAM-proximal mer_len bases
hits = exome_mers.get(guide_seq, 0) # how many times does it occur in the exome followed by NGG? (0 if never seen)
return hits >= 2
# loading CFD preprocessed
#Unpickle mismatch scores and PAM scores
def get_mm_pam_scores():
try:
mm_scores = pickle.load(open('mismatch_score.pkl','rb'))
pam_scores = pickle.load(open('pam_scores.pkl','rb'))
return (mm_scores,pam_scores)
except:
raise Exception("Could not find file with mismatch scores or PAM scores")
#Calculates CFD score
def calc_cfd(wt,sg,pam):
mm_scores,pam_scores = get_mm_pam_scores()
score = 1
sg = sg.replace('T','U')
wt = wt.replace('T','U')
s_list = list(sg)
wt_list = list(wt)
for i,sl in enumerate(s_list):
if wt_list[i] == sl:
score*=1
else:
key = 'r'+wt_list[i]+':d'+revcom(sl)+','+str(i+1)
score*= mm_scores[key]
score*=pam_scores[pam]
return (score)
def get_pot_off_targets(seq):
seq_list = list(seq)
backup_seq_list = list(seq)
nts = ['A','T','C','G']
results = {}
for a in range(len(seq)):
for a_sym in nts:
seq_list[a] = a_sym
for b in range(a + 1, len(seq)):
for b_sym in nts:
seq_list[b] = b_sym
for c in range(b + 1, len(seq)):
for c_sym in nts:
seq_list[c] = c_sym
new_seq = ''.join(seq_list)
results[new_seq] = True
seq_list[c] = backup_seq_list[c]
seq_list[b] = backup_seq_list[b]
seq_list[a] = backup_seq_list[a]
if seq in results:
del results[seq]
return results.keys()
# load preprocessed info
with open("off_target_scores.p", "rb") as inp:
off_target_scores = pickle.load(inp)
print 'len(off_target_scores) = ', len(off_target_scores), time.time() - start_time
def get_off_target_score(protospacer):
if hasExomeRepeat(protospacer):
return 100000
if not protospacer in off_target_scores:
score = 0
off_targets = get_pot_off_targets(protospacer)
for off_target in off_targets:
if off_target in exome_mers:
wt = protospacer + "CGG"
sg = off_target
pam = "GG"
score += exome_mers[off_target] * calc_cfd(wt, sg, pam)
off_target_scores[protospacer] = score
return off_target_scores[protospacer]
# Create interval tree for functional domains
print "constructing interval tuples", time.time() - start_time
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees", time.time() - start_time
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
print "constructing refGene", time.time() - start_time
refGeneFilename = '../gtex/refGene.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene, exon):
try:
start = list(refGene.loc[refGene['name'] == gene]['exonStarts'])[0][exon]
end = list(refGene.loc[refGene['name'] == gene]['exonEnds'])[0][exon]
chrom = list(refGene.loc[refGene['name'] == gene]['chrom'])[0]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
with open("/home/joshm/GUIDES/CRISPR-Library-Designer/static/data/pre_processed/exon_info.p", "rb") as f:
exon_info = pickle.load(f)
def get_exon_start_chrom(gene, exon):
# get the row from the exon_info dataframe
row = exon_info[exon_info['name'] == gene].iloc[0]
# find where the exon starts
start = row['exonStarts'][exon]
# find the chromosome this falls in
chrom = str(row['chrom'])
if chrom.isdigit():
chrom = str(int(chrom)) # get rid of decimal place
return start, chrom
# this is run on multiprocessing workflow
def run(genes_list):
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
while seq:
# Check if we haven't done this in a previous run of the program
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
continue
q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain, chrom, cut_pos): # chrom/cut_pos feed the GuideRNA constructor
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = get_azimuth_score(mer30)
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
protospacer_before = seq[PAM_start-params["protospacer_len"]-10:PAM_start-params["protospacer_len"]]
protospacer_after = seq[PAM_start:PAM_start+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
protospacer_before = seq[PAM_start+params["PAM_len"]-10:PAM_start+params["PAM_len"]]
protospacer_after = seq[PAM_start+params["PAM_len"]+params["protospacer_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]+10]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
if protospacer not in exome_mers:
print protospacer, 'NOT in exome_mers', gene["ensembl_id"], exon
print 'PAM is', seq[PAM_start:PAM_start+params["PAM_len"]]
has_exome_repeat = hasExomeRepeat(protospacer)
off_target_score = get_off_target_score(protospacer)
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, protospacer_before, protospacer_after, chrom, cut_pos, score, exon, gene["ensembl_id"], gene["name"], domain, has_exome_repeat, off_target_score)
# If there's enough room, add it, no question.
if q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if cmp(potential_gRNA, lowest_gRNA) == 1: # if potential_gRNA > lowest_gRNA
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain, coords['chrom'], coords['start'] + m.start() - 3)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain, coords['chrom'], coords['end'] - m.start() + 3)
# Pop gRNAs into our 'permanent' storage
gRNAs = []
while not q.empty():
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../cfdGRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
NUM_CORES = 16
print "beginning gene by gene processing", time.time() - start_time
with open('genes_list.json') as genes_list_file:
full_genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
processes = []
unit = len(full_genes_list) / NUM_CORES + 1
print 'unit is', unit, time.time() - start_time
for i in range(NUM_CORES):
start = unit * i
end = min(unit * (i + 1), len(full_genes_list))
genes_list = full_genes_list[start:end]
p = Process(target = run, args=(genes_list,))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
with open('azimuth_scores.p', 'wb') as output:
pickle.dump(azimuth_scores, output)
end_time = time.time()
hours, rem = divmod(end_time-start_time, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
``` |
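As a quick, hedged illustration of the off-target enumeration used above (a toy check only; it assumes `get_pot_off_targets` from the file above is in scope, and the 4-mer input is illustrative):
```python
variants = get_pot_off_targets("ACGT")
print(len(variants))       # every sequence within 3 mismatches of "ACGT"
print("ACGT" in variants)  # False: the unchanged sequence itself is removed
```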
{
"source": "joshim5/mogwai",
"score": 3
} |
#### File: mogwai/data/base_wrapper_dataset.py
```python
from argparse import ArgumentParser
import torch
from typing import List, Any
class BaseWrapperDataset(torch.utils.data.Dataset):
"""BaseWrapperDataset. Wraps an existing dataset.
Args:
dataset (torch.utils.data.dataset): Dataset to wrap.
"""
def __init__(self, dataset: torch.utils.data.dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, idx):
return self.dataset[idx]
def __len__(self):
return len(self.dataset)
@staticmethod
def add_args(parser: ArgumentParser) -> ArgumentParser:
return parser
def collater(self, batch: List[Any]) -> Any:
return self.dataset.collater(batch)
```
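A hypothetical usage sketch of the wrapper (ToyDataset below is invented for illustration and is not part of mogwai; it assumes `BaseWrapperDataset` from the file above is importable):
```python
import torch
from typing import Any, List

class ToyDataset(torch.utils.data.Dataset):
    """Tiny in-memory dataset with the collater() hook the wrapper expects."""
    def __init__(self, tensors):
        self.tensors = tensors
    def __getitem__(self, idx):
        return self.tensors[idx]
    def __len__(self):
        return len(self.tensors)
    def collater(self, batch: List[Any]) -> Any:
        return torch.stack(batch)

wrapped = BaseWrapperDataset(ToyDataset([torch.zeros(3), torch.ones(3)]))
print(len(wrapped))                                      # 2
print(wrapped.collater([wrapped[0], wrapped[1]]).shape)  # torch.Size([2, 3])
```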
#### File: mogwai/data/pseudolikelihood_dataset.py
```python
from typing import List, Dict
from argparse import ArgumentParser
import torch
from .base_wrapper_dataset import BaseWrapperDataset
from ..utils import collate_tensors
from ..vocab import FastaVocab
class PseudolikelihoodDataset(BaseWrapperDataset):
"""PseudolikelihoodDataset implements a mostly-dummy dataset, which simply wraps an
existing token dataset. It is designed to act as a drop-in replacement of the
MaskedLMDataset.
Args:
dataset (torch.utils.data.dataset): Dataset of tensors to wrap.
"""
def __init__(self, dataset: torch.utils.data.dataset):
super().__init__(dataset)
def __getitem__(self, idx):
item = self.dataset[idx]
if isinstance(item, tuple) and len(item) == 1:
item = item[0]
return {"src_tokens": item, "targets": item.clone()}
@staticmethod
def add_args(parser: ArgumentParser) -> ArgumentParser:
return parser
def collater(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
concat = {
"src_tokens": collate_tensors(
[element["src_tokens"] for element in batch], FastaVocab.pad_idx
),
"targets": collate_tensors(
[element["targets"] for element in batch], FastaVocab.pad_idx
),
}
return concat
```
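A hedged sketch of what the wrapper yields and how its collater pads (a plain Python list stands in for a real token dataset here; it assumes the class above and its mogwai imports are available):
```python
import torch

ds = PseudolikelihoodDataset([torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
item = ds[0]
print(item["src_tokens"], item["targets"])   # identical values; targets is a clone
batch = ds.collater([ds[0], ds[1]])          # shorter sequence padded with FastaVocab.pad_idx (20)
print(batch["src_tokens"])
```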
#### File: mogwai/models/factored_attention.py
```python
from argparse import ArgumentParser, Namespace
from typing import Optional
import math
import torch
import torch.nn as nn
from apex.optimizers import FusedLAMB
from .base_model import BaseModel
from ..utils import symmetrize_matrix_, symmetrize_potts
from ..utils.init import init_potts_bias, gremlin_weight_decay_coeffs
from .. import lr_schedulers
class FactoredAttention(BaseModel):
"""FactoredAttention Layer.
Args:
num_seqs (int): Number of sequences in MSA.
msa_length (int): Length of MSA.
msa_counts (tensor, optional): Counts of each amino acid in each position of MSA. Used
for initialization.
attention_head_size (int, optional): Dimension of queries and keys for a single head.
num_attention_heads (int, optional): Number of attention heads.
optimizer (str, optional): Choice of optimizer from ["adam", "lamb", "gremlin"]. "gremlin"
specifies GremlinAdam.
learning_rate (float, optional): Learning rate for training model.
vocab_size (int, optional): Alphabet size of MSA.
true_contacts (tensor, optional): True contacts for family. Used to compute
metrics while training.
l2_coeff (int, optional): Coefficient of L2 regularization for all weights.
use_bias (bool, optional): Whether to include single-site potentials.
pad_idx (int, optional): Integer for padded positions.
lr_scheduler (str, optional): Learning schedule to use. Choose from ["constant", "warmup_constant"].
warmup_steps (int, optional): Number of warmup steps for learning rate schedule.
max_steps (int, optional): Maximum number of training batches before termination.
factorize_vocab (bool, optional): Factorize the (A, A) interaction terms into a product of
(A, d) and (d, A) matrices. True allows for arbitrary value dimension.
"""
def __init__(
self,
num_seqs: int,
msa_length: int,
msa_counts: Optional[torch.Tensor] = None,
attention_head_size: int = 16,
num_attention_heads: int = 32,
optimizer: str = "adam",
learning_rate: float = 1e-3,
use_adaptive_lr: bool = False,
vocab_size: int = 20,
true_contacts: Optional[torch.Tensor] = None,
l2_coeff: float = 1e-2,
use_bias: bool = True,
pad_idx: int = 20,
lr_scheduler: str = "warmup_constant",
warmup_steps: int = 0,
max_steps: int = 10000,
factorize_vocab: bool = False,
):
super().__init__(num_seqs, msa_length, learning_rate, vocab_size, true_contacts)
self.l2_coeff = l2_coeff
self.use_bias = use_bias
self.pad_idx = pad_idx
self.num_seqs = num_seqs
self.msa_length = msa_length
self.num_attention_heads = num_attention_heads
self.attention_head_size = attention_head_size
self.optimizer = optimizer
self.vocab_size = vocab_size
self.lr_scheduler = lr_scheduler
self.warmup_steps = warmup_steps
self.max_steps = max_steps
self.factorize_vocab = factorize_vocab
self.use_adaptive_lr = use_adaptive_lr
if self.use_adaptive_lr:
self.learning_rate *= math.log(self.num_seqs) / self.msa_length
hidden_size = attention_head_size * num_attention_heads
query = torch.empty(msa_length, num_attention_heads, attention_head_size)
nn.init.xavier_uniform_(query)
self.query = nn.Parameter(query, requires_grad=True)
key = torch.empty(msa_length, num_attention_heads, attention_head_size)
nn.init.xavier_uniform_(key)
self.key = nn.Parameter(key, requires_grad=True)
if self.factorize_vocab:
value = torch.empty(num_attention_heads, vocab_size, attention_head_size)
nn.init.xavier_uniform_(value)
self.value = nn.Parameter(value, requires_grad=True)
output = torch.empty(num_attention_heads, attention_head_size, vocab_size)
nn.init.xavier_uniform_(output)
self.output = nn.Parameter(output, requires_grad=True)
else:
value = torch.empty(num_attention_heads, vocab_size, vocab_size)
nn.init.xavier_uniform_(value)
self.value = nn.Parameter(value, requires_grad=True)
if self.use_bias:
if msa_counts is not None:
bias = init_potts_bias(msa_counts, l2_coeff, num_seqs)
else:
bias = torch.zeros(msa_length, vocab_size)
self.bias = nn.Parameter(bias, True)
self.register_buffer("diag_mask", torch.eye(msa_length) * -10000)
self.register_buffer("one_hot", torch.eye(vocab_size + 1, vocab_size))
# self.save_hyperparameters()
def maybe_onehot_inputs(self, src_tokens):
"""Onehots src_tokens if necessary otherwise uses original tokens"""
if src_tokens.dtype == torch.long:
return self.one_hot[src_tokens]
else:
return src_tokens
def forward(self, src_tokens, targets=None, src_lengths=None):
inputs = self.maybe_onehot_inputs(src_tokens)
mrf_weight = self.compute_mrf_weight()
logits = torch.tensordot(inputs, mrf_weight, 2)
if self.use_bias:
logits = logits + self.bias
outputs = (logits, mrf_weight.norm(dim=(1, 3)))
if targets is not None:
loss = self.loss(logits, targets, mrf_weight)
outputs = (loss,) + outputs
return outputs
def configure_optimizers(self):
if self.optimizer == "adam":
optimizer = torch.optim.AdamW(
self.parameters(), lr=self.learning_rate, weight_decay=0.0
)
elif self.optimizer == "lamb":
optimizer = FusedLAMB(
self.parameters(),
lr=self.learning_rate,
weight_decay=0.0,
)
elif self.optimizer == "gremlin":
from ..optim import GremlinAdam
optimizer = GremlinAdam(
[{"params": self.parameters(), "gremlin": True}],
lr=self.learning_rate,
)
else:
raise ValueError(f"Unrecognized optimizer {self.optimizer}")
lr_scheduler = lr_schedulers.get(self.lr_scheduler)(
optimizer, self.warmup_steps, self.trainer.max_steps
)
scheduler_dict = {
"scheduler": lr_scheduler,
"interval": "step",
}
return [optimizer], [scheduler_dict]
def compute_regularization(self, targets, mrf_weight: torch.Tensor):
"""Compute regularization weights based on the number of targets."""
batch_size = targets.size(0)
weight_reg_coeff, bias_reg_coeff = gremlin_weight_decay_coeffs(
batch_size, self.msa_length, self.l2_coeff, self.vocab_size
)
sample_size = (targets != self.pad_idx).sum()
# After multiplying by sample_size, comes to lambda * L * A / 2
reg = weight_reg_coeff * mrf_weight.pow(2).sum()
if self.use_bias:
# After multiplying by sample_size, comes to lambda
reg += bias_reg_coeff * self.bias.pow(2).sum()
return reg * sample_size
def loss(self, logits, targets, mrf_weight: torch.Tensor):
"""Compute GREMLIN loss w/ L2 Regularization"""
loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx, reduction="sum")(
logits.view(-1, self.vocab_size), targets.view(-1)
)
loss *= self.num_seqs / logits.size(0)
loss += self.compute_regularization(targets, mrf_weight)
return loss
def compute_mrf_weight(self):
attention = torch.einsum("ihd,jhd->hij", self.query, self.key)
attention = attention / math.sqrt(self.attention_head_size)
attention = attention + self.diag_mask
attention = attention.softmax(-1) # H x L x L
if self.factorize_vocab:
embed = torch.einsum("had,hdb->hab", self.value, self.output) # H x A x A
else:
embed = self.value
W = torch.einsum("hij,hab->iajb", attention, embed) # L x A x L x A
W = symmetrize_potts(W)
return W
@torch.no_grad()
def get_contacts(self, mrf_weight: Optional[torch.Tensor] = None):
"""Extracts contacts by getting the attentions."""
if mrf_weight is None:
mrf_weight = self.compute_mrf_weight()
return mrf_weight.norm(dim=(1, 3))
@classmethod
def from_args(
cls,
args: Namespace,
num_seqs: int,
msa_length: int,
msa_counts: Optional[torch.Tensor] = None,
vocab_size: int = 20,
pad_idx: int = 20,
true_contacts: Optional[torch.Tensor] = None,
) -> "FactoredAttention":
return cls(
num_seqs=num_seqs,
msa_length=msa_length,
msa_counts=msa_counts,
attention_head_size=args.attention_head_size,
num_attention_heads=args.num_attention_heads,
optimizer=args.optimizer,
learning_rate=args.learning_rate,
use_adaptive_lr=args.use_adaptive_lr,
vocab_size=vocab_size,
true_contacts=true_contacts,
l2_coeff=args.l2_coeff,
use_bias=args.use_bias,
pad_idx=pad_idx,
lr_scheduler=args.lr_scheduler,
warmup_steps=args.warmup_steps,
factorize_vocab=args.factorize_vocab,
)
@staticmethod
def add_args(parser: ArgumentParser) -> ArgumentParser:
parser.add_argument(
"--learning_rate",
type=float,
default=1e-3,
help="Learning rate for training.",
)
parser.add_argument(
"--use_adaptive_lr",
action="store_true",
help="Whether to rescale lr as a function of MSA.",
)
parser.add_argument(
"--l2_coeff",
type=float,
default=1e-2,
help="L2 Regularization Coefficient.",
)
parser.add_argument(
"--use_bias", action="store_true", help="Use a bias when training GREMLIN."
)
parser.add_argument(
"--no_bias",
action="store_false",
help="Use a bias when training GREMLIN.",
dest="use_bias",
)
parser.add_argument(
"--num_attention_heads",
type=int,
default=32,
help="Number of attention heads.",
)
parser.add_argument(
"--attention_head_size",
type=int,
default=16,
help="Dims in each attention head.",
)
parser.add_argument(
"--optimizer",
choices=["adam", "lamb", "gremlin"],
default="adam",
help="Which optimizer to use.",
)
parser.add_argument(
"--lr_scheduler",
choices=lr_schedulers.LR_SCHEDULERS.keys(),
default="warmup_constant",
help="Learning rate scheduler to use.",
)
parser.add_argument(
"--warmup_steps",
type=int,
default=0,
help="How many warmup steps to use when using a warmup schedule.",
)
parser.add_argument(
"--factorize_vocab",
action="store_true",
help="Whether to factorize the vocab embedding.",
)
return parser
```
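A hedged instantiation sketch (it assumes the mogwai package and its dependencies, including apex, are installed; the sizes are arbitrary toy values, not a real MSA):
```python
import torch

model = FactoredAttention(num_seqs=64, msa_length=10, vocab_size=20)
tokens = torch.randint(0, 20, (8, 10))         # toy batch of 8 integer-encoded sequences
loss, logits, contacts = model(tokens, targets=tokens)
print(logits.shape)                            # torch.Size([8, 10, 20])
print(contacts.shape)                          # torch.Size([10, 10]): per-position-pair norms of the MRF weight
```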
#### File: mogwai/mogwai/parsing.py
```python
from typing import Union, List, Tuple, Dict, Optional
from Bio import SeqIO
from biotite.structure.io.pdb import PDBFile
from scipy.spatial.distance import pdist, squareform
from pathlib import Path
import numpy as np
import string
from .vocab import FastaVocab
PathLike = Union[str, Path]
def one_hot(x, cat=None):
"""Onehot encodes a sequence of ints."""
if cat is None:
cat = np.max(x) + 1
oh = np.concatenate((np.eye(cat), np.zeros([1, cat])))
return oh[x]
def parse_fasta(
filename: Union[str, Path],
remove_insertions: bool = False,
remove_gaps: bool = False,
) -> Tuple[List[str], List[str]]:
filename = Path(filename)
if filename.suffix == ".sto":
form = "stockholm"
elif filename.suffix in (".fas", ".fasta", ".a3m"):
form = "fasta"
else:
raise ValueError(f"Unknown file format {filename.suffix}")
translate_dict: Dict[str, Optional[str]] = {}
if remove_insertions:
translate_dict.update(dict.fromkeys(string.ascii_lowercase))
else:
translate_dict.update(dict(zip(string.ascii_lowercase, string.ascii_uppercase)))
if remove_gaps:
translate_dict["-"] = None
translate_dict["."] = None
translate_dict["*"] = None
translation = str.maketrans(translate_dict)
def process_record(record: SeqIO.SeqRecord):
return record.description, str(record.seq).translate(translation)
records = SeqIO.parse(str(filename), form)
records = map(process_record, records)
records = zip(*records)
headers, sequences = tuple(records)
return headers, sequences
def get_seqref(x: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# input: string
# output
# -seq: unaligned sequence (remove gaps, lower to uppercase,
# numeric(A->0, R->1...))
# -ref: reference describing how each sequence aligns to the first
# (reference sequence)
n, seq, ref, aligned_seq = 0, [], [], []
for aa in x:
if aa != "-":
seq.append(FastaVocab.A2N.get(aa.upper(), -1))
if aa.islower():
ref.append(-1)
n -= 1
else:
ref.append(n)
aligned_seq.append(seq[-1])
else:
aligned_seq.append(-1)
n += 1
return np.array(seq), np.array(ref), np.array(aligned_seq)
def load_a3m_msa(filename):
"""
Given A3M file (from hhblits)
return MSA (aligned), MS (unaligned) and ALN (alignment)
"""
names, seqs = parse_fasta(filename)
reference = seqs[0]
# get the multiple sequence alignment
max_len = 0
ms, aln, msa = [], [], []
for seq in seqs:
seq_, ref_, aligned_seq_ = get_seqref(seq)
max_len = max(max_len, len(seq_))
ms.append(seq_)
msa.append(aligned_seq_)
aln.append(ref_)
# pad each unaligned-sequence and alignment to same length
for n in range(len(ms)):
pad = max_len - len(ms[n])
ms[n] = np.pad(ms[n], [0, pad], constant_values=-1)
aln[n] = np.pad(aln[n], [0, pad], constant_values=-1)
return one_hot(msa), one_hot(ms), one_hot(aln), reference
def contacts_from_cf(filename: PathLike, cutoff=0.001, sequence=None) -> np.ndarray:
# contact Y,1 Y,2 0.006281 MET ARG
n, cons = 0, []
with open(filename, "r") as f:
for line in f:
line = line.rstrip()
if line[:7] == "contact":
_, _, i, _, j, p, _, _ = line.replace(",", " ").split()
i, j, p = int(i), int(j), float(p)
if i > n:
n = i
if j > n:
n = j
cons.append([i - 1, j - 1, p])
if line.startswith("SEQUENCE") and sequence is not None:
seq = line.split()[1:]
seq = "".join(FastaVocab.THREE_LETTER[code] for code in seq)
start = seq.index(sequence)
end = start + len(sequence)
break
else:
start = 0
end = n
cm = np.zeros([n, n])
for i, j, p in cons:
cm[i, j] = p
contacts = cm + cm.T
contacts = contacts[start:end, start:end]
return contacts
def extend(a, b, c, L, A, D):
"""
input: 3 coords (a,b,c), (L)ength, (A)ngle, and (D)ihedral
output: 4th coord
"""
def normalize(x):
return x / np.linalg.norm(x, ord=2, axis=-1, keepdims=True)
bc = normalize(b - c)
n = normalize(np.cross(b - a, bc))
m = [bc, np.cross(n, bc), n]
d = [L * np.cos(A), L * np.sin(A) * np.cos(D), -L * np.sin(A) * np.sin(D)]
return c + sum([m * d for m, d in zip(m, d)])
def contacts_from_pdb(
filename: PathLike, distance_threshold: float = 8.0
) -> np.ndarray:
pdbfile = PDBFile.read(str(filename))
structure = pdbfile.get_structure()
N = structure.coord[0, structure.atom_name == "N"]
C = structure.coord[0, structure.atom_name == "C"]
CA = structure.coord[0, structure.atom_name == "CA"]
Cbeta = extend(C, N, CA, 1.522, 1.927, -2.143)
distogram = squareform(pdist(Cbeta))
return distogram < distance_threshold
def contacts_from_trrosetta(
filename: PathLike,
distance_threshold: float = 8.0,
):
fam_data = np.load(filename)
dist = fam_data["dist6d"]
nat_contacts = dist * ((dist > 0) & (dist < distance_threshold))
return nat_contacts
def read_contacts(filename: PathLike, **kwargs) -> np.ndarray:
filename = Path(filename)
if filename.suffix == ".cf":
return contacts_from_cf(filename, **kwargs)
elif filename.suffix == ".pdb":
return contacts_from_pdb(filename, **kwargs)
elif filename.suffix == ".npz":
return contacts_from_trrosetta(filename, **kwargs)
else:
raise ValueError(
f"Cannot read file of type {filename.suffix}, must be one of (.cf, .pdb)"
)
```
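A hedged usage sketch of the parsing helpers above (the file paths are hypothetical placeholders):
```python
headers, sequences = parse_fasta("family.a3m", remove_insertions=True)
msa, ms, aln, reference = load_a3m_msa("family.a3m")   # one-hot MSA, unaligned seqs, alignment map
contacts = read_contacts("structure.pdb")              # boolean LxL map from C-beta distances
```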
#### File: mogwai/utils/tensor.py
```python
from typing import Sequence, Union
import torch
import numpy as np
def collate_tensors(
tensors: Sequence[torch.Tensor], pad_value: Union[int, float, bool, str] = 0
):
dtype = tensors[0].dtype
device = tensors[0].device
batch_size = len(tensors)
shape = (batch_size,) + tuple(np.max([tensor.size() for tensor in tensors], 0))
padded = torch.full(shape, pad_value, dtype=dtype, device=device)
for position, tensor in zip(padded, tensors):
tensorslice = tuple(slice(dim) for dim in tensor.shape)
position[tensorslice] = tensor
return padded
```
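A quick self-contained check of the padding behaviour, assuming `collate_tensors` above is in scope (toy tensors only):
```python
import torch

a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
print(collate_tensors([a, b], pad_value=20))
# tensor([[ 1,  2,  3],
#         [ 4,  5, 20]])
```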
#### File: mogwai/mogwai/vocab.py
```python
from typing import Sequence, List
class _FastaVocab:
def __init__(self):
self.ALPHABET = "ARNDCQEGHILKMFPSTWYV-"
self.A2N = {a: n for n, a in enumerate(self.ALPHABET)}
self.A2N["X"] = 20
self.IUPAC_CODES = {
"Ala": "A",
"Arg": "R",
"Asn": "N",
"Asp": "D",
"Cys": "C",
"Gln": "Q",
"Glu": "E",
"Gly": "G",
"His": "H",
"Ile": "I",
"Leu": "L",
"Lys": "K",
"Met": "M",
"Phe": "F",
"Pro": "P",
"Ser": "S",
"Thr": "T",
"Trp": "W",
"Val": "V",
"Tyr": "Y",
"Asx": "B",
"Sec": "U",
"Xaa": "X",
"Glx": "Z",
}
self.THREE_LETTER = {aa: name for name, aa in self.IUPAC_CODES.items()}
def convert_indices_to_tokens(self, indices: Sequence[int]) -> List[str]:
return [self.ALPHABET[i] for i in indices]
def convert_tokens_to_indices(self, tokens: Sequence[str], skip_unknown: bool = False) -> List[int]:
if skip_unknown:
return [self.A2N[token] for token in tokens if token in self.A2N]
else:
return [self.A2N.get(token, 20) for token in tokens]
def tokenize(self, sequence: str) -> List[int]:
return self.convert_tokens_to_indices(list(sequence))
def __len__(self) -> int:
return 20
@property
def pad_idx(self) -> int:
return 20
FastaVocab = _FastaVocab()
```
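A short usage sketch of the singleton vocab defined above:
```python
print(FastaVocab.tokenize("ARN-X"))                      # [0, 1, 2, 20, 20]
print(FastaVocab.convert_indices_to_tokens([0, 1, 2]))   # ['A', 'R', 'N']
print(len(FastaVocab), FastaVocab.pad_idx)               # 20 20
```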
#### File: mogwai/tests/test_gremlin_pl.py
```python
import itertools
import numpy as np
import torch
import unittest
from mogwai.data_loading import one_hot
from mogwai.models import GremlinPseudolikelihood
class TestGremlinPL(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
N = 100
L = 20
A = 8
msa = torch.randint(0, A, [N, L])
msa = torch.FloatTensor(one_hot(msa.numpy()))
msa_counts = msa.sum(0)
self.msa = msa
self.model = GremlinPseudolikelihood(N, L, msa_counts, vocab_size=A)
# Need nonzero weights but don't want to take a grad for this test
wt = self.model.weight.data
self.model.weight.data = torch.randn_like(wt)
# Used for data leakage test.
self.A = A
def test_parameter_shapes(self):
self.assertTupleEqual(self.model.weight.shape, (20, 8, 20, 8))
self.assertTupleEqual(self.model.bias.shape, (20, 8))
def test_forward_shape(self):
batch = self.msa[:64]
loss, logits = self.model(batch)
self.assertTupleEqual(logits.shape, (64, 20, 8))
def onehot_vector(self, idx: int):
oh = torch.zeros(self.A)
oh[idx] = 1.0
return oh
@torch.no_grad()
def test_data_leakage(self):
# Confirm that logits for position 0 do not change
# when sequence at position 0 is exhaustively changed.
logits_list = []
example = self.msa[0]
seq_pos = 0
for i in range(self.A):
example[seq_pos] = self.onehot_vector(i)
_, logits = self.model(example.unsqueeze(0))
logits_list.append(logits[0, seq_pos])
all_pairs = itertools.combinations(logits_list, 2)
for x, y in all_pairs:
np.testing.assert_array_almost_equal(x.numpy(), y.numpy())
class TestGremlinPLGrad(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
N = 100
L = 20
A = 8
msa = torch.randint(0, A, [N, L])
msa = torch.FloatTensor(one_hot(msa.numpy()))
msa_counts = msa.sum(0)
self.msa = msa
self.model = GremlinPseudolikelihood(N, L, msa_counts, vocab_size=A)
def test_gradient(self):
# Tests that backward runs.
batch = self.msa[:64]
loss, _ = self.model(batch)
loss.backward()
# TODO: Presumably there's a less stupid approach
self.assertTrue(True)
if __name__ == "__main__":
unittest.main()
```
#### File: mogwai/tests/test_metrics.py
```python
import torch
import unittest
from mogwai.metrics import contact_auc, precision_at_cutoff
class TestPrecision(unittest.TestCase):
def setUp(self):
self.pred = torch.FloatTensor(
[
[1e-3, 1e-2, 0.8],
[1e-2, 1e-4, 0.3],
[0.8, 0.3, 1e-10],
]
)
self.meas = torch.IntTensor([[0, 1, 1], [1, 0, 0], [1, 1, 0]])
def test_precision_cutoffs(self):
p_at_1 = precision_at_cutoff(self.pred, self.meas, cutoff=1, superdiag=0)
p_at_2 = precision_at_cutoff(self.pred, self.meas, cutoff=2, superdiag=0)
p_at_3 = precision_at_cutoff(self.pred, self.meas, cutoff=3, superdiag=0)
self.assertEqual(p_at_1, 2.0 / 3)
self.assertEqual(p_at_2, 1.0)
self.assertEqual(p_at_3, 1.0)
def test_superdiag(self):
superdiag_0 = precision_at_cutoff(self.pred, self.meas, cutoff=1, superdiag=0)
superdiag_1 = precision_at_cutoff(self.pred, self.meas, cutoff=1, superdiag=1)
superdiag_2 = precision_at_cutoff(self.pred, self.meas, cutoff=1, superdiag=2)
superdiag_3 = precision_at_cutoff(self.pred, self.meas, cutoff=1, superdiag=3)
self.assertEqual(superdiag_0, 2.0 / 3)
self.assertEqual(superdiag_1, 2.0 / 3)
self.assertEqual(superdiag_2, 1.0)
self.assertTrue(superdiag_3.isnan())
class TestAUC(unittest.TestCase):
def setUp(self):
self.pred = torch.FloatTensor(
[
[1e-3, 1e-2, 0.8],
[1e-2, 1e-4, 0.3],
[0.8, 0.3, 1e-10],
]
)
self.meas = torch.IntTensor([[0, 1, 1], [1, 0, 0], [1, 1, 0]])
def test_range(self):
auc = contact_auc(self.pred, self.meas, superdiag=0, cutoff_range=[1, 2, 3])
self.assertEqual(auc, 8.0 / 9)
def test_superdiag_range(self):
auc_superdiag_1 = contact_auc(
self.pred, self.meas, superdiag=1, cutoff_range=[1, 2, 3]
)
auc_superdiag_2 = contact_auc(
self.pred, self.meas, superdiag=2, cutoff_range=[1, 2, 3]
)
self.assertEqual(auc_superdiag_1, 8.0 / 9)
self.assertEqual(auc_superdiag_2, 1.0)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joshim5/TALE-Toolbox",
"score": 2
} |
#### File: TALE_Toolbox/public/views.py
```python
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session, make_response)
from TALE_Toolbox.utils import flash_errors
from TALE_Toolbox.computations import ReferenceSequenceGenerator
#from TALE_Toolbox.computations import generate_genbank
blueprint = Blueprint('public', __name__, static_folder="../static")
@blueprint.route("/", methods=["GET", "POST"])
def home():
return render_template("public/home.html")
@blueprint.route("/about/")
def about():
return render_template("public/about.html")
@blueprint.route("/generate/")
def generate():
sequence = request.args.get('sequence')
g_monomer = request.args.get('g_monomer')
backbone = request.args.get('backbone')
generator = ReferenceSequenceGenerator(sequence, g_monomer, backbone)
genbank = generator.generate_genbank()
response = make_response(genbank)
# Sequence with TF or Nuc appended to the name, e.g. "TALE_Nuc_TGAACAGATGC.gb"
filename = "TALE_Nuc_"
if backbone == "TALETF":
filename = "TALE_TF_"
filename = filename + sequence + ".gb"
response.headers["Content-Disposition"] = "attachment; filename=" + filename
response.status_code = 200
return response
``` |
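A hedged client-side sketch of calling the /generate/ endpoint above (the host/port and the g_monomer value are made-up placeholders for illustration):
```python
import requests

resp = requests.get(
    "http://localhost:5000/generate/",
    params={"sequence": "TGAACAGATGC", "g_monomer": "NN", "backbone": "TALETF"},
)
with open("TALE_TF_TGAACAGATGC.gb", "wb") as handle:
    handle.write(resp.content)   # GenBank file named after the backbone and sequence
```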
{
"source": "joshimbriani/Pyarks",
"score": 3
} |
#### File: pyarks/universal/UniversalStudiosJapan.py
```python
import json
import pkg_resources
import requests
from pyarks import utility  # bind the module as "utility" so utility.USJTranslate() below resolves
from pyarks.park import Park
from pyarks.ride import Ride
class UniversalStudiosJapan(Park):
def __init__(self):
self.rides = self.getRides()
super(UniversalStudiosJapan, self).__init__("Universal Studios Japan")
def getRides(self):
rides = []
response = self.getResponse()
if response["status"] == 2:
self.isOpen = False
resource_package = __name__ # Could be any module/package name
resource_path = '/'.join(('data', 'USJ.json')) # Do not use os.path.join(), see below
datafile = json.loads(pkg_resources.resource_string(resource_package, resource_path))
for ride in datafile["List"][0]["Rows"]:
rides.append(Ride(self, ride["Text"].encode("utf-8"), -1, ""))
return rides
else:
self.isOpen = True
#Fill in here when the park is open
for waitTimeGroup in response["list"]:
if utility.USJTranslate(waitTimeGroup["wait"].encode("utf-8")) == "Inactive":
waitTime = -2
else:
waitTime = int(waitTimeGroup["wait"][:-1])
for ride in waitTimeGroup["rows"]:
rides.append(Ride(self, utility.USJTranslate(ride["text"].encode("utf-8")), waitTime, ""))
return rides
def getResponse(self):
return requests.get("http://ar02.biglobe.ne.jp/app/waittime/waittime.json").json()
```
#### File: Pyarks/pyarks/utility.py
```python
def universalNameToID(name):
if name == "IOA" or name == "Islands of Adventure":
return 10000
elif name == "USF" or name == "Universal Studios Florida":
return 10010
elif name == "USH" or name == "Universal Studios Hollywood":
return 13825
elif name == "VB" or name == "Volcano Bay":
return 13801
else:
return -1
def USJTranslate(name):
if name == "ハローキティのカップケーキ・ドリーム":
return "Hello Kitty's Cupcake Dream"
elif name == "エルモのゴーゴー・スケートボード":
return "Elmo's go-go skateboard"
elif name == "モッピーのバルーン・トリップ":
return "Mobi Balloon Trip"
elif name == "フライング・スヌーピー":
return "Flying Snoopy"
elif name == "スヌーピーのグレートレース™":
return "Snoopy's Great Race ™"
elif name == "アメージング・アドベンチャー・オブ・スパイダーマン・ザ・ライド 4K3D":
return "Amazing Adventure of Spider-Man The Ride 4K 3 D"
elif name == "妖怪ウォッチ・ザ・リアル 4":
return "Yokai Watch The Real 4"
elif name == "ジュラシック・パーク・ザ・ライド®":
return "Jurassic Park - The Ride ®"
elif name == "ジョーズ®":
return "Jaws ®"
elif name == "セサミストリート 4-D ムービーマジック™":
return "Sesame Street 4-D Movie Magic ™"
elif name == "フライト・オブ・ザ・ヒッポグリフ™":
return "Flight of the Hippogriff ™"
elif name == "ハリウッド・ドリーム・ザ・ライド":
return "Hollywood · Dream · The · Ride"
elif name == "ハリウッド・ドリーム・ザ・ライド~バックドロップ~":
return "Hollywood · Dream · The Ride ~ Backdrop ~"
elif name == "ザ・フライング・ダイナソー":
return "The Flying Dinosaur"
elif name == "ハリー・ポッター・アンド・ザ・フォービドゥン・ジャーニー™":
return "Harry Potter and the Forbidden Journey ™"
elif name == "スペース・ファンタジー・ザ・ライド":
return "Space Fantasy the Ride"
elif name == "バックドラフト®":
return "Backdraft ®"
elif name == "シュレック 4-D アドベンチャー™":
return "Shrek 4-D Adventure ™"
elif name == "休止中":
return "Inactive"
else:
return "No translation"
def seaworldNameToID(name):
if name == "BGT":
return "BG_TPA"
elif name == "SWO":
return "SW_MCO"
elif name == "SWSD":
return "SW_SAN"
elif name == "SWSA":
return "SW_SAT"
elif name == "BGW":
return "BG_PHF"
else:
return "BG_TPA"
``` |
{
"source": "joshimbriani/pyflightsearch",
"score": 3
} |
#### File: phlights/models/leg.py
```python
import math
from datetime import datetime
import pytz
from tzlocal import get_localzone
from phlights.errors.configuration_error import ConfigurationError
from phlights.util import find_route
from phlights.models.flight import Flight
class Leg:
def __init__(self, departure_time=None, arrival_time=None, flights=None, from_location="", from_location_code="", to_location="", to_location_code=""):
self._departure_time = departure_time
self._arrival_time = arrival_time
self._flights = flights
self._from_location = from_location
self._from_location_code = from_location_code
self._to_location = to_location
self._to_location_code = to_location_code
@property
def departure_time(self):
if not self._departure_time:
return datetime.utcfromtimestamp(0)
local_tz = get_localzone()
return datetime.utcfromtimestamp(self._departure_time).replace(tzinfo=pytz.utc).astimezone(local_tz)
@property
def arrival_time(self):
if not self._arrival_time:
return datetime.utcfromtimestamp(0)
local_tz = get_localzone()
return datetime.utcfromtimestamp(self._arrival_time).replace(tzinfo=pytz.utc).astimezone(local_tz)
@property
def flights(self):
if not self._flights:
return []
return self._flights
@property
def from_location(self):
return self._from_location
@property
def from_location_code(self):
return self._from_location_code
@property
def to_location(self):
return self._to_location
@property
def to_location_code(self):
return self._to_location_code
@property
def layovers(self):
if not self._flights:
return -1
return len(self._flights) - 1
@property
def duration(self):
if not self._arrival_time or not self._departure_time:
return 0
return datetime.utcfromtimestamp(self._arrival_time) - datetime.utcfromtimestamp(self._departure_time)
@staticmethod
def build_legs(flight_data, start, end, start_city, end_city):
# First find the route from start to end
departure_route = find_route(flight_data, start, end)
if not departure_route:
return ConfigurationError("Couldn't determine route.")
departure_leg = Leg()
departure_flight_params = generate_leg_params(departure_route)
departure_leg._flights = departure_flight_params[0]
departure_leg._departure_time = departure_flight_params[1]
departure_leg._arrival_time = departure_flight_params[2]
departure_leg._from_location_code = start
departure_leg._from_location = start_city
departure_leg._to_location_code = end
departure_leg._to_location = end_city
# Then find the route from end to start
arrival_route = find_route(flight_data, end, start)
if not arrival_route:
return ConfigurationError("Couldn't determine route.")
return_leg = Leg()
return_flight_params = generate_leg_params(arrival_route)
return_leg._flights = return_flight_params[0]
return_leg._departure_time = return_flight_params[1]
return_leg._arrival_time = return_flight_params[2]
return_leg._from_location_code = end
return_leg._from_location = end_city
return_leg._to_location_code = start
return_leg._to_location = start_city
return [departure_leg, return_leg]
def __str__(self):
s = ""
s += "Leg from {} to {}".format(self.from_location, self.to_location) + "\n"
s += " Duration: {} hours".format(self.duration.total_seconds()/60/60) + "\n"
s += " Layovers: {}".format(self.layovers) + "\n"
for flight in self.flights:
s += " " + str(flight) + "\n"
return s
def generate_leg_params(all_flights):
flights = []
departure_time = math.inf
arrival_time = -math.inf
for flight in all_flights:
flights.append(Flight.build_flight(flight))
departure_time = min(departure_time, flight["dTimeUTC"])
arrival_time = max(arrival_time, flight["aTimeUTC"])
return [flights, departure_time, arrival_time]
```
#### File: pyflightsearch/phlights/util.py
```python
from datetime import date, timedelta, datetime
from copy import deepcopy
import requests
from phlights.constants import API_BASE, Day
from phlights.errors.configuration_error import ConfigurationError
def build_flight_search_queries(flight_search_builder):
queries = []
query_string = ["partner=picky", "curr=USD"]
query_string.append("max_stopovers=" + ("1" if flight_search_builder._allow_layovers else "0"))
# build the rest of the string
query_string.append("fly_from=" + flight_search_builder._from_location)
query_string.append("fly_to=" + flight_search_builder._to_location)
if flight_search_builder._departure_time:
dpt_time_str = flight_search_builder.get_departure_time_string()
query_string.append("dtime_from=" + dpt_time_str[0])
query_string.append("dtime_to=" + dpt_time_str[1])
if flight_search_builder._arrival_time:
arr_time_str = flight_search_builder.get_arrival_time_string()
query_string.append("atime_from=" + arr_time_str[0])
query_string.append("atime_to=" + arr_time_str[1])
if flight_search_builder._return_departure_time:
ret_dpt_time_str = flight_search_builder.get_return_departure_time_string()
query_string.append("ret_dtime_from=" + ret_dpt_time_str[0])
query_string.append("ret_dtime_to=" + ret_dpt_time_str[1])
if flight_search_builder._return_arrival_time:
ret_arr_time_str = flight_search_builder.get_return_arrival_time_string()
query_string.append("ret_atime_from=" + ret_arr_time_str[0])
query_string.append("ret_atime_to=" + ret_arr_time_str[1])
# generate dates given the conditions
date_range = flight_search_builder.get_date_range()
if flight_search_builder._departure_day and flight_search_builder._return_arrival_day:
dates = generate_dates_meeting_conditions(date_range[0], flight_search_builder._departure_day.value, flight_search_builder._return_arrival_day.value, date_range[1])
for start_end_pair in dates:
query_copy = deepcopy(query_string)
query_copy.append("date_from=" + start_end_pair[0].strftime("%d/%m/%Y"))
query_copy.append("date_to=" + start_end_pair[0].strftime("%d/%m/%Y"))
query_copy.append("return_from=" + start_end_pair[1].strftime("%d/%m/%Y"))
query_copy.append("return_to=" + start_end_pair[1].strftime("%d/%m/%Y"))
query_copy.append("nights_in_dst_from=" + str((start_end_pair[1] - start_end_pair[0]).days - 1))
query_copy.append("nights_in_dst_to=" + str((start_end_pair[1] - start_end_pair[0]).days - 1))
queries.append(API_BASE + "&".join(query_copy))
elif flight_search_builder._departure_date and flight_search_builder._return_departure_date:
# User has specified a firm start and end date
query_string.append("date_from=" + flight_search_builder._departure_date.strftime("%d/%m/%Y"))
query_string.append("date_to=" + flight_search_builder._departure_date.strftime("%d/%m/%Y"))
query_string.append("return_from=" + flight_search_builder._return_departure_date.strftime("%d/%m/%Y"))
query_string.append("return_to=" + flight_search_builder._return_departure_date.strftime("%d/%m/%Y"))
queries.append(API_BASE + "&".join(query_string))
else:
# User hasn't given a start or end date, so just set the search start date to start_from
query_string.append("date_from=" + date_range[0].strftime("%d/%m/%Y"))
query_string.append("date_to=" + date_range[1].strftime("%d/%m/%Y"))
queries.append(API_BASE + "&".join(query_string))
# return the queries
return queries
def generate_dates_meeting_conditions(start_date, departure_day, return_day, stop_date):
if not isinstance(start_date, date) or isinstance(start_date, datetime):
return ConfigurationError("start_date input to generate_dates_meeting_conditions must be a date object")
if type(departure_day) != int:
return ConfigurationError("departure_day input to generate_dates_meeting_conditions must be a int")
if type(return_day) != int:
return ConfigurationError("return_day input to generate_dates_meeting_conditions must be a int")
if not isinstance(stop_date, date) or isinstance(stop_date, datetime):
return ConfigurationError("stop_date input to generate_dates_meeting_conditions must be a date object")
pairs = []
start = None
for day in daterange(start_date, ((stop_date - start_date).days) + 6):
if day.weekday() == departure_day:
start = day
if start and day != start and day.weekday() == return_day:
pairs.append((start, day))
start = None
return pairs
def daterange(start_date, duration):
for i in range(duration):
yield start_date + timedelta(i)
def make_api_request(query_string):
r = requests.get(query_string)
if r.status_code != 200 and r.status_code != 201:
return None
response_json = r.json()
if "data" not in response_json:
return None
return response_json["data"]
def find_route(flight_data, start, end):
return find_route_helper(flight_data, start, end, set())
def find_route_helper(flight_data, start, end, seen):
for route in flight_data:
if (route["flyFrom"], route["flyTo"]) in seen:
continue
if route["flyFrom"] == start and route["flyTo"] == end:
return [route]
elif route["flyFrom"] == start:
seen_clone = deepcopy(seen)
seen_clone.add((route["flyFrom"], route["flyTo"]))
rest_of_path = find_route_helper(flight_data, route["flyTo"], end, seen_clone)
if rest_of_path:
return [route] + rest_of_path
return None
``` |
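A self-contained sketch of the weekday-pair generator above (weekday integers follow Python's convention, Monday=0 ... Sunday=6; the dates are arbitrary examples and assume the helpers above are importable):
```python
from datetime import date

pairs = generate_dates_meeting_conditions(date(2021, 6, 1), 4, 6, date(2021, 6, 30))
for depart, ret in pairs:
    print(depart.isoformat(), "->", ret.isoformat())   # Friday departures paired with the following Sunday
```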
{
"source": "joshimhoff/intervals",
"score": 3
} |
#### File: joshimhoff/intervals/lexer.py
```python
import ply.lex as lex
tokens = ('LBRACKET', 'RBRACKET', 'LPARAN', 'RPARAN',
'POST', 'DASH', 'LETTER', 'NUMBER')
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LPARAN = r'\('
t_RPARAN = r'\)'
t_POST = r'\|'
t_DASH = r'-'
t_LETTER = r'[A-G_](b|\#)*'
t_NUMBER = r'\d+'
t_ignore = ' '
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
def t_error(t):
print("lexer: illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
if __name__ == '__main__':
import sys
lexer.input(sys.stdin.read())
while True:
tok = lexer.token()
if not tok: break
print tok
``` |
{
"source": "Jo-Shin/Cow_Estrus_Detection",
"score": 3
} |
#### File: Cow_Estrus_Detection/mrcnn/cow.py
```python
import os
import sys
import json
import datetime
import numpy as np
import pandas as pd
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("/content/drive/Shareddrives/스마트축사_데이터_활용_대회/Mask_RCNN")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
sys.path.append("/content/drive/Shareddrives/스마트축사_데이터_활용_대회/Mask_RCNN/mrcnn")
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import imgaug
import imgaug.augmenters
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CowConfig(Config):
"""Configuration for training on the cow dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "cow"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 1 + 1 # Background + anestrus + estrus
# Number of training steps per epoch
STEPS_PER_EPOCH = 64
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.6
############################################################
# Dataset
############################################################
class CowDataset(utils.Dataset):
def load_cow(self, dataset_dir, subset):
"""Load a cow dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have two class to add.
# class 0: background
self.add_class("cow", 1, "anestrus") # 비발정
self.add_class("cow", 2, "estrus") # 발정
# Train or validation dataset?
assert subset in ["train", "val"]
# Load annotations (json file)
# path to the annotation file
json_file = json.load(open(os.path.join(dataset_dir, subset, subset + "_answer.json")))
Images = json_file['images'] # images
annotations = pd.DataFrame(json_file['annotations']) # instances in each image
# Store image and instance information in the image_info list inherited from utils.Dataset
for image in Images:
# image file path
image_path = os.path.join(dataset_dir,
subset + '/' + image['file_name'])
# image height / width
height, width = image['height'], image['width']
# polygon (boundary) coordinates and class of each instance in the image
# coordinate values
polygons = []
for polygon in annotations.loc[annotations.image_id == image['id'],
'segmentation']:
polygon_resize = polygon.copy()
for i, coord in enumerate(polygon):
if i % 2 == 0 and coord >= width:
polygon_resize[i] = coord-1
elif i % 2 == 1 and coord >= height:
polygon_resize[i] = coord-1
polygons.append(polygon_resize)
category_id = [x for x in annotations.loc[annotations.image_id == image['id'],
'category_id']]
# save the information to the image_info list
self.add_image(
'cow', # source
image['id'], # image_id
image_path,
width=width, height=height,
polygons=polygons,
category_id=category_id)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Convert polygons to a bitmap mask of shape
# mask.shape = [height, width, number of instances]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
# Get indexes of pixels inside the polygon and set them to 1
# skimage.draw.polygon(y point, x point)
for i, p in enumerate(info['polygons']):
rr, cc = skimage.draw.polygon(p[1::2], p[0::2])
mask[rr, cc, i] = 1
# return the instance masks and class ids
return mask.astype(np.bool), np.array(info['category_id'], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
######## modified ##########
if info["source"] == "cow":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model, augmentation):
"""Train the model."""
# Training dataset.
dataset_train = CowDataset()
dataset_train.load_cow(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = CowDataset()
dataset_val.load_cow(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
learning_rate = float(args.learning_rate),
epochs=int(args.epochs),
layers=args.layers,
augmentation=augmentation)  # use the augmenter object passed into train(); exec() would always return None
def color_splash(image, mask):
"""Apply color splash effect.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
# Make a grayscale copy of the image. The grayscale copy still
# has 3 RGB channels, though.
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# Copy color pixels from the original color image where mask is set
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) >= 1)
splash = np.where(mask, image, gray).astype(np.uint8)
else:
splash = gray.astype(np.uint8)
return splash
def detect_and_color_splash(model, image_path=None, video_path=None):
assert image_path or video_path
# Image or video?
if image_path:
# Run model detection and generate the color splash effect
print("Running on {}".format(args.image))
# Read image
image = skimage.io.imread(args.image)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color splash
splash = color_splash(image, r['masks'])
# Save output
file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimage.io.imsave(file_name, splash)
elif video_path:
import cv2
# Video capture
vcapture = cv2.VideoCapture(video_path)
width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(cv2.CAP_PROP_FPS)
# Define codec and create video writer
file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
r = model.detect([image], verbose=0)[0]
# Color splash
splash = color_splash(image, r['masks'])
# RGB -> BGR to save image to video
splash = splash[..., ::-1]
# Add image to video writer
vwriter.write(splash)
count += 1
vwriter.release()
print("Saved to ", file_name)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN to detect Cow.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'splash'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/balloon/dataset/",
help='Directory of the Balloon dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--epochs', required=False,
default=30,
metavar="set the epoch",
help='Set the epoch')
parser.add_argument('--learning_rate', required=False,
default=0.001,
metavar="set the learning_rate",
help='Set the learning_rate')
parser.add_argument('--rpn_nms_threshold', required=False,
default=0.7,
metavar="set the rpn_nms_threshold",
help='Set the rpn_nms_threshold')
parser.add_argument('--augmentation', required=False,
default=None,
metavar="augmentation",
help='augmentation : True or None')
parser.add_argument('--layers', required=False,
default='heads',
metavar="layers",
help='layers : all, heads, 3+, 4+, 5+')
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply the color splash effect on')
parser.add_argument('--video', required=False,
metavar="path or URL to video",
help='Video to apply the color splash effect on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "splash":
assert args.image or args.video,\
"Provide --image or --video to apply color splash"
print("Weights: ", args.weights)
print("Epochs: ", args.epochs)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
print("augmentation: ", args.augmentation)
print("Layers: ", args.layers)
# Configurations
if args.command == "train":
class TrainConfig(CowConfig):
RPN_NMS_THRESHOLD = float(args.rpn_nms_threshold)
config = TrainConfig()
else:
class InferenceConfig(CowConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model, bool(args.augmentation))
elif args.command == "splash":
detect_and_color_splash(model, image_path=args.image,
video_path=args.video)
else:
print("'{}' is not recognized. "
"Use 'train' or 'splash'".format(args.command))
``` |
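A minimal, self-contained sketch of the splash operation used by `color_splash()` above, run on synthetic data rather than real Mask R-CNN output, to show how the collapsed instance mask selects between the color frame and its grayscale copy:

```python
import numpy as np
import skimage.color

# Synthetic stand-ins for the real inputs: a 4x4 red-ish frame and one instance mask.
image = np.zeros((4, 4, 3), dtype=np.uint8)
image[..., 0] = 200
mask = np.zeros((4, 4, 1), dtype=bool)
mask[0, 0, 0] = True

gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
collapsed = (np.sum(mask, -1, keepdims=True) >= 1)   # merge all instances into one layer
splash = np.where(collapsed, image, gray).astype(np.uint8)

print(splash[0, 0])   # masked pixel keeps its original color, e.g. [200 0 0]
print(splash[1, 1])   # unmasked pixel becomes grayscale, e.g. [42 42 42]
```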
{
"source": "joshingeniero/true-hybrid-sase-asic",
"score": 2
} |
#### File: true-hybrid-sase-asic/sasebot/test_creation.py
```python
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright (c) 2021 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import requests
import json
from DETAILS import *
"""" Test URLs """
WebExProductivityToolsURL = "https://cisco.webex.com/WBXService/XMLService"
PrimaryCBServerURL = "https://ed1sgcb191.webex.com"
SecondaryCBServerURL = "https://epycb16302.webex.com"
WebExPrimaryAudioURL = "msg2mcs136.webex.com"
WebExSecondaryAudioURL = "gmjp2mcs192.webex.com"
WebExPrimaryVideoURL = "msg2mcs136.webex.com"
WebExSecondaryVideoURL = "gmjp2mcs192.webex.com"
SalesforceURL = "https://ciscosales.my.salesforce.com/"
O365URL = "https://login.microsoftonline.com"
def custom_enterprise_test(agent_id, custom_url):
url = "https://api.thousandeyes.com/v6/instant/http-server.json"
payload = json.dumps({
"agents": [
{
"agentId": agent_id
}
],
"testName": "Custom URL Enterprise Instant Test",
"url": custom_url
})
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': Authorization
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def find_endpoint_agent_id(webex_card_data):
computer_name = webex_card_data["hostnameVal"]
url = "https://api.thousandeyes.com/v6/endpoint-agents.json?computerName=" + computer_name
print(url)
payload = ""
headers = {
'Content-Type': 'application/json',
'Authorization': Authorization
}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
response_json = json.loads(response.text)
agentId = (response_json["endpointAgents"][0]["agentId"])
print(agentId)
result_array = create_endpoint_agent_label(webex_card_data, agentId)
print("findendpointagentid")
print(result_array)
return result_array
def create_endpoint_agent_label(webex_card_data, endpoint_agent_id):
url = "https://api.thousandeyes.com/v6/groups/endpoint-agents/new.json"
endpoint_agents_array = [{"agentId": endpoint_agent_id}]
print('1111111111111111')
print(endpoint_agents_array)
print('1111111111111111')
payload = json.dumps({
"name": "MY NEW5 Label",
"endpointAgents": endpoint_agents_array
})
headers = {
'Content-Type': 'application/json',
'Authorization': Authorization
}
response = requests.request("POST", url, headers=headers, data=payload)
print('222222222222222222')
print(response.text)
print('222222222222222222')
response_json = json.loads(response.text)
groupId = (response_json["groups"][0]["groupId"])
print(groupId)
resultArray = test_selector(groupId, webex_card_data)
print("create label ")
print(resultArray)
return resultArray
def test_selector(group_id, webex_card_data):
issueArray = webex_card_data["IssueSelectVal"].split(",")
print(issueArray)
resultArray = []
for issue in issueArray:
print(issue)
if issue == "WebexAudio":
resultArray.append(webex_primary_audio(group_id))
resultArray.append(webex_secondary_audio(group_id))
resultArray.append(webex_primary_cb_server(group_id))
resultArray.append(webex_secondary_cb_server(group_id))
elif issue == "WebexVideo":
resultArray.append(webex_primary_video(group_id))
resultArray.append(webex_secondary_video(group_id))
resultArray.append(webex_primary_cb_server(group_id))
resultArray.append(webex_secondary_cb_server(group_id))
elif issue == "salesforce":
resultArray.append(salesforce(group_id))
elif issue == "Noneofabove":
CustomURL = webex_card_data["CustomURLVal"]
resultArray.append(custom_endpoint_test(group_id, CustomURL))
elif issue == "Webexproductivitytools":
resultArray.append(webex_productivity_tools(group_id))
elif issue == "Office365":
resultArray.append(o365_test(group_id))
delete_label(group_id)
print("test selector")
print(resultArray)
return resultArray
def webex_productivity_tools(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 5000,
"testName": "WebEx Productivity Tools Instant HTTP test",
"url": WebExProductivityToolsURL,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_primary_cb_server(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 1000,
"testName": "WebEx Primary CB Server Instant HTTP test",
"url": PrimaryCBServerURL,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_secondary_cb_server(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 1000,
"testName": "WebEx Secondary CB Server Instant HTTP test",
"url": SecondaryCBServerURL,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_primary_audio(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/agent-to-server.json"
payload = json.dumps({
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"maxMachines": 5,
"testName": "Webex Primary Audio Test",
"serverName": WebExPrimaryAudioURL,
"port": 5004
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_secondary_audio(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/agent-to-server.json"
payload = json.dumps({
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"maxMachines": 5,
"testName": "Webex Secondary Audio Test",
"serverName": WebExSecondaryAudioURL,
"port": 5004
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_primary_video(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/agent-to-server.json"
payload = json.dumps({
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"maxMachines": 5,
"testName": "Webex Primary Video Test",
"serverName": WebExPrimaryVideoURL,
"port": 5004
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def webex_secondary_video(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/agent-to-server.json"
payload = json.dumps({
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"maxMachines": 5,
"testName": "Webex Secondary Video Test",
"serverName": WebExSecondaryVideoURL,
"port": 5004
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def salesforce(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 5000,
"testName": "Salesforce Instant HTTP test",
"url": SalesforceURL,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def custom_endpoint_test(group_id, custom_url):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 5000,
"testName": "Custom URL Instant HTTP test",
"url": custom_url,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def o365_test(group_id):
url = "https://api.thousandeyes.com/v6/endpoint-instant/http-server.json"
payload = json.dumps({
"authType": "NONE",
"flagPing": True,
"flagTraceroute": True,
"groupId": group_id,
"httpTimeLimit": 5000,
"maxMachines": 5,
"sslVersion": 0,
"targetResponseTime": 1000,
"testName": "O365 Instant HTTP test",
"url": O365URL,
"verifyCertHostname": True
})
headers = {
'Accept': 'application/json',
'Authorization': Authorization,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
return response.text
def delete_label(group_id):
url = "https://api.thousandeyes.com/v6/groups/" + str(group_id) + "/delete.json"
payload = ""
headers = {
'Content-Type': 'application/json',
'Authorization': Authorization
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
``` |
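A hedged sketch of how the helpers above chain together for a single Webex card submission; the hostname and issue values are invented, and a valid `Authorization` token from `DETAILS.py` plus network access to the ThousandEyes v6 API are assumed:

```python
from test_creation import find_endpoint_agent_id  # assumed module name

# Hypothetical payload in the shape produced by the Webex adaptive card.
webex_card_data = {
    "hostnameVal": "LAPTOP-12345",           # endpoint agent's computer name
    "IssueSelectVal": "WebexAudio,Office365",
    "CustomURLVal": "",
}

# find_endpoint_agent_id() resolves the agent, create_endpoint_agent_label()
# wraps it in a temporary label, test_selector() fires one instant test per
# selected issue, and delete_label() removes the temporary label again.
results = find_endpoint_agent_id(webex_card_data)
for raw in results:
    print(raw[:120])   # each entry is the raw JSON text returned by ThousandEyes
```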
{
"source": "joshingmachine/kielbasa",
"score": 3
} |
#### File: python/kielbasa/get_lower_case.py
```python
def get_lower_case(original_string):
lower_case_string = original_string.lower()
return lower_case_string
``` |
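A one-line usage sketch for the helper above; the import path mirrors the file location and is an assumption:

```python
from kielbasa.get_lower_case import get_lower_case  # assumed package layout

assert get_lower_case("Kielbasa ROCKS") == "kielbasa rocks"
```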
{
"source": "joshington/blogapi",
"score": 3
} |
#### File: posts/api/permissions.py
```python
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
message = "You must be the owner of this object."
my_safe_method = ['GET','PUT']
def has_permission(self,request, view):
#similar to has_object_permission, nothing very much different
if request.method in self.my_safe_method:
return True
return False
def has_object_permission(self, request,view,obj):
#member = Membership.objects.get(uer=request.user)
#member.is_active
if request.method in SAFE_METHODS:
return True#this will only allow you to update the object if you're the owner user
return obj.user == request.user#making sure that the request user is the same as the one who
#created the object, and the object user is coming from the user in the Post model
```
#### File: posts/api/views.py
```python
from django.db.models import Q #handling the Q lookup
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from rest_framework.generics import (
CreateAPIView,
ListAPIView,
RetrieveAPIView,
RetrieveUpdateAPIView,
UpdateAPIView,
DestroyAPIView
) #want to list the posts
#pagination is being handled right here
from .pagination import PostLimitOffsetPagination,PostPageNumberPagination
#handling permissions here
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly,
)
from posts.models import Post
from .permissions import IsOwnerOrReadOnly
from .serializers import (
PostCreateSerializer,
PostListSerializer,
PostDetailSerializer,
)
class PostCreateAPIView(CreateAPIView):
queryset = Post.objects.all()
serializer_class = PostCreateSerializer
#for a user to create a post they must be authenticated
permission_classes = [IsAuthenticated]
def perform_create(self, serializer):
serializer.save(user=self.request.user)#this is gonna use the user field for the Post model
class PostListAPIView(ListAPIView):
#queryset = Post.objects.all()
serializer_class = PostListSerializer
filter_backends = [SearchFilter,OrderingFilter]
search_fields = ['title', 'content', 'user__first_name']#users can only search these fields
pagination_class = PostPageNumberPagination#PostLimitOffsetPagination
#we intend to override our get_queryset method
def get_queryset(self, *args, **kwargs):
#queryset_list = Post.objects.filter(user=self.request.user)==could use that if maybe its GET
queryset_list = Post.objects.all()
#orqueryset_lst = super(PostListAPIView, self).get_queryset(*args, **kwargs)
#similar to the above
query = self.request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query)|
Q(content__icontains=query)|
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
return queryset_list
class PostDetailAPIView(RetrieveAPIView):
queryset = Post.objects.all()
serializer_class = PostDetailSerializer
lookup_field = 'slug'
#lookup_field_kwarg = "abc"#fixes the slug to abc
class PostUpdateAPIView(RetrieveUpdateAPIView):
queryset = Post.objects.all()
serializer_class = PostCreateSerializer  # no dedicated update serializer is imported; reuse the create serializer for edits
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly]#user should be authenticated or else readonly
def perform_update(self, serializer):
serializer.save(user=self.request.user)#keeps the requesting user as the owner on update
#the permission classes above already ensure that only the owner can update the object
#this hook could also be used to notify the logged-in user, e.g. by email, that the post was updated
#so watch these two hooks, perform_update and perform_create; they are useful
class PostDeleteAPIView(DestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostDetailSerializer  # PostSerializer is not imported; the detail serializer is the closest match
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly]
``` |
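The views above still need routes; a hedged sketch of what an accompanying `posts/api/urls.py` could look like (paths and names are assumptions, not taken from the repository):

```python
from django.urls import path

from .views import (
    PostCreateAPIView,
    PostListAPIView,
    PostDetailAPIView,
    PostUpdateAPIView,
    PostDeleteAPIView,
)

urlpatterns = [
    path('', PostListAPIView.as_view(), name='list'),
    path('create/', PostCreateAPIView.as_view(), name='create'),
    path('<slug:slug>/', PostDetailAPIView.as_view(), name='detail'),
    path('<slug:slug>/edit/', PostUpdateAPIView.as_view(), name='update'),
    path('<slug:slug>/delete/', PostDeleteAPIView.as_view(), name='delete'),
]
```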
{
"source": "joshinils/upload-scripts",
"score": 3
} |
#### File: joshinils/upload-scripts/osc_discoverer.py
```python
import os
import json
import logging
import constants
from common.models import GPS, OSCDevice
from io_storage.storage import Local
from parsers.exif import ExifParser
from parsers.osc_metadata.parser import MetadataParser
from visual_data_discover import VisualDataDiscoverer
from visual_data_discover import ExifPhotoDiscoverer
from visual_data_discover import PhotoMetadataDiscoverer
from visual_data_discover import VideoDiscoverer
from validators import SequenceValidator, SequenceMetadataValidator, SequenceFinishedValidator
from osc_utils import unzip_metadata
from osc_models import Sequence, Photo, VisualData
LOGGER = logging.getLogger('osc_tools.osc_discoverer')
class OSCUploadProgressDiscoverer:
"""This class is responsible with finding a upload progress file"""
def __eq__(self, other):
if isinstance(other, OSCUploadProgressDiscoverer):
# stateless discoverer: instances of the same type are considered equal
return True
return False
def __hash__(self):
return super().__hash__()
@classmethod
def discover(cls, path: str) -> [str]:
"""this method will discover a upload progress file and parse it to get a progress list."""
LOGGER.debug("will read uploaded indexes")
progress_file_path = path + "/" + constants.PROGRESS_FILE_NAME
if not os.path.isfile(progress_file_path):
return []
with open(progress_file_path, 'r') as input_file:
line = input_file.readline()
indexes = list(filter(None, line.split(";")))
return indexes
class OSCMetadataDiscoverer:
"""this class will discover a metadata file"""
def __eq__(self, other):
if isinstance(other, OSCMetadataDiscoverer):
# stateless discoverer: instances of the same type are considered equal
return True
return False
def __hash__(self):
return super().__hash__()
@classmethod
def discover(cls, path: str) -> str:
"""This method will discover osc metadata path"""
files = os.listdir(path)
for file_path in files:
file_name, file_extension = os.path.splitext(file_path)
if ".txt" in file_extension and "track" in file_name:
return path + "/" + file_path
if ".gz" in file_extension and "track" in file_name:
return unzip_metadata(path)
return None
# if no metadata found generate metadata from gpx or exif
class OnlineIDDiscoverer:
"""This class will discover online id of a sequence"""
@classmethod
def discover(cls, path: str) -> str:
"""This method will discover online id"""
LOGGER.debug("searching for metadata %s", path)
sequence_file_path = path + "/osc_sequence_id.txt"
if not os.path.isfile(sequence_file_path):
return None
try:
with open(sequence_file_path) as json_file:
data = json.load(json_file)
if "id" in data and data["id"] and str.isdigit(data["id"]):
return int(data["id"])
except FileNotFoundError:
return None
return None
@classmethod
def discover_using_type(cls, path: str, osc_type: str):
"""this method is discovering the online id"""
print(path)
print(osc_type)
class SequenceDiscoverer:
"""Seq discoverer base class"""
def __init__(self):
self.ignored_for_upload: bool = False
self.name = "default"
self.online_id: OnlineIDDiscoverer = OnlineIDDiscoverer()
self.visual_data: VisualDataDiscoverer = VisualDataDiscoverer()
self.osc_metadata: OSCMetadataDiscoverer = OSCMetadataDiscoverer()
self.upload_progress: OSCUploadProgressDiscoverer = OSCUploadProgressDiscoverer()
self.validator: SequenceValidator = SequenceValidator()
def discover(self, path: str) -> [Sequence]:
"""This method will discover a valid sequence"""
files = os.listdir(path)
sequences = []
for file_path in files:
full_path = os.path.join(path, file_path)
if os.path.isdir(full_path):
sequences = sequences + self.discover(full_path)
sequence = self.create_sequence(path)
if self.validator.validate(sequence):
sequences.append(sequence)
else:
LOGGER.debug("This sequence (%s) does not conform to this discoverer %s.", path,
self.name)
return sequences
def create_sequence(self, path):
"""This method will discover all attributes af a sequence"""
sequence = Sequence()
if self.online_id:
sequence.online_id = self.online_id.discover(path)
if self.visual_data:
(visual_data, data_type) = self.visual_data.discover(path)
sequence.visual_items = visual_data
sequence.visual_data_type = data_type
if self.osc_metadata:
sequence.osc_metadata = self.osc_metadata.discover(path)
if self.upload_progress:
sequence.progress = self.upload_progress.discover(path)
sequence.path = path
self._find_latitude_longitude_device_info(sequence)
return sequence
def _find_latitude_longitude_device_info(self, sequence: Sequence):
if not sequence.online_id:
if sequence.osc_metadata and isinstance(self.validator, SequenceMetadataValidator):
parser = MetadataParser.valid_parser(sequence.osc_metadata, Local())
gps = parser.next_item_with_class(GPS)
if gps:
sequence.latitude = gps.latitude
sequence.longitude = gps.longitude
device_info: OSCDevice = parser.next_item_with_class(OSCDevice)
if device_info:
sequence.device = device_info.device_raw_name
sequence.platform = device_info.platform_name
elif sequence.visual_items:
visual_item: VisualData = sequence.visual_items[0]
if isinstance(self.visual_data, ExifPhotoDiscoverer):
parser = ExifParser.valid_parser(visual_item.path, Local())
device_info: OSCDevice = parser.next_item_with_class(OSCDevice)
if device_info:
sequence.device = device_info.device_raw_name
sequence.platform = device_info.platform_name
if isinstance(visual_item, Photo):
sequence.latitude = visual_item.latitude
sequence.longitude = visual_item.longitude
class SequenceDiscovererFactory:
"""Class that builds a list of sequence discoverers ready to use."""
@classmethod
def discoverers(cls) -> [SequenceDiscoverer]:
"""This is a factory method that will return Sequence Discoverers"""
return [cls.finished_discoverer(),
cls.photo_metadata_discoverer(),
cls.exif_discoverer(),
cls.video_discoverer()]
@classmethod
def photo_metadata_discoverer(cls) -> SequenceDiscoverer:
"""This method will return a photo discoverer"""
photo_metadata_finder = SequenceDiscoverer()
photo_metadata_finder.name = "Metadata-Photo"
photo_metadata_finder.visual_data = PhotoMetadataDiscoverer()
photo_metadata_finder.validator = SequenceMetadataValidator()
return photo_metadata_finder
@classmethod
def exif_discoverer(cls) -> SequenceDiscoverer:
"""This method will return a photo discoverer"""
exif_photo_finder = SequenceDiscoverer()
exif_photo_finder.name = "Exif-Photo"
exif_photo_finder.visual_data = ExifPhotoDiscoverer()
exif_photo_finder.osc_metadata = None
return exif_photo_finder
@classmethod
def video_discoverer(cls) -> SequenceDiscoverer:
"""this method will return a video discoverer"""
video_finder = SequenceDiscoverer()
video_finder.name = "Metadata-Video"
video_finder.visual_data = VideoDiscoverer()
video_finder.validator = SequenceMetadataValidator()
return video_finder
@classmethod
def finished_discoverer(cls) -> SequenceDiscoverer:
"""this method will return a discoverer that finds all the sequences that finished upload"""
finished_finder = SequenceDiscoverer()
finished_finder.name = "Done Uploading"
finished_finder.ignored_for_upload = True
finished_finder.visual_data = None
finished_finder.osc_metadata = None
finished_finder.validator = SequenceFinishedValidator()
return finished_finder
```
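A hedged sketch of driving the discoverer factory above against a local recordings folder; the directory path is a placeholder:

```python
from osc_discoverer import SequenceDiscovererFactory

for discoverer in SequenceDiscovererFactory.discoverers():
    sequences = discoverer.discover("/path/to/recordings")  # placeholder directory
    if discoverer.ignored_for_upload:
        continue  # e.g. the "Done Uploading" discoverer only reports finished sequences
    for sequence in sequences:
        print(discoverer.name, sequence.path, sequence.visual_data_count())
```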
#### File: joshinils/upload-scripts/osc_models.py
```python
from typing import Optional
class Sequence:
"""Sequence is a model class containing a list of visual items"""
def __init__(self):
self.path: str = ""
self.online_id: str = ""
self.progress: [str] = []
self.visual_items: [VisualData] = []
self.osc_metadata: str = ""
self.visual_data_type: str = ""
self.latitude: float = None
self.longitude: float = None
self.platform: Optional[str] = None
self.device: Optional[str] = None
@property
def description(self) -> str:
"""this method returns a string description of a sequence"""
return self.online_id + self.osc_metadata + self.visual_data_type
def visual_data_count(self) -> int:
"""this method returns the count of visual data"""
return len(self.visual_items)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Sequence):
return self.path == other.path
return False
def __hash__(self):
return hash(self.path)
class VisualData:
"""VisualData is a model class for a visual item"""
def __init__(self, path):
self.path: str = path
self.index: int = None
def __eq__(self, other):
if isinstance(other, VisualData):
return self.path == other.path and \
self.index == other.index
return False
def __hash__(self):
return hash((self.path, self.index))
class Photo(VisualData):
"""Photo is a VisualData model for a photo item"""
def __init__(self, path):
super().__init__(path)
self.latitude: float = None
self.longitude: float = None
self.exif_timestamp: float = None
self.gps_timestamp: float = None
self.gps_speed: float = None
self.gps_altitude: float = None
self.gps_compass: float = None
self.fov: Optional[float] = None
self.projection: str = None
def __eq__(self, other):
if isinstance(other, Photo):
return self.gps_timestamp == other.gps_timestamp and \
self.latitude == other.latitude and \
self.longitude == other.longitude
return False
def __hash__(self):
return hash((self.gps_timestamp,
self.latitude,
self.longitude))
class Video(VisualData):
"""Video is a VisualData model for a video item"""
def __eq__(self, other):
if isinstance(other, Video):
return self.path == other.path and self.index == other.index
return False
def __hash__(self):
return hash((self.path, self.index))
``` |
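A small sketch of how the model classes above end up being populated by the discoverers; every value here is made up:

```python
from osc_models import Sequence, Photo  # assumed module name matches the file

seq = Sequence()
seq.path = "/path/to/sequence"               # placeholder path
photo = Photo("/path/to/sequence/0001.jpg")
photo.latitude, photo.longitude = 45.0, 25.0
seq.visual_items.append(photo)
print(seq.visual_data_count())               # -> 1
```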
{
"source": "Joshinn-io/augur",
"score": 2
} |
#### File: facade_worker/excel_generators/example.py
```python
import sys
import MySQLdb
import imp
import time
import datetime
import xlsxwriter
import os
dirname = os.path.dirname
filepath = os.path.abspath(__file__)
sys.path.append(dirname(dirname(filepath)))
try:
imp.find_module('db')
from db import db,cursor
except:
sys.exit("Can't find db.py. Have you created it?")
def get_setting(setting):
# Get a setting from the database
query = ("SELECT value FROM settings WHERE setting='%s' ORDER BY "
"last_modified DESC LIMIT 1" % setting)
cursor.execute(query)
return cursor.fetchone()["value"]
### The real program starts here ###
#--> Set your filename
filename = 'facade_summary-projects_by_LoC_and_number_contributors_by_year.xlsx'
#--> Set the description of the data
detail = 'LoC added (Unique emails)'
#--> Change this to modify the names of each worksheet
sheets = reversed(list(range(int(get_setting('start_date')[:4]),
datetime.datetime.now().year + 1)))
#--> Change this to modify the x axis
get_x_axis = "SELECT name,id FROM projects"
cursor.execute(get_x_axis)
x_axis = list(cursor)
facade_dir = dirname(dirname(dirname(filepath)))
outfile = os.path.join(facade_dir,'files',filename)
workbook = xlsxwriter.Workbook(outfile)
bold = workbook.add_format({'bold': True})
italic = workbook.add_format({'italic': True})
bold_italic = workbook.add_format({'bold': True, 'italic': True})
numformat = workbook.add_format({'num_format': '#,##0'})
for sheet in sheets:
worksheet = workbook.add_worksheet(str(sheet))
worksheet.write(1,1,'Report generated on %s by Facade' %
time.strftime('%Y-%m-%d'),bold)
worksheet.write(2,1,'https://github.com/brianwarner/facade')
worksheet.write(3,1,'Format: %s' % detail)
top_row = 5
first_col = 1
col = first_col + 1
for x in x_axis:
#--> Change the value of x[''] to match SELECT statement
worksheet.write(top_row,col,x['name'],bold_italic)
col += 1
#--> Change this to modify the y axis
get_y_axis = ("SELECT DISTINCT affiliation FROM project_annual_cache "
"WHERE year = %s "
"ORDER BY affiliation ASC"
% sheet)
cursor.execute(get_y_axis)
y_axis = list(cursor)
row = top_row + 1
for y in y_axis:
#--> Change the value of y[''] to match SELECT statement
worksheet.write(row,first_col,y['affiliation'],bold)
col = first_col + 1
for x in x_axis:
#--> Change this to modify the data
get_stats = ("SELECT FORMAT(SUM(added),0) AS added, "
"FORMAT(COUNT(email),0) AS emails "
"FROM project_annual_cache "
"WHERE affiliation = '%s' "
"AND projects_id = %s "
"AND year = %s"
% (y['affiliation'].replace("'","\\'"),
x['id'], sheet))
cursor.execute(get_stats)
stats = list(cursor)
for stat in stats:
#--> Change this to define the format for each data point
if stat['added']:
worksheet.write(row,col,'%s (%s)'
% (stat['added'], stat['emails']))
col += 1
row += 1
workbook.close()
``` |
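The script exits early if `db.py` is missing; a hedged sketch of what that file is expected to provide, assuming a local Facade MySQL database and a dict-style cursor (the script indexes rows by column name). Credentials are placeholders:

```python
# db.py -- adjust host/user/password to your Facade installation.
import MySQLdb
import MySQLdb.cursors

db = MySQLdb.connect(
    host="localhost",
    user="facade",
    passwd="changeme",
    db="facade",
    charset="utf8mb4",
)
cursor = db.cursor(MySQLdb.cursors.DictCursor)
```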
{
"source": "joshis1/C_Programming",
"score": 2
} |
#### File: WiresharkDissectorFoo/test/conftest.py
```python
import re
import fixtures
def pytest_addoption(parser):
parser.addoption('--disable-capture', action='store_true',
help='Disable capture tests'
)
parser.addoption('--program-path', help='Path to Wireshark executables.')
parser.addoption('--skip-missing-programs',
help='Skip tests that lack programs from this list instead of failing'
' them. Use "all" to ignore all missing programs.')
_all_test_groups = None
# this is set only to please case_unittests.test_unit_ctest_coverage
def pytest_collection_modifyitems(items):
'''Find all test groups.'''
global _all_test_groups
suites = []
for item in items:
name = item.nodeid.split("::")[0].replace(".py", "")
# When executed from the rootdir (e.g. "pytest test"), be sure to strip
# all preceding components ("test/suite_io" -> "suite_io").
name = re.sub(r'^.*/suite_', 'suite_', name)
name = name.replace("/", ".")
if name not in suites:
suites.append(name)
_all_test_groups = sorted(suites)
# Must enable pytest before importing fixtures_ws.
fixtures.enable_pytest()
from fixtures_ws import *
@fixtures.fixture(scope='session')
def all_test_groups():
return _all_test_groups
```
#### File: WiresharkDissectorFoo/test/fixtures.py
```python
import argparse
import functools
import inspect
import sys
import unittest
_use_native_pytest = False
def enable_pytest():
global _use_native_pytest, pytest
assert not _fallback
import pytest
_use_native_pytest = True
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
"""
When running under pytest, this is the same as the pytest.fixture decorator.
See https://docs.pytest.org/en/latest/reference.html#pytest-fixture
"""
if _use_native_pytest:
# XXX sorting of fixtures based on scope does not work, see
# https://github.com/pytest-dev/pytest/issues/4143#issuecomment-431794076
# When ran under pytest, use native functionality.
return pytest.fixture(scope, params, autouse, ids, name)
init_fallback_fixtures_once()
return _fallback.fixture(scope, params, autouse, ids, name)
def _fixture_wrapper(test_fn, params):
@functools.wraps(test_fn)
def wrapped(self):
if not _use_native_pytest:
self._fixture_request.function = getattr(self, test_fn.__name__)
self._fixture_request.fillfixtures(params)
fixtures = [self._fixture_request.getfixturevalue(n) for n in params]
test_fn(self, *fixtures)
return wrapped
def uses_fixtures(cls):
"""Enables use of fixtures within test methods of unittest.TestCase."""
assert issubclass(cls, unittest.TestCase)
for name in dir(cls):
func = getattr(cls, name)
if not name.startswith('test') or not callable(func):
continue
params = inspect.getfullargspec(func).args[1:]
# Unconditionally overwrite methods in case usefixtures marks exist.
setattr(cls, name, _fixture_wrapper(func, params))
if _use_native_pytest:
# Make request object to _fixture_wrapper
@pytest.fixture(autouse=True)
def __inject_request(self, request):
self._fixture_request = request
cls.__inject_request = __inject_request
else:
_patch_unittest_testcase_class(cls)
return cls
def mark_usefixtures(*args):
"""Add the given fixtures to every test method."""
if _use_native_pytest:
return pytest.mark.usefixtures(*args)
def wrapper(cls):
cls._fixtures_prepend = list(args)
return cls
return wrapper
# Begin fallback functionality when pytest is not available.
# Supported:
# - session-scoped fixtures (for cmd_tshark)
# - function-scoped fixtures (for tmpfile)
# - teardown (via yield keyword in fixture)
# - sorting of scopes (session before function)
# - fixtures that depend on other fixtures (requires sorting)
# - marking classes with @pytest.mark.usefixtures("fixture")
# Not supported (yet) due to lack of need for it:
# - autouse fixtures
# - parameterized fixtures (@pytest.fixture(params=...))
# - class-scoped fixtures
# - (overriding) fixtures on various levels (e.g. conftest, module, class)
class _FixtureSpec(object):
def __init__(self, name, scope, func):
self.name = name
self.scope = scope
self.func = func
self.params = inspect.getfullargspec(func).args
if inspect.ismethod(self.func):
self.params = self.params[1:] # skip self
def __repr__(self):
return '<_FixtureSpec name=%s scope=%s params=%r>' % \
(self.name, self.scope, self.params)
class _FixturesManager(object):
'''Records collected fixtures when pytest is unavailable.'''
fixtures = {}
# supported scopes, in execution order.
SCOPES = ('session', 'function')
def _add_fixture(self, scope, autouse, name, func):
name = name or func.__name__
if name in self.fixtures:
raise NotImplementedError('overriding fixtures is not supported')
self.fixtures[name] = _FixtureSpec(name, scope, func)
return func
def fixture(self, scope, params, autouse, ids, name):
if params:
raise NotImplementedError('params is not supported')
if ids:
raise NotImplementedError('ids is not supported')
if autouse:
raise NotImplementedError('autouse is not supported yet')
if callable(scope):
# used as decorator, pass through the original function
self._add_fixture('function', autouse, name, scope)
return scope
assert scope in self.SCOPES, 'unsupported scope'
# invoked with arguments, should return a decorator
return lambda func: self._add_fixture(scope, autouse, name, func)
def lookup(self, name):
return self.fixtures.get(name)
def resolve_fixtures(self, fixtures):
'''Find all dependencies for the requested list of fixtures.'''
unresolved = fixtures.copy()
resolved_keys, resolved = [], []
while unresolved:
param = unresolved.pop(0)
if param in resolved:
continue
spec = self.lookup(param)
if not spec:
if param == 'request':
continue
raise RuntimeError("Fixture '%s' not found" % (param,))
unresolved += spec.params
resolved_keys.append(param)
resolved.append(spec)
# Return fixtures, sorted by their scope
resolved.sort(key=lambda spec: self.SCOPES.index(spec.scope))
return resolved
class _ExecutionScope(object):
'''Store execution/teardown state for a scope.'''
def __init__(self, scope, parent):
self.scope = scope
self.parent = parent
self.cache = {}
self.finalizers = []
def _find_scope(self, scope):
context = self
while context.scope != scope:
context = context.parent
return context
def execute(self, spec, test_fn):
'''Execute a fixture and cache the result.'''
context = self._find_scope(spec.scope)
if spec.name in context.cache:
return
try:
value, cleanup = self._execute_one(spec, test_fn)
exc = None
except Exception:
value, cleanup, exc = None, None, sys.exc_info()[1]
context.cache[spec.name] = value, exc
if cleanup:
context.finalizers.append(cleanup)
if exc:
raise exc
def cached_result(self, spec):
'''Obtain the cached result for a previously executed fixture.'''
entry = self._find_scope(spec.scope).cache.get(spec.name)
if not entry:
return None, False
value, exc = entry
if exc:
raise exc
return value, True
def _execute_one(self, spec, test_fn):
# A fixture can only execute in the same or earlier scopes
context_scope_index = _FixturesManager.SCOPES.index(self.scope)
fixture_scope_index = _FixturesManager.SCOPES.index(spec.scope)
assert fixture_scope_index <= context_scope_index
if spec.params:
# Do not invoke destroy, it is taken care of by the main request.
subrequest = _FixtureRequest(self)
subrequest.function = test_fn
subrequest.fillfixtures(spec.params)
fixtures = (subrequest.getfixturevalue(n) for n in spec.params)
value = spec.func(*fixtures) # Execute fixture
else:
value = spec.func() # Execute fixture
if not inspect.isgenerator(value):
return value, None
@functools.wraps(value)
def cleanup():
try:
next(value)
except StopIteration:
pass
else:
raise RuntimeError('%s yielded more than once!' % (spec.name,))
return next(value), cleanup
def destroy(self):
exceptions = []
for cleanup in self.finalizers:
try:
cleanup()
except:
exceptions.append(sys.exc_info()[1])
self.cache.clear()
self.finalizers.clear()
if exceptions:
raise exceptions[0]
class _FixtureRequest(object):
'''
Holds state during a single test execution. See
https://docs.pytest.org/en/latest/reference.html#request
'''
def __init__(self, context):
self._context = context
self._fixtures_prepend = [] # fixtures added via usefixtures
# XXX is there any need for .module or .cls?
self.function = None # test function, set before execution.
def fillfixtures(self, params):
params = self._fixtures_prepend + params
specs = _fallback.resolve_fixtures(params)
for spec in specs:
self._context.execute(spec, self.function)
def getfixturevalue(self, argname):
spec = _fallback.lookup(argname)
if not spec:
assert argname == 'request'
return self
value, ok = self._context.cached_result(spec)
if not ok:
# If getfixturevalue is called directly from a setUp function, the
# fixture value might not have computed before, so evaluate it now.
# As the test function is not available, use None.
self._context.execute(spec, test_fn=None)
value, ok = self._context.cached_result(spec)
assert ok, 'Failed to execute fixture %s' % (spec,)
return value
def destroy(self):
self._context.destroy()
def addfinalizer(self, finalizer):
self._context.finalizers.append(finalizer)
@property
def instance(self):
return self.function.__self__
@property
def config(self):
'''The pytest config object associated with this request.'''
return _config
def _patch_unittest_testcase_class(cls):
'''
Patch the setUp and tearDown methods of the unittest.TestCase such that the
fixtures are properly setup and destroyed.
'''
def setUp(self):
assert _session_context, 'must call create_session() first!'
function_context = _ExecutionScope('function', _session_context)
req = _FixtureRequest(function_context)
req._fixtures_prepend = getattr(self, '_fixtures_prepend', [])
self._fixture_request = req
self._orig_setUp()
def tearDown(self):
try:
self._orig_tearDown()
finally:
self._fixture_request.destroy()
# Only the leaf test case class should be decorated!
assert not hasattr(cls, '_orig_setUp')
assert not hasattr(cls, '_orig_tearDown')
cls._orig_setUp, cls.setUp = cls.setUp, setUp
cls._orig_tearDown, cls.tearDown = cls.tearDown, tearDown
class _Config(object):
def __init__(self, args):
assert isinstance(args, argparse.Namespace)
self.args = args
def getoption(self, name, default):
'''Partial emulation for pytest Config.getoption.'''
name = name.lstrip('-').replace('-', '_')
return getattr(self.args, name, default)
_fallback = None
_session_context = None
_config = None
def init_fallback_fixtures_once():
global _fallback
assert not _use_native_pytest
if _fallback:
return
_fallback = _FixturesManager()
# Register standard fixtures here as needed
def create_session(args=None):
'''Start a test session where args is from argparse.'''
global _session_context, _config
assert not _use_native_pytest
_session_context = _ExecutionScope('session', None)
if args is None:
args = argparse.Namespace()
_config = _Config(args)
def destroy_session():
global _session_context
assert not _use_native_pytest
_session_context = None
def skip(msg):
'''Skip the executing test with the given message.'''
if _use_native_pytest:
pytest.skip(msg)
else:
raise unittest.SkipTest(msg)
```
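A hedged sketch of the fallback path above being used without pytest: a session-scoped fixture plus a unittest test case, run after `create_session()`. Names are illustrative and the script is assumed to sit next to `fixtures.py`:

```python
import unittest
import fixtures

@fixtures.fixture(scope='session')
def greeting():
    yield 'hello'  # code after the yield would run as teardown

@fixtures.uses_fixtures
class CaseGreeting(unittest.TestCase):
    def test_greeting(self, greeting):
        self.assertEqual(greeting, 'hello')

if __name__ == '__main__':
    fixtures.create_session()   # required before any fixture-using test runs
    try:
        unittest.main()
    finally:
        fixtures.destroy_session()
```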
#### File: WiresharkDissectorFoo/test/subprocesstest.py
```python
import difflib
import io
import os
import os.path
import re
import subprocess
import sys
import unittest
# To do:
# - Add a subprocesstest.SkipUnlessCapture decorator?
# - Try to catch crashes? See the comments below in waitProcess.
process_timeout = 300 # Seconds
def cat_dhcp_command(mode):
'''Create a command string for dumping dhcp.pcap to stdout'''
# XXX Do this in Python in a thread?
sd_cmd = ''
if sys.executable:
sd_cmd = '"{}" '.format(sys.executable)
this_dir = os.path.dirname(__file__)
sd_cmd += os.path.join(this_dir, 'util_dump_dhcp_pcap.py ' + mode)
return sd_cmd
def cat_cap_file_command(cap_files):
'''Create a command string for dumping one or more capture files to stdout'''
# XXX Do this in Python in a thread?
if isinstance(cap_files, str):
cap_files = [ cap_files ]
quoted_paths = ' '.join('"{}"'.format(cap_file) for cap_file in cap_files)
if sys.platform.startswith('win32'):
# https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-xp/bb491026(v=technet.10)
# says that the `type` command "displays the contents of a text
# file." Copy to the console instead.
return 'copy {} CON'.format(quoted_paths)
return 'cat {}'.format(quoted_paths)
class LoggingPopen(subprocess.Popen):
'''Run a process using subprocess.Popen. Capture and log its output.
Stdout and stderr are captured to memory and decoded as UTF-8. The
program command and output is written to log_fd.
'''
def __init__(self, proc_args, *args, **kwargs):
self.log_fd = kwargs.pop('log_fd', None)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
# Make sure communicate() gives us bytes.
kwargs['universal_newlines'] = False
self.cmd_str = 'command ' + repr(proc_args)
super().__init__(proc_args, *args, **kwargs)
self.stdout_str = ''
self.stderr_str = ''
def wait_and_log(self):
'''Wait for the process to finish and log its output.'''
out_data, err_data = self.communicate(timeout=process_timeout)
out_log = out_data.decode('UTF-8', 'replace')
err_log = err_data.decode('UTF-8', 'replace')
self.log_fd.flush()
self.log_fd.write('-- Begin stdout for {} --\n'.format(self.cmd_str))
self.log_fd.write(out_log)
self.log_fd.write('-- End stdout for {} --\n'.format(self.cmd_str))
self.log_fd.write('-- Begin stderr for {} --\n'.format(self.cmd_str))
self.log_fd.write(err_log)
self.log_fd.write('-- End stderr for {} --\n'.format(self.cmd_str))
self.log_fd.flush()
# Throwing a UnicodeDecodeError exception here is arguably a good thing.
self.stdout_str = out_data.decode('UTF-8', 'strict')
self.stderr_str = err_data.decode('UTF-8', 'strict')
def stop_process(self, kill=False):
'''Stop the process immediately.'''
if kill:
super().kill()
else:
super().terminate()
def terminate(self):
'''Terminate the process. Do not log its output.'''
# XXX Currently unused.
self.stop_process(kill=False)
def kill(self):
'''Kill the process. Do not log its output.'''
self.stop_process(kill=True)
class SubprocessTestCase(unittest.TestCase):
'''Run a program and gather its stdout and stderr.'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.exit_ok = 0
self.exit_command_line = 1
self.exit_error = 2
self.exit_code = None
self.log_fname = None
self.log_fd = None
self.processes = []
self.cleanup_files = []
self.dump_files = []
def log_fd_write_bytes(self, log_data):
self.log_fd.write(log_data)
def filename_from_id(self, filename):
'''Generate a filename prefixed with our test ID.'''
id_filename = self.id() + '.' + filename
if id_filename not in self.cleanup_files:
self.cleanup_files.append(id_filename)
return id_filename
def kill_processes(self):
'''Kill any processes we've opened so far'''
for proc in self.processes:
try:
proc.kill()
except:
pass
def setUp(self):
"""
Set up a single test. Opens a log file and add it to the cleanup list.
"""
self.processes = []
self.log_fname = self.filename_from_id('log')
# Our command line utilities generate UTF-8. The log file encoding
# needs to match that.
# XXX newline='\n' works for now, but we might have to do more work
# to handle line endings in the future.
self.log_fd = io.open(self.log_fname, 'w', encoding='UTF-8', newline='\n')
self.cleanup_files.append(self.log_fname)
def _last_test_failed(self):
"""Check for non-skipped tests that resulted in errors."""
# The test outcome is not available via the public unittest API, so
# check a private property, "_outcome", set by unittest.TestCase.run.
# It remains None when running in debug mode (`pytest --pdb`).
# The property is available since Python 3.4 until at least Python 3.7.
if self._outcome:
for test_case, exc_info in self._outcome.errors:
if exc_info:
return True
# No errors occurred or running in debug mode.
return False
def tearDown(self):
"""
Tears down a single test. Kills stray processes and closes the log file.
On errors, display the log contents. On success, remove temporary files.
"""
self.kill_processes()
self.log_fd.close()
if self._last_test_failed():
self.dump_files.append(self.log_fname)
# Leave some evidence behind.
self.cleanup_files = []
print('\nProcess output for {}:'.format(self.id()))
with io.open(self.log_fname, 'r', encoding='UTF-8', errors='backslashreplace') as log_fd:
for line in log_fd:
sys.stdout.write(line)
for filename in self.cleanup_files:
try:
os.unlink(filename)
except OSError:
pass
self.cleanup_files = []
def getCaptureInfo(self, capinfos_args=None, cap_file=None):
'''Run capinfos on a capture file and log its output.
capinfos_args must be a sequence.
Default cap_file is <test id>.testout.pcap.'''
# XXX convert users to use a new fixture instead of this function.
cmd_capinfos = self._fixture_request.getfixturevalue('cmd_capinfos')
if not cap_file:
cap_file = self.filename_from_id('testout.pcap')
self.log_fd.write('\nOutput of {0} {1}:\n'.format(cmd_capinfos, cap_file))
capinfos_cmd = [cmd_capinfos]
if capinfos_args is not None:
capinfos_cmd += capinfos_args
capinfos_cmd.append(cap_file)
capinfos_data = subprocess.check_output(capinfos_cmd)
capinfos_stdout = capinfos_data.decode('UTF-8', 'replace')
self.log_fd.write(capinfos_stdout)
return capinfos_stdout
def checkPacketCount(self, num_packets, cap_file=None):
'''Make sure a capture file contains a specific number of packets.'''
got_num_packets = False
capinfos_testout = self.getCaptureInfo(cap_file=cap_file)
count_pat = r'Number of packets:\s+{}'.format(num_packets)
if re.search(count_pat, capinfos_testout):
got_num_packets = True
self.assertTrue(got_num_packets, 'Failed to capture exactly {} packets'.format(num_packets))
def countOutput(self, search_pat=None, count_stdout=True, count_stderr=False, proc=None):
'''Returns the number of output lines (search_pat=None), otherwise returns a match count.'''
match_count = 0
self.assertTrue(count_stdout or count_stderr, 'No output to count.')
if proc is None:
proc = self.processes[-1]
out_data = ''
if count_stdout:
out_data = proc.stdout_str
if count_stderr:
out_data += proc.stderr_str
if search_pat is None:
return len(out_data.splitlines())
search_re = re.compile(search_pat)
for line in out_data.splitlines():
if search_re.search(line):
match_count += 1
return match_count
def grepOutput(self, search_pat, proc=None):
return self.countOutput(search_pat, count_stderr=True, proc=proc) > 0
def diffOutput(self, blob_a, blob_b, *args, **kwargs):
'''Check for differences between blob_a and blob_b. Return False and log a unified diff if they differ.
blob_a and blob_b must be UTF-8 strings.'''
lines_a = blob_a.splitlines()
lines_b = blob_b.splitlines()
diff = '\n'.join(list(difflib.unified_diff(lines_a, lines_b, *args, **kwargs)))
if len(diff) > 0:
self.log_fd.flush()
self.log_fd.write('-- Begin diff output --\n')
self.log_fd.writelines(diff)
self.log_fd.write('-- End diff output --\n')
return False
return True
def startProcess(self, proc_args, stdin=None, env=None, shell=False):
'''Start a process in the background. Returns a subprocess.Popen object.
You typically wait for it using waitProcess() or assertWaitProcess().'''
if env is None:
# Apply default test environment if no override is provided.
env = getattr(self, 'injected_test_env', None)
# Not all tests need test_env, but those that use runProcess or
# startProcess must either pass an explicit environment or load the
# fixture (via a test method parameter or class decorator).
assert not (env is None and hasattr(self, '_fixture_request')), \
"Decorate class with @fixtures.mark_usefixtures('test_env')"
proc = LoggingPopen(proc_args, stdin=stdin, env=env, shell=shell, log_fd=self.log_fd)
self.processes.append(proc)
return proc
def waitProcess(self, process):
'''Wait for a process to finish.'''
process.wait_and_log()
# XXX The shell version ran processes using a script called run_and_catch_crashes
# which looked for core dumps and printed stack traces if found. We might want
# to do something similar here. This may not be easy on modern Ubuntu systems,
# which default to using Apport: https://wiki.ubuntu.com/Apport
def assertWaitProcess(self, process, expected_return=0):
'''Wait for a process to finish and check its exit code.'''
process.wait_and_log()
self.assertEqual(process.returncode, expected_return)
def runProcess(self, args, env=None, shell=False):
'''Start a process and wait for it to finish.'''
process = self.startProcess(args, env=env, shell=shell)
process.wait_and_log()
return process
def assertRun(self, args, env=None, shell=False, expected_return=0):
'''Start a process and wait for it to finish. Check its return code.'''
process = self.runProcess(args, env=env, shell=shell)
self.assertEqual(process.returncode, expected_return)
return process
```
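A hedged sketch of a suite built on `SubprocessTestCase`; it assumes the `test_env` and `cmd_tshark` fixtures provided elsewhere in this test framework (see the assert message in `startProcess`), so it only runs under the suite's conftest:

```python
import fixtures
import subprocesstest

@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_example(subprocesstest.SubprocessTestCase):
    def test_tshark_version(self, cmd_tshark):
        # Run "tshark -v" and check that the version banner was printed.
        self.assertRun((cmd_tshark, '-v'))
        self.assertTrue(self.grepOutput('TShark'))
```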
#### File: WiresharkDissectorFoo/test/suite_capture.py
```python
import fixtures
import glob
import hashlib
import os
import socket
import subprocess
import subprocesstest
import sys
import threading
import time
import uuid
capture_duration = 5
testout_pcap = 'testout.pcap'
testout_pcapng = 'testout.pcapng'
snapshot_len = 96
class UdpTrafficGenerator(threading.Thread):
def __init__(self):
super().__init__(daemon=True)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.stopped = False
def run(self):
while not self.stopped:
time.sleep(.05)
self.sock.sendto(b'Wireshark test\n', ('127.0.0.1', 9))
def stop(self):
if not self.stopped:
self.stopped = True
self.join()
@fixtures.fixture
def traffic_generator():
'''
Traffic generator factory. Invoking it returns a tuple (start_func, cfilter)
where cfilter is a capture filter to match the generated traffic.
start_func can be invoked to start generating traffic and returns a function
which can be used to stop traffic generation early.
Currently generates a bunch of UDP traffic to localhost.
'''
threads = []
def start_processes():
thread = UdpTrafficGenerator()
thread.start()
threads.append(thread)
return thread.stop
try:
yield start_processes, 'udp port 9'
finally:
for thread in threads:
thread.stop()
@fixtures.fixture(scope='session')
def wireshark_k(wireshark_command):
return tuple(list(wireshark_command) + ['-k'])
def capture_command(*cmd_args, shell=False):
if type(cmd_args[0]) != str:
# Assume something like ['wireshark', '-k']
cmd_args = list(cmd_args[0]) + list(cmd_args)[1:]
if shell:
cmd_args = ' '.join(cmd_args)
return cmd_args
@fixtures.fixture
def check_capture_10_packets(capture_interface, cmd_dumpcap, traffic_generator):
start_traffic, cfilter = traffic_generator
def check_capture_10_packets_real(self, cmd=None, to_stdout=False):
self.assertIsNotNone(cmd)
testout_file = self.filename_from_id(testout_pcap)
stop_traffic = start_traffic()
if to_stdout:
capture_proc = self.runProcess(capture_command(cmd,
'-i', '"{}"'.format(capture_interface),
'-p',
'-w', '-',
'-c', '10',
'-a', 'duration:{}'.format(capture_duration),
'-f', '"{}"'.format(cfilter),
'>', testout_file,
shell=True
),
shell=True
)
else:
capture_proc = self.runProcess(capture_command(cmd,
'-i', capture_interface,
'-p',
'-w', testout_file,
'-c', '10',
'-a', 'duration:{}'.format(capture_duration),
'-f', cfilter,
))
stop_traffic()
capture_returncode = capture_proc.returncode
if capture_returncode != 0:
self.log_fd.write('{} -D output:\n'.format(cmd))
self.runProcess((cmd, '-D'))
self.assertEqual(capture_returncode, 0)
self.checkPacketCount(10)
return check_capture_10_packets_real
@fixtures.fixture
def check_capture_fifo(cmd_dumpcap):
if sys.platform == 'win32':
fixtures.skip('Test requires OS fifo support.')
def check_capture_fifo_real(self, cmd=None):
self.assertIsNotNone(cmd)
testout_file = self.filename_from_id(testout_pcap)
fifo_file = self.filename_from_id('testout.fifo')
try:
# If a previous test left its fifo laying around, e.g. from a failure, remove it.
os.unlink(fifo_file)
except:
pass
os.mkfifo(fifo_file)
slow_dhcp_cmd = subprocesstest.cat_dhcp_command('slow')
fifo_proc = self.startProcess(
('{0} > {1}'.format(slow_dhcp_cmd, fifo_file)),
shell=True)
capture_proc = self.assertRun(capture_command(cmd,
'-i', fifo_file,
'-p',
'-w', testout_file,
'-a', 'duration:{}'.format(capture_duration),
))
fifo_proc.kill()
self.assertTrue(os.path.isfile(testout_file))
self.checkPacketCount(8)
return check_capture_fifo_real
@fixtures.fixture
def check_capture_stdin(cmd_dumpcap):
# Capturing always requires dumpcap, hence the dependency on it.
def check_capture_stdin_real(self, cmd=None):
# Similar to suite_io.check_io_4_packets.
self.assertIsNotNone(cmd)
testout_file = self.filename_from_id(testout_pcap)
slow_dhcp_cmd = subprocesstest.cat_dhcp_command('slow')
capture_cmd = capture_command(cmd,
'-i', '-',
'-w', testout_file,
'-a', 'duration:{}'.format(capture_duration),
shell=True
)
is_gui = type(cmd) != str and '-k' in cmd[0]
if is_gui:
capture_cmd += ' -o console.log.level:127'
pipe_proc = self.assertRun(slow_dhcp_cmd + ' | ' + capture_cmd, shell=True)
if is_gui:
self.assertTrue(self.grepOutput('Wireshark is up and ready to go'), 'No startup message.')
self.assertTrue(self.grepOutput('Capture started'), 'No capture start message.')
self.assertTrue(self.grepOutput('Capture stopped'), 'No capture stop message.')
self.assertTrue(os.path.isfile(testout_file))
self.checkPacketCount(8)
return check_capture_stdin_real
@fixtures.fixture
def check_capture_read_filter(capture_interface, traffic_generator):
start_traffic, cfilter = traffic_generator
def check_capture_read_filter_real(self, cmd=None):
self.assertIsNotNone(cmd)
testout_file = self.filename_from_id(testout_pcap)
stop_traffic = start_traffic()
capture_proc = self.assertRun(capture_command(cmd,
'-i', capture_interface,
'-p',
'-w', testout_file,
'-2',
'-R', 'dcerpc.cn_call_id==123456', # Something unlikely.
'-c', '10',
'-a', 'duration:{}'.format(capture_duration),
'-f', cfilter,
))
stop_traffic()
self.checkPacketCount(0)
return check_capture_read_filter_real
@fixtures.fixture
def check_capture_snapshot_len(capture_interface, cmd_tshark, traffic_generator):
start_traffic, cfilter = traffic_generator
def check_capture_snapshot_len_real(self, cmd=None):
self.assertIsNotNone(cmd)
stop_traffic = start_traffic()
testout_file = self.filename_from_id(testout_pcap)
capture_proc = self.assertRun(capture_command(cmd,
'-i', capture_interface,
'-p',
'-w', testout_file,
'-s', str(snapshot_len),
'-a', 'duration:{}'.format(capture_duration),
'-f', cfilter,
))
stop_traffic()
self.assertTrue(os.path.isfile(testout_file))
# Use tshark to filter out all packets larger than 68 bytes.
testout2_file = self.filename_from_id('testout2.pcap')
filter_proc = self.assertRun((cmd_tshark,
'-r', testout_file,
'-w', testout2_file,
'-Y', 'frame.cap_len>{}'.format(snapshot_len),
))
self.checkPacketCount(0, cap_file=testout2_file)
return check_capture_snapshot_len_real
@fixtures.fixture
def check_dumpcap_autostop_stdin(cmd_dumpcap):
def check_dumpcap_autostop_stdin_real(self, packets=None, filesize=None):
# Similar to check_capture_stdin.
testout_file = self.filename_from_id(testout_pcap)
cat100_dhcp_cmd = subprocesstest.cat_dhcp_command('cat100')
condition='oops:invalid'
self.assertTrue(packets is not None or filesize is not None, 'Need one of packets or filesize')
self.assertFalse(packets is not None and filesize is not None, 'Need one of packets or filesize')
if packets is not None:
condition = 'packets:{}'.format(packets)
elif filesize is not None:
condition = 'filesize:{}'.format(filesize)
capture_cmd = ' '.join((cmd_dumpcap,
'-i', '-',
'-w', testout_file,
'-a', condition,
))
pipe_proc = self.assertRun(cat100_dhcp_cmd + ' | ' + capture_cmd, shell=True)
self.assertTrue(os.path.isfile(testout_file))
if packets is not None:
self.checkPacketCount(packets)
elif filesize is not None:
capturekb = os.path.getsize(testout_file) / 1000
self.assertGreaterEqual(capturekb, filesize)
return check_dumpcap_autostop_stdin_real
@fixtures.fixture
def check_dumpcap_ringbuffer_stdin(cmd_dumpcap):
def check_dumpcap_ringbuffer_stdin_real(self, packets=None, filesize=None):
# Similar to check_capture_stdin.
rb_unique = 'dhcp_rb_' + uuid.uuid4().hex[:6] # Random ID
testout_file = '{}.{}.pcapng'.format(self.id(), rb_unique)
testout_glob = '{}.{}_*.pcapng'.format(self.id(), rb_unique)
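        # dumpcap's ring buffer inserts a file number and timestamp before the
        # suffix, so the rotated output files are collected with this glob.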
cat100_dhcp_cmd = subprocesstest.cat_dhcp_command('cat100')
condition='oops:invalid'
self.assertTrue(packets is not None or filesize is not None, 'Need one of packets or filesize')
        self.assertFalse(packets is not None and filesize is not None, 'Need only one of packets or filesize')
if packets is not None:
condition = 'packets:{}'.format(packets)
elif filesize is not None:
condition = 'filesize:{}'.format(filesize)
capture_cmd = ' '.join((cmd_dumpcap,
'-i', '-',
'-w', testout_file,
'-a', 'files:2',
'-b', condition,
))
pipe_proc = self.assertRun(cat100_dhcp_cmd + ' | ' + capture_cmd, shell=True)
rb_files = glob.glob(testout_glob)
for rbf in rb_files:
self.cleanup_files.append(rbf)
self.assertEqual(len(rb_files), 2)
for rbf in rb_files:
self.assertTrue(os.path.isfile(rbf))
if packets is not None:
self.checkPacketCount(packets, cap_file=rbf)
elif filesize is not None:
capturekb = os.path.getsize(rbf) / 1000
self.assertGreaterEqual(capturekb, filesize)
return check_dumpcap_ringbuffer_stdin_real
@fixtures.fixture
def check_dumpcap_pcapng_sections(cmd_dumpcap, cmd_tshark, capture_file):
if sys.platform == 'win32':
fixtures.skip('Test requires OS fifo support.')
def check_dumpcap_pcapng_sections_real(self, multi_input=False, multi_output=False):
# Make sure we always test multiple SHBs in an input.
in_files_l = [ [
capture_file('many_interfaces.pcapng.1'),
capture_file('many_interfaces.pcapng.2')
] ]
if multi_input:
in_files_l.append([ capture_file('many_interfaces.pcapng.3') ])
fifo_files = []
fifo_procs = []
# Default values for our validity tests
check_val_d = {
'filename': None,
'packet_count': 0,
'idb_count': 0,
'ua_pt1_count': 0,
'ua_pt2_count': 0,
'ua_pt3_count': 0,
'ua_dc_count': 0,
}
check_vals = [ check_val_d ]
for in_files in in_files_l:
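            # Feed each group of input files through its own fifo so dumpcap
            # reads them as if they were live capture sources.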
fifo_file = self.filename_from_id('dumpcap_pcapng_sections_{}.fifo'.format(len(fifo_files) + 1))
fifo_files.append(fifo_file)
# If a previous test left its fifo laying around, e.g. from a failure, remove it.
try:
os.unlink(fifo_file)
except: pass
os.mkfifo(fifo_file)
cat_cmd = subprocesstest.cat_cap_file_command(in_files)
fifo_procs.append(self.startProcess(('{0} > {1}'.format(cat_cmd, fifo_file)), shell=True))
if multi_output:
rb_unique = 'sections_rb_' + uuid.uuid4().hex[:6] # Random ID
testout_glob = '{}.{}_*.pcapng'.format(self.id(), rb_unique)
testout_file = '{}.{}.pcapng'.format(self.id(), rb_unique)
check_vals.append(check_val_d.copy())
# check_vals[]['filename'] will be filled in below
else:
testout_file = self.filename_from_id(testout_pcapng)
check_vals[0]['filename'] = testout_file
# Capture commands
if not multi_input and not multi_output:
# Passthrough SHBs, single output file
capture_cmd_args = (
'-i', fifo_files[0],
'-w', testout_file
)
check_vals[0]['packet_count'] = 79
check_vals[0]['idb_count'] = 22
check_vals[0]['ua_pt1_count'] = 1
check_vals[0]['ua_pt2_count'] = 1
elif not multi_input and multi_output:
# Passthrough SHBs, multiple output files
capture_cmd_args = (
'-i', fifo_files[0],
'-w', testout_file,
'-a', 'files:2',
'-b', 'packets:53'
)
check_vals[0]['packet_count'] = 53
check_vals[0]['idb_count'] = 11
check_vals[0]['ua_pt1_count'] = 1
check_vals[1]['packet_count'] = 26
check_vals[1]['idb_count'] = 22
check_vals[1]['ua_pt1_count'] = 1
check_vals[1]['ua_pt2_count'] = 1
elif multi_input and not multi_output:
# Dumpcap SHBs, single output file
capture_cmd_args = (
'-i', fifo_files[0],
'-i', fifo_files[1],
'-w', testout_file
)
check_vals[0]['packet_count'] = 88
check_vals[0]['idb_count'] = 35
check_vals[0]['ua_dc_count'] = 1
else:
# Dumpcap SHBs, multiple output files
capture_cmd_args = (
'-i', fifo_files[0],
'-i', fifo_files[1],
'-w', testout_file,
'-a', 'files:2',
'-b', 'packets:53'
)
check_vals[0]['packet_count'] = 53
check_vals[0]['idb_count'] = 13
check_vals[0]['ua_dc_count'] = 1
check_vals[1]['packet_count'] = 35
check_vals[1]['idb_count'] = 35
check_vals[1]['ua_dc_count'] = 1
capture_cmd = capture_command(cmd_dumpcap, *capture_cmd_args)
capture_proc = self.assertRun(capture_cmd)
for fifo_proc in fifo_procs: fifo_proc.kill()
rb_files = []
if multi_output:
rb_files = sorted(glob.glob(testout_glob))
self.assertEqual(len(rb_files), 2)
check_vals[0]['filename'] = rb_files[0]
check_vals[1]['filename'] = rb_files[1]
for rbf in rb_files:
self.cleanup_files.append(rbf)
self.assertTrue(os.path.isfile(rbf))
# Output tests
if not multi_input and not multi_output:
# Check strict bit-for-bit passthrough.
in_hash = hashlib.sha256()
out_hash = hashlib.sha256()
for in_file in in_files_l[0]:
in_cap_file = capture_file(in_file)
with open(in_cap_file, 'rb') as f:
in_hash.update(f.read())
with open(testout_file, 'rb') as f:
out_hash.update(f.read())
self.assertEqual(in_hash.hexdigest(), out_hash.hexdigest())
# many_interfaces.pcapng.1 : 64 packets written by "Passthrough test #1"
# many_interfaces.pcapng.2 : 15 packets written by "Passthrough test #2"
# many_interfaces.pcapng.3 : 9 packets written by "Passthrough test #3"
# Each has 11 interfaces.
idb_compare_eq = True
if multi_input and multi_output:
# Having multiple inputs forces the use of threads. In our
# case this means that non-packet block counts in the first
# file in is nondeterministic.
idb_compare_eq = False
for check_val in check_vals:
self.checkPacketCount(check_val['packet_count'], cap_file=check_val['filename'])
tshark_proc = self.assertRun(capture_command(cmd_tshark,
'-r', check_val['filename'],
'-V',
'-X', 'read_format:MIME Files Format'
))
# XXX Are there any other sanity checks we should run?
if idb_compare_eq:
self.assertEqual(self.countOutput(r'Block: Interface Description Block',
proc=tshark_proc), check_val['idb_count'])
else:
self.assertGreaterEqual(self.countOutput(r'Block: Interface Description Block',
proc=tshark_proc), check_val['idb_count'])
idb_compare_eq = True
self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #1',
proc=tshark_proc), check_val['ua_pt1_count'])
self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #2',
proc=tshark_proc), check_val['ua_pt2_count'])
self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #3',
proc=tshark_proc), check_val['ua_pt3_count'])
self.assertEqual(self.countOutput(r'Option: User Application = Dumpcap \(Wireshark\)',
proc=tshark_proc), check_val['ua_dc_count'])
return check_dumpcap_pcapng_sections_real
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_wireshark_capture(subprocesstest.SubprocessTestCase):
def test_wireshark_capture_10_packets_to_file(self, wireshark_k, check_capture_10_packets, make_screenshot_on_error):
'''Capture 10 packets from the network to a file using Wireshark'''
with make_screenshot_on_error():
check_capture_10_packets(self, cmd=wireshark_k)
# Wireshark doesn't currently support writing to stdout while capturing.
# def test_wireshark_capture_10_packets_to_stdout(self, wireshark_k, check_capture_10_packets):
# '''Capture 10 packets from the network to stdout using Wireshark'''
# check_capture_10_packets(self, cmd=wireshark_k, to_stdout=True)
def test_wireshark_capture_from_fifo(self, wireshark_k, check_capture_fifo, make_screenshot_on_error):
'''Capture from a fifo using Wireshark'''
with make_screenshot_on_error():
check_capture_fifo(self, cmd=wireshark_k)
def test_wireshark_capture_from_stdin(self, wireshark_k, check_capture_stdin, make_screenshot_on_error):
'''Capture from stdin using Wireshark'''
with make_screenshot_on_error():
check_capture_stdin(self, cmd=wireshark_k)
def test_wireshark_capture_snapshot_len(self, wireshark_k, check_capture_snapshot_len, make_screenshot_on_error):
'''Capture truncated packets using Wireshark'''
with make_screenshot_on_error():
check_capture_snapshot_len(self, cmd=wireshark_k)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_tshark_capture(subprocesstest.SubprocessTestCase):
def test_tshark_capture_10_packets_to_file(self, cmd_tshark, check_capture_10_packets):
'''Capture 10 packets from the network to a file using TShark'''
check_capture_10_packets(self, cmd=cmd_tshark)
def test_tshark_capture_10_packets_to_stdout(self, cmd_tshark, check_capture_10_packets):
'''Capture 10 packets from the network to stdout using TShark'''
check_capture_10_packets(self, cmd=cmd_tshark, to_stdout=True)
def test_tshark_capture_from_fifo(self, cmd_tshark, check_capture_fifo):
'''Capture from a fifo using TShark'''
check_capture_fifo(self, cmd=cmd_tshark)
def test_tshark_capture_from_stdin(self, cmd_tshark, check_capture_stdin):
'''Capture from stdin using TShark'''
check_capture_stdin(self, cmd=cmd_tshark)
def test_tshark_capture_snapshot_len(self, cmd_tshark, check_capture_snapshot_len):
'''Capture truncated packets using TShark'''
check_capture_snapshot_len(self, cmd=cmd_tshark)
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_capture(subprocesstest.SubprocessTestCase):
def test_dumpcap_capture_10_packets_to_file(self, cmd_dumpcap, check_capture_10_packets):
'''Capture 10 packets from the network to a file using Dumpcap'''
check_capture_10_packets(self, cmd=cmd_dumpcap)
def test_dumpcap_capture_10_packets_to_stdout(self, cmd_dumpcap, check_capture_10_packets):
'''Capture 10 packets from the network to stdout using Dumpcap'''
check_capture_10_packets(self, cmd=cmd_dumpcap, to_stdout=True)
def test_dumpcap_capture_from_fifo(self, cmd_dumpcap, check_capture_fifo):
'''Capture from a fifo using Dumpcap'''
check_capture_fifo(self, cmd=cmd_dumpcap)
def test_dumpcap_capture_from_stdin(self, cmd_dumpcap, check_capture_stdin):
'''Capture from stdin using Dumpcap'''
check_capture_stdin(self, cmd=cmd_dumpcap)
def test_dumpcap_capture_snapshot_len(self, check_capture_snapshot_len, cmd_dumpcap):
'''Capture truncated packets using Dumpcap'''
check_capture_snapshot_len(self, cmd=cmd_dumpcap)
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_autostop(subprocesstest.SubprocessTestCase):
# duration, filesize, packets, files
def test_dumpcap_autostop_filesize(self, check_dumpcap_autostop_stdin):
'''Capture from stdin using Dumpcap until we reach a file size limit'''
check_dumpcap_autostop_stdin(self, filesize=15)
def test_dumpcap_autostop_packets(self, check_dumpcap_autostop_stdin):
'''Capture from stdin using Dumpcap until we reach a packet limit'''
check_dumpcap_autostop_stdin(self, packets=97) # Last prime before 100. Arbitrary.
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_ringbuffer(subprocesstest.SubprocessTestCase):
# duration, interval, filesize, packets, files
def test_dumpcap_ringbuffer_filesize(self, check_dumpcap_ringbuffer_stdin):
'''Capture from stdin using Dumpcap and write multiple files until we reach a file size limit'''
check_dumpcap_ringbuffer_stdin(self, filesize=15)
def test_dumpcap_ringbuffer_packets(self, check_dumpcap_ringbuffer_stdin):
'''Capture from stdin using Dumpcap and write multiple files until we reach a packet limit'''
check_dumpcap_ringbuffer_stdin(self, packets=47) # Last prime before 50. Arbitrary.
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_pcapng_sections(subprocesstest.SubprocessTestCase):
def test_dumpcap_pcapng_single_in_single_out(self, check_dumpcap_pcapng_sections):
'''Capture from a single pcapng source using Dumpcap and write a single file'''
check_dumpcap_pcapng_sections(self)
def test_dumpcap_pcapng_single_in_multi_out(self, check_dumpcap_pcapng_sections):
'''Capture from a single pcapng source using Dumpcap and write two files'''
check_dumpcap_pcapng_sections(self, multi_output=True)
def test_dumpcap_pcapng_multi_in_single_out(self, check_dumpcap_pcapng_sections):
'''Capture from two pcapng sources using Dumpcap and write a single file'''
check_dumpcap_pcapng_sections(self, multi_input=True)
def test_dumpcap_pcapng_multi_in_multi_out(self, check_dumpcap_pcapng_sections):
'''Capture from two pcapng sources using Dumpcap and write two files'''
check_dumpcap_pcapng_sections(self, multi_input=True, multi_output=True)
```
#### File: test/suite_dfilter/group_time_type.py
```python
import unittest
import fixtures
from suite_dfilter.dfiltertest import *
@fixtures.uses_fixtures
class case_time(unittest.TestCase):
trace_file = "http.pcap"
def test_eq_1(self, checkDFilterCount):
dfilter = 'frame.time == "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 1)
def test_eq_2(self, checkDFilterCount):
dfilter = 'frame.time == "Jan 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 0)
def test_eq_3(self, checkDFilterCount):
dfilter = 'frame.time == "2002-12-31 13:55:31.3"'
checkDFilterCount(dfilter, 1)
def test_ne_1(self, checkDFilterCount):
dfilter = 'frame.time != "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 0)
def test_ne_2(self, checkDFilterCount):
dfilter = 'frame.time != "Jan 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 1)
def test_gt_1(self, checkDFilterCount):
dfilter = 'frame.time > "Dec 31, 2002 13:54:31.3"'
checkDFilterCount(dfilter, 1)
def test_gt_2(self, checkDFilterCount):
dfilter = 'frame.time > "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 0)
def test_gt_3(self, checkDFilterCount):
dfilter = 'frame.time > "Dec 31, 2002 13:56:31.3"'
checkDFilterCount(dfilter, 0)
def test_ge_1(self, checkDFilterCount):
dfilter = 'frame.time >= "Dec 31, 2002 13:54:31.3"'
checkDFilterCount(dfilter, 1)
def test_ge_2(self, checkDFilterCount):
dfilter = 'frame.time >= "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 1)
def test_ge_3(self, checkDFilterCount):
dfilter = 'frame.time >= "Dec 31, 2002 13:56:31.3"'
checkDFilterCount(dfilter, 0)
def test_lt_1(self, checkDFilterCount):
dfilter = 'frame.time < "Dec 31, 2002 13:54:31.3"'
checkDFilterCount(dfilter, 0)
def test_lt_2(self, checkDFilterCount):
dfilter = 'frame.time < "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 0)
def test_lt_3(self, checkDFilterCount):
dfilter = 'frame.time < "Dec 31, 2002 13:56:31.3"'
checkDFilterCount(dfilter, 1)
def test_le_1(self, checkDFilterCount):
dfilter = 'frame.time <= "Dec 31, 2002 13:54:31.3"'
checkDFilterCount(dfilter, 0)
def test_le_2(self, checkDFilterCount):
dfilter = 'frame.time <= "Dec 31, 2002 13:55:31.3"'
checkDFilterCount(dfilter, 1)
def test_le_3(self, checkDFilterCount):
dfilter = 'frame.time <= "Dec 31, 2002 13:56:31.3"'
checkDFilterCount(dfilter, 1)
def test_bad_time_1(self, checkDFilterFail):
# No text is permitted after the time.
dfilter = 'frame.time == "Dec 31, 2002 13:56:31.3 UTC"'
error = '"Dec 31, 2002 13:56:31.3 UTC" is not a valid absolute time. Example: "Nov 12, 1999 08:55:44.123" or "2011-07-04 12:34:56"'
checkDFilterFail(dfilter, error)
def test_bad_time_2(self, checkDFilterFail):
        # Milliseconds can only occur after seconds.
dfilter = 'frame.time == "2002-12-31 13:55.3"'
error = '"2002-12-31 13:55.3" is not a valid absolute time. Example: "Nov 12, 1999 08:55:44.123" or "2011-07-04 12:34:56"'
checkDFilterFail(dfilter, error)
def test_bad_time_3(self, checkDFilterFail):
# Reject months in a different locale (mrt is March in nl_NL.UTF-8).
dfilter = 'frame.time == "mrt 1, 2000 00:00:00"'
error = '"mrt 1, 2000 00:00:00" is not a valid absolute time. Example: "Nov 12, 1999 08:55:44.123" or "2011-07-04 12:34:56"'
checkDFilterFail(dfilter, error)
```
#### File: test/suite_dfilter/group_tvb.py
```python
import unittest
import fixtures
from suite_dfilter.dfiltertest import *
@fixtures.uses_fixtures
class case_tvb(unittest.TestCase):
trace_file = "http.pcap"
def test_eq_1(self, checkDFilterCount):
# We expect 0 because even though this byte
# string matches the 'eth' protocol, protocols cannot
# work in an '==' comparison yet.
dfilter = "eth == 00:e0:81:00:b0:28:00:09:6b:88:f6:c9:08:00"
checkDFilterCount(dfilter, 0)
def test_slice_1(self, checkDFilterCount):
dfilter = "ip[0:2] == 45:00"
checkDFilterCount(dfilter, 1)
def test_slice_2(self, checkDFilterCount):
dfilter = "ip[0:2] == 00:00"
checkDFilterCount(dfilter, 0)
def test_slice_3(self, checkDFilterCount):
dfilter = "ip[2:2] == 00:c1"
checkDFilterCount(dfilter, 1)
@unittest.skip("This doesn't work yet in Wireshark")
def test_slice_4(self, checkDFilterCount):
dfilter = "ip[-5] == 0x86"
checkDFilterCount(dfilter, 0)
@unittest.skip("This doesn't work yet in Wireshark")
def test_slice_5(self, checkDFilterCount):
dfilter = "ip[-1] == 0x86"
checkDFilterCount(dfilter, 1)
def test_contains_1(self, checkDFilterCount):
dfilter = "eth contains 6b"
checkDFilterCount(dfilter, 1)
def test_contains_2(self, checkDFilterCount):
dfilter = "eth contains 09:6b:88"
checkDFilterCount(dfilter, 1)
def test_contains_3(self, checkDFilterCount):
dfilter = "eth contains 00:e0:81:00:b0:28:00:09:6b:88:f5:c9:08:00"
checkDFilterCount(dfilter, 1)
def test_contains_4(self, checkDFilterCount):
dfilter = "eth contains ff:ff:ff"
checkDFilterCount(dfilter, 0)
def test_contains_5(self, checkDFilterCount):
dfilter = 'http contains "HEAD"'
checkDFilterCount(dfilter, 1)
```
#### File: WiresharkDissectorFoo/test/suite_fileformats.py
```python
import os.path
import subprocesstest
import unittest
import fixtures
# XXX Currently unused. It would be nice to be able to use this below.
time_output_args = ('-Tfields', '-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta')
# Microsecond pcap, direct read was used to generate the baseline:
# tshark -Tfields -e frame.number -e frame.time_epoch -e frame.time_delta \
# -r captures/dhcp.pcap > baseline/ff-ts-usec-pcap-direct.txt
baseline_file = 'ff-ts-usec-pcap-direct.txt'
@fixtures.fixture(scope='session')
def fileformats_baseline_str(dirs):
with open(os.path.join(dirs.baseline_dir, baseline_file), 'r') as f:
return f.read()
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_fileformat_pcap(subprocesstest.SubprocessTestCase):
def test_pcap_usec_stdin(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs microsecond pcap stdin'''
capture_proc = self.assertRun(' '.join((cmd_tshark,
'-r', '-',
'-Tfields',
'-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
'<', capture_file('dhcp.pcap')
)),
shell=True)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
def test_pcap_nsec_stdin(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs nanosecond pcap stdin'''
capture_proc = self.assertRun(' '.join((cmd_tshark,
'-r', '-',
'-Tfields',
'-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
'<', capture_file('dhcp-nanosecond.pcap')
)),
shell=True)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
def test_pcap_nsec_direct(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs nanosecond pcap direct'''
capture_proc = self.assertRun((cmd_tshark,
'-r', capture_file('dhcp-nanosecond.pcap'),
'-Tfields',
'-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
),
)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_fileformat_pcapng(subprocesstest.SubprocessTestCase):
def test_pcapng_usec_stdin(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs microsecond pcapng stdin'''
capture_proc = self.assertRun(' '.join((cmd_tshark,
'-r', '-',
'-Tfields',
            '-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
'<', capture_file('dhcp.pcapng')
)),
shell=True)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
def test_pcapng_usec_direct(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs microsecond pcapng direct'''
capture_proc = self.assertRun((cmd_tshark,
'-r', capture_file('dhcp.pcapng'),
'-Tfields',
'-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
),
)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
def test_pcapng_nsec_stdin(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs nanosecond pcapng stdin'''
capture_proc = self.assertRun(' '.join((cmd_tshark,
'-r', '-',
'-Tfields',
            '-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
'<', capture_file('dhcp-nanosecond.pcapng')
)),
shell=True)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
def test_pcapng_nsec_direct(self, cmd_tshark, capture_file, fileformats_baseline_str):
'''Microsecond pcap direct vs nanosecond pcapng direct'''
capture_proc = self.assertRun((cmd_tshark,
'-r', capture_file('dhcp-nanosecond.pcapng'),
'-Tfields',
'-e', 'frame.number', '-e', 'frame.time_epoch', '-e', 'frame.time_delta',
),
)
self.assertTrue(self.diffOutput(capture_proc.stdout_str, fileformats_baseline_str, 'tshark', baseline_file))
@fixtures.fixture
def check_pcapng_dsb_fields(request, cmd_tshark):
'''Factory that checks whether the DSB within the capture file matches.'''
self = request.instance
def check_dsb_fields_real(outfile, fields):
proc = self.assertRun((cmd_tshark,
'-r', outfile,
'-Xread_format:MIME Files Format',
'-Tfields',
'-e', 'pcapng.dsb.secrets_type',
'-e', 'pcapng.dsb.secrets_length',
'-e', 'pcapng.dsb.secrets_data',
'-Y', 'pcapng.dsb.secrets_data'
))
# Convert "t1,t2 l1,l2 v1,2" -> [(t1, l1, v1), (t2, l2, v2)]
output = proc.stdout_str.strip()
actual = list(zip(*[x.split(",") for x in output.split('\t')]))
def format_field(field):
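            # Render an expected (type, length, value) tuple in the textual form
            # tshark prints: hex block type, decimal length, hex-encoded data.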
t, l, v = field
v_hex = ''.join('%02x' % c for c in v)
return ('0x%08x' % t, str(l), v_hex)
fields = [format_field(field) for field in fields]
self.assertEqual(fields, actual)
return check_dsb_fields_real
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_fileformat_pcapng_dsb(subprocesstest.SubprocessTestCase):
def test_pcapng_dsb_1(self, cmd_tshark, dirs, capture_file, check_pcapng_dsb_fields):
'''Check that DSBs are preserved while rewriting files.'''
dsb_keys1 = os.path.join(dirs.key_dir, 'tls12-dsb-1.keys')
dsb_keys2 = os.path.join(dirs.key_dir, 'tls12-dsb-2.keys')
outfile = self.filename_from_id('tls12-dsb-same.pcapng')
self.assertRun((cmd_tshark,
'-r', capture_file('tls12-dsb.pcapng'),
'-w', outfile,
))
with open(dsb_keys1, 'r') as f:
dsb1_contents = f.read().encode('utf8')
with open(dsb_keys2, 'r') as f:
dsb2_contents = f.read().encode('utf8')
check_pcapng_dsb_fields(outfile, (
(0x544c534b, len(dsb1_contents), dsb1_contents),
(0x544c534b, len(dsb2_contents), dsb2_contents),
))
def test_pcapng_dsb_2(self, cmd_editcap, dirs, capture_file, check_pcapng_dsb_fields):
'''Insert a single DSB into a pcapng file.'''
key_file = os.path.join(dirs.key_dir, 'dhe1_keylog.dat')
outfile = self.filename_from_id('dhe1-dsb.pcapng')
self.assertRun((cmd_editcap,
'--inject-secrets', 'tls,%s' % key_file,
capture_file('dhe1.pcapng.gz'), outfile
))
with open(key_file, 'rb') as f:
keylog_contents = f.read()
check_pcapng_dsb_fields(outfile, (
(0x544c534b, len(keylog_contents), keylog_contents),
))
def test_pcapng_dsb_3(self, cmd_editcap, dirs, capture_file, check_pcapng_dsb_fields):
'''Insert two DSBs into a pcapng file.'''
key_file1 = os.path.join(dirs.key_dir, 'dhe1_keylog.dat')
key_file2 = os.path.join(dirs.key_dir, 'http2-data-reassembly.keys')
outfile = self.filename_from_id('dhe1-dsb.pcapng')
self.assertRun((cmd_editcap,
'--inject-secrets', 'tls,%s' % key_file1,
'--inject-secrets', 'tls,%s' % key_file2,
capture_file('dhe1.pcapng.gz'), outfile
))
with open(key_file1, 'rb') as f:
keylog1_contents = f.read()
with open(key_file2, 'rb') as f:
keylog2_contents = f.read()
check_pcapng_dsb_fields(outfile, (
(0x544c534b, len(keylog1_contents), keylog1_contents),
(0x544c534b, len(keylog2_contents), keylog2_contents),
))
def test_pcapng_dsb_4(self, cmd_editcap, dirs, capture_file, check_pcapng_dsb_fields):
'''Insert a single DSB into a pcapng file with existing DSBs.'''
dsb_keys1 = os.path.join(dirs.key_dir, 'tls12-dsb-1.keys')
dsb_keys2 = os.path.join(dirs.key_dir, 'tls12-dsb-2.keys')
key_file = os.path.join(dirs.key_dir, 'dhe1_keylog.dat')
outfile = self.filename_from_id('tls12-dsb-extra.pcapng')
self.assertRun((cmd_editcap,
'--inject-secrets', 'tls,%s' % key_file,
capture_file('tls12-dsb.pcapng'), outfile
))
with open(dsb_keys1, 'r') as f:
dsb1_contents = f.read().encode('utf8')
with open(dsb_keys2, 'r') as f:
dsb2_contents = f.read().encode('utf8')
with open(key_file, 'rb') as f:
keylog_contents = f.read()
# New DSBs are inserted before the first record. Due to the current
# implementation, this is inserted before other (existing) DSBs. This
# might change in the future if it is deemed more logical.
check_pcapng_dsb_fields(outfile, (
(0x544c534b, len(keylog_contents), keylog_contents),
(0x544c534b, len(dsb1_contents), dsb1_contents),
(0x544c534b, len(dsb2_contents), dsb2_contents),
))
def test_pcapng_dsb_bad_key(self, cmd_editcap, dirs, capture_file, check_pcapng_dsb_fields):
        '''Insertion of an RSA key file is not very effective.'''
rsa_keyfile = os.path.join(dirs.key_dir, 'rsasnakeoil2.key')
p12_keyfile = os.path.join(dirs.key_dir, 'key.p12')
outfile = self.filename_from_id('rsasnakeoil2-dsb.pcapng')
proc = self.assertRun((cmd_editcap,
'--inject-secrets', 'tls,%s' % rsa_keyfile,
'--inject-secrets', 'tls,%s' % p12_keyfile,
capture_file('rsasnakeoil2.pcap'), outfile
))
self.assertEqual(proc.stderr_str.count('unsupported private key file'), 2)
with open(rsa_keyfile, 'rb') as f:
dsb1_contents = f.read()
with open(p12_keyfile, 'rb') as f:
dsb2_contents = f.read()
check_pcapng_dsb_fields(outfile, (
(0x544c534b, len(dsb1_contents), dsb1_contents),
(0x544c534b, len(dsb2_contents), dsb2_contents),
))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_fileformat_mime(subprocesstest.SubprocessTestCase):
def test_mime_pcapng_gz(self, cmd_tshark, capture_file):
'''Test that the full uncompressed contents is shown.'''
proc = self.assertRun((cmd_tshark,
'-r', capture_file('icmp.pcapng.gz'),
'-Xread_format:MIME Files Format',
'-Tfields', '-e', 'frame.len', '-e', 'pcapng.block.length',
))
self.assertEqual(proc.stdout_str.strip(), '480\t128,128,88,88,132,132,132,132')
```
#### File: WiresharkDissectorFoo/test/suite_follow.py
```python
import subprocesstest
import fixtures
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_follow_tcp(subprocesstest.SubprocessTestCase):
def test_follow_tcp_bad_conditions(self, cmd_tshark, capture_file):
'''Checks whether Follow TCP correctly handles lots of edge cases.'''
# Edge cases include:
# 1. two sequential segments
# 2. out-of-order (swapped two sequential segments)
# 3. Bad overlap (second overlap with different data should be ignored)
# 4. Ignore bad retransmitted data, but extend with remaining data.
# 5. Check handling of overlapping data while fragments are incomplete
# (out-of-order - cannot add fragments to stream)
# 6. lost but acked segments
# 7. lost 3/5 fragments, but acked
# Not checked: lost and not acked (currently truncated, is that OK?)
proc = self.assertRun((cmd_tshark,
'-r', capture_file('tcp-badsegments.pcap'),
'-qz', 'follow,tcp,hex,0',
))
self.assertIn("""\
===================================================================
Follow: tcp,hex
Filter: tcp.stream eq 0
Node 0: 10.0.0.1:32323
Node 1: 10.0.0.2:80
00000000 47 45 54 20 2f 20 48 54 54 50 2f 31 2e 31 0d 0a GET / HT TP/1.1..
00000010 48 6f 73 74 3a 6c 6f 63 61 6c 68 6f 73 74 0d 0a Host:loc alhost..
00000020 58 2d 53 77 61 70 70 65 64 3a 20 31 73 74 0d 0a X-Swappe d: 1st..
00000030 58 2d 53 77 61 70 70 65 64 3a 20 32 6e 64 0d 0a X-Swappe d: 2nd..
00000040 58 2d 4f 76 65 72 6c 61 70 2d 50 61 63 6b 65 74 X-Overla p-Packet
00000050 3a 20 65 78 74 72 61 20 64 61 74 61 2d 2d 0d 0a : extra data--..
00000060 58 2d 4f 6f 4f 2d 4f 76 65 72 6c 61 70 3a 20 74 X-OoO-Ov erlap: t
00000070 68 69 73 20 69 73 20 64 65 6c 61 79 65 64 0d 0a his is d elayed..
00000080 58 2d 4f 6f 4f 2d 4f 76 65 72 6c 61 70 32 3a 20 X-OoO-Ov erlap2:
00000090 73 65 63 6f 6e 64 20 64 65 6c 61 79 65 64 0d 0a second d elayed..
000000A0 58 2d 4f 6f 4f 2d 4f 76 65 72 6c 61 70 33 3a 65 X-OoO-Ov erlap3:e
000000B0 78 74 65 6e 64 20 66 72 61 67 6d 65 6e 74 0d 0a xtend fr agment..
000000C0 5b 33 32 20 62 79 74 65 73 20 6d 69 73 73 69 6e [32 byte s missin
000000D0 67 20 69 6e 20 63 61 70 74 75 72 65 20 66 69 6c g in cap ture fil
000000E0 65 5d 00 e].
000000E3 58 2d 4d 69 73 73 69 6e 67 2d 42 75 74 2d 41 63 X-Missin g-But-Ac
000000F3 6b 65 64 2d 50 72 65 76 69 6f 75 73 3a 31 0d 0a ked-Prev ious:1..
00000103 5b 31 36 20 62 79 74 65 73 20 6d 69 73 73 69 6e [16 byte s missin
00000113 67 20 69 6e 20 63 61 70 74 75 72 65 20 66 69 6c g in cap ture fil
00000123 65 5d 00 e].
00000126 3a :
00000127 5b 31 33 20 62 79 74 65 73 20 6d 69 73 73 69 6e [13 byte s missin
00000137 67 20 69 6e 20 63 61 70 74 75 72 65 20 66 69 6c g in cap ture fil
00000147 65 5d 00 e].
0000014A 0d .
0000014B 5b 31 20 62 79 74 65 73 20 6d 69 73 73 69 6e 67 [1 bytes missing
0000015B 20 69 6e 20 63 61 70 74 75 72 65 20 66 69 6c 65 in capt ure file
0000016B 5d 00 ].
0000016D 58 2d 4d 69 73 73 69 6e 67 2d 33 2d 4f 75 74 2d X-Missin g-3-Out-
0000017D 4f 66 2d 35 2d 42 75 74 2d 41 43 4b 3a 59 0d 0a Of-5-But -ACK:Y..
0000018D 0d 0a ..
===================================================================
""".replace("\r\n", "\n"),
proc.stdout_str.replace("\r\n", "\n"))
```
#### File: WiresharkDissectorFoo/test/suite_sharkd.py
```python
import json
import subprocess
import unittest
import subprocesstest
import fixtures
from matchers import *
@fixtures.fixture(scope='session')
def cmd_sharkd(program):
return program('sharkd')
@fixtures.fixture
def run_sharkd_session(cmd_sharkd, request):
self = request.instance
def run_sharkd_session_real(sharkd_commands):
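        # sharkd speaks a line-oriented JSON protocol: each request is written
        # as one line on stdin, and each non-empty stdout line is decoded as
        # one JSON response.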
sharkd_proc = self.startProcess(
(cmd_sharkd, '-'), stdin=subprocess.PIPE)
sharkd_proc.stdin.write('\n'.join(sharkd_commands).encode('utf8'))
self.waitProcess(sharkd_proc)
self.assertIn('Hello in child.', sharkd_proc.stderr_str)
outputs = []
for line in sharkd_proc.stdout_str.splitlines():
line = line.strip()
if not line:
continue
try:
jdata = json.loads(line)
except json.JSONDecodeError:
self.fail('Invalid JSON: %r' % line)
outputs.append(jdata)
return tuple(outputs)
return run_sharkd_session_real
@fixtures.fixture
def check_sharkd_session(run_sharkd_session, request):
self = request.instance
def check_sharkd_session_real(sharkd_commands, expected_outputs):
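        # Encode each request dict as JSON, run them through a single sharkd
        # session, and compare the decoded responses with the expected tuple.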
sharkd_commands = [json.dumps(x) for x in sharkd_commands]
actual_outputs = run_sharkd_session(sharkd_commands)
self.assertEqual(expected_outputs, actual_outputs)
return check_sharkd_session_real
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_sharkd(subprocesstest.SubprocessTestCase):
def test_sharkd_req_load_bad_pcap(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('non-existant.pcap')},
), (
{"err": 2},
))
def test_sharkd_req_status_no_pcap(self, check_sharkd_session):
check_sharkd_session((
{"req": "status"},
), (
{"frames": 0, "duration": 0.0},
))
def test_sharkd_req_status(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "status"},
), (
{"err": 0},
{"frames": 4, "duration": 0.070345000,
"filename": "dhcp.pcap", "filesize": 1400},
))
def test_sharkd_req_analyse(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "analyse"},
), (
{"err": 0},
{"frames": 4, "protocols": ["frame", "eth", "ethertype", "ip", "udp",
"dhcp"], "first": 1102274184.317452908, "last": 1102274184.387798071},
))
def test_sharkd_req_info(self, check_sharkd_session):
matchTapNameList = MatchList(
{"tap": MatchAny(str), "name": MatchAny(str)})
check_sharkd_session((
{"req": "info"},
), (
{
"version": MatchAny(str),
"columns": MatchList({"format": MatchAny(str), "name": MatchAny(str)}),
"stats": matchTapNameList,
"convs": matchTapNameList,
"eo": matchTapNameList,
"srt": matchTapNameList,
"rtd": matchTapNameList,
"seqa": matchTapNameList,
"taps": matchTapNameList,
"follow": matchTapNameList,
"ftypes": MatchList(MatchAny(str)),
"nstat": matchTapNameList,
},
))
def test_sharkd_req_check(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "check"},
{"req": "check", "filter": "garbage filter"},
{"req": "check", "field": "garbage field"},
{"req": "check", "filter": "ip", "field": "ip"},
), (
{"err": 0},
{"err": 0},
{"err": 0, "filter": '"filter" was unexpected in this context.'},
{"err": 0, "field": "notfound"},
{"err": 0, "filter": "ok", "field": "ok"},
))
def test_sharkd_req_complete_field(self, check_sharkd_session):
check_sharkd_session((
{"req": "complete"},
{"req": "complete", "field": "frame.le"},
{"req": "complete", "field": "garbage.nothing.matches"},
), (
{"err": 0},
{"err": 0, "field": MatchList(
{"f": "frame.len", "t": 7, "n": "Frame length on the wire"}, match_element=any)},
{"err": 0, "field": []},
))
def test_sharkd_req_complete_pref(self, check_sharkd_session):
check_sharkd_session((
{"req": "complete", "pref": "tcp."},
{"req": "complete", "pref": "garbage.nothing.matches"},
), (
{"err": 0, "pref": MatchList(
{"f": "tcp.check_checksum", "d": "Validate the TCP checksum if possible"}, match_element=any)},
{"err": 0, "pref": []},
))
def test_sharkd_req_frames(self, check_sharkd_session, capture_file):
# XXX need test for optional input parameters, ignored/marked/commented
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "frames"},
), (
{"err": 0},
MatchList({
"c": MatchList(MatchAny(str)),
"num": MatchAny(int),
"bg": MatchAny(str),
"fg": MatchAny(str),
}),
))
def test_sharkd_req_tap_invalid(self, check_sharkd_session, capture_file):
# XXX Unrecognized taps result in an empty line, modify
# run_sharkd_session such that checking for it is possible.
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "tap"},
{"req": "tap", "tap0": "garbage tap"},
), (
{"err": 0},
))
def test_sharkd_req_tap(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "tap"},
{"req": "tap", "tap0": "conv:Ethernet", "tap1": "endpt:TCP"},
), (
{"err": 0},
{
"err": 0,
"taps": [
{
"tap": "endpt:TCP",
"type": "host",
"proto": "TCP",
"geoip": MatchAny(bool),
"hosts": [],
},
{
"tap": "conv:Ethernet",
"type": "conv",
"proto": "Ethernet",
"geoip": MatchAny(bool),
"convs": [
{
"saddr": MatchAny(str),
"daddr": "Broadcast",
"txf": 2,
"txb": 628,
"rxf": 0,
"rxb": 0,
"start": 0,
"stop": 0.070031,
"filter": "eth.addr==00:0b:82:01:fc:42 && eth.addr==ff:ff:ff:ff:ff:ff",
},
{
"saddr": MatchAny(str),
"daddr": MatchAny(str),
"rxf": 0,
"rxb": 0,
"txf": 2,
"txb": 684,
"start": 0.000295,
"stop": 0.070345,
"filter": "eth.addr==00:08:74:ad:f1:9b && eth.addr==00:0b:82:01:fc:42",
}
],
},
]
},
))
def test_sharkd_req_follow_bad(self, check_sharkd_session, capture_file):
# Unrecognized taps currently produce no output (not even err).
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "follow"},
{"req": "follow", "follow": "garbage follow", "filter": "ip"},
{"req": "follow", "follow": "HTTP", "filter": "garbage filter"},
), (
{"err": 0},
))
def test_sharkd_req_follow_no_match(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "follow", "follow": "HTTP", "filter": "ip"},
), (
{"err": 0},
{"err": 0, "shost": "NONE", "sport": "0", "sbytes": 0,
"chost": "NONE", "cport": "0", "cbytes": 0},
))
def test_sharkd_req_follow_udp(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "follow", "follow": "UDP", "filter": "frame.number==1"},
), (
{"err": 0},
{"err": 0,
"shost": "255.255.255.255", "sport": "67", "sbytes": 272,
"chost": "0.0.0.0", "cport": "68", "cbytes": 0,
"payloads": [
{"n": 1, "d": MatchRegExp(r'AQEGAAAAPR0A[a-zA-Z0-9]{330}AANwQBAwYq/wAAAAAAAAA=')}]},
))
def test_sharkd_req_iograph_bad(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "iograph"},
{"req": "iograph", "graph0": "garbage graph name"},
), (
{"err": 0},
{"iograph": []},
{"iograph": []},
))
def test_sharkd_req_iograph_basic(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "iograph", "graph0": "max:udp.length", "filter0": "udp.length"},
{"req": "iograph", "graph0": "packets", "graph1": "bytes"},
{"req": "iograph", "graph0": "packets", "filter0": "garbage filter"},
), (
{"err": 0},
{"iograph": [{"items": [308.000000]}]},
{"iograph": [{"items": [4.000000]}, {"items": [1312.000000]}]},
{"iograph": [
{"errmsg": 'Filter "garbage filter" is invalid - "filter" was unexpected in this context.'}]},
))
def test_sharkd_req_intervals_bad(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "intervals", "filter": "garbage filter"},
), (
{"err": 0},
))
def test_sharkd_req_intervals_basic(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "intervals"},
{"req": "intervals", "interval": 1},
{"req": "intervals", "filter": "frame.number <= 2"},
), (
{"err": 0},
{"intervals": [[0, 4, 1312]], "last": 0,
"frames": 4, "bytes": 1312},
{"intervals": [[0, 2, 656], [70, 2, 656]],
"last": 70, "frames": 4, "bytes": 1312},
{"intervals": [[0, 2, 656]], "last": 0, "frames": 2, "bytes": 656},
))
def test_sharkd_req_frame_basic(self, check_sharkd_session, capture_file):
# XXX add more tests for other options (ref_frame, prev_frame, columns, color, bytes, hidden)
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "frame", "frame": 2},
), (
{"err": 0},
{"err": 0, "fol": [["UDP", "udp.stream eq 1"]]},
))
def test_sharkd_req_frame_proto(self, check_sharkd_session, capture_file):
# Check proto tree output (including an UTF-8 value).
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
{"req": "frame", "frame": 2, "proto": True},
), (
{"err": 0},
MatchObject({
"tree": MatchList({
"l": "Dynamic Host Configuration Protocol (Offer)",
"t": "proto",
"f": "dhcp",
"e": MatchAny(int),
"n": MatchList({
"l": "Padding: 000000000000000000000000000000000000000000000000…",
"h": [316, 26],
"f": "dhcp.option.padding == 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"
}, match_element=any), # match one element from 'n'
"h": [42, 300],
}, match_element=any), # match one element from 'tree'
}),
))
def test_sharkd_req_setcomment(self, check_sharkd_session, capture_file):
check_sharkd_session((
{"req": "load", "file": capture_file('dhcp.pcap')},
# invalid frame number returns early.
{"req": "setcomment", "frame": 99999, "comment": "meh\nbaz"},
{"req": "setcomment", "frame": 3, "comment": "foo\nbar"},
{"req": "frame", "frame": 3},
), (
{"err": 0},
{"err": 0},
{"err": 0, "comment": "foo\nbar", "fol": MatchAny(list)},
))
def test_sharkd_req_setconf_bad(self, check_sharkd_session):
check_sharkd_session((
{"req": "setconf", "name": "uat:garbage-pref", "value": "\"\""},
), (
{"err": 1, "errmsg": "Unknown preference"},
))
def test_sharkd_req_dumpconf_bad(self, check_sharkd_session):
check_sharkd_session((
{"req": "dumpconf", "pref": "invalid-garbage-preference"},
{"req": "dumpconf", "pref": "uat:custom_http_header_fields"},
), ())
def test_sharkd_req_dumpconf_all(self, check_sharkd_session):
check_sharkd_session((
{"req": "dumpconf"},
), (
{"prefs": MatchObject({"tcp.check_checksum": {"b": 0}})},
))
def test_sharkd_req_download_tls_secrets(self, check_sharkd_session, capture_file):
# XXX test download for eo: and rtp: too
check_sharkd_session((
{"req": "load", "file": capture_file('tls12-dsb.pcapng')},
{"req": "download", "token": "<PASSWORD>"},
), (
{"err": 0},
# TODO remove "RSA Session-ID:" support and support "CLIENT_RANDOM "... only
{"file": "keylog.txt", "mime": "text/plain",
"data": MatchRegExp(r'UlNBIFNlc3Npb24tSUQ6.+')},
))
def test_sharkd_req_bye(self, check_sharkd_session):
check_sharkd_session((
{"req": "bye"},
), (
))
def test_sharkd_bad_request(self, check_sharkd_session):
check_sharkd_session((
{"req": 1337},
), (
))
def test_sharkd_config(self, check_sharkd_session):
check_sharkd_session((
{"req": "setconf", "name": "uat:custom_http_header_fields",
"value": "\"X-Header-Name\", \"Description\""},
{"req": "setconf", "name": "tcp.check_checksum", "value": "TRUE"},
{"req": "dumpconf", "pref": "tcp.check_checksum"},
{"req": "setconf", "name": "tcp.check_checksum", "value": "FALSE"},
{"req": "dumpconf", "pref": "tcp.check_checksum"},
), (
# Check that the UAT preference is set. There is no way to query it
# (other than testing for side-effects in dissection).
{"err": 0},
{"err": 0},
{"prefs": {"tcp.check_checksum": {"b": 1}}},
{"err": 0},
{"prefs": {"tcp.check_checksum": {"b": 0}}},
))
def test_sharkd_config_enum(self, check_sharkd_session):
'''Dump default enum preference value, change it and restore it.'''
check_sharkd_session((
{"req": "dumpconf", "pref": "wlan.ignore_wep"},
{"req": "setconf", "name": "wlan.ignore_wep", "value": "Yes - with IV"},
{"req": "dumpconf", "pref": "wlan.ignore_wep"},
{"req": "setconf", "name": "wlan.ignore_wep", "value": "No"},
{"req": "dumpconf", "pref": "wlan.ignore_wep"},
), (
{"prefs": {"wlan.ignore_wep": {"e": [
{"v": 0, "s": 1, "d": "No"},
{"v": 1, "d": "Yes - without IV"},
{"v": 2, "d": "Yes - with IV"}
]}}},
{"err": 0},
{"prefs": {"wlan.ignore_wep": {"e": [
{"v": 0, "d": "No"},
{"v": 1, "d": "Yes - without IV"},
{"v": 2, "s": 1, "d": "Yes - with IV"}
]}}},
{"err": 0},
{"prefs": {"wlan.ignore_wep": {"e": [
{"v": 0, "s": 1, "d": "No"},
{"v": 1, "d": "Yes - without IV"},
{"v": 2, "d": "Yes - with IV"}
]}}},
))
def test_sharkd_nested_file(self, check_sharkd_session, capture_file):
'''Request a frame from a file with a deep level of nesting.'''
check_sharkd_session((
{"req": "load", "file": capture_file("http2-data-reassembly.pcap")},
{"req": "frame", "frame": "4", "proto": "yes"},
), (
{"err": 0},
MatchAny(),
))
``` |
{
"source": "joshisagar92/behaviour_clonning",
"score": 3
} |
#### File: joshisagar92/behaviour_clonning/model.py
```python
import csv
import cv2
import argparse
from sklearn.utils import shuffle
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Conv2D, BatchNormalization, Activation, Dropout
filename = "./driving_log.csv"
data = []
## Code to read data from log csv - contains anti-clockwise driving data
with open(filename,"r") as f:
training_data = csv.reader(f)
for row in training_data:
data.append(row)
## Code to read data from log1 csv - contains clockwise driving data
with open("./driving_log1.csv","r") as f:
training_data = csv.reader(f)
for row in training_data:
data.append(row)
#print(len(data))
shuffle(data)
train_data, validation_data = train_test_split(data, test_size=0.3)
#print(len(train_data))
def augmentImage(batch_sample):
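    # For one log row, load the centre/left/right camera images, convert
    # BGR -> RGB, crop away sky and hood, and resize to 160x70. The side-camera
    # frames get a +/-0.2 steering correction; the centre frame is additionally
    # flipped horizontally with a negated angle to balance left/right turns.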
steering_angle = np.float32(batch_sample[3])
images, steering_angles = [], []
for i in range(3):
#print(batch_sample[i])
image = cv2.imread(batch_sample[i])
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cropped = rgb_image[60:130, :]
resized = cv2.resize(cropped, (160, 70))
images.append(resized)
        # append angle based on camera position
if i == 1:
steering_angles.append(steering_angle + 0.2)
elif i == 2:
steering_angles.append(steering_angle - 0.2)
else:
steering_angles.append(steering_angle)
if i == 0:
au_image = cv2.flip(resized, 1)
images.append(au_image)
steering_angles.append(-steering_angle)
# elif i == 1:
# au_image = cv2.flip(resized, 1)
# images.append(au_image)
# steering_angles.append(-(steering_angle + 0.2))
# elif i == 2:
# au_image = cv2.flip(resized, 1)
# images.append(au_image)
# steering_angles.append(-(steering_angle - 0.2))
return images, steering_angles
def generator(sample,batch_size = 128):
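    # Batch generator for fit_generator: shuffle the samples, slice them into
    # batches, expand every row into its augmented images via augmentImage, and
    # yield shuffled (X, y) arrays indefinitely.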
num_sample = len(sample)
while True:
shuffle(sample)
for offset in range(0,num_sample,batch_size):
batch_samples = sample[offset:offset + batch_size]
images, steering_angles = [], []
for batch_sample in batch_samples:
#print(batch_sample)
augmented_images, augmented_angles = augmentImage(batch_sample)
#print(augmented_images)
images.extend(augmented_images)
steering_angles.extend(augmented_angles)
X_train, y_train = np.array(images), np.array(steering_angles)
yield shuffle(X_train, y_train)
train_generator = generator(train_data,128)
validation_generator = generator(validation_data,128)
def model(loss='mse', optimizer='adam'):
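    # Normalization lambda, five convolutional layers and a 100/50/10/1 dense
    # regression head predicting the steering angle, similar to the NVIDIA
    # end-to-end driving network, with dropout after the first dense layer.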
model = Sequential()
model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape=(70, 160, 3)))
model.add(Conv2D(filters=24, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Conv2D(filters=36, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Conv2D(filters=48, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss=loss, optimizer=optimizer)
return model
model = model()
model.summary()
model.fit_generator(generator=train_generator,
validation_data=validation_generator,
epochs=10,
steps_per_epoch=len(train_data) * 10 // 128,
validation_steps=len(validation_data) * 10 // 128,verbose=1)
model.save('model.h5')
``` |
{
"source": "joshishreesh/FirstTest",
"score": 2
} |
#### File: erptest/erplogin/models.py
```python
from django.db import models
import pyodbc
# Create your models here.
class sqlserverconn(models.Model):
EMPLOYEE_CODE =models.BigIntegerField()
FIRST_NAME =models.CharField(max_length=250)
LAST_NAME =models.CharField(max_length=250)
LOGIN_PASSKEY =models.CharField(max_length=250)
def __str__(self) -> str:
return super().__str__()
```
#### File: erptest/erplogin/views.py
```python
from django.shortcuts import render
from erplogin.models import sqlserverconn
from django.forms.models import model_to_dict
import pyodbc
import base64
def login(request):
return render(request, "index.html")
def connsql(request):
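    # Connect to the local SQL Server instance with Windows authentication and
    # pass the full employee list to the login template.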
conn=pyodbc.connect('Driver={sql server};'
'Server=UNICORN\SQLSERVER19;'
'Database=MIPLERP;'
'Trusted_Connection=yes;')
cursor=conn.cursor()
cursor.execute("select EMPLOYEE_CODE,FIRST_NAME,LAST_NAME,LOGIN_PASSKEY from MST_EMPLOYEE_MASTER order by FIRST_NAME")
result= cursor.fetchall()
return render(request,'index.html',{'sqlserverconn':result})
def loginuser(request):
username=request.POST['userid']
password=request.POST['<PASSWORD>']
validlogin=checkLogin(username=str(username),password=str(password))
if (validlogin==True):
return render(request, "dashboard.html")
else:
return render(request, "index.html")
def checkLogin(username:str,password:str):
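    # The stored LOGIN_PASSKEY appears to be the base64 encoding of the
    # plain-text password, so the submitted password is base64-encoded and
    # compared against the value stored for the active employee.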
conn=pyodbc.connect('Driver={sql server};'
'Server=UNICORN\SQLSERVER19;'
'Database=MIPLERP;'
'Trusted_Connection=yes;')
cursor=conn.cursor()
cursor.execute("select LOGIN_PASSKEY from MST_EMPLOYEE_MASTER where isactive=0 and EMPLOYEE_CODE= %s " % username)
row=cursor.fetchval()
pass1byte=password.encode('ascii')
baseByte=base64.b64encode(pass1byte)
baseString=baseByte.decode('ascii')
if (row==baseString):
return True
else:
return False
``` |
{
"source": "joshishungry/learnAI",
"score": 3
} |
#### File: assignments/lab5/lab5.py
```python
from data_reader import *
from boost import *
from orange_for_6034 import *
from neural_net_data import *
import neural_net
SVM_TYPE = orange.SVMLearner.C_SVC
# Senate and House Data
# These should be familiar by now.
senate_people = read_congress_data('S110.ord')
senate_votes = read_vote_data('S110desc.csv')
house_people = read_congress_data('H110.ord')
house_votes = read_vote_data('H110desc.csv')
last_senate_people = read_congress_data('S109.ord')
last_senate_votes = read_vote_data('S109desc.csv')
house_1796 = read_congress_data('H004.ord')
house_1796_votes = read_vote_data('H004desc.csv')
# The first step is to complete the boosting code in boost.py. None of the
# following steps will work without it.
#
# Once you've done that, the following line should make a boost classifier
# that learns about the 4th House of Representatives (1795-1796).
boost_1796 = BoostClassifier(make_vote_classifiers(house_1796_votes),
house_1796, standardPartyClassifier)
# You will need to train it, however. You can change the number of steps here.
boost_1796.train(20)
# Once you have run your boosting classifier for a sufficient number of steps
# on the 4th House of Representatives data, it should tell you how it believes
# Republicans and Federalists generally voted on a range of issues. Which way
# does it predict a Republican would vote on the amendment to require
# "newspapers to be sufficiently dried before mailing"? ('yes' or 'no')
republican_newspaper_vote = 'answer yes or no'
# In the 4th House of Representatives, which five representatives were
# misclassified the most while training your boost classifier?
#
# You should answer this question by defining the following function.
# It should return five names of legislators, in the format that comes from
# the legislator_info function. The tests will check the function, not just
# its output in this case.
def most_misclassified(classifier, n=5):
"""
Given a trained boosting classifier, return the n data points that were
misclassified the most (based on their final weights). The
most-misclassified datum should be at the beginning of the list.
You will need to use the "legislator_info(datum)" function to put your
output in the correct format.
classifier: instance of Classifier -- used to classify the data
n: int -- the number of most-misclassified data points to return
returns: list of data points (each passed through legislator_info) that were
misclassified most often
"""
raise NotImplementedError
# The following line is used by the tester; please leave it in place!
most_misclassified_boost_1796 = lambda n: most_misclassified(boost_1796, n)
# print most_misclassified_boost_1796(5)
# Now train a similar classifier on the 110th Senate (2007-2008).
# How does it say a Republican would vote on Cardin Amdt No. 3930; To modify
# the sunset provision (whatever that is)?
boost = BoostClassifier(make_vote_classifiers(senate_votes), senate_people,
standardPartyClassifier)
boost.train(20)
republican_sunset_vote = 'answer yes or no'
# Which five Senators are the most misclassified after training your
# classifier? (Again, the tester will test the function, not the answer you
# print out here.)
# The following line is used by the tester; please leave it in place!
most_misclassified_boost = lambda n: most_misclassified(boost, n)
# print most_misclassified_boost(5)
########################################################################
def show_decisions(learner, data):
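    # Train the learner on the full dataset, then print its decision and
    # class-1 probability for every training example along with the resulting
    # training-set accuracy.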
print " "+learner.name+":"
classifier = learner(data) # Train on the data
print " "+str(classifier)
total = 0
for i in range(len(data)): # Test each of the same data points
decision = classifier(data[i])
probabilities = classifier(data[i], orange.Classifier.GetProbabilities)
correct = (decision == data[i].getclass())
if correct:
total += 1
print (" %d: %5.3f -> %s (should be %s) %scorrect" %
(i+1, probabilities[1], decision, data[i].getclass(),
("" if correct else "in")))
print " accuracy on training data: %1.2f" % (float(total)/len(data))
def describe_and_classify(filename, learners):
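    # Load the named Orange dataset, report class/attribute counts and the
    # class distribution, show each learner's decisions on its own training
    # data, then compare all learners with cross-validation.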
data = orange.ExampleTable(filename)
print "Classes:",len(data.domain.classVar.values)
print "Attributes:",len(data.domain.attributes)
# obtain class distribution
c = [0] * len(data.domain.classVar.values)
for e in data:
c[int(e.getclass())] += 1
print "Instances:", len(data), "total",
for i in range(len(data.domain.classVar.values)):
print ",", c[i], "with class", data.domain.classVar.values[i],
print
print "Possible classes:", data.domain.classVar.values
for name in learners:
show_decisions(learners[name], data)
print "Decision Tree boundaries:"
orngTree.printTxt(learners["dt"](data))
# Now we'll cross-validate with the same learners.
print
print "Accuracy with cross-validation:"
classifiers = [learners[k] for k in learners]
results = orngTest.crossValidation(classifiers, data,
folds=min(10,len(data)))
confusion_matrices = orngStat.confusionMatrices(results)
#f_scores = orngStat.F1(confusion_matrices)
# http://en.wikipedia.org/wiki/F_score
accuracies = orngStat.CA(results)
# http://en.wikipedia.org/wiki/Accuracy
brierscores= orngStat.BrierScore(results)
# http://en.wikipedia.org/wiki/Brier_score
ROC_areas = orngStat.AUC(results) # Area under the ROC curve
# http://en.wikipedia.org/wiki/ROC_curve
# NOTE: many other measurements are available.
print " Confusion Matrices:"
for name in learners:
classifier = learners[name]
i = classifiers.index(classifier)
print " %5s: %s" % (name, confusion_matrices[i])
print " Classifier accuracy Brier AUC"
for name in learners:
classifier = learners[name]
i = classifiers.index(classifier)
print (" %-12s %5.3f %5.3f %5.3f" %
(name, accuracies[i], brierscores[i], ROC_areas[i]))
# Note that it's the same declarations as above, just without the data
learners = {
"maj" : orange.MajorityLearner(), # a useful baseline
"dt" : orngTree.TreeLearner(sameMajorityPruning=1, mForPruning = 2),
"knn" : orange.kNNLearner(k = 10),
"svml": orange.SVMLearner(kernel_type = orange.SVMLearner.Linear,
probability = True, svm_type=SVM_TYPE),
"svmp3":orange.SVMLearner(kernel_type = orange.SVMLearner.Polynomial,
degree = 3,
probability = True, svm_type=SVM_TYPE),
"svmr": orange.SVMLearner(kernel_type = orange.SVMLearner.RBF,
probability = True, svm_type=SVM_TYPE),
"svms": orange.SVMLearner(kernel_type = orange.SVMLearner.Sigmoid,
probability = True, svm_type=SVM_TYPE),
"nb": orange.BayesLearner(),
#"boost":orngEnsemble.BoostedLearner(orngTree.TreeLearner()),
# http://www.ailab.si/orange/doc/modules/orngEnsemble.htm
# but it doesn't work...
# you can use SVMLearner.Custom to make your own, of course.
}
learners["maj"].name = "Majority classifier"
learners["dt"].name = "Decision Tree classifier"
learners["knn"].name = "k-Nearest Neighbors classifier"
learners["svml"].name = "Support Vector Machine classifier with linear kernel"
learners["svmp3"].name = "Support Vector Machine classifier with degree 3 polynomial kernel"
learners["svmr"].name = "Support Vector Machine classifier with radial basis kernel"
learners["svms"].name = "Support Vector Machine classifier with sigmoid kernel"
learners["nb"].name = "Naive Bayes classifier"
#FIXME: learners["034b"].name = "Our boosting classifier for party id datasets"
#learners["boost"].name = "Boosted decision trees classifier"
if __name__ == "__main__":
describe_and_classify("vampires", learners)
# For the vampire dataset, what variable does the id tree query, that our
# algorithm in class did not?
vampires_idtree_odd = "one of: shadow garlic complexion accent"
# For the vampire dataset, which classifier does the worst when tested on just
# the data on which it was trained?
vampires_worst_on_training = 'one of: maj dt knn svml svmp3 svmr svms nb'
# Is it actually doing badly, or is it just confused?
# For the vampire dataset, which classifier does the worst when cross-validated?
vampires_worst_on_test = 'one of: maj dt knn svml svmp3 svmr svms nb'
# Which of the above classifiers has the best Brier distance to the true answers
# in ten-fold cross-validation for the H004 dataset?
best_brier_for_h004 = 'one of: maj dt knn svml svmp3 svmr svms nb'
# Just looking at the confusion matrices, what is the minimum number
# of data points that must have been differently classified between
# the best classifier and the second-best classifier for the H004 data
# set?
min_disagreement_h004 = None
# Which bill was the most divisive along party lines in the H004 data
# set, according to the classification tree (id tree)?
most_divisive_h004 = 'a bill number'
################################################################
# Now let's see if we can do even better on the H004 dataset, by
# boosting the results of the classifiers we already have!
def boosted_ensemble(filename, learners, standard, verbose=False):
data = orange.ExampleTable(filename)
ensemble_learner = BoostOrangeLearner(learners, standard)
if verbose:
# Print the ensemble classifier that was trained on all of the
# data. For debugging the constituents of the ensemble classifier.
classifier = ensemble_learner(data)
print "ensemble classifier: %s" %(classifier)
ensemble_crossval = orngTest.crossValidation([ensemble_learner], data,
folds=min(10,len(data)))
accuracies = orngStat.CA(ensemble_crossval)
brierscores= orngStat.BrierScore(ensemble_crossval)
ROC_areas = orngStat.AUC(ensemble_crossval)
return accuracies[0], brierscores[0], ROC_areas[0]
DATASET_STANDARDS={
"H004" : standardPartyClassifier,
"H109" : standardPartyClassifier,
"H110" : standardPartyClassifier,
"S109" : standardPartyClassifier,
"S110" : standardPartyClassifier,
"vampires" : OrangeStandardClassifier("yes"),
"titanic" : OrangeStandardClassifier("yes"),
"breast-cancer" : OrangeStandardClassifier("recurrence-events"),
"adult" : OrangeStandardClassifier(">50K") # this is big -- optional!
}
if __name__ == "__main__":
dataset = "H004"
describe_and_classify(dataset, learners)
print "Boosting with our suite of orange classifiers:"
print (" accuracy: %.3f, brier: %.3f, auc: %.3f" %
boosted_ensemble(dataset, learners, DATASET_STANDARDS[dataset]))
# Play with the datasets mentioned above. What ensemble of classifiers
# will give you the best cross-validation accuracy on the breast-cancer
# dataset?
classifiers_for_best_ensemble = ['maj', 'dt', 'knn', 'svml',
'svmp3', 'svmr', 'svms', 'nb']
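# A hedged sketch (not part of the assignment answers): one way to look for a
# good ensemble is to evaluate boosted_ensemble() over subsets of the learners
# dict and keep the subset with the best cross-validation accuracy. The name
# search_best_ensemble is made up; it is slow, so it is only defined here, not run.
from itertools import combinations

def search_best_ensemble(dataset="breast-cancer"):
    best_accuracy, best_subset = 0.0, None
    names = sorted(learners)
    for size in xrange(1, len(names) + 1):
        for subset in combinations(names, size):
            subset_learners = dict((n, learners[n]) for n in subset)
            accuracy, _, _ = boosted_ensemble(
                dataset, subset_learners, DATASET_STANDARDS[dataset])
            if accuracy > best_accuracy:
                best_accuracy, best_subset = accuracy, list(subset)
    return best_subset, best_accuracy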
## The standard survey questions.
HOW_MANY_HOURS_THIS_PSET_TOOK = None
WHAT_I_FOUND_INTERESTING = None
WHAT_I_FOUND_BORING = None
## The following code is used by the tester; please leave it in place!
def classifier_tester(classifier_name, data_set):
""" Test a particular classifier, verify that it improves every step over 20 steps """
return list(classifier_tester_helper(classifier_name, data_set))
def classifier_tester_helper(classifier_name, data_set):
if classifier_name in globals():
classifier = globals()[classifier_name]
data = globals()[data_set]
if isinstance(classifier, Classifier):
original_classifier_count = len(classifier.classifiers)
classifier.reset()
for x in xrange(20):
classifier.step()
yield classifier.error_rate(data, standardPartyClassifier)
classifier.reset()
classifier.train(original_classifier_count)
return
    raise Exception, "Error: Classifier %s doesn't exist; can't test it" % classifier_name
import neural_net
from neural_net import *
def neural_net_tester(network_maker_func,
train_dataset_name,
test_dataset_name,
iterations):
"""Test a neural net making function on a named dataset"""
neural_net.seed_random()
network_maker_func = globals()[network_maker_func]
train_dataset = globals()[train_dataset_name]
test_dataset = globals()[test_dataset_name]
nn = network_maker_func()
train(nn, train_dataset, max_iterations=iterations)
result = test(nn, test_dataset)
return result
def neural_net_size_tester(network_maker_func):
"""Test a neural net size"""
network_maker_func = globals()[network_maker_func]
nn = network_maker_func()
return len(nn.neurons)
``` |
{
"source": "joshisumit/pythonic_scripts",
"score": 4
} |
#### File: joshisumit/pythonic_scripts/generate_oauth_token.py
```python
import requests
import getpass
import json
GITHUB_API='https://api.github.com'
def main():
#User Input
username=raw_input("Enter your GitHub username: ")
    password=getpass.getpass("Github password:")
note=raw_input('Note (optional):')
#Compose request
url=GITHUB_API+"/authorizations"
payload={}
if note:
payload['note']=note
# sends a request for token
res=requests.post(url,auth=(username,password),data=json.dumps(payload))
print res.status_code
print res.headers['content-type']
print res.text
    # Write the raw json response to a file (and close it when done)
    with open("token_details_v1.txt", "w") as f:
        f.write(res.text)
# parse json response
j=json.loads(res.text)
if res.status_code >= 400:
msg=j.get('message','UNDEFINED ERROR (no error description from the server)')
print 'ERROR: %s' %msg
return
token=j['token']
print "New Token: %s" %token
print "U can use this token with any of the application"
print "Token is also written in github_tokens file"
if __name__ == '__main__':
main()
``` |
{
"source": "joshiumang107/forseti-security",
"score": 2
} |
#### File: common/data_access/org_resource_rel_dao.py
```python
from google.cloud.security.common.data_access import folder_dao
from google.cloud.security.common.data_access import organization_dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.gcp_type import resource
class OrgResourceRelDao(object):
"""DAO for organization resource entity relationships."""
def __init__(self, global_configs):
"""Initialize.
Args:
global_configs (dict): Global configurations.
"""
# Map the org resource type to the appropriate dao class
self._resource_db_lookup = {
resource.ResourceType.ORGANIZATION: {
'dao': organization_dao.OrganizationDao(global_configs),
'get': 'get_organization',
},
resource.ResourceType.FOLDER: {
'dao': folder_dao.FolderDao(global_configs),
'get': 'get_folder',
},
resource.ResourceType.PROJECT: {
'dao': project_dao.ProjectDao(global_configs),
'get': 'get_project',
}
}
def find_ancestors(self, org_resource, snapshot_timestamp=None):
"""Find ancestors of a particular resource.
Args:
org_resource (Resource): A Resource.
snapshot_timestamp (str): The timestamp to use for data lookup.
Returns:
list: A list of Resource ancestors, starting with the
closest (lowest-level) ancestor.
"""
# TODO: handle case where snapshot is None
ancestors = []
curr_resource = org_resource
while curr_resource is not None:
parent_resource = None
if (curr_resource.parent and
curr_resource.parent.type and
curr_resource.parent.id):
resource_lookup = self._resource_db_lookup.get(
curr_resource.parent.type, {})
# No dao object for the parent resource, so quit
if not resource_lookup.get('dao'):
break
# Invoke the dao.get_*() method, to get the parent resource
parent_resource = getattr(
resource_lookup.get('dao'),
resource_lookup.get('get'))(
curr_resource.parent.id, snapshot_timestamp)
if parent_resource:
ancestors.append(parent_resource)
curr_resource = parent_resource
return ancestors
```
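A minimal usage sketch for the DAO above; the project id, snapshot timestamp, and empty `global_configs` dict are placeholders (a real deployment passes its parsed configuration):
```python
from google.cloud.security.common.data_access import org_resource_rel_dao
from google.cloud.security.common.data_access import project_dao

global_configs = {}            # placeholder for the parsed global config
snapshot = '20170901T120000Z'  # made-up snapshot timestamp

# Look up a project resource, then walk its parents (folder, organization).
project = project_dao.ProjectDao(global_configs).get_project(
    'my-project-id', snapshot)
rel_dao = org_resource_rel_dao.OrgResourceRelDao(global_configs)
for ancestor in rel_dao.find_ancestors(project, snapshot):
    print ancestor
```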
#### File: security/iam/client.py
```python
import binascii
import os
import grpc
from google.cloud.security.iam.explain import explain_pb2_grpc, explain_pb2
from google.cloud.security.iam.playground import playground_pb2_grpc
from google.cloud.security.iam.playground import playground_pb2
from google.cloud.security.iam.utils import oneof
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc,missing-raises-doc
def require_model(f):
"""Decorator to perform check that the model handle exists in the service.
"""
def wrapper(*args, **kwargs):
"""Function wrapper to perform model handle existence check."""
if args[0].config.handle():
return f(*args, **kwargs)
raise Exception("API requires model to be set")
return wrapper
class ClientConfig(dict):
"""Provide access to client configuration data."""
def handle(self):
"""Return currently active handle."""
return self['handle']
class IAMClient(object):
"""Client base class."""
def __init__(self, config):
self.config = config
def metadata(self):
"""Create default metadata for gRPC call."""
return [('handle', self.config.handle())]
class ExplainClient(IAMClient):
"""Explain service allows the client to reason about a model.
Explain provides the following functionality:
- List access by resource/member
- Provide information on why a member has access
- Provide recommendations on how to provide access
"""
def __init__(self, config):
super(ExplainClient, self).__init__(config)
self.stub = explain_pb2_grpc.ExplainStub(config['channel'])
def is_available(self):
"""Checks if the 'Explain' service is available by performing a ping."""
data = binascii.hexlify(os.urandom(16))
return self.stub.Ping(explain_pb2.PingRequest(data=data)).data == data
def new_model(self, source, name):
"""Creates a new model, reply contains the handle."""
return self.stub.CreateModel(
explain_pb2.CreateModelRequest(
type=source,
name=name))
def list_models(self):
"""List existing models in the service."""
return self.stub.ListModel(explain_pb2.ListModelRequest())
def delete_model(self, model_name):
"""Delete a model, deletes all corresponding data."""
return self.stub.DeleteModel(
explain_pb2.DeleteModelRequest(
handle=model_name),
metadata=self.metadata())
def explain_denied(self, member_name, resource_names, roles=None,
permission_names=None):
"""List possibilities to grant access which is currently denied."""
roles = [] if roles is None else roles
permission_names = [] if permission_names is None else permission_names
if not oneof(roles != [], permission_names != []):
raise Exception('Either roles or permission names must be set')
request = explain_pb2.ExplainDeniedRequest(
member=member_name,
resources=resource_names,
roles=roles,
permissions=permission_names)
return self.stub.ExplainDenied(request, metadata=self.metadata())
def explain_granted(self, member_name, resource_name, role=None,
permission=None):
"""Provide data on all possibilities on
how a member has access to a resources."""
if not oneof(role is not None, permission is not None):
raise Exception('Either role or permission name must be set')
request = explain_pb2.ExplainGrantedRequest()
if role is not None:
request.role = role
else:
request.permission = permission
request.resource = resource_name
request.member = member_name
return self.stub.ExplainGranted(request, metadata=self.metadata())
@require_model
def query_access_by_resources(self, resource_name, permission_names,
expand_groups=False):
"""List members who have access to a given resource."""
request = explain_pb2.GetAccessByResourcesRequest(
resource_name=resource_name,
permission_names=permission_names,
expand_groups=expand_groups)
return self.stub.GetAccessByResources(
request, metadata=self.metadata())
@require_model
def query_access_by_members(self, member_name, permission_names,
expand_resources=False):
"""List resources to which a set of members has access to."""
request = explain_pb2.GetAccessByMembersRequest(
member_name=member_name,
permission_names=permission_names,
expand_resources=expand_resources)
return self.stub.GetAccessByMembers(request, metadata=self.metadata())
@require_model
def query_access_by_permissions(self,
role_name,
permission_name,
expand_groups=False,
expand_resources=False):
"""List (resource, member) tuples satisfying the authorization
Args:
role_name (str): Role name to query for.
permission_name (str): Permission name to query for.
expand_groups (bool): Whether or not to expand groups.
            expand_resources (bool): Whether or not to expand resources.
Returns:
object: Generator yielding access tuples.
"""
request = explain_pb2.GetAccessByPermissionsRequest(
role_name=role_name,
permission_name=permission_name,
expand_groups=expand_groups,
expand_resources=expand_resources)
return self.stub.GetAccessByPermissions(
request,
metadata=self.metadata())
@require_model
def query_permissions_by_roles(self, role_names=None, role_prefixes=None):
"""List all the permissions per given roles."""
role_names = [] if role_names is None else role_names
role_prefixes = [] if role_prefixes is None else role_prefixes
request = explain_pb2.GetPermissionsByRolesRequest(
role_names=role_names, role_prefixes=role_prefixes)
return self.stub.GetPermissionsByRoles(
request, metadata=self.metadata())
@require_model
def denormalize(self):
"""Denormalize the entire model into access triples."""
return self.stub.Denormalize(
explain_pb2.DenormalizeRequest(),
metadata=self.metadata())
class PlaygroundClient(IAMClient):
"""Provides an interface to add entities into the IAM model.
It allows the modification of:
- Roles & Permissions
- Membership relations
- Resource hierarchy
- Get/Set policies
- Perform access checks
This allows a client to perform simulations based on imported
or empty models.
"""
def __init__(self, config):
super(PlaygroundClient, self).__init__(config)
self.stub = playground_pb2_grpc.PlaygroundStub(config['channel'])
def is_available(self):
"""Check if the Playground service is available."""
data = binascii.hexlify(os.urandom(16))
return self.stub.Ping(
playground_pb2.PingRequest(
data=data)).data == data
@require_model
def add_role(self, role_name, permissions):
"""Add a role associated with a list of permissions to the model."""
return self.stub.AddRole(
playground_pb2.AddRoleRequest(
role_name=role_name,
permissions=permissions),
metadata=self.metadata())
@require_model
def del_role(self, role_name):
"""Delete a role from the model."""
return self.stub.DelRole(
playground_pb2.DelRoleRequest(
role_name=role_name),
metadata=self.metadata())
@require_model
def list_roles(self, role_name_prefix):
"""List roles by prefix, can be empty."""
return self.stub.ListRoles(
playground_pb2.ListRolesRequest(
prefix=role_name_prefix),
metadata=self.metadata())
@require_model
def add_resource(self,
resource_type_name,
parent_type_name,
no_parent=False):
"""Add a resource to the hierarchy."""
return self.stub.AddResource(
playground_pb2.AddResourceRequest(
resource_type_name=resource_type_name,
parent_type_name=parent_type_name,
no_require_parent=no_parent),
metadata=self.metadata())
@require_model
def del_resource(self, resource_type_name):
"""Delete a resource from the hierarchy and the subtree."""
return self.stub.DelResource(
playground_pb2.DelResourceRequest(
resource_type_name=resource_type_name),
metadata=self.metadata())
@require_model
def list_resources(self, resource_name_prefix):
"""List resources by name prefix."""
return self.stub.ListResources(
playground_pb2.ListResourcesRequest(
prefix=resource_name_prefix),
metadata=self.metadata())
@require_model
def add_member(self, member_type_name, parent_type_names=None):
"""Add a member to the member relationship."""
if parent_type_names is None:
parent_type_names = []
return self.stub.AddGroupMember(
playground_pb2.AddGroupMemberRequest(
member_type_name=member_type_name,
parent_type_names=parent_type_names),
metadata=self.metadata())
@require_model
def del_member(self, member_name, parent_name=None,
only_delete_relationship=False):
"""Delete a member from the member relationship."""
return self.stub.DelGroupMember(
playground_pb2.DelGroupMemberRequest(
member_name=member_name,
parent_name=parent_name,
only_delete_relationship=only_delete_relationship),
metadata=self.metadata())
@require_model
def list_members(self, member_name_prefix):
"""List members by prefix."""
return self.stub.ListGroupMembers(
playground_pb2.ListGroupMembersRequest(
prefix=member_name_prefix),
metadata=self.metadata())
@require_model
def set_iam_policy(self, full_resource_name, policy):
"""Set the IAM policy on the resource."""
bindingspb = [
playground_pb2.Binding(
role=role,
members=members) for role,
members in policy['bindings'].iteritems()]
policypb = playground_pb2.Policy(
bindings=bindingspb, etag=policy['etag'])
return self.stub.SetIamPolicy(
playground_pb2.SetIamPolicyRequest(
resource=full_resource_name,
policy=policypb),
metadata=self.metadata())
@require_model
def get_iam_policy(self, full_resource_name):
"""Get the IAM policy from the resource."""
return self.stub.GetIamPolicy(
playground_pb2.GetIamPolicyRequest(
resource=full_resource_name),
metadata=self.metadata())
@require_model
def check_iam_policy(self, full_resource_name, permission_name,
member_name):
"""Check access via IAM policy."""
return self.stub.CheckIamPolicy(
playground_pb2.CheckIamPolicyRequest(
resource=full_resource_name,
permission=permission_name,
identity=member_name),
metadata=self.metadata())
class ClientComposition(object):
"""Client composition class.
Most convenient to use since it comprises the common use cases among
the different services.
"""
DEFAULT_ENDPOINT = 'localhost:50058'
def __init__(self, endpoint=DEFAULT_ENDPOINT):
self.channel = grpc.insecure_channel(endpoint)
self.config = ClientConfig({'channel': self.channel, 'handle': ''})
self.explain = ExplainClient(self.config)
self.playground = PlaygroundClient(self.config)
self.clients = [self.explain, self.playground]
if not all([c.is_available() for c in self.clients]):
raise Exception('gRPC connected but services not registered')
def new_model(self, source, name):
"""Create a new model from the specified source."""
return self.explain.new_model(source, name)
def list_models(self):
"""List existing models."""
return self.explain.list_models()
def switch_model(self, model_name):
"""Switch the client into using a model."""
self.config['handle'] = model_name
def delete_model(self, model_name):
"""Delete a model. Deletes all associated data."""
return self.explain.delete_model(model_name)
```
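A minimal usage sketch for the composition client above, assuming an IAM Explain/Playground server is listening on the default endpoint; the model name and member names are illustrative:
```python
from google.cloud.security.iam.client import ClientComposition

client = ClientComposition()                    # connects and pings both services
reply = client.new_model('EMPTY', name='demo')  # create an empty model
client.switch_model(reply.model.handle)         # make it the active handle

client.playground.add_member('group/admins')
client.playground.add_member('user/alice', ['group/admins'])
for name in client.playground.list_members('user').member_names:
    print name

client.delete_model(reply.model.handle)
```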
#### File: iam/explain/explainer.py
```python
from google.cloud.security.iam import dao
from google.cloud.security.iam.explain.importer import importer
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc,missing-yield-doc
# pylint: disable=missing-yield-type-doc
# pylint: disable=invalid-name,no-self-use
class Explainer(object):
"""Implements the IAM Explain API."""
def __init__(self, config):
self.config = config
def ExplainDenied(self, model_name, member, resources, permissions, roles):
"""Provides information on granting a member access to a resource."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
result = data_access.explain_denied(session,
member,
resources,
permissions,
roles)
return result
def ExplainGranted(self, model_name, member, resource, role, permission):
"""Provides information on why a member has access to a resource."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
result = data_access.explain_granted(session,
member,
resource,
role,
permission)
return result
def GetAccessByResources(self, model_name, resource_name, permission_names,
expand_groups):
"""Returns members who have access to the given resource."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
mapping = data_access.query_access_by_resource(session,
resource_name,
permission_names,
expand_groups)
return mapping
def CreateModel(self, source, name):
"""Creates a model from the import source."""
model_manager = self.config.model_manager
model_handle = model_manager.create(name=name)
scoped_session, data_access = model_manager.get(model_handle)
with scoped_session as session:
def doImport():
"""Import runnable."""
importer_cls = importer.by_source(source)
import_runner = importer_cls(
session,
model_manager.model(model_handle, expunge=False),
data_access,
self.config)
import_runner.run()
self.config.run_in_background(doImport)
return model_manager.model(model_handle, expunge=True)
def GetAccessByPermissions(self, model_name, role_name, permission_name,
expand_groups, expand_resources):
"""Returns access tuples satisfying the permission or role.
Args:
model_name (str): Model to operate on.
role_name (str): Role name to query for.
permission_name (str): Permission name to query for.
expand_groups (bool): Whether to expand groups in policies.
expand_resources (bool): Whether to expand resources.
Yields:
Generator for access tuples.
"""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
for role, resource, members in (
data_access.query_access_by_permission(session,
role_name,
permission_name,
expand_groups,
expand_resources)):
yield role, resource, members
def GetAccessByMembers(self, model_name, member_name, permission_names,
expand_resources):
"""Returns access to resources for the provided member."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
for role, resources in data_access.query_access_by_member(
session, member_name, permission_names, expand_resources):
yield role, resources
def GetPermissionsByRoles(self, model_name, role_names, role_prefixes):
"""Returns the permissions associated with the specified roles."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
for result in data_access.query_permissions_by_roles(
session, role_names, role_prefixes):
yield result
def ListModel(self):
"""Lists all models."""
model_manager = self.config.model_manager
return model_manager.models()
def DeleteModel(self, model_name):
"""Deletes a model."""
model_manager = self.config.model_manager
model_manager.delete(model_name)
def Denormalize(self, model_name):
"""Denormalizes a model."""
model_manager = self.config.model_manager
scoped_session, data_access = model_manager.get(model_name)
with scoped_session as session:
for tpl in data_access.denormalize(session):
permission, resource, member = tpl
yield permission, resource, member
if __name__ == "__main__":
class DummyConfig(object):
"""Dummy configuration."""
def __init__(self):
engine = dao.create_engine('sqlite:////tmp/test.db')
self.model_manager = dao.ModelManager(engine)
def run_in_background(self, function):
"""Dummy implementation."""
function()
e = Explainer(config=DummyConfig())
e.CreateModel("TEST", 'test')
```
#### File: notifier/pipelines/base_notification_pipeline.py
```python
import abc
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.data_access import violation_dao
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
# pylint: disable=too-many-instance-attributes
class BaseNotificationPipeline(object):
"""Base pipeline to perform notifications"""
__metaclass__ = abc.ABCMeta
def __init__(self, resource, cycle_timestamp,
violations, global_configs, notifier_config, pipeline_config):
"""Constructor for the base pipeline.
Args:
resource (str): Violation resource name.
cycle_timestamp (str): Snapshot timestamp,
formatted as YYYYMMDDTHHMMSSZ.
violations (dict): Violations.
global_configs (dict): Global configurations.
notifier_config (dict): Notifier configurations.
pipeline_config (dict): Pipeline configurations.
"""
self.cycle_timestamp = cycle_timestamp
self.resource = resource
self.global_configs = global_configs
self.notifier_config = notifier_config
self.pipeline_config = pipeline_config
# TODO: import api_client
# self.api_client = api_client
# Initializing DAOs
self.dao = dao.Dao(global_configs)
self.project_dao = project_dao.ProjectDao(global_configs)
self.violation_dao = violation_dao.ViolationDao(global_configs)
# Get violations
self.violations = violations
def _get_violations(self, timestamp):
"""Get all violtions.
Args:
timestamp (str): String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
Returns:
dict: Violations organized per resource type.
"""
violations = {
'violations': self.violation_dao.get_all_violations(
timestamp, 'violations'),
'bucket_acl_violations': self.violation_dao.get_all_violations(
timestamp, 'buckets_acl_violations')
}
return violations
@abc.abstractmethod
def _send(self, **kwargs):
"""Send notifications.
Args:
**kwargs: Arbitrary keyword arguments.
"""
pass
@abc.abstractmethod
def _compose(self, **kwargs):
"""Compose notifications.
Args:
**kwargs: Arbitrary keyword arguments.
"""
pass
@abc.abstractmethod
def run(self):
"""Runs the pipeline."""
pass
```
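A hedged sketch of a concrete pipeline built on the base class above; the class name and logging-only behaviour are made up, but the three abstract methods are the ones a subclass must provide (the module path is assumed from the file layout, and the dict form of `self.violations` follows the constructor docstring):
```python
import logging

from google.cloud.security.notifier.pipelines import base_notification_pipeline

LOGGER = logging.getLogger(__name__)


class LogOnlyNotificationPipeline(
        base_notification_pipeline.BaseNotificationPipeline):
    """Example pipeline that just logs the violations it was given."""

    def _compose(self, **kwargs):
        # Render the violations dict for this resource as a plain-text body.
        return '\n'.join(
            '%s: %s' % (key, value)
            for key, value in self.violations.items())

    def _send(self, **kwargs):
        # A real pipeline would e-mail or post this; here it is only logged.
        LOGGER.info('Violations for %s at %s:\n%s', self.resource,
                    self.cycle_timestamp, kwargs.get('message', ''))

    def run(self):
        self._send(message=self._compose())
```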
#### File: security/scanner/scanner.py
```python
import sys
import gflags as flags
from google.apputils import app
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access import errors as db_errors
from google.cloud.security.common.util import file_loader
from google.cloud.security.common.util import log_util
from google.cloud.security.scanner import scanner_builder
# Setup flags
FLAGS = flags.FLAGS
# Format: flags.DEFINE_<type>(flag_name, default_value, help_text)
# Example:
# https://github.com/google/python-gflags/blob/master/examples/validator.py
# Hack to make the test pass due to duplicate flag error here
# and inventory_loader.
# TODO: Find a way to remove this try/except, possibly dividing the tests
# into different test suites.
try:
flags.DEFINE_string(
'forseti_config',
'/home/ubuntu/forseti-security/configs/forseti_conf.yaml',
'Fully qualified path and filename of the Forseti config file.')
except flags.DuplicateFlagError:
pass
LOGGER = log_util.get_logger(__name__)
SCANNER_OUTPUT_CSV_FMT = 'scanner_output.{}.csv'
OUTPUT_TIMESTAMP_FMT = '%Y%m%dT%H%M%SZ'
def _get_timestamp(global_configs, statuses=('SUCCESS', 'PARTIAL_SUCCESS')):
"""Get latest snapshot timestamp.
Args:
global_configs (dict): Global configurations.
statuses (tuple): The snapshot statuses to search for latest timestamp.
Returns:
str: The latest snapshot timestamp.
"""
latest_timestamp = None
try:
latest_timestamp = (
dao.Dao(global_configs).get_latest_snapshot_timestamp(statuses))
except db_errors.MySQLError as err:
LOGGER.error('Error getting latest snapshot timestamp: %s', err)
return latest_timestamp
def main(_):
"""Run the scanners.
Args:
_ (list): argv, unused due to apputils.
"""
forseti_config = FLAGS.forseti_config
if forseti_config is None:
LOGGER.error('Path to Forseti Security config needs to be specified.')
sys.exit()
try:
configs = file_loader.read_and_parse_file(forseti_config)
except IOError:
LOGGER.error('Unable to open Forseti Security config file. '
'Please check your path and filename and try again.')
sys.exit()
global_configs = configs.get('global')
scanner_configs = configs.get('scanner')
log_util.set_logger_level_from_config(scanner_configs.get('loglevel'))
snapshot_timestamp = _get_timestamp(global_configs)
if not snapshot_timestamp:
LOGGER.warn('No snapshot timestamp found. Exiting.')
sys.exit()
runnable_scanners = scanner_builder.ScannerBuilder(
global_configs, scanner_configs, snapshot_timestamp).build()
# TODO: Make resilient by letting the batch continue to run even if one
# scanner errors out.
# TODO: fix the bare except
# pylint: disable=bare-except
for scanner in runnable_scanners:
try:
scanner.run()
except:
LOGGER.error('Error running scanner: %s',
scanner.__class__.__name__, exc_info=True)
# pylint: enable=bare-except
LOGGER.info('Scan complete!')
if __name__ == '__main__':
app.run()
```
#### File: scanner/scanners/instance_network_interface_scanner.py
```python
from google.cloud.security.common.util import log_util
from google.cloud.security.common.data_access import instance_dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.gcp_type.resource import ResourceType
from google.cloud.security.scanner.scanners import base_scanner
from google.cloud.security.scanner.audit import instance_network_interface_rules_engine
# pylint: enable=line-too-long
LOGGER = log_util.get_logger(__name__)
class InstanceNetworkInterfaceScanner(base_scanner.BaseScanner):
"""Pipeline to network enforcer from DAO."""
def __init__(self, global_configs, scanner_configs,
snapshot_timestamp, rules):
"""Initialization.
Args:
global_configs (dict): Global configurations.
scanner_configs (dict): Scanner configurations.
snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
rules (str): Fully-qualified path and filename of the rules file.
"""
super(InstanceNetworkInterfaceScanner, self).__init__(
global_configs,
scanner_configs,
snapshot_timestamp,
rules)
self.rules_engine = (
instance_network_interface_rules_engine
.InstanceNetworkInterfaceRulesEngine(
rules_file_path=self.rules,
snapshot_timestamp=self.snapshot_timestamp))
self.rules_engine.build_rule_book(self.global_configs)
@staticmethod
def _flatten_violations(violations):
"""Flatten RuleViolations into a dict for each RuleViolation member.
Args:
violations (list): The RuleViolations to flatten.
Yields:
dict: Iterator of RuleViolations as a dict per member.
"""
for violation in violations:
violation_data = {}
violation_data['project'] = violation.project
violation_data['network'] = violation.network
violation_data['ip'] = violation.ip
violation_data['raw_data'] = violation.raw_data
yield {
'resource_id': 'instance_network_interface',
'resource_type': violation.resource_type,
'rule_index': violation.rule_index,
'rule_name': violation.rule_name,
'violation_type': violation.violation_type,
'violation_data': violation_data
}
def _output_results(self, all_violations):
"""Output results.
Args:
all_violations (list): All violations
"""
resource_name = 'violations'
all_violations = self._flatten_violations(all_violations)
self._output_results_to_db(resource_name, all_violations)
# pylint: disable=invalid-name
def get_instance_networks_interfaces(self):
"""Get network info from a particular snapshot.
Returns:
list: A list of networks from a particular project
Raises:
MySQLError if a MySQL error occurs.
"""
instances = instance_dao.InstanceDao(self.global_configs).get_instances(
self.snapshot_timestamp)
return [instance.create_network_interfaces() for instance in instances]
@staticmethod
def parse_instance_network_instance(instance_object):
"""Create a list of network interface obj.
Args:
instance_object (instance_object): an instance object
Returns:
list: a list of network interface objects
"""
return instance_object.create_network_interfaces()
def _get_project_policies(self):
"""Get projects from data source.
Returns:
dict: project policies
"""
project_policies = {}
project_policies = (
project_dao
.ProjectDao(self.global_configs)
            .get_project_policies('projects', self.snapshot_timestamp))
return project_policies
@staticmethod
def _get_resource_count(project_policies, instance_network_interfaces):
"""Get resource count for org and project policies.
Args:
project_policies (dict): containing the projects
(gcp_type.project.Project) and their iam policies (dict).
instance_network_interfaces (list): of network_interface objects.
Returns:
dict: Resource count map
"""
resource_counts = {
ResourceType.PROJECT: len(project_policies),
ResourceType.INSTANCE: len(instance_network_interfaces),
}
return resource_counts
def _retrieve(self):
"""Run the data collection.
Return:
list: instance_networks_interfaces
"""
return self.get_instance_networks_interfaces()
def _find_violations(self, enforced_networks_data):
"""Find violations in the policies.
Args:
enforced_networks_data (list): Enforced networks data
to find violations in
Returns:
list: A list of violations
"""
all_violations = []
LOGGER.info('Finding enforced networks violations...')
for instance_network_interface in enforced_networks_data:
LOGGER.debug('%s', instance_network_interface)
violations = self.rules_engine.find_policy_violations(
instance_network_interface)
LOGGER.debug(violations)
all_violations.extend(violations)
return all_violations
def run(self):
"""Runs the data collection."""
instance_network_interface_data = self._retrieve()
all_violations = (
self._find_violations(instance_network_interface_data))
self._output_results(all_violations)
```
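A hedged sketch of running this scanner directly (normally `scanner_builder` instantiates it); the snapshot timestamp and rules path are illustrative, and the config dicts come from a parsed forseti_conf.yaml:
```python
from google.cloud.security.common.util import file_loader
from google.cloud.security.scanner.scanners import (
    instance_network_interface_scanner)

configs = file_loader.read_and_parse_file('configs/forseti_conf.yaml')
scanner = instance_network_interface_scanner.InstanceNetworkInterfaceScanner(
    global_configs=configs.get('global'),
    scanner_configs=configs.get('scanner'),
    snapshot_timestamp='20170901T120000Z',                 # made-up snapshot
    rules='rules/instance_network_interface_rules.yaml')   # illustrative path
scanner.run()  # _retrieve -> _find_violations -> _output_results
```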
#### File: gcp_setup/environment/gcloud_env.py
```python
from __future__ import print_function
import datetime
import json
import os
import re
import subprocess
import sys
import time
DEFAULT_BUCKET_FMT = 'gs://{}-data-{}'
DEFAULT_CLOUDSQL_INSTANCE_NAME = 'forseti-security'
GCLOUD_MIN_VERSION = (163, 0, 0)
GCLOUD_VERSION_REGEX = r'Google Cloud SDK (.*)'
GCLOUD_ALPHA_REGEX = r'alpha.*'
GSUITE_KEY_SCP_ATTEMPTS = 5
GSUITE_KEY_NAME = 'gsuite_key.json'
ORG_IAM_ROLES = [
'roles/browser',
'roles/compute.networkViewer',
'roles/iam.securityReviewer',
'roles/appengine.appViewer',
'roles/servicemanagement.quotaViewer',
'roles/cloudsql.viewer',
'roles/compute.securityAdmin',
]
PROJECT_IAM_ROLES = [
'roles/storage.objectViewer',
'roles/storage.objectCreator',
'roles/cloudsql.client',
'roles/logging.logWriter',
]
REQUIRED_APIS = [
{'name': 'Admin SDK',
'service': 'admin.googleapis.com'},
{'name': 'AppEngine Admin',
'service': 'appengine.googleapis.com'},
{'name': 'Cloud Resource Manager',
'service': 'cloudresourcemanager.googleapis.com'},
{'name': 'Cloud SQL',
'service': 'sql-component.googleapis.com'},
{'name': 'Cloud SQL Admin',
'service': 'sqladmin.googleapis.com'},
{'name': 'Compute Engine',
'service': 'compute.googleapis.com'},
{'name': 'Deployment Manager',
'service': 'deploymentmanager.googleapis.com'},
{'name': 'IAM',
'service': 'iam.googleapis.com'},
]
SERVICE_ACCT_FMT = 'forseti-{}-reader-{}'
SERVICE_ACCT_EMAIL_FMT = '<EMAIL>'
ROOT_DIR_PATH = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__))))
def org_id_from_org_name(org_name):
"""Extract the organization id (number) from the organization name.
Args:
org_name (str): The name of the organization, formatted as
"organizations/${ORGANIZATION_ID}".
Returns:
str: just the organization_id.
"""
return org_name[len('organizations/'):]
# pylint: disable=no-self-use
# pylint: disable=too-many-instance-attributes
class ForsetiGcpSetup(object):
"""Setup the Forseti Security GCP components."""
def __init__(self, **kwargs):
"""Init.
Args:
kwargs (dict): The kwargs.
"""
self.timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
self.timeonly = self.timestamp[8:]
self.force_no_cloudshell = kwargs.get('no_cloudshell')
self.branch = kwargs.get('branch') or 'master'
self.is_devshell = False
self.authed_user = None
self.project_id = None
self.organization_id = None
self.gcp_service_account = SERVICE_ACCT_FMT.format(
'gcp', self.timeonly)
self.gsuite_service_account = SERVICE_ACCT_FMT.format(
'gsuite', self.timeonly)
self.gsuite_svc_acct_key_location = None
self.bucket_name = None
self.bucket_location = kwargs.get('gcs_location') or 'us-central1'
self.cloudsql_instance = '{}-{}'.format(
DEFAULT_CLOUDSQL_INSTANCE_NAME,
self.timestamp)
self.cloudsql_region = kwargs.get('cloudsql_region') or 'us-central1'
self.gce_zone = '{}-c'.format(self.cloudsql_region)
self.deployment_name = False
self.deploy_tpl_path = None
self.forseti_conf_path = None
self.skip_email = False
self.sendgrid_api_key = '""'
self.notification_sender_email = '""'
self.notification_recipient_email = '""'
def run_setup(self):
"""Run the setup steps."""
# Pre-flight checks.
self._print_banner('Pre-flight checks')
self.gcloud_info()
self.check_cloudshell()
self.get_authed_user()
self.get_project()
self.get_organization()
self.check_billing_enabled()
self.has_permissions()
self.enable_apis()
# Generate names and config.
self._print_banner('Generate configs')
self.generate_bucket_name()
self.generate_deployment_templates()
self.generate_forseti_conf()
# Actual deployment.
# 1. Create deployment.
# 2. If fails, continue to next step.
# 3. Otherwise, copy configs (forseti_conf.yaml, rules) to bucket.
# 4. Grant service account roles and create and download
# G Suite service account key.
# 5. Poll the Forseti VM until it responds, then scp the key.
return_code = self.create_deployment()
if not return_code:
self.copy_config_to_bucket()
self.grant_gcp_svc_acct_roles()
self.download_gsuite_svc_acct_key()
self.copy_gsuite_key()
self.post_install_instructions(deploy_success=(not return_code))
@staticmethod
def _print_banner(text):
"""Print a banner.
Args:
text (str): Text to put in the banner.
"""
print('')
print('+-------------------------------------------------------')
print('| %s' % text)
print('+-------------------------------------------------------')
print('')
@staticmethod
def _run_command(cmd_args):
"""Wrapper to run a command in subprocess.
Args:
cmd_args (str): The list of command arguments.
Returns:
int: The return code. 0 is "ok", anything else is "error".
str: Output, if command was successful.
err: Error output, if there was an error.
"""
proc = subprocess.Popen(cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
return proc.returncode, out, err
def check_proper_gcloud(self):
"""Check gcloud version and presence of alpha components."""
return_code, out, err = self._run_command(
['gcloud', '--version'])
version_regex = re.compile(GCLOUD_VERSION_REGEX)
alpha_regex = re.compile(GCLOUD_ALPHA_REGEX)
version = None
alpha_match = None
if return_code:
print('Error trying to determine your gcloud version:')
print(err)
sys.exit(1)
else:
for line in out.split('\n'):
version_match = version_regex.match(line)
if version_match:
version = tuple(
[int(i) for i in version_match.group(1).split('.')])
continue
alpha_match = alpha_regex.match(line)
if alpha_match:
break
print('Current gcloud version: {}'.format(version))
print('Has alpha components? {}'.format(alpha_match is not None))
if version < GCLOUD_MIN_VERSION or not alpha_match:
print('You need the following gcloud setup:\n\n'
'gcloud version >= {}\n'
'gcloud alpha components\n\n'
'To install gcloud alpha components: '
'gcloud components install alpha\n\n'
'To update gcloud: gcloud components update\n'.
format('.'.join(
[str(i) for i in GCLOUD_MIN_VERSION])))
sys.exit(1)
def gcloud_info(self):
"""Read gcloud info, and check if running in Cloud Shell."""
# Read gcloud info
return_code, out, err = self._run_command(
['gcloud', 'info', '--format=json'])
if return_code:
print(err)
sys.exit(1)
else:
try:
gcloud_info = json.loads(out)
config = gcloud_info.get('config', {})
self.project_id = config.get('project')
self.authed_user = config.get('account')
props = config.get('properties', {})
metrics = props.get('metrics', {})
self.is_devshell = metrics.get('environment') == 'devshell'
print('Got gcloud info')
except ValueError as verr:
print(verr)
sys.exit(1)
def check_cloudshell(self):
"""Check whether using Cloud Shell or bypassing Cloud Shell."""
if not self.force_no_cloudshell:
if not self.is_devshell:
print('Forseti highly recommends running this setup within '
'Cloud Shell. If you would like to run the setup '
'outside Cloud Shell, please be sure to do the '
'following:\n\n'
'1) Create a project.\n'
'2) Enable billing for the project.\n'
'3) Install gcloud and authenticate your account using '
'"gcloud auth login".\n'
'4) Set your project using '
'"gcloud config project set <PROJECT_ID>".\n'
'5) Run this setup again, with the --no-cloudshell flag, '
'i.e.\n\n python setup_forseti.py --no-cloudshell\n')
sys.exit(1)
else:
print('Using Cloud Shell, continuing...')
else:
print('Bypass Cloud Shell check, continuing...')
def get_authed_user(self):
"""Get the current authed user."""
if not self.authed_user:
print('Error getting authed user. You may need to run '
'"gcloud auth login". Exiting.')
sys.exit(1)
print('You are: {}'.format(self.authed_user))
def get_project(self):
"""Get the project."""
if not self.project_id:
print('You need to have an active project! Exiting.')
sys.exit(1)
print('Project id: %s' % self.project_id)
def check_billing_enabled(self):
"""Check if billing is enabled."""
return_code, out, err = self._run_command(
['gcloud', 'alpha', 'billing', 'projects', 'describe',
self.project_id, '--format=json'])
if return_code:
print(err)
self._billing_not_enabled()
try:
billing_info = json.loads(out)
if billing_info.get('billingEnabled'):
print('Billing IS enabled.')
else:
self._billing_not_enabled()
except ValueError:
self._billing_not_enabled()
def _billing_not_enabled(self):
"""Print message and exit."""
print('\nIt seems that billing is not enabled for your project. '
'You can check whether billing has been enabled in the '
'Cloud Platform Console:\n\n'
' https://console.cloud.google.com/billing/linkedaccount?'
'project={}&organizationId={}\n\n'
'Once you have enabled billing, re-run this setup.\n'.format(
self.project_id, self.organization_id))
sys.exit(1)
def get_organization(self):
"""Infer the organization from the project's parent."""
return_code, out, err = self._run_command(
['gcloud', 'projects', 'describe',
self.project_id, '--format=json'])
if return_code:
print(err)
print('Error trying to find current organization from '
'project! Exiting.')
sys.exit(1)
try:
project = json.loads(out)
project_parent = project.get('parent')
if not project_parent:
self._no_organization()
parent_type = project_parent['type']
parent_id = project_parent['id']
except ValueError:
print('Error retrieving organization id')
self._no_organization()
if parent_type == 'folder':
self._find_org_from_folder(parent_id)
elif parent_type == 'organization':
self.organization_id = parent_id
else:
self._no_organization()
if self.organization_id:
print('Organization id: %s' % self.organization_id)
def _no_organization(self):
"""No organization, so print a message and exit."""
print('You need to have an organization set up to use Forseti. '
'Refer to the following documentation for more information.\n\n'
'https://cloud.google.com/resource-manager/docs/'
'creating-managing-organization')
sys.exit(1)
def _find_org_from_folder(self, folder_id):
"""Find the organization from some folder.
Args:
folder_id (str): The folder id, just a number.
"""
parent_type = 'folders'
parent_id = folder_id
while parent_type != 'organizations':
return_code, out, err = self._run_command(
['gcloud', 'alpha', 'resource-manager', 'folders',
'describe', parent_id, '--format=json'])
if return_code:
print(err)
self._no_organization()
try:
folder = json.loads(out)
parent_type, parent_id = folder['parent'].split('/')
print('Check parent: %s' % folder['parent'])
except ValueError as verr:
print(verr)
self._no_organization()
self.organization_id = parent_id
def has_permissions(self):
"""Check if current user is an org admin and project owner.
User must be an org admin in order to assign a service account roles
on the organization IAM policy.
"""
self._print_banner('Checking permissions')
if self._is_org_admin() and self._can_modify_project_iam():
print('You have the necessary roles to grant roles that Forseti '
'needs. Continuing...')
else:
print('You do not have the necessary roles to grant roles that '
'Forseti needs. Please have someone who is an Org Admin '
'and either Project Editor or Project Owner for this project '
'to run this setup. Exiting.')
sys.exit(1)
def _is_org_admin(self):
"""Check if current user is an org admin.
Returns:
bool: Whether current user is Org Admin.
"""
is_org_admin = self._has_roles(
'organizations',
self.organization_id,
['roles/resourcemanager.organizationAdmin'])
print('%s is Org Admin? %s' % (self.authed_user, is_org_admin))
return is_org_admin
def _can_modify_project_iam(self):
"""Check whether user can modify the current project's IAM policy.
To make it simple, check that user is either Project Editor or Owner.
Returns:
bool: If user can modify a project.
"""
can_modify_project = self._has_roles(
'projects',
self.project_id,
['roles/editor', 'roles/owner'])
print('%s is either Project Editor or Owner? %s' %
(self.authed_user, can_modify_project))
return can_modify_project
def _has_roles(self, resource_type, resource_id, roles):
"""Check if user has one or more roles in a resource.
Args:
resource_type (str): The resource type.
resource_id (str): The resource id.
roles (list): The roles to check user's membership in.
Returns:
bool: True if has roles, otherwise False.
"""
has_roles = False
return_code, out, err = self._run_command(
['gcloud', resource_type, 'get-iam-policy',
resource_id, '--format=json'])
if return_code:
print(err)
else:
try:
# Search resource's policy bindings for:
# 1) Members who have certain roles.
# 2) Whether the current authed user is in the members list.
iam_policy = json.loads(out)
role_members = []
for binding in iam_policy.get('bindings', []):
if binding['role'] in roles:
role_members.extend(binding['members'])
for member in role_members:
if member.find(self.authed_user) > -1:
has_roles = True
break
except ValueError as verr:
print(verr)
print('Error reading output of %s.getIamPolicy().' %
resource_type)
return has_roles
def enable_apis(self):
"""Enable necessary APIs for Forseti Security.
Technically, this could be done in Deployment Manager, but if you
delete the deployment, you'll disable the APIs. This could cause errors
if there are resources still in use (e.g. Compute Engine), and then
your deployment won't be cleanly deleted.
"""
self._print_banner('Enabling required APIs')
for api in REQUIRED_APIS:
print('Enabling the {} API...'.format(api['name']))
return_code, _, err = self._run_command(
['gcloud', 'alpha', 'service-management',
'enable', api['service']])
if return_code:
print(err)
else:
print('Done.')
def _full_service_acct_email(self, account_id):
"""Generate the full service account email.
Args:
account_id (str): The service account id, i.e. the part before
the "@".
Returns:
str: The full service account email.
"""
return SERVICE_ACCT_EMAIL_FMT.format(account_id, self.project_id)
def download_gsuite_svc_acct_key(self):
"""Download the service account key."""
print('\nDownloading GSuite service account key for %s'
% self.gsuite_service_account)
proc = subprocess.Popen(
['gcloud', 'iam', 'service-accounts', 'keys',
'create', GSUITE_KEY_NAME,
'--iam-account=%s' % (self._full_service_acct_email(
self.gsuite_service_account))],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate()
if proc.returncode:
print(err)
self.gsuite_svc_acct_key_location = os.path.abspath(
os.path.join(
os.getcwd(),
GSUITE_KEY_NAME))
def grant_gcp_svc_acct_roles(self):
"""Grant the following IAM roles to GCP service account.
Org:
AppEngine App Viewer
Cloud SQL Viewer
Network Viewer
Project Browser
Security Reviewer
Service Management Quota Viewer
Security Admin
Project:
Cloud SQL Client
Storage Object Viewer
Storage Object Creator
"""
self._print_banner('Assigning roles to the GCP service account')
if not self.organization_id:
self._no_organization()
roles = {
'organizations': ORG_IAM_ROLES,
'projects': PROJECT_IAM_ROLES
}
        for (resource_type, resource_roles) in roles.iteritems():
if resource_type == 'organizations':
resource_id = self.organization_id
else:
resource_id = self.project_id
            for role in resource_roles:
iam_role_cmd = [
'gcloud',
resource_type,
'add-iam-policy-binding',
resource_id,
'--member=serviceAccount:%s' % (
self._full_service_acct_email(
self.gcp_service_account)),
'--role=%s' % role,
]
print('Assigning %s on %s...' % (role, resource_id))
proc = subprocess.Popen(iam_role_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate()
if proc.returncode:
print(err)
else:
print('Done.')
def generate_bucket_name(self):
"""Generate bucket name for the rules."""
self.bucket_name = DEFAULT_BUCKET_FMT.format(
self.project_id, self.timeonly)
def generate_deployment_templates(self):
"""Generate deployment templates."""
print('Generate Deployment Manager templates...')
# Deployment template in file
deploy_tpl_path = os.path.abspath(
os.path.join(
ROOT_DIR_PATH,
'deployment-templates',
'deploy-forseti.yaml.in'))
out_tpl_path = os.path.abspath(
os.path.join(
ROOT_DIR_PATH,
'deployment-templates',
'deploy-forseti-{}.yaml'.format(self.timestamp)))
deploy_values = {
'CLOUDSQL_REGION': self.cloudsql_region,
'CLOUDSQL_INSTANCE_NAME': self.cloudsql_instance,
'SCANNER_BUCKET': self.bucket_name[len('gs://'):],
'BUCKET_LOCATION': self.bucket_location,
'SERVICE_ACCT_GCP_READER': self.gcp_service_account,
'SERVICE_ACCT_GSUITE_READER': self.gsuite_service_account,
'BRANCH_OR_RELEASE': 'branch-name: "{}"'.format(self.branch),
}
# Create Deployment template with values filled in.
with open(deploy_tpl_path, 'r') as in_tmpl:
tmpl_contents = in_tmpl.read()
out_contents = tmpl_contents.format(**deploy_values)
with open(out_tpl_path, 'w') as out_tmpl:
out_tmpl.write(out_contents)
self.deploy_tpl_path = out_tpl_path
print('\nCreated a deployment template:\n %s\n' %
self.deploy_tpl_path)
def generate_forseti_conf(self):
"""Generate Forseti conf file."""
# Create a forseti_conf_dm.yaml config file with values filled in.
# forseti_conf.yaml in file
print('Generate forseti_conf_dm.yaml...\n')
forseti_conf_in = os.path.abspath(
os.path.join(
ROOT_DIR_PATH, 'configs', 'forseti_conf.yaml.in'))
forseti_conf_gen = os.path.abspath(
os.path.join(
ROOT_DIR_PATH, 'configs', 'forseti_conf_dm.yaml'))
# Ask for SendGrid API Key
print('Forseti can send email notifications through SendGrid '
'via an API key. '
'This step is optional and can be configured later.')
sendgrid_api_key = raw_input(
'What is your SendGrid API key? (press [enter] to skip) ').strip()
if sendgrid_api_key:
self.sendgrid_api_key = sendgrid_api_key
# Ask for notification sender email
self.notification_sender_email = '<EMAIL>'
# Ask for notification recipient email
notification_recipient_email = raw_input(
'At what email address do you want to receive notifications? '
'(press [enter] to skip) ').strip()
if notification_recipient_email:
self.notification_recipient_email = notification_recipient_email
else:
self.skip_email = True
conf_values = {
'EMAIL_RECIPIENT': self.notification_recipient_email,
'EMAIL_SENDER': self.notification_sender_email,
'SENDGRID_API_KEY': self.sendgrid_api_key,
'SCANNER_BUCKET': self.bucket_name[len('gs://'):],
'GROUPS_SERVICE_ACCOUNT_KEY_FILE':
'/home/ubuntu/{}'.format(GSUITE_KEY_NAME),
'DOMAIN_SUPER_ADMIN_EMAIL': '""',
'ENABLE_GROUP_SCANNER': 'true',
}
with open(forseti_conf_in, 'r') as in_tmpl:
tmpl_contents = in_tmpl.read()
out_contents = tmpl_contents.format(**conf_values)
with open(forseti_conf_gen, 'w') as out_tmpl:
out_tmpl.write(out_contents)
self.forseti_conf_path = forseti_conf_gen
print('\nCreated forseti_conf_dm.yaml config file:\n %s\n' %
self.forseti_conf_path)
def create_deployment(self):
"""Create the GCP deployment.
Returns:
int: The return code value of running `gcloud` command to create
the deployment.
"""
self._print_banner('Create Forseti deployment')
print ('This may take a few minutes.')
self.deployment_name = 'forseti-security-{}'.format(self.timestamp)
print('Deployment name: %s' % self.deployment_name)
print('Deployment Manager Dashboard: '
'https://console.cloud.google.com/deployments/details/'
'{}?project={}&organizationId={}\n'.format(
self.deployment_name, self.project_id, self.organization_id))
return_code, out, err = self._run_command(
['gcloud', 'deployment-manager', 'deployments', 'create',
self.deployment_name, '--config={}'.format(self.deploy_tpl_path)])
time.sleep(2)
if return_code:
print(err)
else:
print(out)
print('\nCreated deployment successfully.')
return return_code
def copy_config_to_bucket(self):
"""Copy the config to the created bucket.
Returns:
bool: True if copy config succeeded, otherwise False.
bool: True if copy rules succeeded, otherwise False.
"""
copy_config_ok = False
copy_rules_ok = False
self._print_banner('Copy configs to bucket')
print('Copy forseti_conf.yaml to {}'.format(self.bucket_name))
return_code, out, err = self._run_command(
['gsutil', 'cp', self.forseti_conf_path,
'{}/configs/forseti_conf.yaml'.format(self.bucket_name)])
if return_code:
print(err)
else:
print(out)
copy_config_ok = True
rules_dir = os.path.abspath(
os.path.join(
ROOT_DIR_PATH, 'rules'))
print('Copy rules to {}'.format(self.bucket_name))
return_code, out, err = self._run_command(
['gsutil', 'cp', '-r', rules_dir, self.bucket_name])
if return_code:
print(err)
else:
print(out)
copy_rules_ok = True
return copy_config_ok, copy_rules_ok
def copy_gsuite_key(self):
"""scp the G Suite key to the VM after deployment.
Use 2**<attempt #> seconds of sleep() between attempts.
"""
self._print_banner('Copy G Suite key to Forseti VM')
print('scp-ing your gsuite_key.json to your Forseti GCE instance...')
for i in range(1, GSUITE_KEY_SCP_ATTEMPTS+1):
print('Attempt {} of {} ...'.format(i, GSUITE_KEY_SCP_ATTEMPTS))
return_code, out, err = self._run_command(
['gcloud',
'compute',
'scp',
'--zone={}'.format(self.gce_zone),
'--quiet',
self.gsuite_svc_acct_key_location,
'ubuntu@{}-vm:/home/ubuntu/{}'.format(
self.deployment_name, GSUITE_KEY_NAME),
])
if return_code:
print(err)
if i+1 < GSUITE_KEY_SCP_ATTEMPTS:
sleep_time = 2**(i+1)
print('Trying again in %s seconds.' % (sleep_time))
time.sleep(sleep_time)
else:
print(out)
print('Done')
break
def post_install_instructions(self, deploy_success):
"""Show post-install instructions.
Print link for deployment manager dashboard.
Print link to go to GSuite service account and enable DWD.
Args:
deploy_success (bool): Whether deployment was successful.
"""
self._print_banner('Post-setup instructions')
print('Your generated Deployment Manager template can be '
'found here:\n\n {}\n\n'.format(self.deploy_tpl_path))
if not deploy_success:
print ('Your deployment had some issues. Please review the error '
'messages. If you need help, please either file an issue '
'on our Github Issues or email '
'<EMAIL>.\n')
print('You can see the details of your deployment in the '
'Cloud Console:\n\n '
'https://console.cloud.google.com/deployments/details/'
'{}?project={}&organizationId={}\n\n'.format(
self.deployment_name, self.project_id, self.organization_id))
if self.skip_email:
print('If you would like to enable email notifications via '
'SendGrid, please refer to:\n\n '
'http://forsetisecurity.org/docs/howto/configure/'
'email-notification\n\n')
print('Finalize your installation by enabling G Suite Groups '
'collection in Forseti:\n\n'
' '
'http://forsetisecurity.org/docs/howto/configure/'
'gsuite-group-collection\n\n')
print('A default configuration file '
'(configs/forseti_conf_dm.yaml) '
'has been generated. If you wish to change your '
'Forseti configuration or rules, e.g. enabling G Suite '
'Groups collection, copy the changed files '
'from the root directory of forseti-security/ to '
'your Forseti bucket:\n\n'
' gsutil cp configs/forseti_conf_dm.yaml '
'{}/configs/forseti_conf.yaml\n\n'
' gsutil cp -r rules {}\n\n'.format(
self.bucket_name,
self.bucket_name))
```
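A hedged sketch of driving the setup class above from Python (the wrapper script normally does this); the import path is assumed from the file layout, and the kwargs mirror the ones read in `__init__` with illustrative values:
```python
from gcp_setup.environment.gcloud_env import ForsetiGcpSetup  # path assumed

setup = ForsetiGcpSetup(
    no_cloudshell=True,             # bypass the Cloud Shell check
    branch='master',                # branch to deploy
    gcs_location='us-central1',     # bucket location
    cloudsql_region='us-central1')  # Cloud SQL region (GCE zone derives from it)
setup.run_setup()
```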
#### File: iam/api_tests/playground_test.py
```python
import unittest
from google.cloud.security.iam.explain.service import GrpcExplainerFactory
from google.cloud.security.iam.playground.service import GrpcPlaygrounderFactory
from google.cloud.security.iam.dao import ModelManager
from tests.iam.api_tests.api_tester import ApiTestRunner, create_test_engine, cleanup
from tests.unittest_utils import ForsetiTestCase
class TestServiceConfig(object):
"""ServiceConfig Stub."""
def __init__(self):
engine = create_test_engine()
self.model_manager = ModelManager(engine)
def run_in_background(self, function):
"""Stub."""
function()
return self
def create_tester():
"""Create API test runner."""
return ApiTestRunner(
TestServiceConfig(),
[GrpcExplainerFactory,
GrpcPlaygrounderFactory])
class ApiTest(ForsetiTestCase):
"""Api Test."""
def setUp(self):
self.setup = create_tester()
def has_no_models(self, client):
"""Returns true iff the server has no model."""
return self.has_n_models(client, 0)
def has_n_models(self, client, number):
"""Returns true iff the server has n models."""
return len(client.list_models().models) == number
def test_create_empty_model_and_delete(self):
"""Test: Create empty model, then delete again."""
def test(client):
"""API test callback."""
self.assertEquals(
len(client.list_models().models),
0,
'Expect no previous models')
model1 = client.new_model("EMPTY", name='model1').model.handle
model2 = client.new_model("EMPTY", name='model2').model.handle
self.assertTrue(self.has_n_models(client, 2))
client.delete_model(model1)
self.assertTrue(self.has_n_models(client, 1))
client.delete_model(model2)
self.assertTrue(self.has_no_models(client))
self.setup.run(test)
def test_create_empty_model(self):
"""Test: create and empty model."""
@cleanup
def test(client):
"""API test callback."""
self.assertEqual(
[m.handle for m in client.list_models().models],
[],
'Expect no previous models')
client.new_model('EMPTY', 'test_model')
self.assertTrue(
self.has_n_models(client, 1),
'One model must be created')
self.setup.run(test)
def test_create_and_list_members(self):
"""Test: create and list members."""
@cleanup
def test(client):
"""API test callback."""
reply = client.new_model('EMPTY', name='test1')
client.switch_model(reply.model.handle)
self.assertEqual(
len(client.playground.list_members("").member_names),
0,
'Expect no members in the empty model')
client.playground.add_member('user/user1')
self.assertEqual(
len(client.playground.list_members("").member_names),
1,
                'Expect one member in the empty model')
client.playground.add_member('group/group1')
self.assertEqual(
len(client.playground.list_members("").member_names),
2,
'Expect two members in the empty model')
client.playground.add_member('user/user2', ['group/group1'])
self.assertEqual(
len(client.playground.list_members("").member_names),
3,
'Expect three members in the empty model')
self.assertEqual(
len(client.playground.list_members("user").member_names),
2)
self.assertEqual(
len(client.playground.list_members("group").member_names),
1)
client.playground.del_member('user/user1')
self.assertEqual(
len(client.playground.list_members("user").member_names),
1)
self.assertEqual(
len(client.playground.list_members("group").member_names),
1)
client.playground.del_member('group/group1')
client.playground.del_member('user/user2')
self.assertEqual(
len(client.playground.list_members("").member_names),
0,
'Expect no members in the empty model')
self.setup.run(test)
if __name__ == '__main__':
unittest.main()
```
#### File: scanner/audit/instance_network_interface_engine_test.py
```python
import unittest
import mock
import yaml
from google.apputils import basetest
from google.cloud.security.common.gcp_type import instance
from google.cloud.security.common.util import file_loader
from google.cloud.security.scanner.audit import instance_network_interface_rules_engine as ini
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
from tests.scanner.test_data import fake_instance_scanner_data
def create_list_of_instence_network_interface_obj_from_data():
fake_instance_scanner_list = []
for data in fake_instance_scanner_data.INSTANCE_DATA:
fake_instance_scanner_list.append(
instance.Instance(**data).create_network_interfaces())
return fake_instance_scanner_list
# TODO: Define more tests
class InstanceNetworkInterfaceTest(basetest.TestCase):
"""Tests for the InstanceNetworkInterface."""
def setUp(self):
"""Set up."""
self.rule_index = 0
self.ini = ini
self.ini.LOGGER = mock.MagicMock()
# patch the organization resource relation dao
self.patcher = mock.patch(
'google.cloud.security.common.' +
'data_access.instance_dao.InstanceDao')
self.mock_instance_dao = self.patcher.start()
self.mock_instance_dao.return_value = None
def tearDown(self):
self.patcher.stop()
def test_build_rule_book_from_local_yaml_file_works(self):
"""Test that a RuleBook is built correctly
with a yaml file."""
rules_local_path = get_datafile_path(
__file__,
'instance_network_interface_test_rules_1.yaml')
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book()
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
@mock.patch.object(file_loader,
'_read_file_from_gcs', autospec=True)
def test_build_rule_book_from_gcs_works(self, mock_load_rules_from_gcs):
"""Test that a RuleBook is built correctly with a mocked gcs file.
Setup:
* Create a mocked GCS object from a test yaml file.
* Get the yaml file content.
Expected results:
There are 2 resources that have rules, in the rule book.
"""
bucket_name = 'bucket-name'
rules_path = 'input/instance_network_interface_test_rules_1.yaml'
full_rules_path = 'gs://{}/{}'.format(bucket_name, rules_path)
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(
rules_file_path=full_rules_path)
# Read in the rules file
file_content = None
with open(
get_datafile_path(__file__,
'instance_network_interface_test_rules_1.yaml'),
'r') as rules_local_file:
try:
file_content = yaml.safe_load(rules_local_file)
except yaml.YAMLError:
raise
mock_load_rules_from_gcs.return_value = file_content
rules_engine.build_rule_book()
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
def test_networks_in_whitelist_and_allowed_projects(self):
"""Test to make sure violations are created"""
rules_local_path = get_datafile_path(
__file__,
'instance_network_interface_test_rules_2.yaml')
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(rules_local_path)
rules_engine.build_rule_book()
fake_ini_data = (
create_list_of_instence_network_interface_obj_from_data())
actual_violations_list = []
for instance_network_interface in fake_ini_data:
violation = rules_engine.find_policy_violations(
instance_network_interface)
actual_violations_list.extend(violation)
self.assertEqual([], actual_violations_list)
def test_network_in_allowed_project_but_not_whitelist_with_extern_ip(self):
"""Test to make sure violations are created where the project
is allowed but not the network is not and there is an external ip"""
rules_local_path = get_datafile_path(
__file__,
'instance_network_interface_test_rules_3.yaml')
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(
rules_local_path)
rules_engine.build_rule_book()
fake_ini_data = (
create_list_of_instence_network_interface_obj_from_data())
actual_violations_list = []
for instance_network_interface in fake_ini_data:
violation = rules_engine.find_policy_violations(
instance_network_interface)
actual_violations_list.extend(violation)
self.assertEqual(1, len(actual_violations_list))
self.assertEqual('project-1', actual_violations_list[0].project)
self.assertEqual('network-1', actual_violations_list[0].network)
def test_network_in_allowed_project_with_no_external_ip(self):
"""Test to make sure violations are not created where the project
is allowed but not the network is not and there is not an
external ip"""
rules_local_path = get_datafile_path(
__file__,
'instance_network_interface_test_rules_4.yaml')
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(rules_local_path)
rules_engine.build_rule_book()
fake_ini_data = (
create_list_of_instence_network_interface_obj_from_data())
actual_violations_list = []
for instance_network_interface in fake_ini_data:
violation = rules_engine.find_policy_violations(
instance_network_interface)
actual_violations_list.extend(violation)
self.assertEqual([], actual_violations_list)
def test_network_not_in_allowed_project(self):
"""Test to make sure violations are where the project
is not allowed"""
rules_local_path = get_datafile_path(
__file__,
'instance_network_interface_test_rules_5.yaml')
rules_engine = ini.InstanceNetworkInterfaceRulesEngine(rules_local_path)
rules_engine.build_rule_book()
fake_ini_data = (
create_list_of_instence_network_interface_obj_from_data())
actual_violations_list = []
for instance_network_interface in fake_ini_data:
violation = rules_engine.find_policy_violations(
instance_network_interface)
actual_violations_list.extend(violation)
self.assertEqual(1, len(actual_violations_list))
self.assertEqual('project-3', actual_violations_list[0].project)
self.assertEqual('network-3', actual_violations_list[0].network)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joshivaibhav/Net2TextQuery",
"score": 3
} |
#### File: joshivaibhav/Net2TextQuery/gml2csv.py
```python
import json
import pandas as pd
from pandas.io.json import json_normalize
def process_gml(file):
"""
Parser which process the GML file and converts it to JSON objects
:param file: gml file
:return: JSON converted from GML
"""
lines = []
for line in file.split('\n'):
line = line.strip()
lines.append(line)
file = "\n".join(lines)
file = file.replace('\n\n', '\n')
file = file.replace(']\n', '},\n')
file = file.replace('[\n', '{')
file = file.replace('\n{', '\n {')
for s in ['id', 'label', 'source', 'target', 'value','Country','Longitude','Internal','Latitude','LinkLabel','type']:
file = file.replace(s, '"%s" :' % s)
file = file.replace('\n"', ', "')
file = file.replace('\n}', '}')
return file.strip('\n')
if __name__ == '__main__':
# replace the sample file here with your gml file
graphfile = "sample.gml"
with open(graphfile, 'r') as f:
file = f.read()
file = ''.join(file.split('node')[1:])
nodes = file.split('edge')[0]
edges = ''.join(file.split('edge')[1:]).strip().rstrip(']')
nodes = process_gml(nodes)
edges = process_gml(edges)
edges = edges.rstrip(",")
nodes = nodes.rstrip(",")
converted_json = "{\"node\":[" + nodes + "],\n" + "\"edges\":[" + edges + "]}"
data = json.loads(converted_json)
# converting the node data and edges into dataframes
df1 = pd.DataFrame.from_dict(json_normalize(data['node']), orient='columns')
df2 = pd.DataFrame.from_dict(json_normalize(data['edges']),orient='columns')
# for the column headers
final_columns = []
# populate with column names from both data frames
for col_name in df1.columns:
final_columns.append(col_name)
for col_name in df2.columns:
final_columns.append(col_name)
# combine both dataframes
final = pd.concat([df1,df2], ignore_index=True, axis=1)
# final converted csv
final.to_csv("final.csv", index=False,header=final_columns)
``` |
{
"source": "joshjchayes/PriorGen",
"score": 3
} |
#### File: PriorGen/priorgen/accuracy_utils.py
```python
from scipy.optimize import fsolve
from scipy.spatial import distance
import numpy as np
from ._scaler import Scaler
class RetrievalMetricCalculator:
def __init__(self, parameter_limits):
'''
The RetrievalMetricCalculator generates metrics which can be used to
quantify quality of retrieval. The two main metrics are the accuracy
metric, which is a dimensionless distance between two points
(assumed to be true values and retrieved values), and the precision
metric M2, which is defined as the number of standard deviations
away from the true value a retrieved value is. For more information
see Hayes et. al. (2019).
Parameters
----------
parameter_limits : array_like, shape (n_variables, 2)
The physical values of the limits on each parameter, provided in
(lower, upper) pairs.
'''
# set up the scaler
self.scaler = Scaler(parameter_limits)
self.n_variables = len(parameter_limits)
def calculate_accuracy_metric(self, true_parameters, retrieved_parameters):
'''
Calculates the accuracy metric, defined as the Euclidean distance
between two points in unit-normalised physical parameter space.
Parameters
----------
true_parameters : array_like, shape (n_parameters, )
The accepted 'true' values of the parameters, provided in physical
space (i.e. with units)
retrieved_parameters : array_like, shape (n_parameters, )
The retrieved values of the parameters, provided in physical space
(i.e. with units)
Returns
-------
accuracy_metric : float
The Euclidean distance between the two given points
'''
dimensionless_true = self.scaler.point_to_dimensionless(true_parameters)
dimensionless_retrieved = self.scaler.point_to_dimensionless(retrieved_parameters)
return distance.euclidean(dimensionless_true, dimensionless_retrieved)
def calculate_precision_metric(self, true_parameters, retrieved_parameters,
uncertainty):
'''
Calculates the precision metric, which is defined as the accuracy
metric scaled by the 1 sigma error in the direction of the vector
between the true and retrieved parameters
Parameters
----------
true_parameters : array_like, shape (n_parameters, )
The accepted 'true' values of the parameters, provided in physical
space (i.e. with units)
retrieved_parameters : array_like, shape (n_parameters, )
The retrieved values of the parameters, provided in physical space
(i.e. with units)
uncertainty : array_like, shape (n_parameters, ) or (n_parameters, 2)
            The uncertainty associated with each retrieved parameter value.
            If 1D array is provided, assumes uniform upper and lower errors.
            If 2D array provided, assumes errors are provided as (lower, upper)
pairs.
Returns
-------
precision_metric : float
The precision metric associated with the retrieval results
sigma : float
The 1 sigma value in the direction of the vector between the true
and retrieved parameters.
'''
# Scale the points and errors
dimensionless_true = self.scaler.point_to_dimensionless(true_parameters)
dimensionless_retrieved = self.scaler.point_to_dimensionless(retrieved_parameters)
dimensionless_errors = self.scaler.errors_to_dimensionless(uncertainty)
        # Choose which error values to use based on the direction of the true value
# compared to the retrieved one. Note that we default to the upper
# error in the event that the retrieval is exact.
delta = dimensionless_true - dimensionless_retrieved
mask = np.vstack((delta < 0, delta >= 0)).T
# get the principal semi-axes which define the error ellipse
semiaxes = dimensionless_errors[mask]
# Find the intercept between the error ellipse and the line joining
# the true and retrieved position
intercept = _find_intercept(dimensionless_true, dimensionless_retrieved, semiaxes)
# The 1 sigma distance is the distance between this intercept and the
# retrieved parameter values (note dropping the scale factor from
# intercept)
sigma = distance.euclidean(dimensionless_retrieved, intercept[:-1])
# Calculate the precision metric
# Distance between points
precision_metric = distance.euclidean(dimensionless_true, dimensionless_retrieved)/sigma
return precision_metric, sigma
def calculate_metrics(self, true_parameters, retrieved_parameters,
uncertainty):
'''
Calculates the accuracy and precision metrics
'''
accuracy = self.calculate_accuracy_metric(true_parameters, retrieved_parameters)
precision, sigma = self.calculate_precision_metric(true_parameters, retrieved_parameters, uncertainty)
return accuracy, precision, sigma
def _intercept_eqn(p, true_pos, retr_pos, errors):
'''
Function to pass to fsolve to find the intercept between the
line between the retrieved and true position and the one sigma
error ellipsoid around the retrieved position
'''
A = p[-1] # Get the scalar
p = np.array(p[:-1]) # Get the variable coordinates
true_pos = np.asarray(true_pos)
retr_pos = np.asarray(retr_pos)
errors = np.asarray(errors)
diff = retr_pos - true_pos
line_results = p - true_pos - A*diff
ellipsoid_result = sum((p - retr_pos)**2 / errors**2) - 1
return tuple(line_results) + (ellipsoid_result, )
def _find_intercept(true_position, retrieved_position, errors):
'''
Finds the intercept between the line joining the true position
and the retrieved position in parameter space and the error ellipsoid
surrounding the retrieved position
Parameters
----------
true_position : array_like, shape (n_variables, )
The set of accepted 'true' values for the variables
retrieved_position : array_like, shape (n_variables, )
The set of values for the variables found through retrieval
'''
start_pos = np.array(tuple(true_position) + (0.3,))
return fsolve(_intercept_eqn, start_pos, args=(true_position, retrieved_position, errors))
```
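The calculator above can be exercised without a real retrieval run. The following is an illustrative sketch only: the limits, "true" values, retrieved values and uncertainties are made up, and it assumes the `priorgen` package (with its internal `Scaler`) is importable as laid out in this repository.
```python
from priorgen.accuracy_utils import RetrievalMetricCalculator

# Two hypothetical parameters with (lower, upper) physical limits
limits = [[0.0, 10.0], [0.0, 1.0]]
calc = RetrievalMetricCalculator(limits)

true_params = [5.0, 0.30]        # made-up ground truth
retrieved_params = [5.2, 0.28]   # made-up retrieval result
uncertainty = [0.3, 0.05]        # uniform 1-sigma error per parameter

accuracy, precision, sigma = calc.calculate_metrics(
    true_params, retrieved_params, uncertainty)
print(accuracy, precision, sigma)
```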
#### File: PriorGen/priorgen/classified_retriever.py
```python
from .classifier import Classifier
import dynesty
import numpy as np
import csv
class ClassifiedRetriever:
def __init__(self, training_parameters, training_observables, n_classes=50,
variance=0.999, n_components=None, n_bins=20, n_nuisance=0,
nuisance_limits=None):
'''
The ClassifiedRetriever is built to use a Classifier to run retrievals
using the informed priors generated by the Classifer
Parameters
----------
training_parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
training_observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
n_classes : int, optional
The number of different classes to use when training the classifier
Default is 50.
variance : float, optional
The fraction of explained variance to keep in the principal
components. Default is 0.999
n_components : int or None, optional
If provided, will override the `variance` kwarg and specify the
number of principal components to use when conducting PCA on
the observables. Default is None.
n_bins : int, optional
            The number of bins to split each marginalised parameter distribution
into. The more bins you have, the more detail you will have on the
shape of each class' prior. However, if you have too many, you will
encounter issues of undersampling. Default is 20.
n_nuisance : int, optional
The number of nuisance parameters to include in fitting. Can be
changed by calling set_nuisance_parameters. Default is 0.
nuisance_limits : None, or array_like, shape (n_nuisance, 2)
The limits for each nuisance parameter, provided as (lower, upper)
pairs. If None, defaults to (-1000, 1000) for each parameter.
'''
self.classifier = Classifier(training_parameters, training_observables,
n_classes, variance, n_components, n_bins)
self.set_nuisance_parameters(n_nuisance)
def run_dynesty(self, data_to_fit, lnprob, nlive=200, bound='multi',
sample='auto', maxiter=None, maxcall=None, dlogz=None,
filepath='output.csv', **dynesty_kwargs):
'''
Runs nested sampling retrieval through Dynesty using the Classifier
to inform priors
Parameters
----------
data_to_fit : array_like, shape (X,)
The data you want to fit. Required for classification purposes.
lnprob : function
A function which must be passed a set of parameters and returns
their ln likelihood. Signature should be `lnprob(params)` where
params is an array with shape (n_variables, ). Note that you will
need to have hard-coded the data and associated uncertainties into
the `lnprob` function.
nlive : int, optional
The number of live points to use in the nested sampling. Default is
200.
bound : str, optional
Method used to approximately bound the prior using the current set
of live points. Conditions the sampling methods used to propose new
live points. Choices are no bound ('none'), a single bounding
ellipsoid ('single'), multiple bounding ellipsoids ('multi'), balls
centered on each live point ('balls'), and cubes centered on each
live point ('cubes'). Default is 'multi'.
sample : str, optional
Method used to sample uniformly within the likelihood constraint,
conditioned on the provided bounds. Unique methods available are:
uniform sampling within the bounds('unif'), random walks with fixed
proposals ('rwalk'), random walks with variable (“staggering”)
proposals ('rstagger'), multivariate slice sampling along preferred
orientations ('slice'), “random” slice sampling along all
orientations ('rslice'), and “Hamiltonian” slices along random
trajectories ('hslice'). 'auto' selects the sampling method based
on the dimensionality of the problem (from ndim). When ndim < 10,
this defaults to 'unif'. When 10 <= ndim <= 20, this defaults to
'rwalk'. When ndim > 20, this defaults to 'hslice' if a gradient is
provided and 'slice' otherwise. 'rstagger' and 'rslice' are
provided as alternatives for 'rwalk' and 'slice', respectively.
Default is 'auto'.
maxiter : int or None, optional
The maximum number of iterations to run. If None, will run until
stopping criterion is met. Default is None.
maxcall : int or None, optional
If not None, sets the maximum number of calls to the likelihood
function. Default is None.
**dynesty_kwargs : optional
kwargs to be passed to the dynesty.NestedSampler() initialisation
Returns
-------
results : dict
The dynesty results dictionary, with the addition of the following
attributes:
weights - normalised weights for each sample
cov - the covariance matrix
uncertainties - the uncertainty on each fitted parameter,
calculated from the square root of the diagonal of the
covariance matrix.
'''
# First up, we need to define some variables for the Retriever
# Number of dimensions we are retrieving
n_dims = self.classifier.n_variables + self.n_nuisance
# Make the prior transform function
prior_transform = self.classifier.create_dynesty_prior_transform(
data_to_fit, self.n_nuisance, self.nuisance_limits)
# Set up and run the sampler here!!
sampler = dynesty.NestedSampler(lnprob, prior_transform,
n_dims, bound=bound, sample=sample,
update_interval=float(n_dims), nlive=nlive,
**dynesty_kwargs)
sampler.run_nested(maxiter=maxiter, maxcall=maxcall, dlogz=dlogz)
results = sampler.results
# Get some normalised weights
results.weights = np.exp(results.logwt - results.logwt.max()) / \
np.sum(np.exp(results.logwt - results.logwt.max()))
# Calculate a covariance matrix for these results to get uncertainties
cov = np.cov(results.samples, rowvar=False, aweights=results.weights)
# Get the uncertainties from the diagonal of the covariance matrix
diagonal = np.diag(cov)
uncertainties = np.sqrt(diagonal)
# Add the covariance matrix and uncertainties to the results object
results.cov = cov
results.uncertainties = uncertainties
self._print_best(results)
self._save_results(results, filepath)
return results
def _save_results(self, results, filepath):
'''
Saves the results to a file
'''
write_dict = []
best_results = results.samples[np.argmax(results.logl)]
for i in range(self.classifier.n_variables):
value = best_results[i]
unc = results.uncertainties[i]
write_dict.append({'Variable':i, 'Best value' : value,
'Uncertainty' : unc})
with open(filepath, 'w') as f:
columns = ['Variable', 'Best value', 'Uncertainty']
writer = csv.DictWriter(f, columns)
writer.writeheader()
writer.writerows(write_dict)
def _print_best(self, results):
'''
Prints the best results to terminal
Parameters
----------
results : dynesty.results.Results
The Dynesty results object, but must also have weights, cov and
uncertainties as entries.
'''
best_results = results.samples[np.argmax(results.logl)]
print('Best results:')
for i in range(self.classifier.n_variables):
value = round(best_results[i], 4)
unc = round(results.uncertainties[i], 4)
print('Variable {}: {}±{}'.format(i, value, unc))
def set_nuisance_parameters(self, n_nuisance, nuisance_limits=None):
'''
        Sets n nuisance parameters for fitting. The nuisance parameters must be
included in the lnprob function.
Parameters
----------
n_nuisance : int
The number of nuisance parameters
nuisance_limits : None, or array_like, shape (n_nuisance, 2)
The limits for each nuisance parameter, provided as (lower, upper)
pairs. If None, defaults to (-1000, 1000) for each parameter.
'''
if type(n_nuisance) is not int:
raise ValueError('n_nuisance must be an integer!')
if n_nuisance < 0:
raise ValueError("Can't have negative nuisance parameters!")
if nuisance_limits is None:
nuisance_limits = np.array([[-1000, 1000] for i in range(n_nuisance)])
if not n_nuisance == 0:
nlimshape = nuisance_limits.shape
if not len(nlimshape) == 2:
raise ValueError('Invalid nuisance_limits shape {}'.format(nlimshape))
if not nlimshape[0] == n_nuisance:
raise ValueError('{} limits provided for {} nuisance parameters'.format(nlimshape[0], n_nuisance))
if not nlimshape[1] == 2:
raise ValueError('Limits need to be provided as (lower, upper) pairs.')
self.n_nuisance = n_nuisance
self.nuisance_limits = nuisance_limits
```
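A rough end-to-end sketch of the retriever follows. Everything here is a stand-in: the random training set, the Gaussian `lnprob` and the output file name are invented for illustration, and the snippet assumes the package plus its dependencies (dynesty, scikit-learn) are installed.
```python
import numpy as np
from priorgen.classified_retriever import ClassifiedRetriever

rng = np.random.default_rng(0)
training_parameters = rng.random((1000, 3))     # (N, M) physical parameters
training_observables = rng.random((1000, 50))   # (N, X) matching observables
data_to_fit = training_observables[0]

def lnprob(params):
    # Toy Gaussian likelihood centred on the first training point; a real
    # run would compare a forward model against measured data and errors.
    return -0.5 * np.sum((params - training_parameters[0]) ** 2 / 0.01 ** 2)

retriever = ClassifiedRetriever(training_parameters, training_observables,
                                n_classes=10)
results = retriever.run_dynesty(data_to_fit, lnprob, nlive=100,
                                filepath='toy_output.csv')
```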
#### File: PriorGen/priorgen/pca_utils.py
```python
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
def run_PCA(parameters, observables, n_components):
'''
Runs a principal component analysis to reduce dimensionality of
observables.
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
n_components : int
The number of principal components to keep
Returns
-------
pca : sklearn.decomposition.PCA
The scikit-learn PCA object
reduced_d_observables : array_like, shape(N, n_components)
The observables after PCA has been applied to them
'''
pca = PCA(n_components=n_components)
fitted_pca = pca.fit(observables)
reduced_d_observables = fitted_pca.transform(observables)
return pca, reduced_d_observables
def pca_plot(parameters, observables, n_components, save=True,
save_path='PCA_plot.pdf'):
'''
Produces a plot of the explained variance of the first n_components
principal components, along with a cumulative variance
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
n_components : int
The number of principal components to keep
save : bool, optional:
If True, will save the output figure to save_path. Default is True.
save_path : str, optional
If save is True, this is the path that the figures will
be saved to. Default is 'PCA_plot.pdf'.
Returns
-------
fig : matplotlib.Figure
The pca plot
'''
pca, _ = run_PCA(parameters, observables, n_components)
variance = pca.explained_variance_ratio_
cumulative_variance = np.cumsum(variance).round(4)
fig, ax = plt.subplots(2,1, sharex=True)
    # Plot the fractional explained variance of each principal component
ax[0].bar(np.arange(n_components), variance, label='Associated variance')
#ax[0].set_xlabel('Principal component')
ax[0].set_ylabel('Fractional variance')
ax[0].set_yscale('log')
ax[1].plot(np.arange(n_components), cumulative_variance, 'r', label='Cumulative variance')
ax[1].set_xlabel('Principal component')
ax[1].set_ylabel('Cumulative variance')
ax[1].margins(x=0.01)
fig.tight_layout()
fig.subplots_adjust(hspace=0)
if save:
fig.savefig(save_path)
return fig
def find_required_components(parameters, observables, variance):
'''
Calculates the number of principal components required for reduced
    dimensionality observables to contain a given fraction of explained variance
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
variance : float
The fraction of explained variance you want the principal components
to contain
Returns
-------
n_components : int
        The smallest number of principal components required to contain the
specified fraction of explained variance
'''
if not 0 <= variance < 1:
raise ValueError('variance must be between 0 and 1')
# run PCA and keep all components
pca, _ = run_PCA(parameters, observables, None)
cumulative_variance = np.cumsum(pca.explained_variance_ratio_)
# The +1 is required because the first part finds an index where the
# cumulative explained variance ratio is larger than the threshold
# and the indices start from 0
n_PCs = np.where(cumulative_variance >= variance)[0][0] + 1
if n_PCs > 30:
print('WARNING: {} principal components are required - this may lead to slow run times.'.format(n_PCs))
return n_PCs
``` |
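A brief usage sketch with synthetic arrays (real model outputs would be far more compressible than random noise, so the component count below is not representative):
```python
import numpy as np
from priorgen.pca_utils import find_required_components, run_PCA

rng = np.random.default_rng(1)
parameters = rng.random((500, 3))      # (N, M)
observables = rng.random((500, 200))   # (N, X)

n_pc = find_required_components(parameters, observables, 0.99)
pca, reduced = run_PCA(parameters, observables, n_pc)
print(n_pc, reduced.shape)             # reduced has shape (N, n_pc)
```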
{
"source": "josh-jhs8/summer-challenge-response",
"score": 3
} |
#### File: summer-challenge-response/PythonVariant/game_connection.py
```python
import socket
import threading
import time
import json
import game_constants as const
class GameSocketManager:
"""
Class to manage the connection to the challenge server
"""
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
self.lock = threading.Lock()
def connect(self, host, port):
"""
Connect to the challenge server and wait for the challenge to start
"""
self.lock.acquire(True)
self.sock.connect((host, port))
while True:
response = self.sock.recv(10)
msg = response.decode("utf-8")
if msg == const.BEGIN:
print("Let the challenge begin...")
break
time.sleep(0.1)
self.lock.release()
def run_command(self, cmd):
"""
Send command to server and receive a response
"""
msg = json.dumps(cmd)
self.lock.acquire(True)
sent = self.sock.send(msg.encode("utf-8"))
if sent == 0:
raise RuntimeError("connection broken")
response = self.sock.recv(1000000)
self.lock.release()
msg = response.decode("utf-8")
return json.loads(msg)
def make_command(cmd_type, action, subject="", arguments=None):
"""
Create a command in the correct format
"""
if arguments is None:
arguments = []
return {const.TYPE: cmd_type,
const.SUBJECT: subject,
const.ACTION: action,
const.ARGUMENTS: arguments}
```
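A minimal usage sketch for the connection layer, assuming a challenge server is listening on localhost:2092 (the endpoint used by `game_manager.quick()` below) and that `game_constants` defines `SHIP` and `LIST` as referenced in `ship_commands.py`:
```python
import game_connection as gc
import game_constants as const

conn = gc.GameSocketManager()
conn.connect("localhost", 2092)             # blocks until the server sends BEGIN
cmd = gc.make_command(const.SHIP, const.LIST)
reply = conn.run_command(cmd)               # dict decoded from the server's JSON
print(reply)
```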
#### File: summer-challenge-response/PythonVariant/game_manager.py
```python
import time as t
import game_player as gp
def play_game(host, port):
"""
Play game on specific remote endpoint
"""
player = gp.GamePlayer()
print("Created game player")
player.connect(host, port)
print("Connected")
state_man = player.initialise()
print("Initialised")
tasks = []
tasks.append(player.explore())
tasks.append(player.draw())
is_alive = True
while is_alive:
is_alive = False
for task in tasks:
if task.is_alive():
is_alive = True
t.sleep(1)
state_man.active = False
def quick():
"""
    Quick access to the standard game location
"""
play_game("localhost", 2092)
if __name__ == "__main__":
quick()
```
#### File: summer-challenge-response/PythonVariant/game_player.py
```python
import game_connection as gc
import game_state as gs
import game_state_manager as gsm
import exploration_manager as em
import game_drawer as gd
class GamePlayer:
"""
Class represent game player and their available action threads
"""
def __init__(self):
self.conn = gc.GameSocketManager()
self.state = None
def connect(self, host, port):
"""
Connect to challenge server
"""
self.conn.connect(host, port)
def initialise(self):
"""
Initialise the state of the challenge
"""
print("Establishing initial position...")
self.state = gs.GameState()
state_man = gsm.StateManager(self.conn, self.state)
state_man.start()
return state_man
def explore(self):
"""
Explore the challenge provided
"""
ex_man = em.ExplorationManager(self.conn, self.state)
ex_man.start()
return ex_man
def draw(self):
"""
Draw the state of the challenge
"""
drawer = gd.GameDrawer(self.state)
drawer.start()
return drawer
```
#### File: summer-challenge-response/PythonVariant/ship_commands.py
```python
import game_connection as gc
import game_constants as const
def move(conn, ship, dest):
"""
Order the ship to move to a destination
"""
cmd = gc.make_command(const.SHIP, const.MOVE, ship[const.NAME], [dest])
data = conn.run_command(cmd)
if data[const.SUCCESS]:
ship[const.LOCATION] = data[const.RESULT_OBJECT][const.LOCATION]
ship[const.STATUS] = data[const.RESULT_OBJECT][const.STATUS]
print("Moving " + ship[const.NAME] + " to " + ship[const.LOCATION])
else:
print(data[const.MESSAGE])
raise RuntimeError("Move command failed")
def observe(conn, ship):
"""
Observe the system that the ship is currently in
"""
cmd = gc.make_command(const.SHIP, const.OBSERVE, ship[const.NAME])
data = conn.run_command(cmd)
if data[const.SUCCESS]:
system = data[const.RESULT_OBJECT]
print("Observed " + system[const.NAME])
print("Stars:")
for star in system[const.STARS]:
print("\t" + star[const.NAME])
print("Planets:")
for planet in system[const.PLANETS]:
print("\t" + planet[const.NAME])
print("Hyperlanes:")
for lane in system[const.HYPERLANES]:
print("\t" + lane)
x_str = str(system[const.LOCATION][const.X])
y_str = str(system[const.LOCATION][const.Y])
print("Location: (" + x_str + ", " + y_str + ")")
return system
print(data[const.MESSAGE])
raise RuntimeError("Observe command failed")
def ship_list(conn):
"""
List all current ships
"""
cmd = gc.make_command(const.SHIP, const.LIST)
data = conn.run_command(cmd)
if data[const.SUCCESS]:
ships = data[const.RESULT_OBJECT]
for ship in ships:
print(ship[const.NAME] + " is currently in " +
ship[const.LOCATION])
return ships
print(data[const.MESSAGE])
raise RuntimeError("List command failed")
``` |
{
"source": "joshjo/django-webpack-loader",
"score": 2
} |
#### File: app/tests/test_custom_loaders.py
```python
from imp import reload
from django.test import TestCase
from webpack_loader import utils, config, loader
DEFAULT_CONFIG = 'DEFAULT'
LOADER_PAYLOAD = {'status': 'done', 'chunks': []}
class ValidCustomLoader(loader.WebpackLoader):
def load_assets(self):
return LOADER_PAYLOAD
class CustomLoadersTestCase(TestCase):
def tearDown(self):
self.reload_webpack()
def reload_webpack(self):
'''
Reloads webpack loader modules that have cached values to avoid polluting certain tests
'''
reload(utils)
reload(config)
def test_bad_custom_loader(self):
'''
Tests that a bad custom loader path will raise an error
'''
loader_class = 'app.tests.bad_loader_path.BadCustomLoader'
with self.settings(WEBPACK_LOADER={
'DEFAULT': {
'CACHE': False,
'BUNDLE_DIR_NAME': 'django_webpack_loader_bundles/',
'LOADER_CLASS': loader_class
}
}):
self.reload_webpack()
try:
loader = utils.get_loader(DEFAULT_CONFIG)
self.fail('The loader should fail to load with a bad LOADER_CLASS')
except ImportError as e:
self.assertIn(
'{} doesn\'t look like a valid module path'.format(loader_class),
str(e)
)
def test_good_custom_loader(self):
'''
Tests that a good custom loader will return the correct assets
'''
loader_class = 'app.tests.test_custom_loaders.ValidCustomLoader'
with self.settings(WEBPACK_LOADER={
'DEFAULT': {
'CACHE': False,
'BUNDLE_DIR_NAME': 'django_webpack_loader_bundles/',
'LOADER_CLASS': loader_class,
}
}):
self.reload_webpack()
assets = utils.get_loader(DEFAULT_CONFIG).load_assets()
self.assertEqual(assets, LOADER_PAYLOAD)
``` |
{
"source": "JoshJQ/stock-analyzer",
"score": 3
} |
#### File: src/stock-calculate/stock-calculate.py
```python
import pymongo
import logging
import configparser
import traceback
from pathlib import Path
def get_date(data):
return data['date']
logging.basicConfig(filename='stocks-calculate.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
config = configparser.ConfigParser()
configFile = "{0}/config/config.ini".format(str(Path.home()))
config.read(configFile)
mongo_url = config['DEFAULT']['MongoDBUrl']
try:
dbclient = pymongo.MongoClient(mongo_url)
stock_db = dbclient["stockdb"]
stocks = stock_db["stocks"]
for stock in stocks.find({"prices": {"$exists": True}, "pb": {"$exists": True}, "pe": {"$exists": True}}).batch_size(20):
prices = stock['prices']
pes = stock['pe']
pbs = stock['pb']
prices.sort(key=get_date, reverse=True)
pes.sort(key=get_date, reverse=True)
pbs.sort(key=get_date, reverse=True)
results = []
i = 0
j = 0
for price in prices:
pe = 0
pb = 0
while i < len(pes) and price['date'] < pes[i]['date']:
i = i + 1
if i < len(pes) and pes[i]['value'] != 0:
earning = float(pes[i]['value'].replace(',', ''))
if earning > 0:
pe = float(price['close'].replace(',', '')) / earning
while j < len(pbs) and price['date'] < pbs[j]['date']:
j = j + 1
if j < len(pbs) and pbs[j]['value'] != 0:
book_value = float(pbs[j]['value'].replace(',', ''))
if book_value > 0:
pb = float(price['close'].replace(',', '')) / book_value
results.append({
'date': price['date'],
'open': price['open'],
'high': price['high'],
'low': price['low'],
'close': price['close'],
'volume': price['volume'],
'pe': pe,
'pb': pb
})
stocks.update_one(
{
'ticker': stock["ticker"]
},
{
'$set': {
"prices": results
}
}
)
except pymongo.errors.ConnectionFailure as err:
logging.error("Cannot connect to mongodb: {0}".format(err))
raise SystemExit(err)
except Exception as e:
logging.error('Unexpected exception')
logging.error(traceback.format_exc())
raise SystemExit(1)
else:
logging.info('Process complete')
```
#### File: src/stock-list/prepare-stock-list.py
```python
import pymongo
import configparser
import requests
import json
import time
import logging
import traceback
from pathlib import Path
def get_market_cap(stock):
return float(stock["market_val"])
logging.basicConfig(filename='stocks-list.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
config = configparser.ConfigParser()
configFile = "{0}/config/config.ini".format(str(Path.home()))
config.read(configFile)
stock_number = int(config['DEFAULT']['StockNumber'])
mongo_url = config['DEFAULT']['MongoDBUrl']
try:
logging.info('Start to macrotrends to fetch stock list')
response = requests.get('https://www.macrotrends.net/stocks/stock-screener')
start_flag = 'var originalData = ['
index_start = response.text.index(start_flag)
index_end = response.text.index('];', index_start)
stock_data = response.text[index_start + len(start_flag) - 1 : index_end + 1]
stock_list = json.loads(stock_data)
stock_list.sort(key=get_market_cap, reverse=True)
logging.info('Fetched stock list')
db_client = pymongo.MongoClient(mongo_url)
stock_db = db_client["stockdb"]
stocks = stock_db["stocks"]
logging.info('Start to store data into database')
count = 1
current_date = time.strftime("%Y-%m-%d", time.localtime())
# stock list
for stock in stock_list:
if stock["exchange"] in ['NYSE', 'NSDQ']:
if count <= stock_number:
try:
if float(stock["market_val"]) < 100000 and "." not in stock["ticker"]:
stocks.update_one(
{
"ticker": stock["ticker"]
},
{
'$set': {
"stock_name": stock["comp_name"],
"industry": stock["zacks_x_ind_desc"],
"sector": stock["zacks_x_sector_desc"],
"company_name": stock["comp_name_2"],
"exchange": stock["exchange"],
"market_cap": stock["market_val"],
"update_date": current_date
}
},
upsert = True
)
count = count + 1
except:
logging.error("Failed to save stock {0}".format(stock["ticker"]))
else:
logging.debug("Saved stock {0}".format(stock["ticker"]))
else:
break
except requests.exceptions.RequestException as err:
logging.error("Error to crawl stock list from the website: {0}".format(err))
raise SystemExit(err)
except OSError as err:
logging.error("OS error: {0}".format(err))
raise SystemExit(err)
except pymongo.errors.ConnectionFailure as err:
logging.error("Cannot connect to mongodb: {0}".format(err))
raise SystemExit(err)
except Exception as e:
logging.error('Unexpected exception')
logging.error(traceback.format_exc())
raise SystemExit(1)
else:
logging.info('Process complete')
```
#### File: src/stock-select/stock-select.py
```python
import pymongo
import datetime
import configparser
import logging
import traceback
from pathlib import Path
def get_date(stock):
return stock["date"]
logging.basicConfig(filename='stocks-select.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
config = configparser.ConfigParser()
configFile = "{0}/config/config.ini".format(str(Path.home()))
config.read(configFile)
mongo_url = config['DEFAULT']['MongoDBUrl']
try:
dbclient = pymongo.MongoClient(mongo_url)
stock_db = dbclient["stockdb"]
stocks = stock_db["stocks"]
selected_stocks = []
for stock in stocks.find({"prices": {"$exists": True}, "pb": {"$exists": True}, "pe": {"$exists": True}}):
prices = stock['prices']
prices.sort(key=get_date, reverse=True)
pe_rank = 0
pb_rank = 0
begin_date = (datetime.datetime.now() - datetime.timedelta(days = 3650)).strftime('%Y-%m-%d')
latest_pe = prices[0]['pe']
latest_pb = prices[0]['pb']
if latest_pe == 0 or latest_pb == 0:
continue
count = 1
for price in prices[1:]:
if (price['date'] < begin_date):
break
if (latest_pe > price['pe']):
pe_rank = pe_rank + 1
if (latest_pb > price['pb']):
pb_rank = pb_rank + 1
count = count + 1
pe_rank_percent = pe_rank / count
pb_rank_percent = pb_rank / count
if pe_rank_percent < 0.5 and pb_rank_percent < 0.2:
selected_stocks.append({
'stock_name': stock['stock_name'],
'ticker': stock['ticker'],
'company_name': stock['company_name'],
'industry': stock['industry'],
'market_cap': stock['market_cap'],
'pe_rank': pe_rank_percent,
'pb_rank': pb_rank_percent,
'roe': stock['roe']
})
roe_begin_date = (datetime.datetime.now() - datetime.timedelta(days = 365 * 5)).strftime('%Y-%m-%d')
results = []
for stock in selected_stocks:
roes = stock['roe']
roes.sort(key=get_date, reverse=True)
roe_match = True
for roe in stock['roe']:
if roe['date'] < roe_begin_date:
                break
historical_roe = roe['value'].rstrip('%')
if float(historical_roe) < 15:
print('Stock ', stock['stock_name'], ' ROE Not match')
roe_match = False
break
if roe_match == True:
results.append({
'stock_name': stock['stock_name'],
'ticker': stock['ticker'],
'company_name': stock['company_name'],
'industry': stock['industry'],
'market_cap': stock['market_cap'],
'pe_rank': stock['pe_rank'],
'pb_rank': stock['pb_rank']
})
print('***********************************************************************************************************')
with open("stocks_result.csv", "w") as outfile:
outfile.write('stock_name, ticker, company_name, industry, market_cap, pe_rank, pb_rank\n')
for stock in results:
outfile.write('{}, {}, {}, {}, {}, {}, {}\n'.format(stock['stock_name'], stock['ticker'], stock['company_name'],
stock['industry'], stock['market_cap'], stock['pe_rank'], stock['pb_rank']))
except pymongo.errors.ConnectionFailure as err:
logging.error("Cannot connect to mongodb: {0}".format(err))
raise SystemExit(err)
except IOError as err:
logging.error("Failed to write file: {0}".format(err))
raise SystemExit(err)
except Exception as e:
logging.error('Unexpected exception')
logging.error(traceback.format_exc())
raise SystemExit(1)
else:
logging.info('Process complete')
``` |
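The heart of the selection above is a historical percentile rank. A standalone illustration, on made-up numbers not tied to any real ticker, of the same calculation as `pe_rank_percent`:
```python
# Made-up history of P/E values and a hypothetical latest P/E.
historical_pe = [30, 25, 22, 28, 35, 40, 27, 33, 26, 24]
latest_pe = 23

# Fraction of observations (including the latest one) that the latest P/E
# exceeds, mirroring the pe_rank_percent computation above.
rank = sum(latest_pe > pe for pe in historical_pe) / (len(historical_pe) + 1)
print(rank)   # ~0.09: lower than ~91% of its own history, so it passes the 0.5 cut
```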
{
"source": "JoshJson/autowiki",
"score": 3
} |
#### File: JoshJson/autowiki/bot.py
```python
import amino
import time
from selenium import webdriver
import requests
from io import BytesIO
import wikipedia
import random
from googletrans import Translator
def convert_url(url):
"""
:param url: url-путь к изображению / url-path to image (str)
:return: файл / file (bytes)
"""
img = requests.get(url).content
f = BytesIO(img)
return f
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver, sleep_between_interactions: int = 1):
"""
:param query: конкретный запрос / specific query (str)
:param max_links_to_fetch: максимальное количество изображений для добавления / maximum of images to add (int)
:param wd: вебдрайвер / webdriver (webdriver)
:param sleep_between_interactions: время в секундах перед осуществлением запросов / time in seconds between
requests (int)
:return: список url / url list (list)
"""
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(sleep_between_interactions)
# Построение запроса
# Build search query
search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
# Загрузка страницы
# Loading the page
wd.get(search_url.format(q=query))
image_urls = set()
image_count = 0
results_start = 0
while image_count < max_links_to_fetch:
scroll_to_end(wd)
# получить обложки всех существующих изображений
# getting thumbnailes of images
thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
number_results = len(thumbnail_results)
print(f"Найдено: {number_results} изображений. Веду извелечение по {results_start}:{number_results}")
for img in thumbnail_results[ results_start:number_results ]:
# нажатие на изображение для получения url
# clicking on every found image in order to get the url
try:
img.click()
time.sleep(sleep_between_interactions)
except Exception:
continue
# извлечение url из изображения
# extracting url from image
actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
for actual_image in actual_images:
if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
image_urls.add(actual_image.get_attribute('src'))
image_count = len(image_urls)
if len(image_urls) >= max_links_to_fetch:
# завершение цикла по нахождению количества изображений равного параметру max_links_to_fetch
# loop ending on finding amount of images which is equal to max_links_to_fetch parameter
print(f"Найдено: {len(image_urls)} url, итерация завершена!")
break
else:
print("Найдено:", len(image_urls), "url. Продолжаю поиск ...")
time.sleep(30)
load_more_button = wd.find_element_by_css_selector(".mye4qd")
if load_more_button:
wd.execute_script("document.querySelector('.mye4qd').click();")
# перевод курсора вниз
# move cursor down
results_start = len(thumbnail_results)
return image_urls
client = amino.Client()
client.login(email="email", password="password")
subclient = amino.SubClient(comId="comid", profile=client.profile)
translator = Translator(service_urls=['translate.googleapis.com'])
DRIVER_PATH = 'PATH_TO_DRIVER'
print('[ОК] Подключение осуществлено')
@client.event("on_text_message")
def on_text_message(data):
# генерация осущесвтляется по вызову команды q
# generation is being held on issuing q command
cmd = data.message.content.split(' ')
if cmd[0] == 'q':
# создание страниц осуществляется по структуризированным данным. В данном случае, это - names_abstract.txt
# wiki creation is being issued on structured data. In this case it is names_abstract.txt
with open('names_abstract.txt', 'r', encoding='windows-1251') as file:
for line in file.readlines():
wd = webdriver.Chrome(executable_path=DRIVER_PATH)
wikipedia.set_lang('ru')
list_of_images = []
query_for_search = "_".join(line.split())
total_string = ''
query_for_search = (wikipedia.search(query_for_search, results=1))[ 0 ]
print(query_for_search)
page = wikipedia.page(query_for_search).content.split('\n\n')
title = wikipedia.page(query_for_search).title
len_of_page = len(page) - 3
for section in page[ :len(page) - len_of_page ]:
if '\n=' in section:
section = section.replace('\n=', '\n[BC]=')
if section.endswith('='):
section = ''
total_string += section + ' '
# дополнительная строка добавляется для более успешного поиска по запросу, в данном случае это '_художник'
# additional string is being added to make search more specific, in this case it's '_artist'
d_img = fetch_image_urls(query_for_search+'_художник', 5, wd)
for key in d_img:
print(list_of_images)
byte_image = convert_url(key)
list_of_images.append(byte_image)
# публикация статьи
# wiki publication
subclient.post_wiki(title=title, content=f'{total_string} \n \n- Подготовлено автоматизированной системой создания Вики статей', keywords='художник, википедия, Россия, абстрактные', imageList=list_of_images)
time.sleep(random.uniform(1, 3))
# поиск id-статьи для дальнейшего добавления в Вики сообщества
# wiki-id search for future submission to community's Wiki
IDS = subclient.get_user_wikis(userId=client.userId, start=0, size=1).wikiId
lastid = str(IDS[ 0 ])
time.sleep(random.uniform(1, 3)) # рандомное время сна для обмана системы Амино / random sleep time in order to trick Amino-systems
subclient.submit_to_wiki(wikiId=lastid, message='Автомат')
print('Успешный запрос')
time.sleep(random.uniform(1, 3)) # рандомное время сна для обмана системы Амино / random sleep time in order to trick Amino-systems
requestsids = subclient.get_wiki_submissions(start=0, size=1).requestId
time.sleep(random.uniform(2, 3)) # рандомное время сна для обмана системы Амино / random sleep time in order to trick Amino-systems
requestId = str(requestsids[ 0 ])
# добавление статьи в Вики
# page submission to Wiki
subclient.accept_wiki_request(requestId=requestId,
destinationCategoryIdList=[ 'cornerstone_directory_id',
'desired_directory_id' ])
print('Добавление прошло успешно')
``` |
{
"source": "JoshJson/nummethod",
"score": 2
} |
#### File: lib/NMisS/approximation.py
```python
import pip
try:
__import__('matplotlib')
except ImportError:
pip.main([ 'install', 'matplotlib' ])
try:
__import__('numpy')
except ImportError:
pip.main([ 'install', 'numpy' ])
try:
__import__('math')
except ImportError:
pip.main([ 'install', 'math' ])
import numpy as np
import matplotlib.pyplot as plt
import math
def Crammer_method(matrix, vector):
"""
    Function that solves a system of linear equations using Cramer's method
    :params matrix: input matrix
    :params vector: vector of constant terms
    :return solution: solution of the system
"""
det = Determinant(matrix)
if det != 0:
matrix_a = [[], [], []]
matrix_b = [[], [], []]
matrix_c = [[], [], []]
for i in range(len(matrix)):
matrix_a[i].append(vector[i])
matrix_a[i].append(matrix[i][1])
matrix_a[i].append(matrix[i][2])
matrix_b[i].append(matrix[i][0])
matrix_b[i].append(vector[i])
matrix_b[i].append(matrix[i][2])
matrix_c[i].append(matrix[i][0])
matrix_c[i].append(matrix[i][1])
matrix_c[i].append(vector[i])
solution = []
solution.append(Determinant(matrix_a) / det)
solution.append(Determinant(matrix_b) / det)
solution.append(Determinant(matrix_c) / det)
return solution
else:
solution = [0, 0, 0]
return solution
def det2(matrix): # 2x2 determinant
return matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]
def minor(matrix, i, j): # matrix minors
tmp = [row for k, row in enumerate(matrix) if k != i]
tmp = [col for k, col in enumerate(zip(*tmp)) if k != j]
return tmp
def Determinant(matrix): # overall determinant computation
"""
    Function that finds the determinant of a square matrix
    :params matrix: square matrix
    :return number: determinant of the square matrix
"""
if (len(matrix)) == (len(matrix[0])):
size = len(matrix)
if size == 2:
return det2(matrix)
return sum((-1) ** j * matrix[0][j] * Determinant(minor(matrix, 0, j))
for j in range(size))
def Lagrange(x_list, y_list, x0, x1, step): # Lagrange interpolation function
"""
    Function that approximates using the Lagrange method
    :params x_list: list of x values
    :params y_list: list of y values
    :params x0: starting point of the approximation
    :params x1: end point of the approximation
    :params step: approximation step
    :return Lagrange_massive_y, Lagrange_massive_x: approximated y, x values
"""
Lagrange_massive_x = []
Lagrange_massive_y = []
xx = x0
for i in range(0, int((x1 - x0) / step) + 1):
yy = 0
if len(x_list) == len(y_list):
for i in range(len(y_list)):
L = 1
for j in range(len(y_list)):
if i != j:
try:
L = L * (xx - x_list[j]) / (x_list[i] - x_list[j])
except ZeroDivisionError:
L = L * 1
yy = yy + y_list[i] * L
Lagrange_massive_y.append(yy)
Lagrange_massive_x.append(xx)
print('При x = ', xx, 'значение в точке = ', yy)
xx = xx + step
return Lagrange_massive_y, Lagrange_massive_x
def approximation_linear_func(list_x, list_y, x0, x1, step): # approximation with a linear function
"""
    Function that approximates with a linear function
    :params list_x: list of x values
    :params list_y: list of y values
    :params x0: starting point of the approximation
    :params x1: end point of the approximation
    :params step: approximation step
    :return Linear_massive_x, Linear_massive_y: approximated x, y values
"""
Linear_massive_x = []
Linear_massive_y = []
xx = x0
for r in range(0, int((x1 - x0) / step) + 1):
s1 = 0;
s2 = 0;
s3 = 0;
s4 = 0
c1 = 1;
c0 = 1
yy = 0
for i in range(len(list_x)):
s1 = s1 + list_x[i] * list_x[i]
s2 = s2 + list_x[i]
s3 = s3 + list_x[i] * list_y[i]
s4 = s4 + list_y[i]
c1 = (s3 * len(list_x) - s2 * s4) / (s1 * len(list_x) - s2 * s2)
c0 = (s1 * s4 - s2 * s3) / (s1 * len(list_x) - s2 * s2)
yy = c0 + c1 * xx
print('При x = ', xx, 'значение в точке = ', yy)
Linear_massive_x.append(xx)
Linear_massive_y.append(yy)
xx = xx + step
return Linear_massive_x, Linear_massive_y
def approximation_quadratic_func(list_x, list_y, x0, x1, step): # approximation with a quadratic function
"""
    Function that approximates with a quadratic function
    :params list_x: list of x values
    :params list_y: list of y values
    :params x0: starting point of the approximation
    :params x1: end point of the approximation
    :params step: approximation step
    :return Quadratic_massive_x, Quadratic_massive_y: approximated x, y values
"""
Quadratic_massive_x = []
Quadratic_massive_y = []
xx = x0
for r in range(0, int((x1 - x0) / step) + 1):
a = [[], [], []]
b = []
s1 = 0;
s2 = 0;
s3 = 0;
s4 = 0;
s5 = 0;
s6 = 0;
s7 = 0;
for i in range(len(list_x)):
s1 = s1 + list_x[i] ** 4
s2 = s2 + list_x[i] ** 3
s3 = s3 + list_x[i] ** 2
s4 = s4 + list_x[i]
s5 = s5 + (list_x[i] ** 2) * list_y[i]
s6 = s6 + list_x[i] * list_y[i]
s7 = s7 + list_y[i]
a[0].append(s1);
a[0].append(s2);
a[0].append(s3)
a[1].append(s2);
a[1].append(s3);
a[1].append(s4)
a[2].append(s3);
a[2].append(s4);
a[2].append(len(list_x))
b.append(s5);
b.append(s6);
b.append(s7)
        a_1 = Crammer_method(a, b)  # solve the normal equations with Cramer's rule
yy = a_1[2] + a_1[1] * xx + a_1[0] * (xx ** 2)
print('При x = ', xx, 'значение в точке = ', yy)
Quadratic_massive_x.append(xx)
Quadratic_massive_y.append(yy)
xx = xx + step
return Quadratic_massive_x, Quadratic_massive_y
def normal_distibution(list_x, list_y, x0, x1, step):
"""
    Function that approximates with a normal distribution function
    :params list_x: list of x values
    :params list_y: list of y values
    :params x0: starting point of the approximation
    :params x1: end point of the approximation
    :params step: approximation step
    :return normal_massive_x, normal_massive_y: approximated x, y values
"""
normal_massive_x = []
normal_massive_y = []
xx = x0
a = [[], [], []]
b = []
s1 = 0;
s2 = 0;
s3 = 0;
s4 = 0;
s5 = 0;
s6 = 0;
s7 = 0;
for i in range(len(list_x)):
s1 = s1 + list_x[i] ** 4
s2 = s2 + list_x[i] ** 3
s3 = s3 + list_x[i] ** 2
s4 = s4 + list_x[i]
s5 = s5 + (list_x[i] ** 2) * list_y[i]
s6 = s6 + list_x[i] * list_y[i]
s7 = s7 + list_y[i]
a[0].append(s1);
a[0].append(s2);
a[0].append(s3)
a[1].append(s2);
a[1].append(s3);
a[1].append(s4)
a[2].append(s3);
a[2].append(s4);
a[2].append(len(list_x))
b.append(s5);
b.append(s6);
b.append(s7)
    a_1 = Crammer_method(a, b)  # solve the normal equations with Cramer's rule
print(a_1)
for i in range(len(a_1)):
if a_1[i] < 0:
a_1[i] = a_1[i] * (-1)
print(a_1)
for r in range(0, int((x1 - x0) / step) + 1):
try:
yy = a_1[2] * math.e ** (-(((xx - a_1[1]) ** 2) / a_1[0] ** 2))
except ZeroDivisionError:
yy = 0
normal_massive_x.append(xx)
normal_massive_y.append(yy)
xx = xx + step
return normal_massive_x, normal_massive_y
```
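A quick usage sketch on a toy data set, assuming the library is importable as `NMisS` (per the lib/NMisS path); the points and the 0.5 step are arbitrary, and both functions print each evaluated point as they run:
```python
from NMisS.approximation import Lagrange, approximation_linear_func

x = [0, 1, 2, 3, 4]
y = [1.0, 2.1, 2.9, 4.2, 5.1]

# Lagrange interpolation on [0, 4] (returns the y list first, then the x list)
lag_y, lag_x = Lagrange(x, y, 0, 4, 0.5)

# Least-squares straight line on the same grid (returns x first, then y)
lin_x, lin_y = approximation_linear_func(x, y, 0, 4, 0.5)
```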
#### File: lib/NMisS/optimization.py
```python
import pip
try:
__import__('math')
except ImportError:
pip.main([ 'install', 'math' ])
try:
__import__('pandas')
except ImportError:
pip.main([ 'install', 'pandas' ])
try:
__import__('scipy')
except ImportError:
pip.main([ 'install', 'scipy' ])
try:
__import__('matplotlib')
except ImportError:
pip.main([ 'install', 'matplotlib' ])
try:
__import__('networkx')
except ImportError:
pip.main([ 'install', 'networkx' ])
try:
__import__('numpy')
except ImportError:
pip.main([ 'install', 'numpy' ])
try:
__import__('datetime')
except ImportError:
pip.main([ 'install', 'datetime' ])
import math
import numpy as np
import pandas as pd
from scipy.stats import cauchy
import random
import matplotlib.pyplot as plt
import networkx as nx
from numpy.random import choice as np_choice
random_matrix = pd.DataFrame([[int(random.random() * 100) for _ in range(100)]
for _ in range(100)])
random_matrix.to_csv('random_matrix.csv', header=True, index=False)
random_matrix = pd.read_csv('random_matrix.csv')
spisok = random_matrix.values.tolist()
def simulated_annealing(dist, n, t0):
"""
    Function implementing the simulated annealing algorithm
    :param dist: list -- weight matrix
    :param n: int -- path length (number of cities)
    :param t0: int -- optimal (starting) temperature
"""
def temperatura(k, t):
"""
        Function for calculating the optimal temperature for the simulated annealing algorithm
        :param k: int -- number of cities
        :param t: int -- temperature
        :return t/k: float -- coefficient needed to compute the next temperature
"""
return t / k
way = [element for element in range(n)]
rand0 = [element for element in range(1, n)]
tk = 1
m = 1
s = 0
x0 = 0.1
x = [x0]
t = t0
s_list = []
while t > tk:
sp = 0
t = temperatura(m, t0)
x.append(random.uniform(0, 1))
way_p = [way[j] for j in range(n)]
rand = random.sample(rand0, 2)
way_p[rand[0]], way_p[rand[1]] = way_p[rand[1]], way_p[rand[0]]
for j in range(n - 1):
sp = sp + dist[way_p[j]][way_p[j + 1]]
sp = sp + dist[way_p[0]][way_p[-1]]
if m == 1 or sp < s:
s = sp
way = [way_p[j] for j in range(n)]
else:
p = math.exp(-(sp - s) / t)
if x[m - 1] < p:
x[m - 1], x[m] = x[m], x[m - 1]
s = sp
way = [way_p[j] for j in range(n)]
m += 1
s_list.append(s)
way.append(way[0])
return way, s, m, s_list
def inlet():
"""
    Input function for choosing how we want to specify the weight matrix
    :return dist: list -- weight matrix
"""
def file():
"""
        Function that reads a csv file and fills the matrix
        with the values taken from it
        :return matrix_1: list -- matrix read from the csv file
"""
import csv
matrix_1 = []
name = input("Введите названи файла. Например, city.csv: ")
with open(name) as file:
reader = csv.reader(file, delimiter=';', quotechar=',')
for row in reader:
matrix_1.append(row)
matrix_1 = [[float(matrix_1[i][j]) for j in range(len(matrix_1))]
for i in range(len(matrix_1))]
return matrix_1
def random_dist(k):
"""
        Function that generates a matrix
        :param k: int -- number of cities
        :return d: list -- generated matrix
"""
d = [[0 if elem == j else random.uniform(0, 10) for j in range(k)]
for elem in range(k)]
for elem in range(k):
print(d[elem])
return d
def matr(m, n):
"""
Fills a matrix with user-entered elements.
:param m: int -- number of rows in the matrix
:param n: int -- number of columns in the matrix
:return matrix: list -- matrix filled with the entered elements
"""
def el_int(el):
"""
Checks whether the entered matrix element is an integer.
Returns True if the number is an integer, False otherwise.
:param el: matrix element
"""
try:
int(el)
return True
except ValueError:
return False
def el_float(el):
"""
Checks whether the entered matrix element is a float.
Returns True if the number is a float, False otherwise.
:param el: matrix element
"""
try:
float(el)
return True
except ValueError:
return False
def el_complex(el):
"""
Checks whether the entered matrix element is a complex number.
Returns True if the number is complex, False otherwise.
:param el: matrix element
"""
try:
complex(el)
return True
except ValueError:
return False
def rev_complex(h):
"""
Converts a complex number to the normal form, i.e. a + i*b
Example: if you entered -j + 1, the function converts it to 1 - j
:param h: str -- matrix element
:return h_rev: str -- converted element
"""
h_rev = ''
sep = 0
if h[0] == '+' or h[0] == '-':
for element_matr in range(1, len(h)):
if h[element_matr] == '+' or h[element_matr] == '-':
sep = element_matr
break
h_rev = h[sep:len(h)] + h[0:sep]
else:
for element_matr in range(0, len(h)):
if h[element_matr] == '+' or h[element_matr] == '-':
sep = element_matr
break
h_rev = h[sep:len(h)] + '+' + h[0:sep]
return (h_rev)
matrix = []
print('Enter the elements of each matrix row separated by spaces:')
for elem_matr in range(0, m):
a = []
row = input()
row = row.split(' ')
matrix.append(row)
if len(row) != n:
print('Incorrect number of elements in the matrix row.')
exit()
for j in range(0, n):
el = matrix[elem_matr][j]
k = 0
while k == 0:
if el_int(el) is True:
matrix[elem_matr][j] = int(el)
k = 1
else:
if el_float(el) is True:
matrix[elem_matr][j] = float(el)
k = 1
else:
if el_complex(el) is True:
matrix[elem_matr][j] = complex(el)
k = 1
else:
if el_complex(rev_complex(el)) is True:
matrix[elem_matr][j] = complex(
rev_complex(el))
k = 1
else:
el = input('Invalid input format. '
'Re-enter element '
'[{}, {}]: '.format(elem_matr, j))
return (matrix)
print("Ввод данных")
length = int(input("Введите: 1 - для считывания файла с устройства, "
"2 - для случайной генерации, "
"3 - для ввода матрицы с клавиатуры\n"))
if length == 1:
dist = file()
if length == 2:
k = int(input("Введите количество городов: "))
dist = random_dist(k)
if length == 3:
k = int(input("Введите количество городов: "))
dist = matr(k, k)
return dist
class AntColony(object):
"""
Class that finds an optimal route using the Ant Colony algorithm.
"""
def __init__(self, distances, n_ants, n_best, n_iterations,
decay, alpha=1, beta=1):
"""
Initialises the solver and replaces zero weights with inf
:param distances: list -- weight matrix
:param n_ants: int -- number of ants
:param n_best: int -- number of best paths that deposit pheromone
:param n_iterations: int -- number of iterations
:param decay: float -- pheromone evaporation rate
:param alpha: int -- weight given to pheromone levels
:param beta: int -- weight given to path length
"""
for i in range(len(distances)):
    for j in range(len(distances)):
        if distances[i][j] == 0:
            distances[i][j] = np.inf
self.distances = np.array(distances)
self.pheromone = np.ones(self.distances.shape) / len(self.distances)
self.all_inds = range(len(self.distances))
self.n_ants = n_ants
self.n_best = n_best
self.n_iterations = n_iterations
self.decay = decay
self.alpha = alpha
self.beta = beta
def run(self):
"""
Finds the best path and its cost
:return all_time_shortest_path: tuple -- tuple holding the list of
edge tuples of the best path and its cost
"""
shortest_path = None
all_time_shortest_path = ("placeholder", np.inf)
for elem in range(self.n_iterations):
all_paths = self.gen_all_paths()
self.spread_pheronome(all_paths, self.n_best,
shortest_path=shortest_path)
shortest_path = min(all_paths, key=lambda x: x[1])
if shortest_path[1] < all_time_shortest_path[1]:
all_time_shortest_path = shortest_path
self.pheromone = self.pheromone * self.decay
return all_time_shortest_path
def spread_pheronome(self, all_paths, n_best, shortest_path):
"""
Deposits pheromone along the best paths found in this iteration
:param all_paths: list -- list of (path, cost) tuples
:param n_best: int -- number of best paths that deposit pheromone
:param shortest_path: tuple -- tuple holding the list of edge tuples
of a path and its cost
"""
sorted_paths = sorted(all_paths, key=lambda x: x[1])
for path, dist in sorted_paths[:n_best]:
for move in path:
self.pheromone[move] += 1.0 / self.distances[move]
def gen_path_dist(self, path):
"""
Computes the cost of a path
:param path: list -- list of edge tuples
:return total_dist: numpy.float64 -- path cost
"""
total_dist = 0
for ele in path:
total_dist += self.distances[ele]
return total_dist
def gen_all_paths(self):
"""
Generates a path for every ant and collects (path, cost) tuples
:return all_paths: list -- list of (path, cost) tuples
"""
all_paths = []
for elem in range(self.n_ants):
path = self.gen_path(0)
all_paths.append((path, self.gen_path_dist(path)))
return all_paths
def gen_all_cost(self):
"""
Computes the cost of a freshly generated path for every ant
:return cost: list -- list of path costs
"""
cost = []
for elem in range(self.n_ants):
path = self.gen_path(0)
cost_1 = self.gen_path_dist(path)
cost.append(cost_1.tolist())
return cost
def gen_path(self, start):
"""
Builds a single path starting from the given node
:param start: int -- starting node
:return path: list -- list of edge tuples
"""
path = []
visited = set()
visited.add(start)
prev = start
for elem in range(len(self.distances) - 1):
move = self.pick_move(self.pheromone[prev], self.distances[prev],
visited)
path.append((prev, move))
prev = move
visited.add(move)
path.append((prev, start))
return path
def pick_move(self, pheromone, dist, visited):
"""
Picks the next node, weighting candidates by pheromone level and distance
:param pheromone: numpy.ndarray -- pheromone levels used to
choose the next move
:param dist: list -- row of the weight matrix for the current node
:param visited: set -- set of already visited nodes
:return move: numpy.int64 -- index of the next node
"""
pheromone = np.copy(pheromone)
pheromone[list(visited)] = 0
row = pheromone ** self.alpha * ((1.0 / dist) ** self.beta)
norm_row = row / row.sum()
move = np_choice(self.all_inds, 1, p=norm_row)[0]
return move
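# Illustrative sketch (not part of the original module): building the AntColony
# solver on a small, invented 4x4 weight matrix and running it; the
# hyperparameters mirror those used by runoptimisationscript below.
def _demo_ant_colony():
    dist = [[0, 2, 2, 5],
            [2, 0, 4, 8],
            [2, 4, 0, 1],
            [5, 8, 1, 0]]
    colony = AntColony(dist, n_ants=len(dist) * 2, n_best=5,
                       n_iterations=len(dist) * 4, decay=0.95, alpha=1, beta=1)
    best_path, best_cost = colony.run()
    print("best route:", route_conversion(best_path), "cost:", best_cost)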
def route_conversion(lst):
"""
Formats the best path as a string such as 0-2-1-0
:param lst: list -- list of edge tuples of the best path
:return '-'.join(result): str -- best path in the form 0-1-2-0
"""
result = []
for elem in range(len(lst)):
if elem == 0:
result.append('-'.join([str(lst[elem][0]), str(lst[elem][1])]))
else:
result.append(str(lst[elem][1]))
return '-'.join(result)
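# Illustrative example: route_conversion([(0, 2), (2, 1), (1, 0)]) -> '0-2-1-0'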
def route_con(lst):
"""
Converts the list of edge tuples of the best path into a flat list of nodes
:param lst: list -- list of edge tuples of the best path
:return result: list -- best path as a list of nodes
"""
result = []
for elem in range(len(lst)):
if elem == 0:
result.append(lst[elem][0])
result.append(lst[elem][1])
else:
result.append(lst[elem][1])
return result
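# Illustrative example: route_con([(0, 2), (2, 1), (1, 0)]) -> [0, 2, 1, 0]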
def graph(n, way, dist):
"""
Draws the route graph for the simulated annealing result
:param n: int -- path length
:param way: list -- best path found
:param dist: list -- weight matrix
"""
rand = [i for i in range(n)]
g = nx.Graph()
g.add_nodes_from(rand)
for elem in range(n):
for j in range(elem + 1, n):
if dist[elem][j] != 0:
g.add_edge(rand[elem], rand[j])
comb = []
for elem in range(n):
if rand.index(way[elem]) > rand.index(way[elem + 1]):
comb.append(tuple([way[elem + 1], way[elem]]))
else:
comb.append(tuple([way[elem], way[elem + 1]]))
edge_colors = ["red" if elem in comb else "blue" for elem in g.edges()]
plt.figure(figsize=(10, 10))
pos = nx.spring_layout(g)
nx.draw_networkx(g, pos, edge_color=edge_colors)
plt.title("Алгоритм Отжига")
plt.show()
def graph_1(n, way, dist):
"""
Draws the route graph for the Ant Colony result
:param n: int -- path length
:param way: list -- best path found
:param dist: list -- weight matrix
"""
rand = [_ for _ in range(n)]
g = nx.Graph()
g.add_nodes_from(rand)
for elem in range(n):
for j in range(elem + 1, n):
if dist[elem][j] != 0:
g.add_edge(rand[elem], rand[j])
comb = []
for elem in range(n):
if rand.index(way[elem]) > rand.index(way[elem + 1]):
comb.append(tuple([way[elem + 1], way[elem]]))
else:
comb.append(tuple([way[elem], way[elem + 1]]))
edge_colors = ["red" if elem in comb else "blue" for elem in g.edges()]
plt.figure(figsize=(10, 10))
pos = nx.spring_layout(g)
nx.draw_networkx(g, pos, edge_color=edge_colors)
plt.title("<NAME>")
plt.show()
def runoptimisationscript():
"""
Runs the whole demo loop (shows the optimisation routines in action)
:return:
"""
distant = inlet()
len_m = len(distant)
temper = len_m ** 2
w, s, q, s_list = simulated_annealing(distant, len_m, temper)
print("Длина маршрута: ", s)
print("Маршрут алгоритма имитации отжига: ", w)
print("Количество итераций в маршруте имитации отжига: ", q)
graph(len_m, w, distant)
distance = distant
ant_colony = AntColony(distance, len(distance) * 2, 5, len(distance) * 4,
0.95, alpha=1, beta=1)
shortest_path = ant_colony.run()
c = ant_colony.gen_all_cost()
route = shortest_path[0]
len_m = len(distance)
results = route_con(shortest_path[0])
print("Полученный путь алгоритмом муравьиной колонии:",
route_conversion(shortest_path[0]))
print("Стоимость пути муравьиной колонии:", shortest_path[1])
graph_1(len_m, results, distance)
plt.subplot(2, 1, 1)
plt.plot(s_list)
plt.title('Simulated annealing')
plt.xlabel('Iteration number')
plt.ylabel('Route length')
plt.subplot(2, 1, 2)
plt.plot(c)
plt.title('Ant Colony algorithm')
plt.xlabel('Iteration number')
plt.ylabel('Route length')
plt.show()
``` |
{
"source": "josh-justjosh/empty-reverie",
"score": 3
} |
#### File: josh-justjosh/empty-reverie/PhantomStats.py
```python
import requests
import json
import datetime
def getlisteners(url):
stats = json.loads(requests.get(url).text)
print(stats)
try:
listeners = stats['icestats']['source'][0]['listeners']
except KeyError:
listeners = stats['icestats']['source']['listeners']
return listeners
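# Note (assumption about the Icecast status-json layout): 'source' is a list of
# dicts when several mountpoints are active and a single dict when only one is,
# which is why the [0] index is tried first and the KeyError fallback reads the
# dict directly.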
def now():
return datetime.datetime.utcnow()
newdata = {}
newdata['time'] = str(now())
newdata['listeners'] = getlisteners('https://phantommedia.radioca.st/status-json.xsl')
print(newdata)
try:
with open('_data/PhantomListeners.json','r', encoding='utf-8', newline='\n') as f:
olddata = json.loads(f.read())
except FileNotFoundError:
olddata = []
olddata.append(newdata)
with open('_data/PhantomListeners.json','wt', encoding='utf-8', newline='\n') as f:
f.write(json.dumps(olddata,indent=4)+"\n")
print(now(),"PhantomListeners.json saved")
``` |
{
"source": "josh-justjosh/parkrun-Cancellations",
"score": 3
} |
#### File: josh-justjosh/parkrun-Cancellations/parkrun data.py
```python
import datetime
def now():
return datetime.datetime.utcnow()
print(now(),'Script Start')
import requests
import json
import csv
from bs4 import BeautifulSoup
from html_table_extractor.extractor import Extractor
from html.parser import HTMLParser
import xml.etree.ElementTree as ET
import twython
import os
import collections
consumer_key = os.environ['consumer_key']
consumer_secret = os.environ['consumer_secret']
access_token = os.environ['access_token']
access_token_secret = os.environ['access_token_secret']
from twython import Twython
twitter = Twython(
consumer_key,
consumer_secret,
access_token,
access_token_secret)
def tweet(message):
twitter.update_status(status=message)
print("Tweeted: "+message)
def rem_dups(x):
return list(dict.fromkeys(x))
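# e.g. rem_dups(['a', 'a', 'b']) -> ['a', 'b'] (dict.fromkeys keeps first-seen order)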
old_cancellations_data = []
with open('_data/cancellations.tsv','r', encoding='utf-8', newline='') as f:
tsv_reader = csv.reader(f, delimiter="\t")
for row in tsv_reader:
row = rem_dups(row)
old_cancellations_data.append(row)
print(now(),'cancellations.tsv read')
old_cancellations_data.remove(['Date','Event','Country','Cancellation Note','Website'])
states_list = []
with open('_data/raw/states.tsv','r', encoding='utf-8', newline='') as f:
tsv_reader = csv.reader(f, delimiter="\t")
for row in tsv_reader:
states_list.append(row)
print(now(),'raw/states.tsv read')
states_list.remove(['Event','Country','State','County'])
def same_week(dateString):
'''returns true if a dateString in %Y-%m-%d format is part of the current week'''
d1 = datetime.datetime.strptime(dateString,'%Y-%m-%d')
d2 = datetime.datetime.today()
return d1.isocalendar()[1] == d2.isocalendar()[1]
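# Note: only the ISO week number is compared (not the year), so a date from a
# different year whose ISO week number happens to match today's also returns True.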
events = requests.get('https://images.parkrun.com/events.json').text
with open('_data/raw/events.json','wt', encoding='utf-8', newline='') as f:
f.write(json.dumps(json.loads(events), indent=2))
print(now(),"raw/events.json saved")
technical_event_info = requests.get('https://wiki.parkrun.com/index.php/Technical_Event_Information').text
#with open('_data/raw/tei.html','wt', encoding='utf-8', newline='') as f:
# f.write(technical_event_info)
# print(now(),"raw/tei.html saved")
cancellations = requests.get('https://wiki.parkrun.com/index.php/Cancellations/Global').text
#with open('_data/raw/cancellations.html','wt', encoding='utf-8', newline='') as f:
# f.write(cancellations)
# print(now(),"raw/cancellations.html saved")
events = json.loads(events)['events']
soup = BeautifulSoup(technical_event_info, 'html.parser')
extractor = Extractor(soup)
extractor.parse()
tei_table = extractor.return_list()
#print(now(),tei_table)
upcoming_events_table = []
upcoming_events = []
for i in tei_table:
out = []
for j in i:
j = j.strip()
out.append(j)
#print(now(),out)
if 'AcceptingRegistrations' in out:
upcoming_events.append(out[0])
upcoming_events_table.append(out)
#print(now(),upcoming_events)
soup = BeautifulSoup(cancellations, 'html.parser')
extractor = Extractor(soup)
extractor.parse()
cancellation_table = extractor.return_list()
cancellation_table.pop(-1)
cancellation_table.pop(0)
cancellations_data = []
cancellations_list = []
for i in range(len(cancellation_table)):
try:
for x in range(5):
cancellation_table[i][x] = cancellation_table[i][x].strip()
except IndexError:
break
if same_week(cancellation_table[i][0]) == True:
#print(now(),cancellation_table[i])
cancellations_data.append([cancellation_table[i][0],cancellation_table[i][1],cancellation_table[i][3],cancellation_table[i][4]])
cancellations_list.append(cancellation_table[i][1])
def sortByIndex0(e):
return e[0]
def sortByIndex1(e):
return e[1]
cancellation_table.sort(key=sortByIndex0)
cancellation_table.sort(key=sortByIndex1)
with open('_data/all-cancellations.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Date','Event','Region','Country','Cancellation Note'])
for i in cancellation_table:
tsv_writer.writerow(i)
print(now(),"all-cancellations.tsv saved")
cancellation_dates = []
new_states_list = []
x = 0
#upcoming_events.append('Central parkrun, Plymouth') #01/01/99 https://www.parkrun.org.uk/centralplymouth/
#upcoming_events.append('Church Mead parkrun') #01/01/99 https://www.parkrun.org.uk/churchmead/
#upcoming_events.append('Edgbaston Reservoir parkrun') #01/01/99 https://www.parkrun.org.uk/edgbastonreservoir/
#upcoming_events.append('Henlow Bridge Lakes parkrun') #01/01/99 https://www.parkrun.org.uk/henlowbridgelakes/
#upcoming_events.append('<NAME> parkrun') #01/01/99 https://www.parkrun.org.uk/penryncampus/
#upcoming_events.append('Roberts Park parkrun') #01/01/99 https://www.parkrun.org.uk/robertspark/
for parkrun in events['features']:
if parkrun['properties']['EventLongName'] in upcoming_events:
#print(now(),parkrun)
events['features'].remove(parkrun)
for parkrun in events['features']:
#print(now(),parkrun['properties']['EventLongName'])
if 'junior' in parkrun['properties']['EventLongName']:
if parkrun['properties']['EventLongName'] in cancellations_list:
parkrun['properties']['Status'] = 'junior Cancellation'
else:
parkrun['properties']['Status'] = 'junior parkrunning'
else:
if parkrun['properties']['EventLongName'] in cancellations_list:
parkrun['properties']['Status'] = '5k Cancellation'
else:
parkrun['properties']['Status'] = 'parkrunning'
parkrun['properties']['Cancellations'] = []
for cancellation in cancellation_table:
if parkrun['properties']['EventLongName'] == cancellation[1] and same_week(cancellation[0]) == True:
newcancellation = {'DateCancelled': cancellation[0], 'ReasonCancelled': cancellation[4]}
parkrun['properties']['Cancellations'].append(newcancellation)
cancellation_dates.append(cancellation[0])
if parkrun['properties']['countrycode'] == 3 :
parkrun['properties']['Website'] = 'https://www.parkrun.com.au/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Australia'
elif parkrun['properties']['countrycode'] == 4 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.at/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Austria'
elif parkrun['properties']['countrycode'] == 14 :
parkrun['properties']['Website'] = 'https://www.parkrun.ca/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Canada'
elif parkrun['properties']['countrycode'] == 23 :
parkrun['properties']['Website'] = 'https://www.parkrun.dk/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Denmark'
elif parkrun['properties']['countrycode'] == 30 :
parkrun['properties']['Website'] = 'https://www.parkrun.fi/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Finland'
elif parkrun['properties']['countrycode'] == 31 :
parkrun['properties']['Website'] = 'https://www.parkrun.fr/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'France'
elif parkrun['properties']['countrycode'] == 32 :
parkrun['properties']['Website'] = 'https://www.parkrun.com.de/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Germany'
elif parkrun['properties']['countrycode'] == 42 :
parkrun['properties']['Website'] = 'https://www.parkrun.ie/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Ireland'
elif parkrun['properties']['countrycode'] == 44 :
parkrun['properties']['Website'] = 'https://www.parkrun.it/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Italy'
elif parkrun['properties']['countrycode'] == 46 :
parkrun['properties']['Website'] = 'https://www.parkrun.jp/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Japan'
elif parkrun['properties']['countrycode'] == 57 :
parkrun['properties']['Website'] = 'https://www.parkrun.my/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Malaysia'
elif parkrun['properties']['countrycode'] == 65 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.nz/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'New Zealand'
elif parkrun['properties']['countrycode'] == 67 :
parkrun['properties']['Website'] = 'https://www.parkrun.no/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Norway'
elif parkrun['properties']['countrycode'] == 74 :
parkrun['properties']['Website'] = 'https://www.parkrun.pl/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Poland'
elif parkrun['properties']['countrycode'] == 79 :
parkrun['properties']['Website'] = 'https://www.parkrun.ru/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Russia'
elif parkrun['properties']['countrycode'] == 82 :
parkrun['properties']['Website'] = 'https://www.parkrun.sg/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Singapore'
elif parkrun['properties']['countrycode'] == 85 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.za/'+parkrun['properties']['eventname']
if parkrun['properties']['EventLongName'] in ['Windhoek parkrun','Omeya parkrun','Swakopmund parkrun','Walvis Bay parkrun']:
parkrun['properties']['Country'] = 'Namibia'
elif parkrun['properties']['EventLongName'] in ['Mbabane parkrun']:
parkrun['properties']['Country'] = 'Eswatini'
else:
parkrun['properties']['Country'] = 'South Africa'
elif parkrun['properties']['countrycode'] == 88 :
parkrun['properties']['Website'] = 'https://www.parkrun.se/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Sweden'
elif parkrun['properties']['countrycode'] == 97 :
parkrun['properties']['Website'] = 'https://www.parkrun.org.uk/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'United Kingdom'
elif parkrun['properties']['countrycode'] == 98 :
parkrun['properties']['Website'] = 'https://www.parkrun.us/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'USA'
elif parkrun['properties']['countrycode'] == 64 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.nl/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Netherlands'
else: parkrun['properties']['Website'] = 'Unavailable'
new = True
for event in states_list:
if event[0] == parkrun['properties']['EventLongName']:
#print(now(),parkrun['properties']['EventShortName'],'already saved state')
new_states_list.append(event)
parkrun['properties']['State'] = event[2]
parkrun['properties']['County'] = event[3]
new = False
if new == True:
#print(now(),parkrun['properties']['EventShortName'],'not saved state')
GEONAME_USERNAME = '_josh_justjosh'
url = "http://api.geonames.org/countrySubdivision?lat="+str(parkrun['geometry']['coordinates'][1])+"&lng="+str(parkrun['geometry']['coordinates'][0])+"&radius=1.5&maxRows=1&level=2&username="+GEONAME_USERNAME
root = ET.fromstring(requests.get(url).text.strip())
try:
state = root.find('countrySubdivision').find('adminName1').text
except:
state = "-Unknown-"
print(now(),parkrun['properties']['EventLongName'],"- State not Found -",url)
try:
county = root.find('countrySubdivision').find('adminName2').text
except:
county = "-Unknown-"
print(now(),parkrun['properties']['EventLongName'],'- County not found -',url)
parkrun['properties']['State'] = state
parkrun['properties']['County'] = county
add = [parkrun['properties']['EventLongName'],parkrun['properties']['Country'],state,county]
new_states_list.append(add)
parkrun['properties']['description']='<h4 style="margin: 0 0 8px;">'+parkrun['properties']['EventLongName']+'</h4><table><tr><th>Status:</th><td'
if len(parkrun['properties']['Cancellations']) > 1:
parkrun['properties']['description']+=' colspan='+str(len(parkrun['properties']['Cancellations']))+' '
parkrun['properties']['description']+='>'+parkrun['properties']['Status']+'</td></tr>'
if len(parkrun['properties']['Cancellations']) == 1:
parkrun['properties']['description']+='<tr><th>Date Cancelled:</th><td>'+datetime.datetime.strptime(parkrun['properties']['Cancellations'][0]['DateCancelled'],'%Y-%m-%d').strftime('%A, %e %B %Y')+'</td></tr>'
parkrun['properties']['description']+='<tr><th>Cancellation Note:</th><td>'+parkrun['properties']['Cancellations'][0]['ReasonCancelled']+'</td></tr>'
elif len(parkrun['properties']['Cancellations']) > 1:
parkrun['properties']['description']+='<tr><th>Date Cancelled:</th>'
for i in parkrun['properties']['Cancellations']:
parkrun['properties']['description']+='<td>'+datetime.datetime.strptime(i['DateCancelled'],'%Y-%m-%d').strftime('%A, %e %B %Y')+'</td>'
parkrun['properties']['description']+='</tr><tr><th>Cancellation Note:</th>'
for i in parkrun['properties']['Cancellations']:
parkrun['properties']['description']+='<td>'+i['ReasonCancelled']+'</td>'
parkrun['properties']['description']+='</tr>'
if parkrun['properties']['Website'] != 'Unavailable':
parkrun['properties']['description']+='<tr><th>Website:</th><td'
if len(parkrun['properties']['Cancellations']) > 1:
parkrun['properties']['description']+=' colspan='+str(len(parkrun['properties']['Cancellations']))+' '
parkrun['properties']['description']+='><a href="'+parkrun['properties']['Website']+'">'+parkrun['properties']['Website'].replace('https://www.','')+'</a></td></tr>'
else: print(now(),parkrun['properties']['EventShortName'],'- Website Not Generated')
parkrun['properties']['description']+='</table>'
x += 1
#print(now(),x,"/",len(events['features']),'-',parkrun['properties']['EventShortName'],"processed")
#if x == 1750:
# break
with open('_data/events.json','w', encoding='utf-8') as f:
f.write(json.dumps(events, indent=2))
print(now(),'events.json saved')
cancellation_dates = list(dict.fromkeys(cancellation_dates))
cancellation_dates.sort()
with open('_data/cancellation-dates.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Dates'])
for date in cancellation_dates:
tsv_writer.writerow([date])
print(now(),"cancellation-dates.tsv saved")
events_data = []
for event in events['features']:
out = []
out.append(event['properties']['EventLongName'])
out.append(event['geometry']['coordinates'][1])
out.append(event['geometry']['coordinates'][0])
out.append(event['properties']['Country'])
out.append(event['properties']['State'])
out.append(event['properties']['County'])
out.append(event['properties']['Status'])
out.append(event['properties']['Cancellations'])
out.append(event['properties']['Website'])
events_data.append(out)
events_data.sort()
with open('_data/events-table.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Event','Latitude','Longitude','Country','State','County','Status','Cancellations','Website'])
for event in events_data:
tsv_writer.writerow(event)
print(now(),"events-table.tsv saved")
countries = {
'Australia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Austria': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Canada': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Denmark': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Eswatini': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Finland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'France': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Germany': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Ireland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Italy': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Japan': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Malaysia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Namibia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Netherlands': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'New Zealand': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Norway': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Poland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Russia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Singapore': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'South Africa': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Sweden': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'United Kingdom': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'USA': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Total': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
}
totals= {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
for parkrun in events['features']:
if parkrun['properties']['Status'] == 'parkrunning':
countries[parkrun['properties']['Country']]['parkrunning'] += 1
countries[parkrun['properties']['Country']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior parkrunning':
countries[parkrun['properties']['Country']]['junior parkrunning'] += 1
countries[parkrun['properties']['Country']]['Total'] += 1
elif parkrun['properties']['Status'] == '5k Cancellation':
countries[parkrun['properties']['Country']]['5k Cancellations'] += 1
countries[parkrun['properties']['Country']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior Cancellation':
countries[parkrun['properties']['Country']]['junior Cancellations'] += 1
countries[parkrun['properties']['Country']]['Total'] += 1
elif parkrun['properties']['Status'] == 'PtR':
countries[parkrun['properties']['Country']]['5k Cancellations'] += 1
countries[parkrun['properties']['Country']]['Total'] += 1
else:
print(now(),"Error:",parkrun['properties']['EventLongName'])
#print(now(),countries)
for country,data in countries.items():
totals['parkrunning'] += data['parkrunning']
totals['junior parkrunning'] += data['junior parkrunning']
totals['5k Cancellations'] += data['5k Cancellations']
totals['junior Cancellations'] += data['junior Cancellations']
totals['Total'] += data['Total']
countries['Total'] = totals
with open('_data/countries-data.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Country','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total'])
for i,j in countries.items():
out = [i]
for k,l in j.items():
if l != 0:
out.append(l)
else:
out.append('')
tsv_writer.writerow(out)
print(now(),"countries-data.tsv saved")
uk = {
'England': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Northern Ireland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Scotland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Wales': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Other': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Total': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
}
uk_totals= {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
for parkrun in events['features']:
if parkrun['properties']['Country'] == "United Kingdom":
if parkrun['properties']['State'] in ['England','Northern Ireland','Scotland','Wales']:
if parkrun['properties']['Status'] == 'parkrunning':
uk[parkrun['properties']['State']]['parkrunning'] += 1
uk[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior parkrunning':
uk[parkrun['properties']['State']]['junior parkrunning'] += 1
uk[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == '5k Cancellation':
uk[parkrun['properties']['State']]['5k Cancellations'] += 1
uk[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior Cancellation':
uk[parkrun['properties']['State']]['junior Cancellations'] += 1
uk[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'PtR':
uk[parkrun['properties']['State']]['5k Cancellations'] += 1
uk[parkrun['properties']['State']]['Total'] += 1
else:
if parkrun['properties']['Status'] == 'parkrunning':
uk['Other']['parkrunning'] += 1
uk['Other']['Total'] += 1
elif parkrun['properties']['Status'] == 'junior parkrunning':
uk['Other']['junior parkrunning'] += 1
uk['Other']['Total'] += 1
elif parkrun['properties']['Status'] == '5k Cancellation':
uk['Other']['5k Cancellations'] += 1
uk['Other']['Total'] += 1
elif parkrun['properties']['Status'] == 'junior Cancellation':
uk['Other']['junior Cancellations'] += 1
uk['Other']['Total'] += 1
elif parkrun['properties']['Status'] == 'PtR':
uk['Other']['5k Cancellations'] += 1
uk['Other']['Total'] += 1
#print(now(),countries)
for state,data in uk.items():
uk_totals['parkrunning'] += data['parkrunning']
uk_totals['junior parkrunning'] += data['junior parkrunning']
uk_totals['5k Cancellations'] += data['5k Cancellations']
uk_totals['junior Cancellations'] += data['junior Cancellations']
uk_totals['Total'] += data['Total']
uk['Total'] = uk_totals
with open('_data/uk-data.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Country','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total'])
for i,j in uk.items():
out = [i]
for k,l in j.items():
if l != 0:
out.append(l)
else:
out.append('')
tsv_writer.writerow(out)
print(now(),"uk-data.tsv saved")
aus = {
'Australian Capital Territory': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'New South Wales': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Northern Territory': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Queensland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'South Australia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Tasmania': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Victoria': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Western Australia': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Total': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
}
aus_totals= {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
for parkrun in events['features']:
if parkrun['properties']['Country'] == "Australia":
if parkrun['properties']['State'] in ['Queensland','New South Wales','Victoria','Australian Capital Territory','Western Australia','Tasmania','South Australia','Northern Territory']:
if parkrun['properties']['Status'] == 'parkrunning':
aus[parkrun['properties']['State']]['parkrunning'] += 1
aus[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior parkrunning':
aus[parkrun['properties']['State']]['junior parkrunning'] += 1
aus[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == '5k Cancellation':
aus[parkrun['properties']['State']]['5k Cancellations'] += 1
aus[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'junior Cancellation':
aus[parkrun['properties']['State']]['junior Cancellations'] += 1
aus[parkrun['properties']['State']]['Total'] += 1
elif parkrun['properties']['Status'] == 'PtR':
aus[parkrun['properties']['State']]['5k Cancellations'] += 1
aus[parkrun['properties']['State']]['Total'] += 1
else:
print(now(),parkrun['properties']['EventLongName'],"in Australia but not in state")
#print(now(),countries)
for state,data in aus.items():
aus_totals['parkrunning'] += data['parkrunning']
aus_totals['junior parkrunning'] += data['junior parkrunning']
aus_totals['5k Cancellations'] += data['5k Cancellations']
aus_totals['junior Cancellations'] += data['junior Cancellations']
aus_totals['Total'] += data['Total']
aus['Total'] = aus_totals
with open('_data/aus-data.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Country','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total'])
for i,j in aus.items():
out = [i]
for k,l in j.items():
if l != 0:
out.append(l)
else:
out.append('')
tsv_writer.writerow(out)
print(now(),"aus-data.tsv saved")
uk_ie_counties = {}
for parkrun in events['features']:
if parkrun['properties']['Country'] in ["United Kingdom", "Ireland"]:
if parkrun['properties']['County'] not in ['', 'Douglas']:
#England
if parkrun['properties']['County'] in ['Bedford','Central Bedfordshire','Luton']:
parkrun['properties']['County'] = 'Bedfordshire'
elif parkrun['properties']['County'] in ['Bracknell Forest','Reading','Slough','West Berkshire','Windsor and Maidenhead','Wokingham']:
parkrun['properties']['County'] = 'Berkshire'
elif parkrun['properties']['County'] in ['Buckinghamshire','Milton Keynes']:
parkrun['properties']['County'] = 'Buckinghamshire'
elif parkrun['properties']['County'] in ['Cambridgeshire','Peterborough']:
parkrun['properties']['County'] = 'Cambridgeshire'
elif parkrun['properties']['County'] in ['Cheshire East','Cheshire','Halton','Warrington']:
parkrun['properties']['County'] = 'Cheshire'
elif parkrun['properties']['County'] in ['Derbyshire','Derby']:
parkrun['properties']['County'] = 'Derbyshire'
elif parkrun['properties']['County'] in ['Devon','Plymouth','Torbay']:
parkrun['properties']['County'] = 'Devon'
elif parkrun['properties']['County'] in ['Dorset','Bournemouth, Christchurch and Poole Council']:
parkrun['properties']['County'] = 'Dorset'
elif parkrun['properties']['County'] in ['Durham','Darlington','Hartlepool']:
parkrun['properties']['County'] = 'Durham'
elif parkrun['properties']['EventLongName'] in ['Tees Barrage parkrun','Billingham junior parkrun']:
parkrun['properties']['County'] = 'Durham'
elif parkrun['properties']['County'] in ['East Yorkshire','Kingston upon Hull']:
parkrun['properties']['County'] = 'East Yorkshire'
elif parkrun['properties']['County'] in ['East Sussex','Brighton and Hove']:
parkrun['properties']['County'] = 'East Sussex'
elif parkrun['properties']['County'] in ['Essex','Southend-on-Sea','Thurrock']:
parkrun['properties']['County'] = 'Essex'
elif parkrun['properties']['County'] in ['Gloucestershire','South Gloucestershire']:
parkrun['properties']['County'] = 'Gloucestershire'
#elif parkrun['properties']['County'] in ['City of Westminster', 'Kensington and Chelsea', 'Hammersmith and Fulham', 'Wandsworth', 'Lambeth', 'Southwark', 'Tower Hamlets', 'Hackney', 'Islington', 'Camden', 'Brent', 'Ealing', 'Hounslow', 'Richmond upon Thames', 'Kingston upon Thames', 'Merton', 'Sutton', 'Croydon', 'Bromley', 'Lewisham', 'Greenwich', 'Bexley', 'Havering', 'Barking and Dagenham', 'Redbridge', 'Newham', 'Waltham Forest', 'Haringey', 'Enfield', 'Barnet', 'Harrow', 'Hillingdon']:
#parkrun['properties']['County'] = 'Greater London'
#pass
elif parkrun['properties']['County'] in ['Manchester','Bolton','Stockport','Tameside','Oldham','Rochdale','Bury','Bolton','Wigan','Salford','Trafford']:
parkrun['properties']['County'] = 'Greater Manchester'
elif parkrun['properties']['County'] in ['Liverpool','Wirral','Knowsley','Sefton','St. Helens']:
parkrun['properties']['County'] = 'Merseyside'
elif parkrun['properties']['County'] in ['Hampshire','Portsmouth','Southampton']:
parkrun['properties']['County'] = 'Hampshire'
elif parkrun['properties']['County'] in ['Kent','Medway']:
parkrun['properties']['County'] = 'Kent'
elif parkrun['properties']['County'] in ['Blackburn with Darwen','Blackpool','Lancashire']:
parkrun['properties']['County'] = 'Lancashire'
elif parkrun['properties']['County'] in ['Leicestershire','Leicester']:
parkrun['properties']['County'] = 'Leicestershire'
elif parkrun['properties']['County'] in ['Lincolnshire','North Lincolnshire','North East Lincolnshire']:
parkrun['properties']['County'] = 'Lincolnshire'
elif parkrun['properties']['County'] in ['Middlesbrough','North Yorkshire','Redcar and Cleveland','York']:
parkrun['properties']['County'] = 'North Yorkshire'
elif parkrun['properties']['County'] in ['Nottinghamshire','Nottingham']:
parkrun['properties']['County'] = 'Nottinghamshire'
elif parkrun['properties']['County'] in ['Shropshire','Telford and Wrekin']:
parkrun['properties']['County'] = 'Shropshire'
elif parkrun['properties']['County'] in ['Bath and North East Somerset','North Somerset','Somerset']:
parkrun['properties']['County'] = 'Somerset'
elif parkrun['properties']['County'] in ['Barnsley','Doncaster','Rotherham','Sheffield']:
parkrun['properties']['County'] = 'South Yorkshire'
elif parkrun['properties']['County'] in ['Staffordshire','Stoke-on-Trent']:
parkrun['properties']['County'] = 'Staffordshire'
elif parkrun['properties']['County'] in ['Gateshead','Newcastle upon Tyne','North Tyneside','South Tyneside','Sunderland']:
parkrun['properties']['County'] = 'Tyne and Wear'
elif parkrun['properties']['County'] in ['Birmingham','Wolverhampton','Dudley','Walsall','Sandwell','Solihull','Coventry']:
parkrun['properties']['County'] = 'West Midlands'
elif parkrun['properties']['County'] in ['Leeds','Wakefield','Kirklees','Calderdale','Bradford']:
parkrun['properties']['County'] = 'West Yorkshire'
elif parkrun['properties']['County'] in ['Swindon','Wiltshire']:
parkrun['properties']['County'] = 'Wiltshire'
#Wales
elif parkrun['properties']['County'] in ['Conwy','Denbighshire','Flintshire','Wrexham']:
parkrun['properties']['County'] = 'Clwyd'
elif parkrun['properties']['County'] in ['Carmarthenshire','Ceredigion','Pembrokeshire']:
parkrun['properties']['County'] = 'Dyfed'
elif parkrun['properties']['County'] in ['Blaenau Gwent','Caerphilly','Monmouthshire','Newport','Torfaen County Borough']:
parkrun['properties']['County'] = 'Gwent'
elif parkrun['properties']['County'] in ['Gwynedd','Anglesey']:
parkrun['properties']['County'] = 'Gwynedd'
elif parkrun['properties']['County'] in ['County Borough of Bridgend','Merthyr Tydfil','<NAME>']:
parkrun['properties']['County'] = 'Mid Glamorgan'
elif parkrun['properties']['County'] in ['Cardiff','Vale of Glamorgan']:
parkrun['properties']['County'] = 'South Glamorgan'
elif parkrun['properties']['County'] in ['Neath Port Talbot','City and County of Swansea']:
parkrun['properties']['County'] = 'West Glamorgan'
if parkrun['properties']['County'] not in uk_ie_counties:
if parkrun['properties']['State'] in ['England','Northern Ireland','Scotland','Wales']:
uk_ie_counties[parkrun['properties']['County']] = {'country': parkrun['properties']['State'],'parkrunning': 0,'junior parkrunning':0,'5k Cancellations':0,'junior Cancellations':0,'Total':0,'events parkrunning':'','events junior parkrunning':'','events 5k cancellation':'','events junior cancellation':''}
else:
uk_ie_counties[parkrun['properties']['County']] = {'country': parkrun['properties']['Country'],'parkrunning': 0,'junior parkrunning':0,'5k Cancellations':0,'junior Cancellations':0,'Total':0,'events parkrunning':'','events junior parkrunning':'','events 5k cancellation':'','events junior cancellation':''}
if parkrun['properties']['Status'] == 'parkrunning':
uk_ie_counties[parkrun['properties']['County']]['parkrunning'] += 1
uk_ie_counties[parkrun['properties']['County']]['Total'] += 1
uk_ie_counties[parkrun['properties']['County']]['events parkrunning'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == 'junior parkrunning':
uk_ie_counties[parkrun['properties']['County']]['junior parkrunning'] += 1
uk_ie_counties[parkrun['properties']['County']]['Total'] += 1
uk_ie_counties[parkrun['properties']['County']]['events junior parkrunning'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == '5k Cancellation':
uk_ie_counties[parkrun['properties']['County']]['5k Cancellations'] += 1
uk_ie_counties[parkrun['properties']['County']]['Total'] += 1
uk_ie_counties[parkrun['properties']['County']]['events 5k cancellation'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == 'junior Cancellation':
uk_ie_counties[parkrun['properties']['County']]['junior Cancellations'] += 1
uk_ie_counties[parkrun['properties']['County']]['Total'] += 1
uk_ie_counties[parkrun['properties']['County']]['events junior cancellation'] += parkrun['properties']['EventShortName'] + '|'
uk_ie_counties_od = collections.OrderedDict(sorted(uk_ie_counties.items()))
uk_ie_counties = {}
for k, v in uk_ie_counties_od.items():
uk_ie_counties[k] = v
uk_ie_counties_totals= {
'country':'',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
england_totals= {
'country':'England',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
ni_totals= {
'country':'Northern Ireland',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
scotland_totals= {
'country':'Scotland',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
wales_totals= {
'country':'Wales',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
ireland_totals= {
'country':'Ireland',
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
}
for county,data in uk_ie_counties.items():
uk_ie_counties_totals['parkrunning'] += data['parkrunning']
uk_ie_counties_totals['junior parkrunning'] += data['junior parkrunning']
uk_ie_counties_totals['5k Cancellations'] += data['5k Cancellations']
uk_ie_counties_totals['junior Cancellations'] += data['junior Cancellations']
uk_ie_counties_totals['Total'] += data['Total']
if data['country'] == 'England':
england_totals['parkrunning'] += data['parkrunning']
england_totals['junior parkrunning'] += data['junior parkrunning']
england_totals['5k Cancellations'] += data['5k Cancellations']
england_totals['junior Cancellations'] += data['junior Cancellations']
england_totals['Total'] += data['Total']
if data['country'] == 'Northern Ireland':
ni_totals['parkrunning'] += data['parkrunning']
ni_totals['junior parkrunning'] += data['junior parkrunning']
ni_totals['5k Cancellations'] += data['5k Cancellations']
ni_totals['junior Cancellations'] += data['junior Cancellations']
ni_totals['Total'] += data['Total']
if data['country'] == 'Scotland':
scotland_totals['parkrunning'] += data['parkrunning']
scotland_totals['junior parkrunning'] += data['junior parkrunning']
scotland_totals['5k Cancellations'] += data['5k Cancellations']
scotland_totals['junior Cancellations'] += data['junior Cancellations']
scotland_totals['Total'] += data['Total']
if data['country'] == 'Wales':
wales_totals['parkrunning'] += data['parkrunning']
wales_totals['junior parkrunning'] += data['junior parkrunning']
wales_totals['5k Cancellations'] += data['5k Cancellations']
wales_totals['junior Cancellations'] += data['junior Cancellations']
wales_totals['Total'] += data['Total']
if data['country'] == 'Ireland':
ireland_totals['parkrunning'] += data['parkrunning']
ireland_totals['junior parkrunning'] += data['junior parkrunning']
ireland_totals['5k Cancellations'] += data['5k Cancellations']
ireland_totals['junior Cancellations'] += data['junior Cancellations']
ireland_totals['Total'] += data['Total']
uk_ie_counties['England Total'] = england_totals
uk_ie_counties['NI Total'] = ni_totals
uk_ie_counties['Scotland Total'] = scotland_totals
uk_ie_counties['Wales Total'] = wales_totals
uk_ie_counties['Ireland Total'] = ireland_totals
uk_ie_counties['Total'] = uk_ie_counties_totals
#print(json.dumps(uk_ie_counties, indent=4))
usa_states = {}
for parkrun in events['features']:
if parkrun['properties']['Country'] in ["USA"]:
if parkrun['properties']['State'] not in usa_states.keys():
usa_states[parkrun['properties']['State']] = {'country': parkrun['properties']['Country'],'parkrunning': 0,'junior parkrunning':0,'5k Cancellations':0,'junior Cancellations':0,'Total':0,'events parkrunning':'','events junior parkrunning':'','events 5k cancellation':'','events junior cancellation':''}
if parkrun['properties']['Status'] == 'parkrunning':
usa_states[parkrun['properties']['State']]['parkrunning'] += 1
usa_states[parkrun['properties']['State']]['Total'] += 1
usa_states[parkrun['properties']['State']]['events parkrunning'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == 'junior parkrunning':
usa_states[parkrun['properties']['State']]['junior parkrunning'] += 1
usa_states[parkrun['properties']['State']]['Total'] += 1
usa_states[parkrun['properties']['State']]['events junior parkrunning'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == '5k Cancellation':
usa_states[parkrun['properties']['State']]['5k Cancellations'] += 1
usa_states[parkrun['properties']['State']]['Total'] += 1
usa_states[parkrun['properties']['State']]['events 5k cancellation'] += parkrun['properties']['EventShortName'] + '|'
elif parkrun['properties']['Status'] == 'junior Cancellation':
usa_states[parkrun['properties']['State']]['junior Cancellations'] += 1
usa_states[parkrun['properties']['State']]['Total'] += 1
usa_states[parkrun['properties']['State']]['events junior cancellation'] += parkrun['properties']['EventShortName'] + '|'
usa_states_od = collections.OrderedDict(sorted(usa_states.items()))
usa_states = {}
for k, v in usa_states_od.items():
usa_states[k] = v
usa_states['USA Total'] = countries['USA']
usa_states['USA Total']['country'] = 'USA'
with open('_data/counties/england.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
if j['country'] == 'England':
if i == 'England Total':
out = ['Total']
else:
out = [i]
for k,l in j.items():
if l == 'England':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'England Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/england.tsv saved")
with open('_data/counties/ni.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
if j['country'] == 'Northern Ireland':
if i == 'NI Total':
out = ['Total']
else:
out = [i]
for k,l in j.items():
if l == 'Northern Ireland':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'NI Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/ni.tsv saved")
with open('_data/counties/scotland.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
if j['country'] == 'Scotland':
if i == 'Scotland Total':
out = ['Total']
else:
out = [i]
for k,l in j.items():
if l == 'Scotland':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'Scotland Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/scotalnd.tsv saved")
with open('_data/counties/wales.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
if j['country'] == 'Wales':
if i == 'Wales Total':
out = ['Total']
else:
out = [i]
for k,l in j.items():
if l == 'Wales':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'Wales Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/wales.tsv saved")
with open('_data/counties/ireland.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
if j['country'] == 'Ireland':
if i == 'Ireland Total':
out = ['Total']
else:
out = [i]
for k,l in j.items():
if l == 'Ireland':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'Ireland Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/ireland.tsv saved")
with open('_data/counties/all.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['County','Country','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in uk_ie_counties.items():
out = [i]
for k,l in j.items():
if l not in [0,[]] :
out.append(l)
else:
out.append('')
if 'Total' in i:
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"counties/all.tsv saved")
with open('_data/usa-states.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['States','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total','5k Events Running','junior Events Running','5k Events Cancelled','junior Events Cancelled'])
for i,j in usa_states.items():
if j['country'] == 'USA':
if i == 'USA Total':
out = ['USA']
else:
out = [i]
for k,l in j.items():
if l == 'USA':
pass
elif l not in [0,[]]:
out.append(l)
else:
out.append('')
if i == 'USA Total':
for x in range(4):
out.append('')
tsv_writer.writerow(out)
print(now(),"usa-states.tsv saved")
cancellations_changes = []
cancellations_additions = []
cancellations_removals = []
for i in old_cancellations_data:
oldwebsite = i[4]
i.pop(4)
if i not in cancellations_data:
#i.append('Removed')
out = i
for parkrun in events['features']:
if parkrun['properties']['EventLongName'] == i[1]:
out.append(oldwebsite)
break
cancellations_removals.append(out)
for i in cancellations_data:
if i not in old_cancellations_data:
#i.append('Added')
out = i
for parkrun in events['features']:
if parkrun['properties']['EventLongName'] == i[1]:
out.append(parkrun['properties']['Website'])
break
cancellations_additions.append(out)
for cancellation in cancellations_data:
if len(cancellation) <= 4:
out = ''
for parkrun in events['features']:
if parkrun['properties']['EventLongName'] == cancellation[1]:
out = parkrun['properties']['Website']
break
cancellation.append(out)
cancellation = rem_dups(cancellation)
#print(now(),cancellation)
#print(now(),cancellations_changes)
cancellations_additions.sort()
cancellations_removals.sort()
cancellations_data.sort(key=sortByIndex0)
cancellations_data.sort(key=sortByIndex1)
with open('_data/cancellations.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Date','Event','Country','Cancellation Note','Website'])
for event in cancellations_data:
tsv_writer.writerow(event)
print(now(),"cancellations.tsv saved")
if cancellations_additions != []:
with open('_data/cancellation-additions.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Date','Event','Country','Cancellation Note','Website'])
for event in cancellations_additions:
tsv_writer.writerow(event)
event.append('Added')
cancellations_changes.append(event)
tsv_writer.writerow([now(),'','',''])
print(now(),"cancellation-additions.tsv saved")
if cancellations_removals != []:
with open('_data/cancellation-removals.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Date','Event','Country','Previous Cancellation Note','Website'])
for event in cancellations_removals:
tsv_writer.writerow(event)
event.append('Removed')
cancellations_changes.append(event)
tsv_writer.writerow([now(),'','',''])
print(now(),"cancellation-removals.tsv saved")
cancellations_changes.sort()
if cancellations_changes != []:
with open('_data/cancellation-changes.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Date','Event','Country','Cancellation Note','Website','Added or Removed'])
for event in cancellations_changes:
tsv_writer.writerow(event)
tsv_writer.writerow([now(),'','','',''])
print(now(),"cancellation-changes.tsv saved")
now_saved = now()
if now_saved.month < 10:
month = '0'+str(now_saved.month)
else:
month = str(now_saved.month)
if now_saved.day <10:
day = '0'+str(now_saved.day)
else:
day = str(now_saved.day)
if now_saved.hour < 10:
hour = '0'+str(now_saved.hour)
else:
hour = str(now_saved.hour)
if now_saved.minute <10:
minute = '0'+str(now_saved.minute)
else:
minute = str(now_saved.minute)
if now_saved.second <10:
second = '0'+str(now_saved.second)
else:
second = str(now_saved.second)
file = str(now_saved.year)+'-'+month+'-'+day+'-'+hour+minute+second+'-update.md'
with open('_posts/Cancellation Updates/'+file, "w+", encoding='utf-8', newline='') as f:
out = '---' + '\n'
out += 'layout: post' + '\n'
out += 'title: '+str(now_saved.year)+'/'+month+'/'+ day +' '+hour+':'+minute+' UTC Update' + '\n'
out += 'date: '+str(now_saved.year)+'-'+month+'-'+day+' '+hour+':'+minute+':'+second+' +0000' + '\n'
out += 'author: Cancellations Bot' + '\n'
out += "category: 'Cancellation Update'" + '\n'
out += '---' + '\n'
out += '\n'
if cancellations_additions != []:
out += '<h3>New Cancellations</h3>' + '\n'
out += "<div class='hscrollable'>" + '\n'
out += "<table style='width: 100%'>" + '\n'
out += ' <tr>' + '\n'
out += ' <th>Event</th>' + '\n'
out += ' <th>Country</th>' + '\n'
out += ' <th>Date</th>' + '\n'
out += ' <th>Cancellation Note</th>' + '\n'
out += ' </tr>' + '\n'
for event in cancellations_additions:
out += ' <tr>' + '\n'
if event[3] not in ['','Added']:
out += ' <td><a href="' + event[4] + '">' + event[1] + '</a></td>' + '\n'
else:
out += ' <td>' + event[1] + '</td>' + '\n'
out += ' <td>' + event[2] + '</td>' + '\n'
out += ' <td>' + event[0] + '</td>' + '\n'
out += ' <td>' + event[3] + '</td>' + '\n'
out += ' </tr>' + '\n'
out += '</table>' + '\n'
out += '</div>' + '\n'
if cancellations_removals != []:
out += '<h3>Cancellations Removed</h3>' + '\n'
out += "<div class='hscrollable'>" + '\n'
out += "<table style='width: 100%'>" + '\n'
out += ' <tr>' + '\n'
out += ' <th>Event</th>' + '\n'
out += ' <th>Country</th>' + '\n'
out += ' <th>Date</th>' + '\n'
out += ' <th>Previous Cancellation Note</th>' + '\n'
out += ' </tr>' + '\n'
for event in cancellations_removals:
out += ' <tr>' + '\n'
if event[3] not in ['','Removed']:
out += ' <td><a href="' + event[4] + '">' + event[1] + '</a></td>' + '\n'
else:
out += ' <td>' + event[1] + '</td>' + '\n'
out += ' <td>' + event[2] + '</td>' + '\n'
out += ' <td>' + event[0] + '</td>' + '\n'
out += ' <td>' + event[3] + '</td>' + '\n'
out += ' </tr>' + '\n'
out += '</table>' + '\n'
out += '</div>' + '\n'
f.write(out)
print(now(),file,'saved')
out = 'New Cancellations Update:\nhttps://parkruncancellations.com/'+str(now_saved.year)+'/'+month+'/'+day+'/'+hour+minute+second+'-update/'
tweet(out)
with open('_data/raw/states.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Event','Country','State','County'])
for event in new_states_list:
tsv_writer.writerow(event)
print(now(),'raw/states.tsv saved')
upcoming_events_table.sort()
with open('_data/raw/ue.tsv','wt', encoding='utf-8', newline='') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['Event','Country'])
for event in upcoming_events_table:
tsv_writer.writerow([event[0],event[4]])
print(now(),'raw/ue.tsv saved')
def findpendatapoint(data, key):
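    # Walk backwards from the second-most-recent history entry and return the most
    # recent recorded value for `key`; returns 0 if the start of the history is
    # reached first.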
for i in range(-2,-100,-1):
try:
return data[i][key]
except KeyError:
pass
except IndexError:
return 0
def writehistory(file, data):
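    # Append the latest counts for one region to its JSON history file. The previous
    # entry is thinned down to only the fields that actually changed (plus Total and
    # time) so the history files stay small.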
data['time'] = str(now())
try:
with open('_data/history/'+file,'r', encoding='utf-8', newline='\n') as f:
old_data = json.loads(f.read())
if now().weekday() == 0 and now().hour <= 1:
olddate = datetime.datetime.strptime(old_data[-1]['time'], '%Y-%m-%d %H:%M:%S.%f')
if olddate.date() != now().date() and olddate.hour != now().hour:
old_data = [old_data[-1],data]
except FileNotFoundError:
old_data = []
print(now(),"history/"+file+" read")
last_data = old_data[-1]
new_last_data = {}
if last_data['parkrunning'] != data['parkrunning'] or findpendatapoint(old_data,'parkrunning') != data['parkrunning']:
new_last_data['parkrunning'] = last_data['parkrunning']
if last_data['junior parkrunning'] != data['junior parkrunning'] or findpendatapoint(old_data,'junior parkrunning') != data['junior parkrunning']:
new_last_data['junior parkrunning'] = last_data['junior parkrunning']
if last_data['5k Cancellations'] != data['5k Cancellations'] or findpendatapoint(old_data,'5k Cancellations') != data['5k Cancellations']:
new_last_data['5k Cancellations'] = last_data['5k Cancellations']
if last_data['junior Cancellations'] != data['junior Cancellations'] or findpendatapoint(old_data,'junior Cancellations') != data['junior Cancellations']:
new_last_data['junior Cancellations'] = last_data['junior Cancellations']
old_data.pop(-1)
if len(new_last_data) != 0:
new_last_data['Total'] = last_data['Total']
new_last_data['time'] = last_data['time']
old_data.append(new_last_data)
new_data = {
"parkrunning": data['parkrunning'],
"junior parkrunning": data['junior parkrunning'],
"5k Cancellations": data['5k Cancellations'],
"junior Cancellations": data['junior Cancellations'],
"Total": data['Total'],
"time": data['time']
}
old_data.append(new_data)
with open('_data/history/'+file,'wt', encoding='utf-8', newline='\n') as f:
f.write(json.dumps(old_data,indent=4)+"\n")
print(now(),"history/"+file+" saved")
writehistory('global.json',countries['Total'])
writehistory('australia.json',countries['Australia'])
writehistory('austria.json',countries['Austria'])
writehistory('canada.json',countries['Canada'])
writehistory('denmark.json',countries['Denmark'])
writehistory('eswatini.json',countries['Eswatini'])
writehistory('finland.json',countries['Finland'])
writehistory('france.json',countries['France'])
writehistory('germany.json',countries['Germany'])
writehistory('ireland.json',countries['Ireland'])
writehistory('italy.json',countries['Italy'])
writehistory('japan.json',countries['Japan'])
writehistory('malaysia.json',countries['Malaysia'])
writehistory('namibia.json',countries['Namibia'])
writehistory('netherlands.json',countries['Netherlands'])
writehistory('newzealand.json',countries['New Zealand'])
writehistory('norway.json',countries['Norway'])
writehistory('poland.json',countries['Poland'])
writehistory('russia.json',countries['Russia'])
writehistory('singapore.json',countries['Singapore'])
writehistory('southafrica.json',countries['South Africa'])
writehistory('sweden.json',countries['Sweden'])
writehistory('unitedkingdom.json',countries['United Kingdom'])
writehistory('usa.json',countries['USA'])
writehistory('uk/england.json',uk['England'])
writehistory('uk/ni.json',uk['Northern Ireland'])
writehistory('uk/scotland.json',uk['Scotland'])
writehistory('uk/wales.json',uk['Wales'])
writehistory('aus/act.json',aus['Australian Capital Territory'])
writehistory('aus/nsw.json',aus['New South Wales'])
writehistory('aus/nt.json',aus['Northern Territory'])
writehistory('aus/qld.json',aus['Queensland'])
writehistory('aus/sa.json',aus['South Australia'])
writehistory('aus/tas.json',aus['Tasmania'])
writehistory('aus/vic.json',aus['Victoria'])
writehistory('aus/wa.json',aus['Western Australia'])
writehistory('usa/az.json',usa_states['Arizona'])
writehistory('usa/ca.json',usa_states['California'])
writehistory('usa/co.json',usa_states['Colorado'])
writehistory('usa/fl.json',usa_states['Florida'])
writehistory('usa/ga.json',usa_states['Georgia'])
writehistory('usa/id.json',usa_states['Idaho'])
writehistory('usa/il.json',usa_states['Illinois'])
writehistory('usa/in.json',usa_states['Indiana'])
writehistory('usa/ky.json',usa_states['Kentucky'])
writehistory('usa/md.json',usa_states['Maryland'])
writehistory('usa/ma.json',usa_states['Massachusetts'])
writehistory('usa/mi.json',usa_states['Michigan'])
writehistory('usa/mn.json',usa_states['Minnesota'])
writehistory('usa/nj.json',usa_states['New Jersey'])
writehistory('usa/nc.json',usa_states['North Carolina'])
writehistory('usa/oh.json',usa_states['Ohio'])
writehistory('usa/or.json',usa_states['Oregon'])
writehistory('usa/pa.json',usa_states['Pennsylvania'])
writehistory('usa/tn.json',usa_states['Tennessee'])
writehistory('usa/tx.json',usa_states['Texas'])
writehistory('usa/vt.json',usa_states['Vermont'])
writehistory('usa/va.json',usa_states['Virginia'])
writehistory('usa/wa.json',usa_states['Washington'])
writehistory('usa/dc.json',usa_states['Washington, D.C.'])
writehistory('usa/wv.json',usa_states['West Virginia'])
writehistory('usa/wi.json',usa_states['Wisconsin'])
print(now(),'Script End')
```
#### File: josh-justjosh/parkrun-Cancellations/parse changes.py
```python
import csv
import datetime
def parse():
cancellation_changes = []
with open('_data/cancellation-changes.tsv','r', encoding='utf-8', newline='') as f:
tsv_reader = csv.reader(f, delimiter="\t")
for row in tsv_reader:
cancellation_changes.append(row)
#print(row)
cancellation_changes.remove(['Event','Country','Cancellation Note','Added or<br />Removed'])
time = cancellation_changes[-1][0]
cancellation_changes.pop(-1)
cancellations_additions = []
with open('_data/cancellation-additions.tsv','r', encoding='utf-8', newline='') as f:
tsv_reader = csv.reader(f, delimiter="\t")
for row in tsv_reader:
cancellations_additions.append(row)
#print(row)
cancellations_additions.remove(['Event','Country','Cancellation Note'])
cancellations_additions.pop(-1)
cancellations_removals = []
with open('_data/cancellation-removals.tsv','r', encoding='utf-8', newline='') as f:
tsv_reader = csv.reader(f, delimiter="\t")
for row in tsv_reader:
cancellations_removals.append(row)
#print(row)
cancellations_removals.remove(['Event','Country','Previous Cancellation Note'])
cancellations_removals.pop(-1)
now = datetime.datetime(int(time[0:4]),int(time[5:7]),int(time[8:10]),int(time[11:13]),int(time[14:16]),int(time[17:19]))
if now.month < 10:
month = '0'+str(now.month)
else:
month = str(now.month)
if now.day <10:
day = '0'+str(now.day)
else:
day = str(now.day)
if now.hour < 10:
hour = '0'+str(now.hour)
else:
hour = str(now.hour)
if now.minute <10:
minute = '0'+str(now.minute)
else:
minute = str(now.minute)
if now.second <10:
second = '0'+str(now.second)
else:
second = str(now.second)
file = str(now.year)+"-"+month+"-"+day+"-"+hour+minute+second+"-update.md"
with open('_posts/Cancellation Updates/'+file, "w+", encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(['---'])
writer.writerow(['layout: post'])
writer.writerow(['title: '+str(now.year)+'/'+month+'/'+ day +' '+hour+':'+minute+' UTC Update'])
writer.writerow(['date: '+str(now.year)+'-'+month+'-'+day+' '+hour+':'+minute+':'+second+' +0000'])
writer.writerow(['author: Cancellations Bot'])
writer.writerow(['---'])
writer.writerow([])
if cancellations_additions != []:
writer.writerow(['<h3>New Cancellations</h3>'])
writer.writerow(["<table style='width: 100%'>"])
writer.writerow([' <tr>'])
writer.writerow([' <th>Event</th>'])
writer.writerow([' <th>Country</th>'])
writer.writerow([' <th>Cancellation Note</th>'])
writer.writerow([' </tr>'])
for event in cancellations_additions:
writer.writerow([' <tr>'])
for cell in event:
towrite = ' <td>'+cell+'</td>'
writer.writerow([towrite.replace('"','')])
writer.writerow([' </tr>'])
writer.writerow(['</table>'])
if cancellations_removals != []:
writer.writerow(['<h3>Cancellations Removed</h3>'])
writer.writerow(["<table style='width: 100%'>"])
writer.writerow([' <tr>'])
writer.writerow([' <th>Event</th>'])
writer.writerow([' <th>Country</th>'])
writer.writerow([' <th>Previous Cancellation Note</th>'])
writer.writerow([' </tr>'])
for event in cancellations_removals:
writer.writerow([' <tr>'])
for cell in event:
towrite = ' <td>'+cell+'</td>'
writer.writerow([towrite.replace('"','')])
writer.writerow([' </tr>'])
writer.writerow(['</table>'])
print(file,'saved')
``` |
{
"source": "joshjzaslow/jekkish",
"score": 2
} |
#### File: jekkish/jekkish/jekkish.py
```python
import sys
from os.path import expanduser
from os.path import splitext
from os.path import split
from os.path import exists
from os.path import realpath
from os.path import dirname
from os.path import join
from os import remove, stat
import errno
from subprocess import check_call
import yaml
import argparse
from time import sleep
from jinja2 import Environment
from jekkish.texhelpers import escape_tex, TeXLoader
__version__ = "0.3.2"
class Jekkish():
def __init__(self, target_file, job_name=False):
self.target_file = target_file
fullpath, ext = splitext(self.target_file.name)
path, filename = split(fullpath)
self.temp_file = filename + '._' + ext[1:]
self.job_name = job_name if job_name else filename
self.variables = self.load_variables()
self.home = expanduser("~")
self.template_dir = self.home + '/.jekkish'
if 'xelatex' in self.variables:
self.engine = 'xelatex'
self.default_template = 'default-xe'
else:
self.engine = 'pdflatex'
self.default_template = 'default'
self.command = '{} --jobname={} {}'.format(
self.engine,
self.job_name,
self.temp_file)
def load_variables(self, division_string="---\n"):
""" Converts the file to YAML and returns the parsed data.
Ignores any content above the YAML header (start_yaml),
Loads everything after the YAML as part of the 'content' variable """
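        # Illustrative example (hypothetical input, not from the source): a file like
        #   ---
        #   title: Example
        #   template: default
        #   ---
        #
        #   Hello, world.
        # yields roughly {'title': 'Example', 'template': 'default',
        # 'content': 'Hello, world.\n'} once parsed by yaml below.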
start_yaml = False
end_yaml = False
variables = ""
content = "content: >"
for line in self.target_file:
if str(line) == division_string:
if start_yaml:
end_yaml = True
else:
start_yaml = True
else:
if start_yaml:
if not end_yaml:
variables += line
else:
if line == "\n":
content += " {}".format(line)
else:
content += " {}\n".format(line)
variables += content
        return yaml.safe_load(variables)
def make_file(self):
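        # Render the Jinja2 template (using the LaTeX-friendly delimiters configured
        # below) with the parsed variables and write the result to the temporary
        # ._<ext> file.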
texenv = Environment(loader=TeXLoader(self.home))
texenv.block_start_string = '((*'
texenv.block_end_string = '*))'
texenv.variable_start_string = '((('
texenv.variable_end_string = ')))'
texenv.comment_start_string = '((='
texenv.comment_end_string = '=))'
texenv.filters['escape_tex'] = escape_tex
if "template" not in self.variables:
self.variables["template"] = self.default_template
template_file = self.template_dir + \
'/' + self.variables["template"] + '.tex'
if not exists(template_file):
template_file = join(
dirname(realpath(__file__)),
self.variables["template"] + '.tex')
template = texenv.get_template(template_file)
f = open(self.temp_file, "w")
f.write(template.render(self.variables))
f.close()
print("Temporary LaTeX file created ({})\n---".format(self.temp_file))
def make_pdf(self, clean=True):
""" Calls pdftex and (optionally) removes temporary files """
print("Generating PDF\n---")
check_call(self.command, shell=True)
if clean:
for ext in ['aux', 'log', 'out', 'ent']:
try:
remove(self.job_name + '.' + ext)
except (OSError, IOError) as e:
# Use FileNotFoundError when python 2 is dropped
if e.errno != errno.ENOENT:
raise
def render(self):
self.make_file()
self.make_pdf()
def watch(self):
print("---\nJekkish running in watch mode\n")
print("---\nPerforming initial rendering\n---")
last_time = False
while True:
if last_time != stat(self.target_file.name).st_mtime:
last_time = stat(self.target_file.name).st_mtime
sleep(0.1)
self.target_file = open(self.target_file.name, 'r')
self.variables = self.load_variables()
self.make_file()
sleep(0.1)
self.render()
print("---\nWatching {}\n---".format(self.target_file.name))
def main():
parser = argparse.ArgumentParser(
prog="Jekkish",
description="A template-based pdftex CLI frontend inspired by Jekyll"
)
parser.add_argument(
'filename',
type=argparse.FileType('r'),
default=sys.stdin,
help='The file to process'
)
parser.add_argument(
'jobname',
nargs="?",
default=False,
help='Job name for pdftex output'
)
parser.add_argument(
'--watch',
action='store_const',
const='watch',
        help='Watch the input file for changes'
)
parser.add_argument(
'--xelatex',
action='store_const',
const=True,
help='Use xeLaTeX for rendering'
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__)
)
args = parser.parse_args()
new_file = Jekkish(args.filename, args.jobname)
if args.watch:
new_file.watch()
else:
new_file.render()
if __name__ == "__main__":
main()
``` |
{
"source": "josh-kaplan/dndGenerator",
"score": 3
} |
#### File: lib/schemas/__init__.py
```python
import json
class Schema:
@classmethod
def _build(cls):
        # Attributes
#print(cls)
attrs = [ attr for attr in dir(cls) if not attr.startswith('_') ]
# Create instance
ctx = json.dumps(json.loads(cls._ctx))
instance = cls()
# Convert to JSON object
obj = {}
for attr in attrs:
obj[attr] = getattr(cls, attr).lookup(ctx)
# Print it
formatter = cls().__str__()
print(formatter.format(**obj))
``` |
{
"source": "josh-kaplan/numerical-methods",
"score": 4
} |
#### File: newtons_method/py/newton.py
```python
from __future__ import print_function, division
import sys
from sympy import *
MAJ = sys.version_info.major
MIN = sys.version_info.minor
# These paramaters may be tweaked to adjust the performance of the program
# TOL - the tolerance used to test if the answer is "close enough"
# MAX - the maximum number of iterations before giving up
# GUESS - the initial guess for the root
TOL = 1e-5
MAX = 1e5
GUESS = -1
# Define your symbols, function, and symbol you're solving for.
#M, E, e, p = symbols('M E e p')
#FUNC = E - e*sin(E) - M
#SOLVE_FOR = E
x, y, z = symbols('x y z')
FUNC = x**2 + 2*x
SOLVE_FOR = x
# This is the function being used.
def f(x):
return FUNC.subs(SOLVE_FOR, x)
# This is the derivative of the function, f.
def df(x):
return diff(FUNC).subs(SOLVE_FOR, x)
# This is Newton's Method. This should not be changed.
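# Update rule: x_{n+1} = x_n - f(x_n)/f'(x_n), iterated until |f(x_n)| < TOL
# or MAX iterations have been performed.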
def newtons_method(f, df, x0, tol):
x = x0
    for n in range(0, int(MAX)):
x = x - f(x)/df(x) # This is the heart of Newton's Method
if abs(0 - f(x)) < tol: # If within tolerance of zero, stop
break
else:
print('Maximum number of iterations reached.')
print('Root: ', x)
print('f(%.2f) = ' % x, f(x))
print('Iterations: ', n)
if __name__ == '__main__':
newtons_method(f, df, GUESS, TOL)
``` |
{
"source": "joshkarbi/NBA-Fantasy-Draft-Tool",
"score": 4
} |
#### File: NBA-Fantasy-Draft-Tool/archive/rank-players.py
```python
import json
import sys
class Player:
def __init__(self, score, name, position):
self.score = score
self.name = name
self.pos = position
def __lt__(self, other):
return self.score < other.score
# Read the config file; based on the categories and weights it defines, assign a
# score to each player and output a "drafting list" CSV to use in the draft.
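# For illustration, the strategy config (hypothetical stat names and weights, not
# taken from the repository) is assumed to look like:
#   {
#       "player_stat_categories": {"points": 1.0, "rebounds": 1.2, "assists": 1.5}
#   }
# where each key must match a column header in player_combined_data.csv.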
with open('data/player_combined_data.csv') as data:
strategy_name = str(sys.argv[1])
config = json.load(open(strategy_name) )
print(config)
# get indices of each CSV category
tokens = {}
i = 0
for token in data.readlines()[0].split(','):
tokens[token.replace('\n', '')] = i
i+= 1
print("FOUND TOKENS:")
print(tokens)
print("\n\n")
    # collect a Player (name, position, score) for every row in the data file
    data.seek(0)
    results = []
for line in data.readlines()[1:]:
line = line.split(',')
score = 0
for category in config["player_stat_categories"].keys():
score_this_category = float(line[tokens[category]]) * config["player_stat_categories"][category]
score += score_this_category
print(category, " ", score_this_category)
print("Total player score ", score)
name = line[tokens["name"]]
positions = line[tokens["positions"]]
results.append(Player(score, name, positions))
results.sort(reverse=True)
outfile2 = open('output_files/players_ranked_by_score_' + strategy_name.replace('.json', '').replace('config/', '') + ".csv", 'w+')
outfile2.write("Name,Position,Score\n")
for result in results:
outfile2.write(result.name+","+result.pos+","+str(result.score)+"\n")
``` |
{
"source": "JoshKarpel/brood",
"score": 2
} |
#### File: brood/brood/message.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum, unique
from functools import total_ordering
from typing import Union
from brood.config import CommandConfig
@unique
@total_ordering
class Verbosity(str, Enum):
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
@property
def is_debug(self) -> bool:
return self <= self.DEBUG
def __int__(self) -> int:
if self is self.DEBUG:
return 0
elif self is self.INFO:
return 1
elif self is self.WARNING:
return 2
elif self is self.ERROR:
return 3
else: # pragma: unreachable
raise Exception("unreachable")
def __lt__(self, other: object) -> bool:
if isinstance(other, Verbosity):
return int(self) < int(other)
return NotImplemented
@dataclass(frozen=True)
class InternalMessage:
text: str
verbosity: Verbosity
timestamp: datetime = field(default_factory=datetime.now)
@dataclass(frozen=True)
class CommandMessage:
text: str
command_config: CommandConfig
timestamp: datetime = field(default_factory=datetime.now)
Message = Union[InternalMessage, CommandMessage]
```
#### File: brood/tests/conftest.py
```python
from io import StringIO
import pytest
from rich.console import Console
from typer.testing import CliRunner
@pytest.fixture
def runner() -> CliRunner:
return CliRunner()
@pytest.fixture
def output() -> StringIO:
return StringIO()
@pytest.fixture
def console(output: StringIO) -> Console:
return Console(
file=output,
force_terminal=True,
width=80,
)
```
#### File: brood/tests/test_cli.py
```python
from typing import List
import pytest
from typer.testing import CliRunner
from brood.constants import PACKAGE_NAME, __version__
from brood.main import app
def test_help(runner: CliRunner) -> None:
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
def test_version(runner: CliRunner) -> None:
result = runner.invoke(app, ["version"])
assert result.exit_code == 0
assert PACKAGE_NAME in result.output
assert __version__ in result.output
@pytest.mark.parametrize("args", [[], ["--plain"]])
def test_schema(runner: CliRunner, args: List[str]) -> None:
result = runner.invoke(app, ["schema", *args])
assert result.exit_code == 0
```
#### File: brood/tests/test_command_manager.py
```python
from __future__ import annotations
from asyncio import Queue
from typing import Tuple
import pytest
from brood.command import Command, Event
from brood.config import CommandConfig, OnceConfig
from brood.constants import ON_WINDOWS
from brood.fanout import Fanout
from brood.message import CommandMessage, Message
from brood.utils import drain_queue
@pytest.fixture
def once_config(command: str) -> CommandConfig:
return CommandConfig(
name="test",
command=command,
starter=OnceConfig(),
)
PackedManagerFixtureOutput = Tuple[Command, "Queue[Event]", "Queue[Message]"]
@pytest.fixture
async def once_manager_(once_config: CommandConfig) -> PackedManagerFixtureOutput:
events: Fanout[Event] = Fanout()
events_consumer = events.consumer()
messages: Fanout[Message] = Fanout()
messages_consumer = messages.consumer()
return (
await Command.start(
config=once_config,
events=events,
messages=messages,
width=80,
),
events_consumer,
messages_consumer,
)
@pytest.fixture
def once_manager(once_manager_: PackedManagerFixtureOutput) -> Command:
return once_manager_[0]
@pytest.fixture
def events(once_manager_: PackedManagerFixtureOutput) -> Queue[Event]:
return once_manager_[1]
@pytest.fixture
def messages(once_manager_: PackedManagerFixtureOutput) -> Queue[Message]:
return once_manager_[2]
@pytest.mark.parametrize("command", ["echo hi", "echo hi 1>&2"])
async def test_command_output_captured_as_command_message(
once_manager: Command, messages: Queue[Message], command: str
) -> None:
await once_manager.wait()
drained = await drain_queue(messages)
print(drained)
command_messages = [message for message in drained if isinstance(message, CommandMessage)]
print(command_messages)
assert len(command_messages) == 1
message = command_messages[0]
assert message.text == "hi"
assert message.command_config is once_manager.config
@pytest.mark.parametrize("command, exit_code", [("exit 0", 0), ("exit 1", 1)])
async def test_capture_exit_code(once_manager: Command, command: str, exit_code: int) -> None:
await once_manager.wait()
assert once_manager.exit_code == exit_code
@pytest.mark.parametrize("command", ["sleep 1"])
async def test_has_exited_lifecycle(once_manager: Command, command: str) -> None:
assert not once_manager.has_exited
await once_manager.wait()
assert once_manager.has_exited
@pytest.mark.parametrize("command", ["sleep 1000"])
async def test_can_terminate_before_completion(once_manager: Command, command: str) -> None:
await once_manager.terminate()
await once_manager.wait()
assert once_manager.exit_code == (-15 if not ON_WINDOWS else 1)
@pytest.mark.parametrize("command", ["sleep 1000"])
async def test_can_kill_before_completion(once_manager: Command, command: str) -> None:
await once_manager.kill()
await once_manager.wait()
assert once_manager.exit_code == (-9 if not ON_WINDOWS else 1)
@pytest.mark.parametrize("command", ["echo hi"])
async def test_can_stop_after_exit(once_manager: Command, command: str) -> None:
await once_manager.wait()
await once_manager.terminate()
@pytest.mark.parametrize("command", ["echo hi"])
async def test_can_kill_after_exit(once_manager: Command, command: str) -> None:
await once_manager.wait()
await once_manager.kill()
```
#### File: brood/tests/test_fanout.py
```python
from brood.fanout import Fanout
from brood.utils import drain_queue
async def test_each_subscriber_gets_each_message() -> None:
fq: Fanout[int] = Fanout()
a = fq.consumer()
b = fq.consumer()
await fq.put(0)
await fq.put(1)
assert await drain_queue(a, buffer=None) == [0, 1]
assert await drain_queue(b, buffer=None) == [0, 1]
```
#### File: brood/tests/test_monitor.py
```python
from __future__ import annotations
from asyncio import Queue, create_task, sleep
from brood.utils import drain_queue
async def sleep_then_put(queue: Queue[float], s: float) -> None:
await sleep(s)
await queue.put(s)
async def test_drain_queue_with_buffer() -> None:
queue: Queue[float] = Queue()
await sleep_then_put(queue, 0)
create_task(sleep_then_put(queue, 0.5))
assert queue.qsize() == 1
assert [0, 0.5] == await drain_queue(queue, buffer=1)
async def test_drain_queue() -> None:
queue: Queue[float] = Queue()
await sleep_then_put(queue, 0)
create_task(sleep_then_put(queue, 0.5))
assert queue.qsize() == 1
assert [0] == await drain_queue(queue, buffer=None)
``` |
{
"source": "JoshKarpel/chtc-bot",
"score": 2
} |
#### File: tests/commands/test_jobads.py
```python
import textwrap
import time
import bs4
import pytest
from web.commands import scrapers
@pytest.fixture
def jch():
return scrapers.JobAdsCommandHandler(rescrape_timeout=300)
def test_get_description_returns_none_if_it_fails_to_find_the_attr(jch):
assert jch.get_description(bs4.BeautifulSoup("", features="html.parser"), "foo") is None
# This is a big blob of sample text taken from the HTCondor manual HTML.
# If that format ever changes, tests that depend on this will need to change as well!
ATTRS_HTML = textwrap.dedent(
"""
<dt><code class="docutils literal notranslate"><span class="pre">AcctGroup</span></code></dt>
<dd>The accounting group name, as set in the submit description file via
the
<strong>accounting_group</strong> <span class="target" id="index-5"></span>
command. This attribute is only present if an accounting group was
requested by the submission. See the <a class="reference internal" href="../admin-manual/user-priorities-negotiation.html"><span class="doc">User Priorities and Negotiation</span></a> section
for more information about accounting groups.
<span class="target" id="index-6"></span>
<span class="target" id="index-7"></span> </dd>
<dt><code class="docutils literal notranslate"><span class="pre">AcctGroupUser</span></code></dt>
<dd>The user name associated with the accounting group. This attribute
is only present if an accounting group was requested by the
submission. <span class="target" id="index-8"></span>
<span class="target" id="index-9"></span> </dd>
<dt><code class="docutils literal notranslate"><span class="pre">AllRemoteHosts</span></code></dt>
<dd>String containing a comma-separated list of all the remote machines
running a parallel or mpi universe job.
<span class="target" id="index-10"></span>
<span class="target" id="index-11"></span> </dd>
<dt><code class="docutils literal notranslate"><span class="pre">Args</span></code></dt>
<dd>A string representing the command line arguments passed to the job,
when those arguments are specified using the old syntax, as
specified in
the <a class="reference internal" href="../man-pages/condor_submit.html"><span class="doc">condor_submit</span></a> section.
<span class="target" id="index-12"></span>
<span class="target" id="index-13"></span> </dd>
<dt><code class="docutils literal notranslate"><span class="pre">Arguments</span></code></dt>
<dd>A string representing the command line arguments passed to the job,
when those arguments are specified using the new syntax, as
specified in
the <a class="reference internal" href="../man-pages/condor_submit.html"><span class="doc">condor_submit</span></a> section.
<span class="target" id="index-14"></span>
<span class="target" id="index-15"></span> </dd>
"""
).strip()
ATTRS_SOUP = bs4.BeautifulSoup(ATTRS_HTML, "html.parser")
@pytest.mark.parametrize(
"attr, expected, anchor",
[
(
"AcctGroupUser",
"""
*AcctGroupUser*
>The user name associated with the accounting group. This attribute is only present if an accounting group was requested by the submission.
""",
"index-7",
),
("NOPE", None, None),
],
)
def test_get_description(jch, attr, expected, anchor):
# clean up the triple-quoted string
expected = (textwrap.dedent(expected).strip(), anchor) if expected is not None else expected
assert jch.get_description(ATTRS_SOUP, attr) == expected
@pytest.mark.parametrize("memory", [False, True])
@pytest.mark.parametrize("channel_id", ["1234", "4321"])
def test_handle_jobads_end_to_end(mocker, client, memory, channel_id):
mock_get_url = mocker.patch("web.http.cached_get_url")
mock_get_url.return_value.text = ATTRS_HTML
mock = mocker.patch("web.slack.post_message")
client.post(
"/slash/jobads", data=dict(channel_id=channel_id, user_id="5678", text="AcctGroupUser"),
)
# let the executor run
# Strictly speaking, this should (a) depend on the memory_time value
# and (b) poll until the executor signals that it has run.
time.sleep(0.1)
if not memory:
assert mock.call_count == 1
channel = mock.call_args[1]["channel"]
assert channel == channel_id
msg = mock.call_args[1]["text"]
# make a few assertions about the output message,
# but without holding on too tight
assert "<@5678>" in msg
assert "AcctGroupUser" in msg
assert "user name associated" in msg
assert "AllRemoteHosts" not in msg
else:
assert mock.call_count == 0
```
#### File: tests/commands/test_submits.py
```python
import textwrap
import time
import bs4
import pytest
from web.commands import scrapers
@pytest.fixture
def sch():
return scrapers.SubmitsCommandHandler(rescrape_timeout=300)
def test_get_description_returns_none_if_it_fails_to_find_the_submit(sch):
assert sch.get_description(bs4.BeautifulSoup("", features="html.parser"), "foo") is None
# This is a big blob of sample text taken from the HTCondor manual HTML.
# If that format ever changes, tests that depend on this will need to change as well!
SUBMITS_HTML = textwrap.dedent(
"""
<div class="section" id="submit-description-file-commands">
<h2>Submit Description File Commands<a class="headerlink" href="#submit-description-file-commands" title="Permalink to this headline">¶</a></h2>
<p><span class="target" id="index-12"></span> </p>
<p>Note: more information on submitting HTCondor jobs can be found here:
<a class="reference internal" href="../users-manual/submitting-a-job.html"><span class="doc">Submitting a Job</span></a>.</p>
<p>As of version 8.5.6, the <em>condor_submit</em> language supports multi-line
values in commands. The syntax is the same as the configuration language
(see more details here:
<a class="reference internal" href="../admin-manual/introduction-to-configuration.html#multi-line-values"><span class="std std-ref">Multi-Line Values</span></a>).</p>
<p>Each submit description file describes one or more clusters of jobs to
be placed in the HTCondor execution pool. All jobs in a cluster must
share the same executable, but they may have different input and output
files, and different program arguments. The submit description file is
generally the last command-line argument to <em>condor_submit</em>. If the
submit description file argument is omitted, <em>condor_submit</em> will read
the submit description from standard input.</p>
<p>The submit description file must contain at least one <em>executable</em>
command and at least one <em>queue</em> command. All of the other commands have
default actions.</p>
<p><strong>Note that a submit file that contains more than one executable command
will produce multiple clusters when submitted. This is not generally
recommended, and is not allowed for submit files that are run as DAG node
jobs by condor_dagman.</strong></p>
<p>The commands which can appear in the submit description file are
numerous. They are listed here in alphabetical order by category.</p>
<p>BASIC COMMANDS <span class="target" id="index-13"></span> </p>
<blockquote>
<div><dl class="docutils">
<dt>arguments = <argument_list></dt>
<dd><p class="first">List of arguments to be supplied to the executable as part of the
command line.</p>
<p>In the <strong>java</strong> universe, the first argument must be the name of the
class containing <code class="docutils literal notranslate"><span class="pre">main</span></code>.</p>
<p>There are two permissible formats for specifying arguments,
identified as the old syntax and the new syntax. The old syntax
supports white space characters within arguments only in special
circumstances; when used, the command line arguments are represented
in the job ClassAd attribute <code class="docutils literal notranslate"><span class="pre">Args</span></code>. The new syntax supports
uniform quoting of white space characters within arguments; when
used, the command line arguments are represented in the job ClassAd
attribute <code class="docutils literal notranslate"><span class="pre">Arguments</span></code>.</p>
<p><strong>Old Syntax</strong></p>
<p>In the old syntax, individual command line arguments are delimited
(separated) by space characters. To allow a double quote mark in an
argument, it is escaped with a backslash; that is, the two character
sequence " becomes a single double quote mark within an argument.</p>
<p>Further interpretation of the argument string differs depending on
the operating system. On Windows, the entire argument string is
passed verbatim (other than the backslash in front of double quote
marks) to the Windows application. Most Windows applications will
allow spaces within an argument value by surrounding the argument
with double quotes marks. In all other cases, there is no further
interpretation of the arguments.</p>
<p>Example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">arguments</span> <span class="o">=</span> <span class="n">one</span> \\<span class="s2">"two</span><span class="se">\\"</span><span class="s2"> 'three'</span>
</pre></div>
</div>
<p>Produces in Unix vanilla universe:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">argument</span> <span class="mi">1</span><span class="p">:</span> <span class="n">one</span>
<span class="n">argument</span> <span class="mi">2</span><span class="p">:</span> <span class="s2">"two"</span>
<span class="n">argument</span> <span class="mi">3</span><span class="p">:</span> <span class="s1">'three'</span>
</pre></div>
</div>
<p><strong>New Syntax</strong></p>
<p>Here are the rules for using the new syntax:</p>
<ol class="arabic simple">
<li>The entire string representing the command line arguments is
surrounded by double quote marks. This permits the white space
characters of spaces and tabs to potentially be embedded within a
single argument. Putting the double quote mark within the
arguments is accomplished by escaping it with another double
quote mark.</li>
<li>The white space characters of spaces or tabs delimit arguments.</li>
<li>To embed white space characters of spaces or tabs within a single
argument, surround the entire argument with single quote marks.</li>
<li>To insert a literal single quote mark, escape it within an
argument already delimited by single quote marks by adding
another single quote mark.</li>
</ol>
<p>Example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">arguments</span> <span class="o">=</span> <span class="s2">"3 simple arguments"</span>
</pre></div>
</div>
<p>Produces:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">argument</span> <span class="mi">1</span><span class="p">:</span> <span class="mi">3</span>
<span class="n">argument</span> <span class="mi">2</span><span class="p">:</span> <span class="n">simple</span>
<span class="n">argument</span> <span class="mi">3</span><span class="p">:</span> <span class="n">arguments</span>
</pre></div>
</div>
<p>Another example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">arguments</span> <span class="o">=</span> <span class="s2">"one 'two with spaces' 3"</span>
</pre></div>
</div>
<p>Produces:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">argument</span> <span class="mi">1</span><span class="p">:</span> <span class="n">one</span>
<span class="n">argument</span> <span class="mi">2</span><span class="p">:</span> <span class="n">two</span> <span class="k">with</span> <span class="n">spaces</span>
<span class="n">argument</span> <span class="mi">3</span><span class="p">:</span> <span class="mi">3</span>
</pre></div>
</div>
<p>And yet another example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">arguments</span> <span class="o">=</span> <span class="s2">"one ""two"" 'spacey ''quoted'' argument'"</span>
</pre></div>
</div>
<p>Produces:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">argument</span> <span class="mi">1</span><span class="p">:</span> <span class="n">one</span>
<span class="n">argument</span> <span class="mi">2</span><span class="p">:</span> <span class="s2">"two"</span>
<span class="n">argument</span> <span class="mi">3</span><span class="p">:</span> <span class="n">spacey</span> <span class="s1">'quoted'</span> <span class="n">argument</span>
</pre></div>
</div>
<p class="last">Notice that in the new syntax, the backslash has no special meaning.
This is for the convenience of Windows users.
<span class="target" id="index-14"></span> </p>
</dd>
<dt>environment = <parameter_list></dt>
<dd><p class="first">List of environment
<span class="target" id="index-15"></span> variables.</p>
<p>There are two different formats for specifying the environment
variables: the old format and the new format. The old format is
retained for backward-compatibility. It suffers from a
platform-dependent syntax and the inability to insert some special
characters into the environment.</p>
<p>The new syntax for specifying environment values:</p>
<ol class="arabic">
<li><p class="first">Put double quote marks around the entire argument string. This
distinguishes the new syntax from the old. The old syntax does
not have double quote marks around it. Any literal double quote
marks within the string must be escaped by repeating the double
quote mark.</p>
</li>
<li><p class="first">Each environment entry has the form</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o"><</span><span class="n">name</span><span class="o">>=<</span><span class="n">value</span><span class="o">></span>
</pre></div>
</div>
</li>
<li><p class="first">Use white space (space or tab characters) to separate environment
entries.</p>
</li>
<li><p class="first">To put any white space in an environment entry, surround the
space and as much of the surrounding entry as desired with single
quote marks.</p>
</li>
<li><p class="first">To insert a literal single quote mark, repeat the single quote
mark anywhere inside of a section surrounded by single quote
marks.</p>
</li>
</ol>
<p>Example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">environment</span> <span class="o">=</span> <span class="s2">"one=1 two=""2"" three='spacey ''quoted'' value'"</span>
</pre></div>
</div>
<p>Produces the following environment entries:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">one</span><span class="o">=</span><span class="mi">1</span>
<span class="n">two</span><span class="o">=</span><span class="s2">"2"</span>
<span class="n">three</span><span class="o">=</span><span class="n">spacey</span> <span class="s1">'quoted'</span> <span class="n">value</span>
</pre></div>
</div>
<p>Under the old syntax, there are no double quote marks surrounding
the environment specification. Each environment entry remains of the
form</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o"><</span><span class="n">name</span><span class="o">>=<</span><span class="n">value</span><span class="o">></span>
</pre></div>
</div>
<p>Under Unix, list multiple environment entries by separating them
with a semicolon (;). Under Windows, separate multiple entries with
a vertical bar (|). There is no way to insert a literal semicolon
under Unix or a literal vertical bar under Windows. Note that spaces
are accepted, but rarely desired, characters within parameter names
and values, because they are treated as literal characters, not
separators or ignored white space. Place spaces within the parameter
list only if required.</p>
<p>A Unix example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">environment</span> <span class="o">=</span> <span class="n">one</span><span class="o">=</span><span class="mi">1</span><span class="p">;</span><span class="n">two</span><span class="o">=</span><span class="mi">2</span><span class="p">;</span><span class="n">three</span><span class="o">=</span><span class="s2">"quotes have no 'special' meaning"</span>
</pre></div>
</div>
<p>This produces the following:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">one</span><span class="o">=</span><span class="mi">1</span>
<span class="n">two</span><span class="o">=</span><span class="mi">2</span>
<span class="n">three</span><span class="o">=</span><span class="s2">"quotes have no 'special' meaning"</span>
</pre></div>
</div>
<p class="last">If the environment is set with the
<strong>environment</strong> <span class="target" id="index-16"></span>
command and <strong>getenv</strong> <span class="target" id="index-17"></span> is
also set to true, values specified with <strong>environment</strong> override
values in the submitter’s environment (regardless of the order of
the <strong>environment</strong> and <strong>getenv</strong> commands).
<span class="target" id="index-18"></span> </p>
</dd>
<dt>error = <pathname></dt>
<dd>A path and file name used by HTCondor to capture any error messages
the program would normally write to the screen (that is, this file
becomes <code class="docutils literal notranslate"><span class="pre">stderr</span></code>). A path is given with respect to the file system
of the machine on which the job is submitted. The file is written
(by the job) in the remote scratch directory of the machine where
the job is executed. When the job exits, the resulting file is
transferred back to the machine where the job was submitted, and the
path is utilized for file placement. If not specified, the default
value of <code class="docutils literal notranslate"><span class="pre">/dev/null</span></code> is used for submission to a Unix machine. If
not specified, error messages are ignored for submission to a
Windows machine. More than one job should not use the same error
file, since this will cause one job to overwrite the errors of
another. If HTCondor detects that the error and output files for a
job are the same, it will run the job such that the output and error
data is merged. <span class="target" id="index-19"></span> </dd>
<dt>executable = <pathname></dt>
<dd><p class="first">An optional path and a required file name of the executable file for
this job cluster. Only one
<strong>executable</strong> <span class="target" id="index-20"></span> command
within a submit description file is guaranteed to work properly.
More than one often works.</p>
<p class="last">If no path or a relative path is used, then the executable file is
presumed to be relative to the current working directory of the user
as the <em>condor_submit</em> command is issued.</p>
</dd>
"""
).strip()
SUBMITS_SOUP = bs4.BeautifulSoup(SUBMITS_HTML, "html.parser")
@pytest.mark.parametrize(
"submit, expected, anchor",
[
(
"error",
"""
*error = <pathname>*
>A path and file name used by HTCondor to capture any error messages the program would normally write to the screen (that is, this file becomes `stderr`). A path is given with respect to the file system of the machine on which the job is submitted. The file is written (by the job) in the remote scratch directory of the machine where the job is executed. When the job exits, the resulting file is transferred back to the machine where the job was submitted, and the path is utilized for file placement. If not specified, the default value of `/dev/null` is used for submission to a Unix machine. If not specified, error messages are ignored for submission to a Windows machine. More than one job should not use the same error file, since this will cause one job to overwrite the errors of another. If HTCondor detects that the error and output files for a job are the same, it will run the job such that the output and error data is merged.
""",
"index-18",
),
("NOPE", None, None),
],
)
def test_get_description(sch, submit, expected, anchor):
# clean up the triple-quoted string
expected = (textwrap.dedent(expected).strip(), anchor) if expected is not None else expected
assert sch.get_description(SUBMITS_SOUP, submit) == expected
@pytest.mark.parametrize("memory", [False, True])
@pytest.mark.parametrize("channel_id", ["1234", "4321"])
def test_handle_submits_end_to_end(mocker, client, memory, channel_id):
mock_get_url = mocker.patch("web.http.cached_get_url")
mock_get_url.return_value.text = SUBMITS_HTML
mock = mocker.patch("web.slack.post_message")
client.post(
"/slash/submits", data=dict(channel_id=channel_id, user_id="5678", text="error"),
)
# let the executor run
# Strictly speaking, this should (a) depend on the memory_time value
# and (b) poll until the executor signals that it has run.
time.sleep(0.1)
if not memory:
assert mock.call_count == 1
channel = mock.call_args[1]["channel"]
assert channel == channel_id
msg = mock.call_args[1]["text"]
# make a few assertions about the output message,
# but without holding on too tight
assert "<@5678>" in msg
assert "error" in msg
assert "to capture any error messages" in msg
assert "stream_error" not in msg
else:
assert mock.call_count == 0
```
#### File: tests/events/test_linkers.py
```python
import re
import pytest
from web.events import linkers
def test_generic_ticket_linker_matches():
linker = linkers.TicketLinker(
regex=re.compile(r"bot#(\d+)", re.IGNORECASE),
url="foobar/{}",
prefix="bot",
relink_timeout=300,
)
matches = linker.get_matches({"text": "bot#1234 bot#55678 help", "channel": "foo"})
assert matches == ["1234", "55678"]
def test_generic_ticket_linker_message():
linker = linkers.TicketLinker(
regex=re.compile(r"bot#(\d+)", re.IGNORECASE),
url="foobar/{}",
prefix="bot",
relink_timeout=300,
)
matches = linker.get_matches({"text": "bot#1234 bot#55678 help", "channel": "foo"})
message = linker.generate_reply(matches)
assert message == "<foobar/1234|bot#1234>\n<foobar/55678|bot#55678>"
@pytest.mark.parametrize(
"linker, prefix",
[
(linkers.RTTicketLinker(relink_timeout=300), "rt"),
(linkers.FlightworthyTicketLinker(relink_timeout=300), "gt"),
],
)
@pytest.mark.parametrize(
"message, expected",
[
(
"xt#0755 xt#0x755 xt#1 (xt#0755) (xt#0x755) (xt#2) xt#3, xt#4; xt#5. xt#6! xt#7",
("1", "2", "3", "4", "5", "6", "7"),
),
("xt#0755", ()),
("xt#0x755", ()),
(
"xt#1111 (xt#2222) xt#3333, xt#4444; xt#5555. xt#6666! xt#7777",
("1111", "2222", "3333", "4444", "5555", "6666", "7777"),
),
(
"xt#755x xt#755x! xt#8888 random other text *xt#9999* _xt#1010_",
("8888", "9999", "1010"),
),
],
)
def test_linker_matches(linker, prefix, message, expected):
message = message.replace("xt", prefix)
matches = linker.get_matches({"text": message, "channel": "foo"})
assert len(matches) == len(expected)
for (match, expect) in zip(matches, expected):
assert match == expect
@pytest.mark.parametrize(
"message, expected",
[
(
{"text": "bot#1", "channel": "1234"},
{"text": "<foobar/1|bot#1>", "channel": "1234", "thread_ts": None},
),
(
{"text": "bot#1", "channel": "1234", "thread_ts": "1.2"},
{"text": "<foobar/1|bot#1>", "channel": "1234", "thread_ts": "1.2"},
),
],
)
def test_generic_linker_end_to_end(mocker, message, expected):
mock = mocker.patch("web.slack.post_message")
linker = linkers.TicketLinker(
regex=re.compile(r"bot#(\d+)", re.IGNORECASE),
url="foobar/{}",
prefix="bot",
relink_timeout=300,
)
linker.handle(message)
assert mock.call_count == 1
args, kwargs = mock.call_args
assert kwargs == expected
```
#### File: chtc-bot/tests/test_formatting.py
```python
import os
import bs4
import pytest
from web import formatting
@pytest.mark.parametrize("input, expected", [("foo", "*foo*")])
def test_bold(input, expected):
assert formatting.bold(input) == expected
@pytest.mark.parametrize("input, expected", [("foo", "_foo_")])
def test_italic(input, expected):
assert formatting.italic(input) == expected
@pytest.mark.parametrize("input, expected", [("foo", "`foo`")])
def test_fixed(input, expected):
assert formatting.fixed(input) == expected
@pytest.mark.parametrize(
"input, text, expected",
[
("http://htcondor.org", None, "http://htcondor.org"),
("http://htcondor.org", "HTCondor", "<http://htcondor.org|HTCondor>"),
],
)
def test_link(input, text, expected):
assert formatting.link(input, text) == expected
ATTRS_URL = (
"https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html"
)
KNOBS_URL = "https://htcondor.readthedocs.io/en/latest/admin-manual/configuration-macros.html"
@pytest.mark.parametrize(
"converter, html, expected",
[
(
formatting.inplace_convert_em_to_underscores,
"<em>this</em> <em>many_many</em> <em>condor_daemon_name</em>",
"_this_ _many_many_ _condor_daemon_name_",
),
(
formatting.inplace_convert_inline_code_to_backticks,
'<code class="docutils literal notranslate"><span class="pre">MASTER_NAME</span></code>',
"`MASTER_NAME`",
),
(
formatting.inplace_convert_strong_to_stars,
"<strong>very powerful</strong>",
"*very powerful*",
),
(
lambda soup: formatting.inplace_convert_internal_links_to_links(
soup, KNOBS_URL, "std.std-ref"
),
"""<a class="reference internal" href="#condor-master-configuration-file-macros"><span class="std std-ref">condor_master Configuration File Macros</span></a>""",
f"<{KNOBS_URL}#condor-master-configuration-file-macros|condor_master Configuration File Macros>",
),
(
lambda soup: formatting.inplace_convert_internal_links_to_links(
soup, os.path.dirname(ATTRS_URL), "doc"
),
"""<a class="reference internal" href="../admin-manual/user-priorities-negotiation.html"><span class="doc">User Priorities and Negotiation</span></a>""",
f"<{os.path.dirname(ATTRS_URL)}/../admin-manual/user-priorities-negotiation.html|User Priorities and Negotiation>",
),
(
formatting.inplace_convert_code_block_to_code_block,
"""<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o"><</span><span class="n">name</span><span class="o">>=<</span><span class="n">value</span><span class="o">></span></pre></div>""",
"```<name>=<value>```",
),
(
formatting.inplace_convert_external_links_to_links,
"""<a href="https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=7643" class="new" title="Get HTCondor to build on Modern Fedora without warnings">#7643</a>""",
"<https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=7643|#7643>",
),
],
)
def test_convert_html_to_markdown(converter, html, expected):
# This could include nested tags, but we're not testing BeautfulSoup here.
soup = bs4.BeautifulSoup(html, "html.parser")
converter(soup)
assert soup.text == expected
```
#### File: web/events/events.py
```python
import traceback
from flask import current_app
from .. import slack
from ..executor import executor
EVENT_HANDLERS = []
def event_handler(*args, **kwargs):
def _(func):
EVENT_HANDLERS.append((func, args, kwargs))
return func
return _
@event_handler("message")
def handle_message_event(event_data):
"""
This is the raw incoming event handler; it will be passed a Slack API event
as a Python dictionary parsed from JSON.
"""
subtype = event_data["event"].get("subtype")
# skip edits
if subtype == "message_changed":
return
# skip deletes
if subtype == "message_deleted":
return
# don't respond to our own messages
if event_data["event"].get("user") == current_app.config["BOT_USER_ID"]:
return
executor.submit(_handle_message, event_data)
def _handle_message(event_data):
message = event_data["event"]
for handler in current_app.config["MESSAGE_HANDLERS"]:
try:
handler.handle(message)
except Exception as e:
current_app.logger.exception(f"Uncaught exception in {handler}: {e}")
executor.submit(
slack.notify_error,
f"Uncaught exception in `{handler}`: `{e}`\n```\n{traceback.format_exc()}\n```",
)
pass
```
#### File: chtc-bot/web/rss.py
```python
import bs4
from flask import current_app, request
from . import formatting, slack
class RSSCommandHandler:
def handle(self):
secret = current_app.config["RSS_SHARED_SECRET"]
if request.headers.get("x-shared-secret") != secret:
return "invalid", 400
blob = request.json
if blob is None:
return "no JSON found", 400
for entry in blob:
# slack.post_message(channel="#chtcbot-dev", text=entry)
text = self.get_description(entry)
if text is not None:
# Don't forget to add the 'bot to the channel if you
# change this!
slack.post_message(channel="#birdwatcher", text=text)
return "", 200
def get_description(self, entry):
link = entry.get("link")
title = entry.get("title")
# OK, WT_A_F: this is 'description' in the RSS feed and when
# parsed by feedreader in the Lambda function!
description = entry.get("summary")
if link is None or title is None or description is None:
return None
soup = bs4.BeautifulSoup(description, "html.parser")
formatting.inplace_convert_em_to_underscores(soup, selector="i")
formatting.inplace_convert_external_links_to_links(soup)
text_description = formatting.compress_whitespace(soup.text)
text = f"<{link}|{title}>\n{text_description}"
return text
``` |
{
"source": "JoshKarpel/condor-analyze-job-log",
"score": 2
} |
#### File: JoshKarpel/condor-analyze-job-log/condor_analyze_job_log.py
```python
import itertools
import math
from pathlib import Path
import sys
import collections
import datetime
import enum
import shutil
import htcondor
import click
def get_events(event_log_path):
yield from htcondor.JobEventLog(Path(event_log_path).as_posix()).events(0)
class JobStatus(enum.IntEnum):
UNKNOWN = 0
IDLE = 1
RUNNING = 2
REMOVED = 3
COMPLETED = 4
HELD = 5
SUSPENDED = 6
SYMBOLS = [' ', 'I', 'R', 'X', 'C', 'H', 'S']
STATUS_TO_SYMBOL = dict(zip(JobStatus, SYMBOLS))
COLORS = ['black', 'yellow', 'blue', 'magenta', 'green', 'red', 'magenta']
SYMBOL_TO_COLOR = dict(zip(SYMBOLS, COLORS))
JOB_EVENT_STATUS_TRANSITIONS = {
htcondor.JobEventType.SUBMIT: JobStatus.IDLE,
htcondor.JobEventType.JOB_EVICTED: JobStatus.IDLE,
htcondor.JobEventType.JOB_UNSUSPENDED: JobStatus.IDLE,
htcondor.JobEventType.JOB_RELEASED: JobStatus.IDLE,
htcondor.JobEventType.SHADOW_EXCEPTION: JobStatus.IDLE,
htcondor.JobEventType.JOB_RECONNECT_FAILED: JobStatus.IDLE,
htcondor.JobEventType.JOB_TERMINATED: JobStatus.COMPLETED,
htcondor.JobEventType.EXECUTE: JobStatus.RUNNING,
htcondor.JobEventType.JOB_HELD: JobStatus.HELD,
htcondor.JobEventType.JOB_SUSPENDED: JobStatus.SUSPENDED,
htcondor.JobEventType.JOB_ABORTED: JobStatus.REMOVED,
}
def main(event_log_path):
job_states = {}
job_state_counts = collections.Counter()
counts_over_time = []
for event in get_events(event_log_path):
event_key = (event.cluster, event.proc)
new_status = JOB_EVENT_STATUS_TRANSITIONS.get(event.type, None)
if new_status is not None:
old_status = job_states.get(event_key, None)
job_states[event_key] = new_status
job_state_counts[new_status] += 1
if old_status is not None:
job_state_counts[old_status] -= 1
counts_over_time.append((event.timestamp, job_state_counts.copy()))
# print(job_state_counts)
# for timestamp, counts in counts_over_time:
# print(timestamp, counts)
term = shutil.get_terminal_size((80, 20))
width = term.columns - 10
height = term.lines - 10
hist = histogram(counts_over_time, width, height)
rows = ['│' + row for row in hist.splitlines()]
rows.append('└' + ('─' * (width)))
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
left_date_str = datetime.datetime.fromtimestamp(first_time).strftime('%y-%m-%d %H:%M:%S').ljust(width + 1)
right_date_str = datetime.datetime.fromtimestamp(last_time).strftime('%y-%m-%d %H:%M:%S').rjust(width + 1)
time_str = 'Time'.center(width + 1)
rows.append(merge_strings(left_date_str, right_date_str, time_str))
max_jobs = max(total_counts(c) for _, c in counts_over_time)
extra_len = max(len(str(max_jobs)), len('# Jobs'))
new_rows = []
for idx, row in enumerate(rows):
if idx == 0:
new_rows.append(str(max_jobs).rjust(extra_len) + row)
elif idx == len(rows) - 2:
new_rows.append('0'.rjust(extra_len) + row)
elif idx == len(rows) // 2:
new_rows.append('# Jobs'.rjust(extra_len) + row)
else:
new_rows.append((' ' * extra_len) + row)
rows = new_rows
hist = '\n'.join(rows)
click.echo(hist)
def merge_strings(*strings):
max_len = max(len(s) for s in strings)
out = [' '] * max_len
for string in strings:
for idx, char in enumerate(string):
if out[idx] == ' ' and char != ' ':
out[idx] = char
return ''.join(out)
def histogram(counts_over_time, width, height):
first_time, _ = counts_over_time[0]
last_time, last_counts = counts_over_time[-1]
groups = list(group_counts_by_time(counts_over_time, width))
counts = [avg_counts(group) for group in groups]
counts[0] = groups[0][-1][1]
counts[-1] = last_counts
max_jobs = max(total_counts(c) for c in counts if c is not None)
columns = []
for count in counts:
if count is None:
columns.append(columns[-1])
continue
bar_lens = calculate_column_partition(count, max_jobs, height)
columns.append(''.join(symbol * bar_lens[status] for status, symbol in STATUS_TO_SYMBOL.items()))
rows = list(reversed(list(map(list, itertools.zip_longest(*columns, fillvalue = ' ')))))
rows = [''.join(click.style('█' * len(list(group)), fg = SYMBOL_TO_COLOR[symbol]) for symbol, group in itertools.groupby(row)) for row in rows]
return '\n'.join(rows)
def calculate_column_partition(counts, max_jobs, height):
raw_split = [(counts.get(status, 0) / max_jobs) * height for status in JobStatus]
int_split = [0 for _ in range(len(raw_split))]
carry = 0
for idx, entry in enumerate(raw_split):
dec = entry - math.floor(entry)
if entry == 0:
int_split[idx] = 0
elif dec >= 0.5:
int_split[idx] = math.ceil(entry)
elif math.floor(entry) == 0:
int_split[idx] = 1
carry += 1
elif dec < 0.5:
int_split[idx] = math.floor(entry)
else:
raise Exception("Unreachable")
int_split[int_split.index(max(int_split))] -= carry
return {k: v for k, v in zip(JobStatus, int_split)}
def _calculate_bar_component_len(count, total, bar_width):
if count == 0:
return 0
return max(int((count / total) * bar_width), 1)
def total_counts(counter):
return sum(counter.values())
def group_counts_by_time(counts_over_time, n_divisions):
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
dt = (last_time - first_time) / n_divisions
left_idx = 0
right_idx = 0
for left_time in (first_time + (n * dt) for n in range(n_divisions)):
right_time = left_time + dt
for right_idx, (timestamp, _) in enumerate(counts_over_time[left_idx:], start = left_idx):
if timestamp > right_time:
break
yield counts_over_time[left_idx: right_idx]
left_idx = right_idx
def avg_counts(counts_over_time):
lc = len(counts_over_time)
if lc == 0:
return None
counts = [counts for _, counts in counts_over_time]
return collections.Counter({k: v / lc for k, v in sum(counts, collections.Counter()).items()})
if __name__ == '__main__':
main(sys.argv[1])
``` |
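As a small illustration of one helper above: `merge_strings` overlays equal-width strings column by column, keeping the first non-space character seen at each position, which is how the left date, right date, and centered axis label end up on a single line.
```python
assert merge_strings("AB        ", "        YZ", "    MM    ") == "AB  MM  YZ"
```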
{
"source": "JoshKarpel/condormap",
"score": 3
} |
#### File: condormap/htmap/mapped.py
```python
import logging
from typing import Any, Callable, Dict, Iterable, Optional, Union
from . import mapping, maps, options
logger = logging.getLogger(__name__)
class MappedFunction:
def __init__(self, func: Callable, map_options: Optional[options.MapOptions] = None):
"""
Parameters
----------
func
A function to wrap in a :class:`MappedFunction`.
map_options
An instance of :class:`htmap.MapOptions`.
Any map calls from the :class:`MappedFunction` produced by this decorator will inherit from this.
"""
self.func = func
if map_options is None:
map_options = options.MapOptions()
self.map_options = map_options
logger.debug(f"Created mapped function for {self.func} with options {self.map_options}")
def __repr__(self):
return f"{self.__class__.__name__}(func = {self.func}, map_options = {self.map_options})"
def __call__(self, *args, **kwargs):
"""Call the function as normal, locally."""
return self.func(*args, **kwargs)
def map(
self,
args: Iterable[Any],
tag: Optional[str] = None,
map_options: Optional[options.MapOptions] = None,
) -> maps.Map:
"""As :func:`htmap.map`, but the ``func`` argument is the mapped function."""
if map_options is None:
map_options = options.MapOptions()
return mapping.map(
func=self.func,
args=args,
tag=tag,
map_options=options.MapOptions.merge(map_options, self.map_options),
)
def starmap(
self,
args: Optional[Iterable[tuple]] = None,
kwargs: Optional[Iterable[Dict[str, Any]]] = None,
tag: Optional[str] = None,
map_options: Optional[options.MapOptions] = None,
) -> maps.Map:
"""As :func:`htmap.starmap`, but the ``func`` argument is the mapped function."""
if map_options is None:
map_options = options.MapOptions()
return mapping.starmap(
func=self.func,
args=args,
kwargs=kwargs,
tag=tag,
map_options=options.MapOptions.merge(map_options, self.map_options),
)
def build_map(
self, tag: Optional[str] = None, map_options: Optional[options.MapOptions] = None,
) -> mapping.MapBuilder:
"""As :func:`htmap.build_map`, but the ``func`` argument is the mapped function."""
if map_options is None:
map_options = options.MapOptions()
return mapping.build_map(
func=self.func,
tag=tag,
map_options=options.MapOptions.merge(map_options, self.map_options),
)
def mapped(map_options: Optional[options.MapOptions] = None,) -> Union[Callable, MappedFunction]:
"""
A decorator that wraps a function in an :class:`MappedFunction`,
which provides an interface for mapping functions calls out to an HTCondor cluster.
Parameters
----------
map_options
An instance of :class:`htmap.MapOptions`.
Any map calls from the :class:`MappedFunction` produced by this decorator will inherit from this.
Returns
-------
mapped_function
A :class:`MappedFunction` that wraps the function (or a wrapper function that does the wrapping).
"""
if map_options is None: # call with parens but no args
def wrapper(func: Callable) -> MappedFunction:
return MappedFunction(func)
return wrapper
elif callable(map_options): # call with no parens on function
return MappedFunction(map_options)
elif isinstance(map_options, options.MapOptions): # call with map options
def wrapper(func: Callable) -> MappedFunction:
return MappedFunction(func, map_options=map_options)
return wrapper
raise TypeError(
"incorrect use of @mapped decorator: argument should be a callable or a MapOptions, or no argument"
)
```
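A short usage sketch of the decorator above, assuming the package is importable as `htmap` (the tag is arbitrary, and the `.map()` call is left commented out since it requires a working HTCondor pool):
```python
import htmap

@htmap.mapped
def double(x):
    return 2 * x

double(3)  # calls the wrapped function locally and returns 6
# m = double.map(range(10), tag="double-example")  # would submit 10 map components
```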
#### File: htmap/run/_htmap_run.py
```python
import datetime
import getpass
import gzip
import os
import shutil
import socket
import subprocess
import sys
import textwrap
import traceback
from pathlib import Path
TRANSFER_DIR = "_htmap_transfer"
USER_TRANSFER_DIR = "_htmap_user_transfer"
CHECKPOINT_PREP = "_htmap_prep_checkpoint"
CHECKPOINT_CURRENT = "_htmap_current_checkpoint"
CHECKPOINT_OLD = "_htmap_old_checkpoint"
TRANSFER_PLUGIN_CACHE = "_htmap_transfer_plugin_cache"
USER_URL_TRANSFER_DIR = "_htmap_user_url_transfer"
TRANSFER_PLUGIN_MARKER = "_htmap_do_output_transfer"
# import cloudpickle goes in the functions that need it directly
# so that errors are raised later
class ExecutionError:
def __init__(
self,
*,
component,
exception_msg,
node_info,
python_info,
scratch_dir_contents,
stack_summary,
):
self.component = component
self.exception_msg = exception_msg
self.node_info = node_info
self.python_info = python_info
self.scratch_dir_contents = [str(p.absolute()) for p in scratch_dir_contents]
self.stack_summary = stack_summary
def __repr__(self):
return "<ExecutionError for component {}>".format(self.component)
def get_node_info():
try:
user = getpass.getuser()
except:
user = None
return (
socket.getfqdn(),
socket.gethostbyname(socket.gethostname()),
datetime.datetime.utcnow(),
user,
)
def print_node_info(node_info):
print("Landed on execute node {} ({}) at {} as {}".format(*node_info))
def get_python_info():
if sys.executable == "":
raise Exception("Was not able to determine Python executable.")
v = sys.version_info
return (
sys.executable,
"{}.{}.{}".format(v.major, v.minor, v.micro),
pip_freeze(),
)
def print_python_info(python_info):
executable, version, packages = python_info
print("Python executable is {} (version {})".format(executable, version))
print("with installed packages")
print("\n".join(" {}".format(line) for line in packages.splitlines()))
def pip_freeze() -> str:
freeze = subprocess.run(
[sys.executable, "-m", "pip", "freeze", "--disable-pip-version-check"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if freeze.returncode != 0:
raise Exception(
"Failed to get pip freeze due to:\n{}".format(freeze.stderr.decode("utf-8"))
)
return freeze.stdout.decode("utf-8").strip()
def print_dir_contents(root):
msg = "\n".join(_yield_dir_contents_tree(root))
print(msg)
def _yield_dir_contents_tree(root, prefix=""):
contents = list(root.iterdir())
for idx, path in enumerate(sorted(contents)):
if idx < len(contents) - 1:
tree = "|-"
next_prefix = prefix + "| "
else:
tree = "\\-"
next_prefix = prefix + " "
yield f"{prefix}{tree} {'* ' if path.is_dir() else ''}{path.name}"
if path.is_dir():
yield from _yield_dir_contents_tree(
path, prefix=next_prefix,
)
def print_run_info(component, func, args, kwargs):
s = "\n".join(
(
"Running component {}".format(component),
" {}".format(func),
"with args",
" {}".format(args),
"and kwargs",
" {}".format(kwargs),
)
)
print(s)
def load_object(path):
import cloudpickle
with gzip.open(path, mode="rb") as file:
return cloudpickle.load(file)
def load_func():
return load_object(Path("func"))
def load_args_and_kwargs(component):
return load_object(Path("{}.in".format(component)))
def save_objects(objects, path):
import cloudpickle
with gzip.open(path, mode="wb") as file:
for obj in objects:
cloudpickle.dump(obj, file)
def save_output(component, status, result_or_error, transfer_dir):
save_objects([status, result_or_error], transfer_dir / "{}.out".format(component))
def build_frames(tb):
iterator = traceback.walk_tb(tb)
next(iterator) # skip main's frame
for frame, lineno in iterator:
fname = frame.f_code.co_filename
summ = traceback.FrameSummary(
filename=fname,
lineno=lineno,
name=frame.f_code.co_name,
lookup_line=os.path.exists(fname),
locals=frame.f_locals,
)
yield summ
def load_checkpoint(scratch_dir, transfer_dir):
"""Move checkpoint files back into the scratch directory."""
curr_dir = scratch_dir / CHECKPOINT_CURRENT
old_dir = scratch_dir / CHECKPOINT_OLD
if curr_dir.exists():
for path in curr_dir.iterdir():
path.rename(scratch_dir / path.name)
curr_dir.rename(transfer_dir / curr_dir.name)
elif old_dir.exists():
for path in old_dir.iterdir():
path.rename(scratch_dir / path.name)
old_dir.rename(transfer_dir / curr_dir.name)
def clean_and_remake_dir(dir: Path) -> None:
if dir.exists():
shutil.rmtree(dir)
dir.mkdir()
def main(component):
os.environ["HTMAP_ON_EXECUTE"] = "1"
os.environ["HTMAP_COMPONENT"] = f"{component}"
node_info = get_node_info()
print_node_info(node_info)
print()
scratch_dir = Path.cwd()
transfer_dir = scratch_dir / TRANSFER_DIR
transfer_dir.mkdir(exist_ok=True)
user_transfer_dir = scratch_dir / USER_TRANSFER_DIR / os.getenv("HTMAP_COMPONENT")
user_transfer_dir.mkdir(exist_ok=True, parents=True)
Path(TRANSFER_PLUGIN_CACHE).mkdir(exist_ok=True, parents=True)
Path(TRANSFER_PLUGIN_MARKER).touch(exist_ok=True)
load_checkpoint(scratch_dir, transfer_dir)
print("Scratch directory contents before run:")
print_dir_contents(scratch_dir)
print()
try:
python_info = get_python_info()
print_python_info(python_info)
except Exception:
print("Failed to get information on Python due to:\n{}".format(traceback.format_exc()))
python_info = None
print()
try:
func = load_func()
args, kwargs = load_args_and_kwargs(component)
print_run_info(component, func, args, kwargs)
print("\n----- MAP COMPONENT OUTPUT START -----\n")
result_or_error = func(*args, **kwargs)
status = "OK"
print("\n----- MAP COMPONENT OUTPUT END -----\n")
except Exception:
print("\n------- MAP COMPONENT ERROR --------\n")
type, value, trace = sys.exc_info()
stack_summ = traceback.StackSummary.from_list(build_frames(trace))
exc_msg = textwrap.dedent("\n".join(traceback.format_exception_only(type, value))).rstrip()
result_or_error = ExecutionError(
component=component,
exception_msg=exc_msg,
stack_summary=stack_summ,
node_info=node_info,
python_info=python_info,
scratch_dir_contents=list(scratch_dir.iterdir()),
)
status = "ERR"
traceback.print_exc(file=sys.stdout)
traceback.print_exc(file=sys.stderr)
print("\n------ MAP COMPONENT ERROR END -------\n")
clean_and_remake_dir(scratch_dir / CHECKPOINT_CURRENT)
clean_and_remake_dir(transfer_dir)
save_output(component, status, result_or_error, transfer_dir)
print("Finished executing component at {}".format(datetime.datetime.utcnow()))
print()
print("Scratch directory contents after run:")
print_dir_contents(scratch_dir)
if __name__ == "__main__":
main(component=sys.argv[1])
```
#### File: integration/user_file_transfer/test_url_input_transfer.py
```python
from pathlib import Path
import pytest
import htmap
TIMEOUT = 300
@pytest.mark.timeout(TIMEOUT)
@pytest.mark.xfail(reason="I don't understand yet why this doesn't work...")
def test_input_transfer_via_file_protocol(tmp_path):
f = htmap.TransferPath(__file__, protocol="file")
MARKER = 12345
def test(file):
return "MARKER = 12345" in file.read_text()
m = htmap.map(test, [f])
assert m.get(0)
@pytest.mark.timeout(TIMEOUT)
def test_input_transfer_via_https_protocol(tmp_path):
f = htmap.TransferPath("status/200", protocol="https", location="httpbin.org")
def test(file):
return file.read_text() == ""
m = htmap.map(test, [f])
assert m.get(0)
```
#### File: tests/unit/test_transfer_path.py
```python
from pathlib import Path
import pytest
from htmap import TransferPath
@pytest.mark.parametrize(
"transfer_path, expected",
[
(TransferPath.cwd() / "foobar.txt", (Path.cwd() / "foobar.txt").as_posix(),),
(TransferPath.home() / "foobar.txt", (Path.home() / "foobar.txt").as_posix(),),
(
TransferPath(path="foo/0.txt", protocol="s3", location="s3.server.com",),
"s3://s3.server.com/foo/0.txt",
),
],
)
def test_as_url(transfer_path, expected):
assert transfer_path.as_url() == expected
def test_must_have_protocol_if_has_location():
with pytest.raises(ValueError):
TransferPath("foo.txt", location="foo.bar.com")
``` |
{
"source": "JoshKarpel/Euler",
"score": 3
} |
#### File: JoshKarpel/Euler/euler.py
```python
import click
import functools
import time
import importlib
import os
import collections
import re
import timeit as _timeit
ANSWERS = collections.defaultdict(lambda: object())
THIS_DIR = os.path.dirname(__file__)
answers_file = os.path.join(THIS_DIR, 'answers.txt')
with open(answers_file) as f:
for line in f:
problem, answer = line.strip().split(':')
if answer == '':
ANSWERS[problem] = object()
else:
ANSWERS[problem] = int(answer)
Answer = collections.namedtuple('answer_with_diags', ['answer', 'correct', 'elapsed_time'])
def solve_with_diagnostics(func):
@functools.wraps(func)
def solver():
        t_start = time.perf_counter()
        answer = func()
        elapsed_time = time.perf_counter() - t_start
problem = func.__module__[-3:]
try:
correct = answer == ANSWERS[problem]
except KeyError:
correct = None
return Answer(answer, correct, elapsed_time)
return solver
CORRECT_TO_STR = {
True: '✔',
False: '✘',
None: '?',
}
CORRECT_TO_COLOR = {
True: 'green',
False: 'red',
None: 'yellow',
}
@click.group()
def cli():
pass
ANSWER_WIDTH = 20
@cli.command()
@click.argument('problem')
def solve(problem):
"""Solve a problem."""
try:
problem = problem.rjust(3, '0')
mod = importlib.import_module(f'problems.{problem}')
except (ImportError, ModuleNotFoundError):
click.secho('SOLVER NOT FOUND',
fg = 'yellow')
return 0
solver = solve_with_diagnostics(mod.solve)
try:
answer = solver()
except NotImplementedError:
click.secho('SOLVER NOT IMPLEMENTED',
fg = 'yellow')
return 0
click.secho(f'Answer: {answer.answer} {CORRECT_TO_STR[answer.correct]} │ Elapsed Time: {answer.elapsed_time:.6f} seconds',
fg = CORRECT_TO_COLOR[answer.correct])
return answer
def get_maximally_solved_problem_number():
problem_regex = re.compile('[0-9]{3}.py')
solvers = (int(f[:3]) for f in os.listdir(os.path.join(THIS_DIR, 'problems')) if problem_regex.search(f))
return max(solvers)
@cli.command()
@click.option('--start', '-s', default = 1, help = 'First problem to solve.')
@click.option('--end', '-e', default = get_maximally_solved_problem_number(), help = 'Last problem to solve.')
def check(start, end):
"""Solve many problems."""
start = max(start, 1)
header = f' P │ {"Answer".center(ANSWER_WIDTH)} │ C │ Elapsed Time'
bar = ''.join('─' if char != '│' else '┼' for char in header)
click.echo(header)
click.echo(bar)
for problem in range(start, end + 1):
problem = str(problem).rjust(3, '0')
try:
mod = importlib.import_module(f'problems.{problem}')
solver = solve_with_diagnostics(mod.solve)
answer = solver()
click.secho(f' {problem} │ {str(answer.answer).center(ANSWER_WIDTH)} │ {CORRECT_TO_STR[answer.correct]} │ {answer.elapsed_time:.6f} seconds',
fg = CORRECT_TO_COLOR[answer.correct])
except (ImportError, ModuleNotFoundError):
click.secho(f' {problem} │ {"SOLVER NOT FOUND".center(ANSWER_WIDTH)} │ ? │',
fg = 'yellow')
except Exception as e:
click.secho(f' {problem} │ {"EXCEPTION".center(ANSWER_WIDTH)} │ ? │',
fg = 'yellow')
@cli.command()
@click.argument('problem')
def timeit(problem):
"""Time the solver for a problem."""
problem = problem.rjust(3, '0')
timer = _timeit.Timer('mod.solve()', setup = f'import importlib; mod = importlib.import_module(f"problems.{problem}")')
loops, total_time = timer.autorange()
click.echo(f'Time per Solve: {total_time / loops:.6f} seconds')
if __name__ == '__main__':
cli()
```
#### File: Euler/problems/017.py
```python
ONES = {0: '', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
TEENS = {11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}
TENS = {0: '', 1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}
HUNDREDS = {1: 'onehundred', 2: 'twohundred', 3: 'threehundred', 4: 'fourhundred', 5: 'fivehundred', 6: 'sixhundred', 7: 'sevenhundred', 8: 'eighthundred', 9: 'ninehundred'}
def solve():
    words = []
    for i in range(1, 1001):
        str_i = str(i)
        len_i = len(str_i)
        if len_i == 1:
            words.append(ONES[i])
        elif len_i == 2:
            try:
                words.append(TEENS[i])
            except KeyError:
                words.append(TENS[int(str_i[0])] + ONES[int(str_i[1])])
        elif len_i == 3:
            try:
                words.append(HUNDREDS[int(str_i[0])] + 'and' + TEENS[int(str_i[1:])])
            except KeyError:
                if i % 100 == 0:
                    words.append(HUNDREDS[int(str_i[0])])
                else:
                    words.append(HUNDREDS[int(str_i[0])] + 'and' + TENS[int(str_i[1])] + ONES[int(str_i[2])])
        else:  # 1000
            words.append('onethousand')
    return len(''.join(words))
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/019.py
```python
def days_per_month(year):
if year % 4 == 0:
return 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
else:
return 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
def days_per_year(year):
return sum(days_per_month(year))
def first_day_of_month(year):
if year % 4 == 0:
return 1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336
else:
return 1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335
def solve():
days = []
sundays = []
current_day = 365
for year in range(1901, 2001):
for day in range(days_per_year(year)):
current_day += 1
days.append(current_day)
if day in first_day_of_month(year) and current_day % 7 == 1:
sundays.append(current_day)
return len(sundays)
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/032.py
```python
import itertools
from problems import mymath
def solve():
digits_sorted = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
permutations = itertools.permutations(digits_sorted)
correct_products = set()
for digit_string in permutations:
for i in range(1, 5):
if int(''.join(digit_string[0:i])) * int(''.join(digit_string[i:5])) == int(''.join(digit_string[5:])):
correct_products.add(int(''.join(digit_string[5:])))
return sum(correct_products)
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/056.py
```python
from problems import mymath
def digit_sum(n):
return sum([int(i) for i in str(n)])
def solve():
digit_sums = dict()
for a in range(1, 101):
for b in range(1, 101):
digit_sums[(a, b)] = digit_sum(a ** b)
max_key = mymath.key_of_max_value(digit_sums)
return max_key
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/060.py
```python
from problems import primes
def solve():
current_primes = []
set_size = 0
for latest_prime in primes.generate_primes():
if set_size < 4:
set_size += 1
current_primes.append(latest_prime)
else:
current_primes.pop(0)
current_primes.append(latest_prime)
found = True
for i in range(set_size):
for j in range(set_size):
prime_test = int(''.join([str(current_primes[i]), str(current_primes[j])]))
if not primes.is_prime(prime_test):
found *= False
if found:
return sum(current_primes)
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/069.py
```python
from problems import mymath, primes
def solve():
upper_bound = 1000001
prime_list = primes.find_primes_less_than_n(upper_bound)
divisors = dict(zip(range(upper_bound), prime_list))
prime_factorization_dictionary = dict()
def prime_factorization_specialized(n):
if n in prime_list:
return [n]
factors = []
divisor_key = 0
while n != 1:
divisor = divisors[divisor_key]
if n in prime_factorization_dictionary:
factors += prime_factorization_dictionary[n]
break
elif n % divisor == 0:
factors.append(divisor)
n /= divisor
else:
divisor_key += 1
return factors
for n in range(2, upper_bound):
prime_factorization_dictionary[n] = prime_factorization_specialized(n)
def phi(n):
return round(n * mymath.iterable_product([1 - (1 / p) for p in set(prime_factorization_dictionary[n])]))
ratios = {i: i / phi(i) for i in range(2, upper_bound)}
max_key = mymath.key_of_max_value(ratios)
return max_key
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/072.py
```python
from fractions import Fraction
from problems import primes
def solve():
max_denominator = 10000
primes_list = primes.find_primes_less_than_n(max_denominator + 1) + [max_denominator]
powers_of_two = []
i = 4
while i < max_denominator:
powers_of_two.append(i)
i += 2
fractions_set = {Fraction(n, d) for d in primes_list for n in range(1, d)}.union({Fraction(1, d) for d in powers_of_two}).union({Fraction(d - 1, d) for d in powers_of_two})
return len(fractions_set)
# should be possible to just count them instead of constructing them by looking at the pattern in the numerators and denominators, constructing backwards from the upper bound
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/074.py
```python
import math
factorials = {i: math.factorial(i) for i in range(0, 10)}
def sum_digit_factorial(n):
return sum([factorials[(int(i))] for i in str(n)])
def solve():
upper_bound = 1000000
chains = dict()
for start_number in range(1, upper_bound):
chain = [start_number]
current = sum_digit_factorial(start_number)
while current not in chain:
if current in chains:
chain += chains[current]
break
else:
chain.append(current)
current = sum_digit_factorial(current)
chains[start_number] = chain
sixty_terms = [i for i in chains if len(chains[i]) == 60]
return len(sixty_terms)
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/095.py
```python
from problems import utils, mymath
@utils.memoize
def sum_proper_factors(n):
return sum(mymath.proper_factorization(n))
def solve():
upper_bound = 1000000
chains = dict()
for start_number in range(1, upper_bound):
chain = [start_number]
current_number = sum_proper_factors(start_number)
while current_number != start_number:
if current_number > upper_bound or current_number == 0 or len(chain) > 100:
break
elif current_number in chains:
chain += chains[current_number]
break
else:
chain.append(current_number)
current_number = sum_proper_factors(current_number)
if current_number == start_number:
chains[start_number] = chain
chain_lengths = {i: len(chains[i]) for i in chains}
max_key = mymath.key_of_max_value(chain_lengths)
return min(chains[max_key])
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/097.py
```python
def truncated_double(x, truncate = 20):
return int(str(2 * x)[-truncate:])
def solve():
x = 28433
for i in range(7830457):
x = truncated_double(x)
x += 1
last_ten = str(x)[-10:]
return last_ten
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/099.py
```python
import os
from math import log
def solve():
filepath = os.path.join(os.path.dirname(__file__), '099_pairs.txt')
with open(filepath) as file:
pairs = [[int(x) for x in line.strip('\n').split(',')] for line in file]
log_values = [pair[1] * log(pair[0]) for pair in pairs]
return max(log_values), log_values.index(max(log_values)) + 1
if __name__ == '__main__':
print(solve())
```
#### File: Euler/problems/102.py
```python
import os
def sign(x):
if x < 0:
return -1
if x > 0:
return 1
return 0
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.quadrant = self.get_quadrant()
self.tilted_quadrant = self.get_tilted_quadrant()
def get_quadrant(self):
if self.x > 0 and self.y > 0:
return 1
elif self.x > 0 and self.y < 0:
return 2
elif self.x < 0 and self.y < 0:
return 3
elif self.x < 0 and self.y > 0:
return 4
def get_tilted_quadrant(self):
if self.y > self.x and self.y > -self.x:
return 1
elif self.y < self.x and self.y > -self.x:
return 2
elif self.y < self.x and self.y < -self.x:
return 3
elif self.y > self.x and self.y < - self.x:
return 4
class Triangle:
def __init__(self, points):
self.points = points
self.a = points[0]
self.b = points[1]
self.c = points[2]
self.quadrants = [p.quadrant for p in points]
def legal(self):
# if self.a.quadrant == self.b.quadrant == self.c.quadrant: # if all in same quadrant, not as general as "side check" below
# return False
if sign(self.a.x) == sign(self.b.x) == sign(self.c.x) or sign(self.a.y) == sign(self.b.y) == sign(self.c.y): # if all to "one side" of plane
return False
elif self.a.tilted_quadrant == self.b.tilted_quadrant or self.b.tilted_quadrant == self.c.tilted_quadrant or self.c.tilted_quadrant == self.a.tilted_quadrant:
return False
else:
return True
def solve():
filepath = os.path.join(os.path.dirname(__file__), '102_triangles.txt')
with open(filepath) as file:
raw_triangles = [[int(x) for x in line.strip('\n').split(',')] for line in file]
triangles = []
for raw_triangle in raw_triangles:
triangles.append(Triangle([Point(raw_triangle[0], raw_triangle[1]), Point(raw_triangle[2], raw_triangle[3]), Point(raw_triangle[4], raw_triangle[5])]))
return len([triangle for triangle in triangles if triangle.legal()])
if __name__ == '__main__':
print(solve())
```
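As a quick sanity check of the containment test above, a triangle that surrounds the origin is accepted and one confined to a single quadrant is rejected (coordinates chosen only for illustration):
```python
inside = Triangle([Point(-1, -2), Point(4, -1), Point(0, 3)])
outside = Triangle([Point(1, 1), Point(5, 1), Point(3, 4)])
assert inside.legal()       # origin lies strictly inside this triangle
assert not outside.legal()  # all three vertices sit in the first quadrant
```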
#### File: Euler/problems/utils.py
```python
import functools
def memoize(func):
"""Memoize a function by storing a dictionary of {inputs: outputs}."""
memo = {}
@functools.wraps(func)
def memoizer(*args):
try:
return memo[args]
except KeyError:
memo[args] = func(*args)
return memo[args]
return memoizer
``` |
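For example, wrapping a naive recursive function with the decorator above collapses its exponential call tree, since repeated argument tuples are served from the cache:
```python
@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(100)  # returns immediately; the unmemoized version would effectively never finish
```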
{
"source": "JoshKarpel/hephaestus",
"score": 3
} |
#### File: hephaestus/dev/html.py
```python
import inspect
import sys
import hephaestus as heph
import imported
class Foo:
def __init__(self, name):
self.name = name
def bar(self, clap = '\n', x = 10, a = 'foo'):
print('bar')
def selfonly(self):
pass
def __repr__(self):
return self.name
@classmethod
def classmethod(cls, a = 5):
pass
@staticmethod
def staticmethod(b = 3):
pass
def changename(self):
self.name = self.name.upper()
def recurse(level):
print(level)
if level:
recurse(level - 1)
def a():
print('a')
b()
recurse(2)
b()
foo = Foo('joe')
foo.bar()
foo.selfonly()
foo.classmethod()
foo.staticmethod()
foo.changename() # changes name everywhere because repr is called at printing time
foo.bar()
b()
imported.imported_func()
imported.imported_func(z = 'kangaroo')
def b():
print('b')
c()
c()
def c():
print('c')
if __name__ == '__main__':
print('IF NAME MAIN BEFORE WITH', hash(inspect.currentframe()))
# with heph.Tracer() as tracer:
# print('WITH BLOCK', hash(inspect.currentframe()))
# a()
# recurse(4)
tracer = heph.Tracer()
tracer.start()
a()
a()
for _ in range(5):
c()
recurse(4)
tracer.stop()
print('IF NAME MAIN AFTER WITH', hash(inspect.currentframe()))
print("\n===== REPORT =====\n")
with open('report.html', mode = 'w', encoding = 'utf-8') as f:
f.write(tracer.report_html())
rep = tracer.report_html()
print(rep)
```
#### File: src/hephaestus/cli.py
```python
import argparse
# look at coverage.py for this
def parse_arguments(**kwargs):
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument(
'file',
type = str,
help = 'the file to run',
)
return parser.parse_args()
``` |
{
"source": "JoshKarpel/htcondor-dags",
"score": 2
} |
#### File: tests/dag/test_walks.py
```python
import pytest
from htcondor import dags
def test_walk_depth_first(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = a.child_layer(name="c")
d = dag.layer(name="d")
d.add_parents(b, c)
nodes = list(dag.walk(dags.WalkOrder.DEPTH_FIRST))
# sibling order is not specified
assert nodes in ([a, b, d, c], [a, c, d, b])
def test_walk_breadth_first(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = a.child_layer(name="c")
d = dag.layer(name="d")
d.add_parents(b, c)
nodes = list(dag.walk(dags.WalkOrder.BREADTH_FIRST))
# sibling order is not specified
assert nodes in ([a, b, c, d], [a, c, b, d])
def test_walk_bad_order_raises(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = a.child_layer(name="c")
d = dag.layer(name="d")
d.add_parents(b, c)
with pytest.raises(dags.exceptions.UnrecognizedWalkOrder):
list(dag.walk(order="foobar"))
def test_ancestors_has_all_ancestors_linear(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = b.child_layer(name="c")
assert list(c.walk_ancestors()) == [b, a]
def test_ancestors_has_all_ancestors_branching_depth_first(dag):
a = dag.layer(name="a")
b1 = a.child_layer(name="b1")
b2 = dag.layer(name="b2")
c = dags.Nodes(b1, b2).child_layer(name="c")
assert list(c.walk_ancestors(dags.WalkOrder.DEPTH_FIRST)) in ([b1, a, b2], [b2, b1, a],)
def test_ancestors_has_all_ancestors_branching_breadth_first(dag):
a = dag.layer(name="a")
b1 = a.child_layer(name="b1")
b2 = dag.layer(name="b2")
c = dags.Nodes(b1, b2).child_layer(name="c")
assert list(c.walk_ancestors(dags.WalkOrder.BREADTH_FIRST)) in ([b1, b2, a], [b2, b1, a],)
def test_ancestors_doesnt_include_disconnected_piece(dag):
a = dag.layer(name="a")
b1 = a.child_layer(name="b1")
b2 = dag.layer(name="b2")
c = dags.Nodes(b1, b2).child_layer(name="c")
d = dag.layer(name="d")
assert d not in set(c.walk_ancestors())
def test_ancestors_of_nodes_joins_ancestors(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = dag.layer(name="c")
d = c.child_layer(name="d")
assert set(dags.Nodes(b, d).walk_ancestors()) == {a, c}
def test_descendants_has_all_descendants_linear(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = b.child_layer(name="c")
assert list(a.walk_descendants()) == [b, c]
def test_descendants_has_all_descendants_branching_depth_first(dag):
a = dag.layer(name="a")
b1 = a.child_layer(name="b1")
c = b1.child_layer(name="c")
b2 = a.child_layer(name="b2")
assert list(a.walk_descendants(dags.WalkOrder.DEPTH_FIRST)) in ([b1, c, b2], [b2, b1, c],)
def test_descendants_has_all_descendants_branching_breadth_first(dag):
a = dag.layer(name="a")
b1 = a.child_layer(name="b1")
c = b1.child_layer(name="c")
b2 = a.child_layer(name="b2")
assert list(a.walk_descendants(dags.WalkOrder.BREADTH_FIRST)) in ([b1, b2, c], [b2, b1, c],)
def test_descendants_of_nodes_joins_descendants(dag):
a = dag.layer(name="a")
b = a.child_layer(name="b")
c = dag.layer(name="c")
d = c.child_layer(name="d")
assert set(dags.Nodes(a, c).walk_descendants()) == {b, d}
```
#### File: htcondor-dags/tests/test_rescue.py
```python
import pytest
import textwrap
import htcondor
from htcondor import dags
from htcondor.dags.rescue import _rescue
@pytest.fixture(scope="session")
def rescue_dag():
sub = htcondor.Submit(
dict(executable="/bin/echo", arguments="hi", request_memory="16MB", request_disk="1MB",)
)
dag = dags.DAG()
a = dag.layer(name="a", submit_description=sub)
b = a.child_layer(name="b", submit_description=sub)
c = b.child_layer(
name="c",
submit_description=sub,
abort=dags.DAGAbortCondition(node_exit_value=0, dag_return_value=1),
)
d = c.child_layer(name="d", submit_description=sub)
return dag
@pytest.fixture(scope="session")
def rescue_file_text():
return textwrap.dedent(
"""
# Rescue DAG file, created after running
# the dagfile.dag DAG file
# Created 11/8/2019 04:08:46 UTC
# Rescue DAG version: 2.0.1 (partial)
# Total number of Nodes: 4
# Nodes premarked DONE: 2
# Nodes that failed: 0
# <ENDLIST>
DONE a:0
DONE b:0
"""
)
def test_rescue(rescue_dag, rescue_file_text):
_rescue(rescue_dag, rescue_file_text, formatter=dags.SimpleFormatter())
assert rescue_dag._nodes["a"].done == {0: True}
assert rescue_dag._nodes["b"].done == {0: True}
assert rescue_dag._nodes["c"].done == {}
assert rescue_dag._nodes["d"].done == {}
@pytest.mark.parametrize("num_rescues", [1, 5, 15, 150])
def test_find_rescue_file_with_existing_rescue_file(tmp_path, num_rescues):
d = tmp_path / "dag-dir"
d.mkdir()
base = "dagfile.dag"
for n in range(num_rescues):
(d / f"{base}.rescue{n + 1:03d}").touch()
assert dags.find_rescue_file(d, base) == (d / f"{base}.rescue{num_rescues:03d}")
def test_find_rescue_file_raises_if_no_rescue_found(tmp_path):
d = tmp_path / "dag-dir"
d.mkdir()
with pytest.raises(htcondor.dags.exceptions.NoRescueFileFound):
dags.find_rescue_file(d, "dagfile.dag")
# @pytest.fixture(scope="session")
# def rescue_dag_path(rescue_dag):
# cwd = Path.cwd()
#
# dag_dir = Path.home() / "rescue-dag-test"
# dag_dir.mkdir(parents=True)
# os.chdir(dag_dir)
#
# dag_file = dags.write_dag(rescue_dag, dag_dir)
#
# sub = htcondor.Submit.from_dag(dag_file.as_posix(), {})
#
# schedd = htcondor.Schedd()
# with schedd.transaction() as txn:
# cid = sub.queue(txn)
#
# rescue_dag_path = dag_dir / f"{dags.DEFAULT_DAG_FILE_NAME}.rescue001"
#
# start = time.time()
# while not rescue_dag_path.exists():
# time.sleep(0.1)
# if time.time() - start > 120:
# print((dag_dir / "dagfile.dag.dagman.out").read_text())
# os.system("condor_q -better")
# os.system("condor_status")
# raise TimeoutError
#
# yield rescue_dag_path
#
# shutil.rmtree(dag_dir)
# os.chdir(cwd)
```
#### File: tests/writer/test_subdag_edges.py
```python
import pytest
from .conftest import s, dagfile_lines
def test_one_parent_one_child(dag, writer):
parent = dag.subdag(name="parent", dag_file="parent.dag")
child = parent.child_subdag(name="child", dag_file="child.dag")
assert "PARENT parent CHILD child" in dagfile_lines(writer)
def test_two_parents_one_child(dag, writer):
parent1 = dag.subdag(name="parent1", dag_file="parent.dag")
parent2 = dag.subdag(name="parent2", dag_file="parent.dag")
child = parent1.child_subdag(name="child", dag_file="child.dag")
child.add_parents(parent2)
lines = dagfile_lines(writer)
assert f"PARENT parent1 CHILD child" in lines
assert f"PARENT parent2 CHILD child" in lines
def test_one_parent_two_children(dag, writer):
parent1 = dag.subdag(name="parent", dag_file="parent.dag")
child1 = parent1.child_subdag(name="child1", dag_file="child.dag")
child2 = parent1.child_subdag(name="child2", dag_file="child.dag")
lines = dagfile_lines(writer)
assert f"PARENT parent CHILD child1" in lines
assert f"PARENT parent CHILD child2" in lines
``` |
{
"source": "JoshKarpel/htcondor-executor",
"score": 2
} |
#### File: htcondor-executor/htcondor_executor/htio.py
```python
from typing import Any, List, Tuple, Iterator, Dict, Callable
import logging
import gzip
import json
from pathlib import Path
import cloudpickle
import htcondor
logger = logging.getLogger(__name__)
def save_object(obj: Any, path: Path) -> None:
"""Serialize a Python object (including "objects", like functions) to a file at the given ``path``."""
with gzip.open(path, mode="wb") as file:
cloudpickle.dump(obj, file)
def load_object(path: Path) -> Any:
"""Deserialize an object from the file at the given ``path``."""
with gzip.open(path, mode="rb") as file:
return cloudpickle.load(file)
def load_objects(path: Path) -> Iterator[Any]:
"""Deserialize a stream of objects from the file at the given ``path``."""
with gzip.open(path, mode="rb") as file:
while True:
yield cloudpickle.load(file)
``` |
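A minimal round-trip sketch for the helpers above (the temporary directory and the lambda are only for illustration):
```python
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as d:
    path = Path(d) / "obj.pkl.gz"
    save_object(lambda x: x + 1, path)  # cloudpickle can serialize lambdas
    restored = load_object(path)
    assert restored(1) == 2
```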
{
"source": "JoshKarpel/htcondor-jobs",
"score": 2
} |
#### File: htcondor-jobs/htcondor_jobs/submit.py
```python
from typing import Optional, Union, List, Iterable, Mapping, Sequence, TypeVar
import logging
import collections.abc
import htcondor
from . import descriptions, handles, locate, exceptions
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
T_ITEMDATA = Union[str, int, float]
T_ITEMDATA_MAPPING = Mapping[str, T_ITEMDATA]
T_ITEMDATA_SEQUENCE = Sequence[T_ITEMDATA]
T_ITEMDATA_ELEMENT = TypeVar(
"T_ITEMDATA_ELEMENT", T_ITEMDATA_MAPPING, T_ITEMDATA_SEQUENCE
)
def submit(
description: descriptions.SubmitDescription,
count: Optional[int] = 1,
itemdata: Optional[Iterable[T_ITEMDATA_ELEMENT]] = None,
collector: Optional[str] = None,
scheduler: Optional[str] = None,
) -> handles.ClusterHandle:
"""
Submit a single cluster of jobs based on a submit description.
If you are submitting many clusters at once,
you should do so on a single :class:`Transaction`.
Parameters
----------
description
A submit description.
count
The number of jobs to submit **for each element of the itemdata**.
If ``itemdata`` is ``None``, this is the total number of jobs to submit.
itemdata
collector
scheduler
Returns
-------
handle : :class:`ClusterHandle`
A handle connected to the jobs that were submitted.
"""
with Transaction(collector=collector, scheduler=scheduler) as txn:
handle = txn.submit(description, count, itemdata)
return handle
class Transaction:
__slots__ = ("collector", "scheduler", "_schedd", "_txn")
def __init__(
self, collector: Optional[str] = None, scheduler: Optional[str] = None
):
"""
Open a transaction with a schedd.
If you are submitting many clusters at once,
you should do so on a single transaction.
Parameters
----------
collector
scheduler
"""
self.collector = collector
self.scheduler = scheduler
self._schedd: Optional[htcondor.Schedd] = None
self._txn: Optional[htcondor.Transaction] = None
def submit(
self,
description: descriptions.SubmitDescription,
count: Optional[int] = 1,
itemdata: Optional[Iterable[T_ITEMDATA_ELEMENT]] = None,
) -> handles.ClusterHandle:
"""
Identical to :func:`submit`,
except without the ``collector`` and ``scheduler`` arguments,
which are instead given to the :class:`Transaction`.
"""
if any((self._schedd is None, self._txn is None)):
raise exceptions.UninitializedTransaction(
"the Transaction has not been initialized (use it as a context manager)"
)
sub = description.as_submit()
if itemdata is not None:
itemdata = list(itemdata)
check_itemdata(itemdata)
itemdata_msg = f" and {len(itemdata)} elements of itemdata"
else:
itemdata_msg = ""
result = sub.queue_with_itemdata(self._txn, count, itemdata)
handle = handles.ClusterHandle(
result, collector=self.collector, scheduler=self.scheduler
)
logger.info(
f"Submitted {repr(sub)} to {self._schedd} on transaction {self._txn} with count {count}{itemdata_msg}"
)
return handle
def __enter__(self) -> "Transaction":
self._schedd = locate.get_schedd(self.collector, self.scheduler)
self._txn = self._schedd.transaction()
self._txn.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._txn.__exit__(exc_type, exc_val, exc_tb)
def check_itemdata(itemdata: List[T_ITEMDATA_ELEMENT]) -> None:
if len(itemdata) < 1:
raise exceptions.InvalidItemdata("empty itemdata, pass itemdata = None instead")
if all(isinstance(item, collections.abc.Mapping) for item in itemdata):
return _check_itemdata_as_mappings(itemdata)
elif all(isinstance(item, collections.abc.Sequence) for item in itemdata):
return _check_itemdata_as_sequences(itemdata)
raise exceptions.InvalidItemdata(f"mixed or illegal itemdata types")
def _check_itemdata_as_mappings(itemdata: List[T_ITEMDATA_MAPPING]) -> None:
"""All of the provided itemdata must have exactly identical keys, which must be strings."""
first_item = itemdata[0]
first_keys = set(first_item.keys())
for item in itemdata:
# keys must be strings
if any(not isinstance(key, str) for key in item.keys()):
raise exceptions.InvalidItemdata("keys must be strings")
# key sets must all be the same
if len(set(item.keys()) - first_keys) != 0:
raise exceptions.InvalidItemdata("key mismatch")
def _check_itemdata_as_sequences(itemdata: List[T_ITEMDATA_SEQUENCE]) -> None:
"""All of the provided itemdata must be the same length."""
first_item = itemdata[0]
first_len = len(first_item)
for item in itemdata:
# same length
if len(item) != first_len:
raise exceptions.InvalidItemdata("bad len")
```
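To illustrate the validation implemented above: itemdata must be either all mappings with identical string keys or all sequences of equal length; anything else raises `InvalidItemdata`.
```python
check_itemdata([{"infile": "a.txt"}, {"infile": "b.txt"}])  # ok: identical string keys
check_itemdata([("a.txt", 1), ("b.txt", 2)])                # ok: equal-length sequences
# check_itemdata([{"infile": "a.txt"}, ("b.txt", 1)])       # mixed types -> InvalidItemdata
# check_itemdata([])                                        # empty -> InvalidItemdata
```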
#### File: htcondor-jobs/htcondor_jobs/utils.py
```python
from typing import Optional, Any, Mapping, Iterable
import enum
class StrEnum(str, enum.Enum):
pass
class SlotPickleMixin:
"""A mixin class which lets classes with __slots__ be pickled."""
__slots__ = ()
def __getstate__(self):
# get all the __slots__ in the inheritance tree
# if any class has a __dict__, it will be included! no special case needed
slots = sum((getattr(c, "__slots__", ()) for c in self.__class__.__mro__), ())
state = dict(
(slot, getattr(self, slot)) for slot in slots if hasattr(self, slot)
)
# __weakref__ should always be removed from the state dict
state.pop("__weakref__", None)
return state
def __setstate__(self, state: Mapping):
for slot, value in state.items():
object.__setattr__(self, slot, value)
def chain_get(mapping: Mapping, keys: Iterable[str], default: Optional[Any] = None):
"""
As Mapping.get(key, default), except that it will try multiple keys before returning the default.
Parameters
----------
mapping
The :class:`collections.abc.Mapping` to get from.
keys
The keys to try, in order.
default
What to return if none of the keys are in the mapping.
Defaults to ``None``.
Returns
-------
val :
The value of the first key that was in the mapping,
or the ``default`` if none of the keys were in the mapping.
"""
for k in keys:
try:
return mapping[k]
except KeyError:
pass
return default
```
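For example, `chain_get` is useful when a job ad attribute may appear under slightly different spellings (the attribute names below are illustrative):
```python
ad = {"ClusterId": 123}
chain_get(ad, ["ClusterID", "ClusterId"])       # -> 123, the second key matches
chain_get(ad, ["ProcID", "ProcId"], default=0)  # -> 0, neither key is present
```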
#### File: integration/test_handles/test_combining_handles.py
```python
import time
import pytest
import htcondor_jobs as jobs
def test_and_of_cluster_handles_gives_right_number_of_jobs_in_query(long_sleep):
a = jobs.submit(long_sleep, count=1)
b = jobs.submit(long_sleep, count=1)
num_jobs = len(list((a & b).query()))
assert num_jobs == 0
def test_or_of_cluster_handles_gives_right_number_of_jobs_in_query(long_sleep):
a = jobs.submit(long_sleep, count=1)
b = jobs.submit(long_sleep, count=1)
num_jobs = len(list((a | b).query()))
assert num_jobs == 2
def test_hold_half_of_cluster(long_sleep):
a = jobs.submit(long_sleep, count=4)
(a & "ProcID < 2").hold()
time.sleep(5)
assert a.state[:2] == [jobs.JobStatus.HELD, jobs.JobStatus.HELD]
assert a.state.counts()[jobs.JobStatus.HELD] == 2
```
#### File: integration/test_handles/test_edit.py
```python
import pytest
import htcondor_jobs as jobs
def get_job_attr(handle, attr):
ad = next(handle.query(projection=[attr]))
return ad[attr]
def test_change_request_memory(long_sleep):
handle = jobs.submit(long_sleep, count=1)
handle.edit("RequestMemory", 12345)
assert get_job_attr(handle, "RequestMemory") == 12345
```
#### File: unit/handles/test_constraint_handle.py
```python
import pytest
import operator
import classad
import htcondor_jobs as jobs
@pytest.mark.parametrize("combinator", [operator.and_, operator.or_])
def test_cannot_combine_handles_with_different_collectors(combinator):
h1 = jobs.ConstraintHandle("foo == bar", collector="foo")
h2 = jobs.ConstraintHandle("foo == bar", collector="bar")
with pytest.raises(jobs.exceptions.InvalidHandle):
combinator(h1, h2)
@pytest.mark.parametrize("combinator", [operator.and_, operator.or_])
def test_cannot_combine_handles_with_different_schedulers(combinator):
h1 = jobs.ConstraintHandle("foo == bar", scheduler="foo")
h2 = jobs.ConstraintHandle("foo == bar", scheduler="bar")
with pytest.raises(jobs.exceptions.InvalidHandle):
combinator(h1, h2)
@pytest.fixture(scope="function")
def dummy_constraint_handle():
return jobs.ConstraintHandle("foo == bar")
@pytest.mark.parametrize("combinator", [operator.and_, operator.or_])
def test_can_combine_handle_with_exprtree(dummy_constraint_handle, combinator):
c = classad.ExprTree("fizz == buzz")
combined = combinator(dummy_constraint_handle, c)
assert isinstance(combined, jobs.ConstraintHandle)
@pytest.mark.parametrize("combinator", [operator.and_, operator.or_])
def test_can_combine_handle_with_string(dummy_constraint_handle, combinator):
c = "fizz == buzz"
combined = combinator(dummy_constraint_handle, c)
assert isinstance(combined, jobs.ConstraintHandle)
@pytest.mark.parametrize("combinator", [operator.and_, operator.or_])
@pytest.mark.parametrize("bad_value", [None, True, 1, 5.5, {}, [], set()])
def test_cannot_combine_handle_with_other_types(
dummy_constraint_handle, combinator, bad_value
):
c = bad_value
with pytest.raises(jobs.exceptions.InvalidHandle):
combined = combinator(dummy_constraint_handle, c)
``` |
{
"source": "JoshKarpel/hypoxia",
"score": 2
} |
#### File: hypoxia/tests/test_impl.py
```python
import pytest
from hypoxia import impl
@pytest.fixture(scope = 'function')
def dummy_class():
class Dummy:
def method(self):
pass
return Dummy
def test_impl_has_method_type(dummy_class):
@impl(dummy_class)
def impl_method(self):
pass
d = dummy_class()
assert type(d.impl_method) == type(d.method)
def test_impl_method_name_refers_to_None(dummy_class):
@impl(dummy_class)
def impl_method(self):
pass
assert impl_method is None
def test_impl_is_called_correctly(dummy_class, mocker):
mock = mocker.MagicMock()
@impl(dummy_class)
def impl_method(self):
mock()
d = dummy_class()
d.impl_method()
assert mock.call_count == 1
def test_impl_on_multiple_classes():
class DummyA:
pass
class DummyB:
pass
@impl(DummyA, DummyB)
def impl_method(self):
pass
a = DummyA()
a.impl_method()
b = DummyB()
b.impl_method()
```
#### File: hypoxia/tests/test_result.py
```python
import pytest
from hypoxia import Ok, Err, Some, Nun, Panic
def test_err_val_must_be_exception():
with pytest.raises(Panic):
Err(0)
def test_is_ok_with_ok():
x = Ok(-3)
assert x.is_ok()
def test_is_ok_with_err():
x = Err(Exception("error message"))
assert not x.is_ok()
def test_is_err_with_ok():
x = Ok(-3)
assert not x.is_err()
def test_is_err_with_err():
x = Err(Exception("error message"))
assert x.is_err()
def test_ok_with_ok():
x = Ok(2)
assert x.ok() == Some(2)
def test_ok_with_err():
x = Err(Exception("error message"))
assert x.ok() == Nun()
def test_err_with_ok():
x = Ok(2)
assert x.err() == Nun()
def test_err_with_err():
x = Err(Exception("error message"))
assert x.err() == Some("error message")
def test_map_with_ok():
x = Ok(2)
assert x.map(lambda x: x ** 2) == Ok(4)
def test_map_with_err():
x = Err(Exception('error message'))
assert x.map(lambda x: x ** 2) == x
def test_map_err_with_ok():
x = Ok(2)
assert x.map_err(lambda x: type(x)(x.args[0].upper())) == x
def test_map_err_with_err():
x = Err(Exception('error message'))
assert x.map_err(lambda x: Err(type(x)(x.args[0].upper()))) == Err(Exception('ERROR MESSAGE'))
def test_and_with_ok_ok():
x = Ok(2)
y = Ok(3)
assert x.and_(y) == Ok(3)
def test_and_with_ok_err():
x = Ok(2)
y = Err(Exception('late'))
assert x.and_(y) == Err(Exception('late'))
def test_and_with_err_ok():
x = Err(Exception('early'))
y = Ok(2)
assert x.and_(y) == Err(Exception('early'))
def test_and_with_err_err():
x = Err(Exception('early'))
y = Err(Exception('late'))
assert x.and_(y) == Err(Exception('early'))
def test_and_then_with_ok():
x = Ok(2)
assert x.and_then(lambda x: Ok(x ** 2)) == Ok(4)
def test_and_then_with_ok_and_then_err():
x = Ok(2)
assert x.and_then(lambda x: Err(Exception(x + 1))) == Err(Exception(3))
def test_and_then_with_err():
x = Err(Exception('error message'))
assert x.and_then(lambda x: x + 1) == x
def test_or_with_ok():
x = Ok(2)
assert x.or_(Err(Exception('error message'))) == x
def test_or_with_err():
x = Err(Exception('error message'))
assert x.or_(Ok(5)) == Ok(5)
def test_or_else_with_ok():
x = Ok(2)
assert x.or_else(lambda x: Ok(True)) == x
def test_or_else_with_err():
x = Err(Exception('error message'))
assert x.or_else(lambda x: Ok(True)) == Ok(True)
def test_unwrap_with_ok():
x = Ok(2)
assert x.unwrap() == 2
def test_unwrap_with_err():
x = Err(Exception('error message'))
with pytest.raises(Panic):
x.unwrap()
def test_unwrap_or_with_ok():
x = Ok(2)
assert x.unwrap_or(0) == 2
def test_unwrap_or_with_err():
x = Err(Exception('error message'))
assert x.unwrap_or(0) == 0
def test_unwrap_or_else_with_ok():
x = Ok(2)
assert x.unwrap_or_else(lambda x: x.args[0].upper()) == 2
def test_unwrap_or_else_with_err():
x = Err(Exception('error message'))
assert x.unwrap_or_else(lambda x: x.args[0].upper()) == 'ERROR MESSAGE'
def test_unwrap_err_with_ok():
x = Ok(2)
with pytest.raises(Panic):
x.unwrap_err()
def test_unwrap_err_with_err():
x = Err(Exception('error message'))
assert x.unwrap_err().args == Exception('error message').args
def test_iter_with_ok():
x = Ok(2)
assert list(iter(x)) == [2]
def test_iter_with_err():
x = Err(Exception('error message'))
assert list(iter(x)) == []
def test_hash_ok():
x = Ok(2)
y = Ok(2)
assert x == y
assert hash(x) == hash(y)
def test_hash_ok_not_eq():
x = Ok(2)
y = Ok(3)
assert x != y
assert hash(x) != hash(y)
def test_hash_err():
x = Err(Exception('error message'))
y = Err(Exception('error message'))
assert x == y
assert hash(x) == hash(y)
def test_hash_err_different_type_same_message():
x = Err(IOError('error message'))
y = Err(TypeError('error message'))
assert x != y
assert hash(x) != hash(y)
def test_hash_err_same_type_different_message():
x = Err(Exception('foo'))
y = Err(Exception('bar'))
assert x != y
assert hash(x) != hash(y)
``` |
{
"source": "JoshKarpel/math715",
"score": 2
} |
#### File: JoshKarpel/math715/hw1.py
```python
import os
from utils import *
import functools as ft
import numpy as np
import matplotlib.pyplot as plt
OUT_DIR = os.path.join(os.getcwd(), 'out')
def p1_disc(x, y, a):
return ((x ** 2) - x - a) * (y ** 2)
def characterization_plot(a, x_bound = 5, y_bound = 5, points = 1000, **kwargs):
x, y = np.linspace(-x_bound, x_bound, points), np.linspace(-y_bound, y_bound, points)
x_mesh, y_mesh = np.meshgrid(x, y, indexing = 'ij')
d_mesh = p1_disc(x_mesh, y_mesh, a)
fig = get_figure('full')
ax = fig.add_subplot(111)
plt.set_cmap(plt.cm.get_cmap('seismic'))
colormesh = ax.pcolormesh(x_mesh, y_mesh, d_mesh, vmin = -.5, vmax = .5, shading = 'gouraud')
# plt.colorbar(colormesh, extend = 'both')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'a = {}'.format(a))
save_current_figure(name = 'a={}'.format(a), **kwargs)
if __name__ == '__main__':
characterization_plot(-.25, x_bound = 10, y_bound = 10, points = 500,
target_dir = OUT_DIR)
``` |
{
"source": "JoshKarpel/pytest-condor",
"score": 2
} |
#### File: pytest-condor/ornithology/condor.py
```python
import logging
import subprocess
from pathlib import Path
import shutil
import time
import functools
import shlex
import re
import textwrap
import htcondor
from . import job_queue, env, cmd, daemons, handles
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
DEFAULT_PARAMS = {
"LOCAL_CONFIG_FILE": "",
"CONDOR_HOST": "$(IP_ADDRESS)",
"COLLECTOR_HOST": "$(CONDOR_HOST):0",
"MASTER_ADDRESS_FILE": "$(LOG)/.master_address",
"COLLECTOR_ADDRESS_FILE": "$(LOG)/.collector_address",
"SCHEDD_ADDRESS_FILE": "$(LOG)/.schedd_address",
"UPDATE_INTERVAL": "2",
"POLLING_INTERVAL": "2",
"NEGOTIATOR_INTERVAL": "2",
"STARTER_UPDATE_INTERVAL": "2",
"STARTER_INITIAL_UPDATE_INTERVAL": "2",
"NEGOTIATOR_CYCLE_DELAY": "2",
"MachineMaxVacateTime": "2",
"RUNBENCHMARKS": "0",
"JOB_QUEUE_LOG": "$(SPOOL)/job_queue.log",
"MAX_JOB_QUEUE_LOG_ROTATIONS": "0",
"STARTER_LIST": "STARTER", # no standard universe starter
}
def master_is_not_alive(self):
return not self.master_is_alive
def condor_is_ready(self):
return self.condor_is_ready
def condor_master_was_started(self):
return self.condor_master is not None
def skip_if(condition):
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if condition(self):
logger.debug(
"Skipping call to {} for {} because {} was True".format(
func.__name__, self, condition.__name__
)
)
return
return func(self, *args, **kwargs)
return wrapper
return decorator
class Condor:
def __init__(
self, local_dir: Path, config=None, raw_config=None, clean_local_dir_before=True
):
self.local_dir = local_dir
self.execute_dir = self.local_dir / "execute"
self.lock_dir = self.local_dir / "lock"
self.log_dir = self.local_dir / "log"
self.run_dir = self.local_dir / "run"
self.spool_dir = self.local_dir / "spool"
self.passwords_dir = self.local_dir / "passwords.d"
self.tokens_dir = self.local_dir / "tokens.d"
self.config_file = self.local_dir / "condor_config"
if config is None:
config = {}
self.config = {k: v if v is not None else "" for k, v in config.items()}
self.raw_config = raw_config or ""
self.clean_local_dir_before = clean_local_dir_before
self.condor_master = None
self.condor_is_ready = False
self.job_queue = job_queue.JobQueue(self)
def use_config(self):
return env.SetCondorConfig(self.config_file)
def __repr__(self):
return "{}(local_dir = {})".format(self.__class__.__name__, self.local_dir)
@property
def master_is_alive(self):
return self.condor_master is not None and self.condor_master.poll() is None
def __enter__(self):
self._start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._cleanup()
def _start(self):
logger.info("Starting {}".format(self))
try:
self._setup_local_dirs()
self._write_config()
self._start_condor()
self._wait_for_ready()
except BaseException:
logger.exception(
"Encountered error during setup of {}, cleaning up!".format(self)
)
self._cleanup()
raise
logger.info("Started {}".format(self))
def _setup_local_dirs(self):
if self.clean_local_dir_before and self.local_dir.exists():
shutil.rmtree(self.local_dir)
logger.debug("Removed existing local dir for {}".format(self))
for dir in (
self.local_dir,
self.execute_dir,
self.lock_dir,
self.log_dir,
self.run_dir,
self.spool_dir,
self.passwords_dir,
self.tokens_dir,
):
dir.mkdir(parents=True, exist_ok=False)
logger.debug("Created dir {}".format(dir))
def _write_config(self):
# TODO: how to ensure that this always hits the right config?
# TODO: switch to -summary instead of -write:up
write = cmd.run_command(
["condor_config_val", "-write:up", self.config_file.as_posix()],
echo=False,
suppress=True,
)
if write.returncode != 0:
raise Exception("Failed to copy base OS config: {}".format(write.stderr))
param_lines = []
param_lines += ["#", "# ROLES", "#"]
param_lines += [
"use ROLE: CentralManager",
"use ROLE: Submit",
"use ROLE: Execute",
]
base_config = {
"LOCAL_DIR": self.local_dir.as_posix(),
"EXECUTE": self.execute_dir.as_posix(),
"LOCK": self.lock_dir.as_posix(),
"LOG": self.log_dir.as_posix(),
"RUN": self.run_dir.as_posix(),
"SPOOL": self.spool_dir.as_posix(),
"SEC_PASSWORD_DIRECTORY": self.passwords_dir.as_posix(),
"SEC_TOKEN_SYSTEM_DIRECTORY": self.tokens_dir.as_posix(),
"STARTD_DEBUG": "D_FULLDEBUG D_COMMAND",
}
param_lines += ["#", "# BASE PARAMS", "#"]
param_lines += ["{} = {}".format(k, v) for k, v in base_config.items()]
param_lines += ["#", "# DEFAULT PARAMS", "#"]
param_lines += ["{} = {}".format(k, v) for k, v in DEFAULT_PARAMS.items()]
param_lines += ["#", "# CUSTOM PARAMS", "#"]
param_lines += ["{} = {}".format(k, v) for k, v in self.config.items()]
param_lines += ["#", "# RAW PARAMS", "#"]
param_lines += textwrap.dedent(self.raw_config).splitlines()
with self.config_file.open(mode="a") as f:
f.write("\n".join(param_lines))
logger.debug("Wrote config file for {} to {}".format(self, self.config_file))
@skip_if(condor_master_was_started)
def _start_condor(self):
with env.SetCondorConfig(self.config_file):
self.condor_master = subprocess.Popen(
["condor_master", "-f"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
logger.debug(
"Started condor_master (pid {})".format(self.condor_master.pid)
)
@skip_if(condor_is_ready)
def _wait_for_ready(self, timeout=120, dump_logs_if_fail=False):
daemons = set(
self.run_command(
["condor_config_val", "DAEMON_LIST"], echo=False
).stdout.split(" ")
)
master_log_path = self.master_log.path
logger.debug(
"Starting up daemons for {}, waiting for: {}".format(
self, " ".join(sorted(daemons))
)
)
start = time.time()
while time.time() - start < timeout:
time_to_give_up = int(timeout - (time.time() - start))
# if the master log does not exist yet, we can't use condor_who
if not master_log_path.exists():
logger.debug(
"MASTER_LOG at {} does not yet exist for {}, retrying in 1 seconds (giving up in {} seconds)".format(
self.master_log, self, time_to_give_up
)
)
time.sleep(1)
continue
# TODO: what if we aren't starting up a startd?
who = self.run_command(
shlex.split(
"condor_who -wait:10 'IsReady && STARTD_State =?= \"Ready\"'"
),
echo=False,
suppress=True,
)
if who.stdout.strip() == "":
logger.debug(
"condor_who stdout was unexpectedly blank for {}, retrying in 1 second (giving up in {} seconds)".format(
self, time_to_give_up
)
)
time.sleep(1)
continue
who_ad = dict(kv.split(" = ") for kv in who.stdout.splitlines())
# TODO: same as above - what if we aren't starting up a startd?
if (
who_ad.get("IsReady") == "true"
and who_ad.get("STARTD_State") == '"Ready"'
and all(who_ad.get(d) == '"Alive"' for d in daemons)
):
self.condor_is_ready = True
return
logger.debug(
"{} is waiting for daemons to be ready (giving up in {} seconds)".format(
self, time_to_give_up
)
)
self.run_command(["condor_who", "-quick"])
if dump_logs_if_fail:
for logfile in self.log_dir.iterdir():
logger.error("Contents of {}:\n{}".format(logfile, logfile.read_text()))
raise TimeoutError("Standup for {} failed".format(self))
def _cleanup(self):
logger.info("Cleaning up {}".format(self))
self._condor_off()
self._wait_for_master_to_terminate()
# TODO: look for core dumps
# self._remove_local_dir()
logger.info("Cleaned up {}".format(self))
@skip_if(master_is_not_alive)
def _condor_off(self):
off = self.run_command(
["condor_off", "-daemon", "master"], timeout=30, echo=False
)
if not off.returncode == 0:
logger.error(
"condor_off failed, exit code: {}, stderr: {}".format(
off.returncode, off.stderr
)
)
self._terminate_condor_master()
return
logger.debug("condor_off succeeded: {}".format(off.stdout))
@skip_if(master_is_not_alive)
def _wait_for_master_to_terminate(self, kill_after=60, timeout=120):
logger.debug(
"Waiting for condor_master (pid {}) to terminate".format(
self.condor_master.pid
)
)
start = time.time()
killed = False
while True:
try:
self.condor_master.communicate(timeout=5)
break
            except subprocess.TimeoutExpired:  # communicate(timeout=...) raises TimeoutExpired, not TimeoutError
pass
elapsed = time.time() - start
if not killed:
logger.debug(
"condor_master has not terminated yet, will kill in {} seconds".format(
int(kill_after - elapsed)
)
)
if elapsed > kill_after and not killed:
# TODO: in this path, we should also kill the other daemons
# TODO: we can find their pids by reading the master log
self._kill_condor_master()
killed = True
if elapsed > timeout:
raise TimeoutError(
"Timed out while waiting for condor_master to terminate"
)
logger.debug(
"condor_master (pid {}) has terminated with exit code {}".format(
self.condor_master.pid, self.condor_master.returncode
)
)
@skip_if(master_is_not_alive)
def _terminate_condor_master(self):
if not self.master_is_alive:
return
self.condor_master.terminate()
logger.debug(
"Sent terminate signal to condor_master (pid {})".format(
self.condor_master.pid
)
)
@skip_if(master_is_not_alive)
def _kill_condor_master(self):
self.condor_master.kill()
logger.debug(
"Sent kill signal to condor_master (pid {})".format(self.condor_master.pid)
)
def read_config(self):
return self.config_file.read_text()
def run_command(self, *args, **kwargs):
with self.use_config():
return cmd.run_command(*args, **kwargs)
@property
def master_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("MASTER")
@property
def collector_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("COLLECTOR")
@property
def negotiator_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("NEGOTIATOR")
@property
def schedd_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("SCHEDD")
@property
def startd_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("STARTD")
@property
def shadow_log(self) -> daemons.DaemonLog:
return self._get_daemon_log("SHADOW")
@property
def job_queue_log(self) -> Path:
return self._get_log_path("JOB_QUEUE")
@property
def startd_address(self):
return self._get_address_file("STARTD").read_text().splitlines()[0]
def _get_log_path(self, subsystem):
return self._get_path_from_condor_config_val("{}_LOG".format(subsystem))
def _get_address_file(self, subsystem):
return self._get_path_from_condor_config_val(
"{}_ADDRESS_FILE".format(subsystem)
)
def _get_path_from_condor_config_val(self, attr):
return Path(
self.run_command(
["condor_config_val", attr], echo=False, suppress=True
).stdout
)
def _get_daemon_log(self, daemon_name):
return daemons.DaemonLog(self._get_log_path(daemon_name))
def get_local_schedd(self):
with self.use_config():
return htcondor.Schedd()
def get_local_collector(self):
with self.use_config():
return htcondor.Collector()
def status(self, ad_type=htcondor.AdTypes.Any, constraint="true", projection=None):
projection = projection or []
with self.use_config():
result = self.get_local_collector().query(
ad_type=ad_type, constraint=constraint, projection=projection
)
logger.debug(
'Ads returned by status query for {} ads with constraint "{}":\n'.format(
ad_type, constraint
)
+ "\n".join(str(ad) for ad in result)
)
return result
def direct_status(self, daemon_type, ad_type, constraint="true", projection=None):
projection = projection or []
# TODO: we would like this to use Collector.directQuery, but it can't because of https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=7420
with self.use_config():
daemon_location = self.get_local_collector().locate(daemon_type)
# pretend the target daemon is a collector so we can query it
daemon = htcondor.Collector(daemon_location["MyAddress"])
result = daemon.query(
ad_type=ad_type, constraint=constraint, projection=projection
)
logger.debug(
'Ads returned by direct status query against {} for {} ads with constraint "{}":\n'.format(
daemon_type, ad_type, constraint
)
+ "\n".join(str(ad) for ad in result)
)
return result
def query(
self,
constraint="true",
projection=None,
limit=-1,
opts=htcondor.QueryOpts.Default,
):
if projection is None:
projection = []
with self.use_config():
result = self.get_local_schedd().query(
constraint=constraint, attr_list=projection, limit=limit, opts=opts
)
logger.debug(
'Ads returned by queue query with constraint "{}":\n'.format(constraint)
+ "\n".join(str(ad) for ad in result)
)
return result
def act(self, action, constraint="true"):
with self.use_config():
logger.debug(
'Executing action: {} with constraint "{}"'.format(action, constraint)
)
return self.get_local_schedd().act(action, constraint)
def edit(self, attr, value, constraint="true"):
with self.use_config():
logger.debug(
'Executing edit: setting {} to {} with constraint "{}"'.format(
attr, value, constraint
)
)
return self.get_local_schedd().edit(constraint, attr, value)
def submit(self, description, count=1, itemdata=None):
sub = htcondor.Submit(dict(description))
logger.debug(
"Submitting jobs with description:\n{}\nCount: {}\nItemdata: {}".format(
sub, count, itemdata
)
)
with self.use_config():
schedd = self.get_local_schedd()
with schedd.transaction() as txn:
result = sub.queue_with_itemdata(txn, count, itemdata)
logger.debug("Got submit result:\n{}".format(result))
return handles.ClusterHandle(self, result)
RE_PORT_HOST = re.compile(r"\d+\.\d+\.\d+\.\d+:\d+")
def get_port_host_from_sinful(sinful):
return RE_PORT_HOST.search(sinful)[0]
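# Hedged usage sketch (editor addition, not part of the original module): given a
# typical HTCondor "sinful string" such as "<127.0.0.1:9618?noUDP>",
#     get_port_host_from_sinful("<127.0.0.1:9618?noUDP>")
# returns "127.0.0.1:9618", the first host:port pair matched by RE_PORT_HOST.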
```
#### File: pytest-condor/ornithology/meta.py
```python
import logging
import inspect
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def get_current_func_name() -> str:
"""
Return the name of the function this function is called from.
::
def foo():
print(get_current_func_name())
foo() # prints "foo"
"""
return inspect.currentframe().f_back.f_code.co_name
```
#### File: JoshKarpel/pytest-condor/test_run_sleep_job.py
```python
import logging
from conftest import config, standup, action
from ornithology import (
write_file,
parse_submit_result,
JobID,
SetAttribute,
SetJobStatus,
JobStatus,
in_order,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@action
def submit_sleep_job_cmd(test_dir, default_condor):
sub_description = """
executable = /bin/sleep
arguments = 0
queue
"""
submit_file = write_file(test_dir / "submit" / "job.sub", sub_description)
return default_condor.run_command(["condor_submit", submit_file])
@action
def finished_sleep_jobid(default_condor, submit_sleep_job_cmd):
clusterid, num_procs = parse_submit_result(submit_sleep_job_cmd)
jobid = JobID(clusterid, 0)
default_condor.job_queue.wait_for_events(
expected_events={jobid: [SetJobStatus(JobStatus.COMPLETED)]},
unexpected_events={jobid: {SetJobStatus(JobStatus.HELD)}},
)
return jobid
@action
def job_queue_events_for_sleep_job(default_condor, finished_sleep_jobid):
return default_condor.job_queue.by_jobid[finished_sleep_jobid]
class TestCanRunSleepJob:
def test_submit_cmd_succeeded(self, submit_sleep_job_cmd):
assert submit_sleep_job_cmd.returncode == 0
def test_only_one_proc(self, submit_sleep_job_cmd):
_, num_procs = parse_submit_result(submit_sleep_job_cmd)
assert num_procs == 1
def test_job_queue_events_in_correct_order(self, job_queue_events_for_sleep_job):
assert in_order(
job_queue_events_for_sleep_job,
[
SetJobStatus(JobStatus.IDLE),
SetJobStatus(JobStatus.RUNNING),
SetJobStatus(JobStatus.COMPLETED),
],
)
def test_job_executed_successfully(self, job_queue_events_for_sleep_job):
assert SetAttribute("ExitCode", "0") in job_queue_events_for_sleep_job
``` |
{
"source": "JoshKarpel/quandary",
"score": 3
} |
#### File: src/quandary/__init__.py
```python
from typing import Any, Callable, Union, Hashable, Container
class QuandaryException(Exception):
pass
class UnevaluatedQuandary(QuandaryException):
pass
class NoMatch(QuandaryException):
pass
class InvalidKey(QuandaryException):
pass
def closed_range(start, stop, step):
"""Return a `range` that includes the endpoint."""
return range(start, stop + 1, step)
class ContainerDict(dict):
"""
A dictionary that can also store unhashable "keys" that implement the `in` operator via a `__contains__` method.
You get the value of that key back out by accessing an element of that iterable.
These "container keys" have lower priority than any true dictionary keys in the `ContainerDict`.
Subclassing from dict instead of `collections.UserDict` is a ~30% speedup for certain benchmarks.
It's generally not safe, but we only use the `ContainerDict` for one very specific purpose, and we've overridden the things we need to for that to work.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.containers = []
def __getitem__(self, item):
try:
return super().__getitem__(item)
except KeyError:
for key, value in self.containers:
if item in key:
return value
            raise KeyError(item)
def __setitem__(self, key, result_and_kwargs, force_contains = False):
try:
if force_contains:
raise TypeError
super().__setitem__(key, result_and_kwargs)
except TypeError: # unhashable
if not hasattr(key, '__contains__'):
                raise InvalidKey(f"{key} is not hashable and does not have a __contains__ method, so it cannot be used as the key of a quandary's case")
self.containers.append((key, result_and_kwargs))
def __str__(self):
        return f'{dict(self)} | {self.containers}'  # copy to a plain dict so formatting does not recurse back into __str__
class quandary:
"""A context manager that implements a switch statement."""
_no_result = object()
def __init__(self, control: Any):
"""
Parameters
----------
control
The control value for the quandary.
"""
self._control = control
self._cases = ContainerDict()
self._result = self._no_result
def __enter__(self):
return self
def case(self, key: Union[Hashable, Container], result: Union[Callable, Any], force_contains: bool = False, **kwargs):
"""
Adds a case to the quandary with key `key` and possible result `result`.
Parameters
----------
key
Either a hashable or a container that is checked against the control.
result
What the result of the quandary will be if this case matches. Can be a value or a callable.
force_contains
If `True`, forces the key to be treated as a container even if it is hashable.
kwargs
Keywords arguments are passed to the `result` if it is callable.
"""
self._cases.__setitem__(key, (result, kwargs), force_contains = force_contains)
def default(self, result: Union[Callable, Any], **kwargs):
"""
Add a case that is used if no explicit case is matched.
Parameters
----------
result
The result of the quandary if no case matches.
kwargs
Keywords arguments are passed to the `result` if it is callable.
"""
self._default = result, kwargs
def __exit__(self, exc_type, exc_val, exc_tb):
"""When the `with` block ends the quandary determines which case the control matches and assigns the value of that case to its result."""
if exc_type is not None:
return False # returning False from __exit__ propagates the exception
try:
result, kwargs = self._cases[self._control]
except KeyError:
try:
result, kwargs = self._default
except AttributeError:
raise NoMatch('Failed to match any case and no default has been set')
if callable(result):
result = result(self._control, **kwargs)
self._result = result
@property
def result(self) -> Any:
"""A property that gets the result of the quandary, if the quandary has been evaluated."""
if self._result is self._no_result:
raise UnevaluatedQuandary("You haven't left the with block, so the quandary hasn't been evaluated yet")
return self._result
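# Hedged usage sketch (editor addition, not part of the library source); the control
# value and cases below are illustrative assumptions:
#
#     with quandary(5) as q:
#         q.case(closed_range(0, 3, 1), 'small', force_contains = True)
#         q.case(5, lambda control: control * 10)
#         q.default('no match')
#     q.result  # -> 50: the hashable key 5 matched, so its callable ran on the control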
``` |
{
"source": "JoshKarpel/simulacra",
"score": 2
} |
#### File: simulacra/dev/richardson.py
```python
import logging
import os
import numpy as np
import simulacra as si
from simulacra.units import *
import matplotlib.pyplot as plt
FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
OUT_DIR = os.path.join(os.getcwd(), "out", FILE_NAME)
if __name__ == "__main__":
with si.utils.LogManager(
"simulacra",
stdout_logs=True,
stdout_level=logging.DEBUG,
file_dir=OUT_DIR,
file_logs=False,
) as logger:
x = np.linspace(-5, 5, 200)
y = np.linspace(-5, 5, 200)
x_mesh, y_mesh = np.meshgrid(x, y, indexing="ij")
# z_mesh = 1j * np.sin(y_mesh)
# z_mesh = np.zeros(np.shape(x_mesh))
z_mesh = x_mesh + (1j * y_mesh)
rich = si.plots.RichardsonColormap()
for equator_mag in (0.2, 1, 5):
for shading in ("flat", "gouraud"):
si.plots.xyz_plot(
f"richardson_xyz_eq={equator_mag}_{shading}",
x_mesh,
y_mesh,
z_mesh,
x_unit="rad",
y_unit="rad",
shading=shading,
colormap=plt.get_cmap("richardson"),
richardson_equator_magnitude=equator_mag,
target_dir=OUT_DIR,
show_colorbar=False,
aspect_ratio=1,
)
def z(x_mesh, y_mesh, t):
return z_mesh * np.exp(1j * t)
t = np.linspace(0, 10, 900) * pi
for equator_mag in (0.2, 1, 5):
for shading in ("flat", "gouraud"):
si.plots.xyzt_plot(
f"richardson_xyzt_eq={equator_mag}_{shading}",
x_mesh,
y_mesh,
t,
z,
x_label=r"$x$",
y_label=r"$y$",
x_unit="rad",
y_unit="rad",
title=r"$(x + iy) e^{i t}$",
shading=shading,
colormap=plt.get_cmap("richardson"),
richardson_equator_magnitude=equator_mag,
target_dir=OUT_DIR,
show_colorbar=False,
aspect_ratio=1,
)
def z2(x_mesh, y_mesh, t):
return z_mesh * np.exp(1j * t) * np.sin(x_mesh ** 2 + y_mesh ** 2 + t)
for equator_mag in (0.2, 1, 5):
for shading in ("flat", "gouraud"):
si.plots.xyzt_plot(
f"richardson_xyzt2_eq={equator_mag}_{shading}",
x_mesh,
y_mesh,
t,
z2,
x_label=r"$x$",
y_label=r"$y$",
x_unit="rad",
y_unit="rad",
title=r"$(x + iy) e^{i t} \sin(x^2 + y^2 + t)$",
shading=shading,
colormap=plt.get_cmap("richardson"),
richardson_equator_magnitude=equator_mag,
target_dir=OUT_DIR,
show_colorbar=False,
aspect_ratio=1,
)
```
#### File: simulacra/dev/watcher.py
```python
import simulacra as si
def not_watched_func(x):
print("not_watched_func ran")
return x + 1
class Foo:
def __init__(self):
self.w = 0
@si.utils.watched_memoize(lambda s: s.w)
def watched_method(self, x):
print("watched_method ran")
return x + 1
@si.utils.watched_memoize(lambda s: s.w)
def watched_method_no_args(self):
print("watched_method_no_args ran")
return "foo"
if __name__ == "__main__":
print(not_watched_func(1))
print(not_watched_func(2))
print(not_watched_func(3))
print()
f = Foo()
print(f.watched_method(1))
print(f.watched_method(1))
print(f.watched_method(1))
f.w = 1
print(f.watched_method(2))
print(f.watched_method(3))
print(f.watched_method(4))
print()
print(f.watched_method_no_args())
print(f.watched_method_no_args())
print(f.watched_method_no_args())
f.w = 2
print(f.watched_method_no_args())
print(f.watched_method_no_args())
print(f.watched_method_no_args())
```
#### File: source/figs/figs.py
```python
import numpy as np
import simulacra.vis as vis
def create():
x = np.linspace(0, 2 * np.pi, 1000)
y = np.exp(np.sin(x))
vis.xy_plot("y_vs_x", x, y, x_label=r"$x$", y_label=r"$ e^{\sin(x)} $")
vis.xy_plot(
"y_vs_x__v2", x, y, x_label=r"$x$", x_unit="rad", y_label=r"$ e^{\sin(x)} $"
)
```
#### File: simulacra/simulacra/math.py
```python
import logging
from typing import Callable, Tuple, Union
import numpy as np
import numpy.random as rand
import scipy.special as special
import scipy.integrate as integ
from . import exceptions
from . import units as u
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def rand_phases(shape_tuple: Tuple[int]) -> np.ndarray:
"""Return random phases (0 to 2pi) in the specified shape."""
return rand.random_sample(shape_tuple) * u.twopi
def rand_phases_like(example: np.ndarray) -> np.ndarray:
"""Return random phases (0 to 2pi) in the same shape as the ``example``."""
return rand.random_sample(example.shape) * u.twopi
class SphericalHarmonic:
"""A class that represents a spherical harmonic."""
__slots__ = ("_l", "_m")
def __init__(self, l: int = 0, m: int = 0):
"""
Parameters
----------
l
Orbital angular momentum "quantum number". Must be >= 0.
m
Azimuthal angular momentum "quantum number". Must have
``abs(m) <= l``.
"""
if not l >= 0:
raise exceptions.IllegalSphericalHarmonic(
f"invalid spherical harmonic: l = {l} must be greater than or equal to 0"
)
if not abs(m) <= l:
raise exceptions.IllegalSphericalHarmonic(
f"invalid spherical harmonic: |m| = {abs(m)} must be less than l = {l}"
)
self._l = l
self._m = m
@property
def l(self) -> int:
return self._l
@property
def m(self) -> int:
return self._m
def __repr__(self):
return f"{self.__class__.__name__}(l={self.l}, m={self.m})"
def __str__(self):
return f"Y_({self.l},{self.m})"
@property
def latex(self) -> str:
"""Returns a LaTeX-formatted string for the SphericalHarmonic."""
return fr"Y_{{{self.m}}}^{{{self.l}}}"
@property
def lm(self):
"""The tuple (l, m) for this spherical harmonic."""
return self.l, self.m
def __hash__(self):
return hash((self.__class__, self.lm))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.lm == other.lm
def __le__(self, other):
return self.lm <= other.lm
def __ge__(self, other):
return self.lm >= other.lm
def __lt__(self, other):
return self.lm < other.lm
def __gt__(self, other):
return self.lm > other.lm
def __call__(
self, theta: Union[int, float, np.array], phi: Union[int, float, np.array] = 0
):
"""
Evaluate the spherical harmonic at a point, or vectorized over an array
of points.
Parameters
----------
theta
The polar coordinate.
phi
The azimuthal coordinate.
Returns
-------
val
The value of the spherical harmonic evaluated at (``theta``, ``phi``).
"""
return special.sph_harm(self.m, self.l, phi, theta)
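# Hedged usage sketch (editor addition): for l = 0, m = 0 the harmonic is constant, so
#     SphericalHarmonic(l=0, m=0)(theta=0.0)
# equals special.sph_harm(0, 0, 0.0, 0.0) ~= 0.28209479 + 0j, i.e. 1 / (2 * sqrt(pi)).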
def complex_quad(
integrand: Callable, a: float, b: float, **kwargs
) -> Tuple[complex, tuple, tuple]:
"""
As :func:`scipy.integrate.quad`, but works with complex integrands.
Parameters
----------
integrand
The function to integrate.
a
The lower limit of integration.
b
The upper limit of integration.
kwargs
Additional keyword arguments are passed to
:func:`scipy.integrate.quad`.
Returns
-------
(result, real_extras, imag_extras)
A tuple containing the result, as well as the other things returned
by :func:`scipy.integrate.quad` for the real and imaginary parts
separately.
"""
def real_func(*args, **kwargs):
return np.real(integrand(*args, **kwargs))
def imag_func(*args, **kwargs):
return np.imag(integrand(*args, **kwargs))
real_integral = integ.quad(real_func, a, b, **kwargs)
imag_integral = integ.quad(imag_func, a, b, **kwargs)
return (
real_integral[0] + (1j * imag_integral[0]),
real_integral[1:],
imag_integral[1:],
)
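# Hedged worked example (editor addition): integrating e^{ix} from 0 to pi,
#     result, real_extras, imag_extras = complex_quad(lambda x: np.exp(1j * x), 0, np.pi)
# gives result ~= 0 + 2j, since the antiderivative of e^{ix} is e^{ix} / i.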
def complex_quadrature(
integrand: Callable, a: float, b: float, **kwargs
) -> Tuple[complex, tuple, tuple]:
"""
As :func:`scipy.integrate.quadrature`, but works with complex integrands.
Parameters
----------
integrand
The function to integrate.
a
The lower limit of integration.
b
The upper limit of integration.
kwargs
Additional keyword arguments are passed to
:func:`scipy.integrate.quadrature`.
Returns
-------
(result, real_extras, imag_extras)
A tuple containing the result, as well as the other things returned
by :func:`scipy.integrate.quadrature` for the real and imaginary parts
separately.
"""
def real_func(*args, **kwargs):
return np.real(integrand(*args, **kwargs))
def imag_func(*args, **kwargs):
return np.imag(integrand(*args, **kwargs))
real_integral = integ.quadrature(real_func, a, b, **kwargs)
imag_integral = integ.quadrature(imag_func, a, b, **kwargs)
return (
real_integral[0] + (1j * imag_integral[0]),
real_integral[1:],
imag_integral[1:],
)
def complex_dblquad(
integrand: Callable, a: float, b: float, gfun: Callable, hfun: Callable, **kwargs
) -> Tuple[complex, tuple, tuple]:
"""
As :func:`scipy.integrate.dblquad`, but works with complex integrands.
Parameters
----------
integrand
The function to integrate.
    a
        The lower limit of integration.
    b
        The upper limit of integration.
    gfun
        The lower boundary curve of the inner integral, as a function of the outer variable.
    hfun
        The upper boundary curve of the inner integral, as a function of the outer variable.
kwargs
Additional keyword arguments are passed to
:func:`scipy.integrate.dblquad`.
Returns
-------
(result, real_extras, imag_extras)
A tuple containing the result, as well as the other things returned
by :func:`scipy.integrate.dblquad` for the real and imaginary parts
separately.
"""
def real_func(y, x):
return np.real(integrand(y, x))
def imag_func(y, x):
return np.imag(integrand(y, x))
real_integral = integ.dblquad(real_func, a, b, gfun, hfun, **kwargs)
imag_integral = integ.dblquad(imag_func, a, b, gfun, hfun, **kwargs)
return (
real_integral[0] + (1j * imag_integral[0]),
real_integral[1:],
imag_integral[1:],
)
def complex_nquad(integrand, ranges, **kwargs) -> Tuple[complex, tuple, tuple]:
"""
As :func:`scipy.integrate.nquad`, but works with complex integrands.
Parameters
----------
integrand
The function to integrate.
    ranges
        The ranges of integration for each variable, as accepted by
        :func:`scipy.integrate.nquad`.
kwargs
Additional keyword arguments are passed to
:func:`scipy.integrate.nquad`.
Returns
-------
(result, real_extras, imag_extras)
A tuple containing the result, as well as the other things returned
by :func:`scipy.integrate.nquad` for the real and imaginary parts
separately.
"""
    def real_func(*args):
        return np.real(integrand(*args))
    def imag_func(*args):
        return np.imag(integrand(*args))
real_integral = integ.nquad(real_func, ranges, **kwargs)
imag_integral = integ.nquad(imag_func, ranges, **kwargs)
return (
real_integral[0] + (1j * imag_integral[0]),
real_integral[1:],
imag_integral[1:],
)
```
#### File: simulacra/simulacra/parameters.py
```python
import logging
from typing import Any, Optional, Union, List, Collection, Dict, Tuple, Callable
import itertools
from copy import deepcopy
import textwrap
# these imports need to be here so that ask_for_eval works
import numpy as np
import scipy as sp
from simulacra import units as u
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Parameter:
"""A class that represents a parameter of a :class:`simulacra.Specification`."""
def __init__(
self,
name: str,
value: Union[Any, Collection[Any]] = None,
expandable: bool = False,
):
"""
Parameters
----------
name
The name of the Parameter, which should match a keyword argument of
the target :class:`simulacra.Specification`.
value
The value of the Parameter, or an iterable of values.
expandable
If ``True``, :func:`expand_parameters` will expand along an iterable
`value`.
"""
self.name = name
self.value = value
self.expandable = expandable
def __repr__(self):
if not self.expandable:
return (
f"{self.__class__.__name__}(name = {self.name}, value = {self.value})"
)
else:
header = f"{self.__class__.__name__}(name = {self.name}, expandable = {self.expandable}, values ="
val_text = textwrap.wrap(", ".join(repr(v) for v in self.value), width=60)
out = "\n ".join([header] + val_text) + ")"
return out
def expand_parameters(parameters: Collection[Parameter]) -> List[Dict[str, Any]]:
"""
Expand an iterable of :class:`Parameter` to a list of dictionaries
containing all of the combinations of parameter values.
Each of these dictionaries can then be unpacked into a :class:`Specification`.
If a :class:`Parameter` has ``expandable = True``, it will be expanded
across the values in the outermost iterable in that :class:`Parameter`'s
``value``.
Parameters
----------
parameters
The parameters to expand over.
Returns
-------
expanded_parameters
        A list of dictionaries containing all of the combinations of parameters.
"""
expanded: List[Dict[str, Any]] = [{}]
for par in parameters:
if par.expandable:
expanded = [deepcopy(d) for d in expanded for _ in range(len(par.value))]
for d, v in zip(expanded, itertools.cycle(par.value)):
d[par.name] = v
else:
for d in expanded:
d[par.name] = par.value
return expanded
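# Hedged usage sketch (editor addition): one expandable and one fixed parameter,
#     expand_parameters([
#         Parameter("a", value=[1, 2], expandable=True),
#         Parameter("b", value="x"),
#     ])
# returns [{"a": 1, "b": "x"}, {"a": 2, "b": "x"}], one kwargs dict per combination.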
def ask_for_input(
question: str, default: Any = None, callback: Callable[[Any], Any] = str
) -> Any:
"""
Ask for input from the user at the command line.
Parameters
----------
question
A string to display as a prompt for the user.
default
The default answer to the question.
callback
The return value of this callback is what is returned from this
function. Useful for simple conversions, like receiving an integer
instead of a raw string.
Returns
-------
answer
The input, passed through ``callback``.
"""
while True:
input_str = input(question + " [Default: {}] > ".format(default))
trimmed = input_str.replace(" ", "")
if trimmed == "":
return default
try:
return callback(trimmed)
except Exception as e:
print(e)
def ask_for_choices(
question: str,
choices: Union[List[str], Tuple[str, ...], Dict[str, Any]],
default: Optional[str] = None,
):
"""
Ask for input from the user, restricted to a given set of options.
Parameters
----------
question
        A string to display as a prompt for the user.
choices
The choices to present to the user. If it is a tuple or list of strings,
these will be the choices and whichever one is chosen will be returned.
If it is a dictionary, the user will be asked to choose from the keys,
and the matching value will be returned.
default
The default answer to the question. If this is ``None``, the default
will be the first element of the choices.
Returns
-------
answer
        The chosen value: the matching value from ``choices`` if it is a dictionary, otherwise the chosen string itself.
"""
if default is None:
default = list(choices)[0]
while True:
answer = ask_for_input(question + f' [{" | ".join(choices)}]', default=default)
if answer not in choices:
print(f"{answer} is not a valid choice, try again")
continue
try:
return choices[answer]
except TypeError:
return answer
except Exception as e:
print(e)
TRUE_ANSWERS = {"true", "t", "yes", "y", "1", "on", True, 1}
FALSE_ANSWERS = {"false", "f", "no", "n", "0", "off", False, "", 0}
def ask_for_bool(question: str, default: Union[str, bool, int] = "") -> bool:
"""
Ask for input from the user, which will be interpreted
as a boolean. The interpretation is case-insensitive.
Synonyms for ``True``: ``true``, ``t``, ``yes``, ``y``, ``1``, ``on``
Synonyms for ``False``: ``false``, ``f``, ``no``, ``n``, ``0``, ``off``, ```` (i.e., nothing)
Parameters
----------
question
        A string to display as a prompt for the user.
default
The default answer to the question, which is ``False``.
Returns
-------
answer
The input, interpreted as a boolean.
"""
while True:
input_str = input(question + " [Default: {}] > ".format(default))
trimmed = input_str.replace(" ", "").lower()
if trimmed == "":
if isinstance(default, str):
trimmed = default.replace(" ", "").lower()
else:
trimmed = default
if trimmed in TRUE_ANSWERS:
return True
elif trimmed in FALSE_ANSWERS:
return False
else:
print(f"Answer could not be interpreted as a boolean, try again")
def ask_for_eval(question: str, default: str = "None") -> Any:
"""
Ask for input from the user, which will be evaluated as a Python command.
Numpy and Scipy's top-level interfaces (imported as ``np`` and ``sp``) and
Simulacra's own unit module (imported as ``u``) are all available.
For example, entering ``np.linspace(0, 1, 100)`` will produce the
expected result of an array of 100 numbers evenly spaced between 0 and 1.
.. warning ::
This function is not safe!
The user can execute arbitrary Python code!
You should only expose this function to known, trusted users.
Parameters
----------
question
        A string to display as a prompt for the user.
default
The default answer to the question.
Returns
-------
answer
The result of evaluating the user's input.
"""
while True:
input_str = input(question + " [Default: {}] (eval) > ".format(default))
trimmed = input_str.replace(" ", "")
if trimmed == "":
input_str = str(default)
try:
return eval(input_str)
except Exception as e:
print(e)
```
#### File: simulacra/utils/logging.py
```python
import logging
from typing import Optional
import os
import sys
import datetime
from pathlib import Path
from . import filesystem
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
DEFAULT_LOG_FORMATTER = logging.Formatter(
"%(asctime)s [%(levelname)s] ~ %(message)s", datefmt="%y-%m-%d %H:%M:%S"
)
class LogManager:
"""
A context manager to set up logging.
Within a managed block, logging messages are intercepted if their
logger is named in ``logger_names``.
The object returned by the LogManager ``with`` statement can be used as a
logger, with name given by ``manual_logger_name``.
"""
def __init__(
self,
*logger_names,
manual_logger_name: Optional[str] = None,
log_formatter: logging.Formatter = DEFAULT_LOG_FORMATTER,
stdout_logs: bool = True,
stdout_level: int = logging.DEBUG,
file_logs: bool = False,
file_level: int = logging.DEBUG,
file_name: Optional[str] = None,
file_dir: Optional[os.PathLike] = None,
file_mode: str = "a",
):
"""
Parameters
----------
logger_names
The names of loggers to intercept.
manual_logger_name
The name used by the logger returned by the :class:`LogManager`
``with`` statement. If not given, it will be the first logger
passed in ``logger_names``.
log_formatter
A log formatter to use for all log messages.
stdout_logs
If ``True``, log messages will be displayed on stdout.
stdout_level
Sets the level of messages to display on stdout.
file_logs
            If ``True``, log messages will be sent to a file.
file_level
Sets the level of messages to send to the file.
file_name
            The name of the log file. If not given, it will be named ``log__<timestamp>``.
file_dir
The directory to place the log file in. If not given, it will be in
the current working directory.
file_mode
The file mode to open the log file with, defaults to 'a' (append).
"""
self.logger_names = list(logger_names)
if manual_logger_name is None:
manual_logger_name = self.logger_names[0]
if manual_logger_name not in self.logger_names:
self.logger_names = [manual_logger_name] + self.logger_names
self.stdout_logs = stdout_logs
self.stdout_level = stdout_level
self.file_logs = file_logs
self.file_level = file_level
if file_name is None:
file_name = f"log__{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S')}"
self.file_name = file_name
if not self.file_name.endswith(".log"):
self.file_name += ".log"
if file_dir is None:
file_dir = Path.cwd()
self.file_dir = Path(file_dir)
self.file_mode = file_mode
self.log_formatter = log_formatter
self.logger = None
def __enter__(self):
"""Gets a logger with the specified name, replace it's handlers with, and returns itself."""
self.loggers = {name: logging.getLogger(name) for name in self.logger_names}
new_handlers = [logging.NullHandler()]
if self.stdout_logs:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(self.stdout_level)
stdout_handler.setFormatter(self.log_formatter)
new_handlers.append(stdout_handler)
if self.file_logs:
log_file_path = self.file_dir / self.file_name
# NB: the log message emitted here will not be included in the
# logger being created by this context manager
filesystem.ensure_parents_exist(log_file_path)
file_handler = logging.FileHandler(
log_file_path, mode=self.file_mode, encoding="utf-8"
)
file_handler.setLevel(self.file_level)
file_handler.setFormatter(self.log_formatter)
new_handlers.append(file_handler)
self.old_levels = {name: logger.level for name, logger in self.loggers.items()}
self.old_handlers = {
name: logger.handlers for name, logger in self.loggers.items()
}
for logger in self.loggers.values():
logger.setLevel(logging.DEBUG)
logger.handlers = new_handlers
return self.loggers[self.logger_names[0]]
def __exit__(self, exc_type, exc_val, exc_tb):
logging.disable(logging.NOTSET)
for name, logger in self.loggers.items():
logger.level = self.old_levels[name]
logger.handlers = self.old_handlers[name]
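# Hedged usage sketch (editor addition): typical use as a context manager, assuming a
# package logger named "mypackage"; its messages are echoed to stdout inside the block.
#     with LogManager("mypackage", stdout_logs=True, file_logs=False) as logger:
#         logger.info("hello from inside the managed block")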
```
#### File: simulacra/utils/timing.py
```python
import datetime
import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class BlockTimer:
"""A context manager that times the code in the ``with`` block. Print the :class:`BlockTimer` after exiting the block to see the results."""
__slots__ = (
"wall_time_start",
"wall_time_end",
"wall_time_elapsed",
"proc_time_start",
"proc_time_end",
"proc_time_elapsed",
)
def __init__(self):
self.wall_time_start = None
self.wall_time_end = None
self.wall_time_elapsed = None
self.proc_time_start = None
self.proc_time_end = None
self.proc_time_elapsed = None
def __enter__(self):
self.wall_time_start = datetime.datetime.now()
self.proc_time_start = time.process_time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.proc_time_end = time.process_time()
self.wall_time_end = datetime.datetime.now()
self.wall_time_elapsed = self.wall_time_end - self.wall_time_start
self.proc_time_elapsed = datetime.timedelta(
seconds=self.proc_time_end - self.proc_time_start
)
def __str__(self):
if self.wall_time_end is None:
return f"{self.__class__.__name__} started at {self.wall_time_start} and is still running"
return f"{self.__class__.__name__} started at {self.wall_time_start}, ended at {self.wall_time_end}. Elapsed time: {self.wall_time_elapsed}. Process time: {self.proc_time_elapsed}."
```
#### File: tests/sims/conftest.py
```python
import pytest
import simulacra as si
class DummySim(si.Simulation):
def run(self):
"""run method docstring"""
pass
class DummySpec(si.Specification):
simulation_type = DummySim
@pytest.fixture(scope="function")
def spec():
return DummySpec("dummy")
@pytest.fixture(scope="function")
def sim(spec):
return spec.to_sim()
```
#### File: tests/sims/test_beet.py
```python
import pytest
import simulacra as si
@pytest.fixture(scope="function")
def blank_beet():
return si.Beet("beet")
def test_clone_changed(blank_beet):
blank_beet.foo = 0
c = blank_beet.clone(foo=1)
assert c.foo != blank_beet.foo
def test_clone_unchanged(blank_beet):
blank_beet.foo = 0
c = blank_beet.clone()
assert c.foo == blank_beet.foo
def test_clone_changes_uuid(blank_beet):
c = blank_beet.clone()
assert c.uuid != blank_beet.uuid
def test_cloned_beet_not_equal(blank_beet):
c = blank_beet.clone()
assert c != blank_beet
def test_is_hashable(blank_beet):
assert hash(blank_beet)
def test_quality(blank_beet):
assert blank_beet == blank_beet
assert blank_beet != si.Beet("beet")
def test_can_be_put_in_set(blank_beet):
assert {blank_beet}
def test_can_be_used_as_dict_key(blank_beet):
assert {blank_beet: 0}
def test_round_trip_through_save(blank_beet, tmp_path):
p = blank_beet.save(tmp_path)
loaded = si.Beet.load(p)
assert blank_beet == loaded
assert blank_beet is not loaded
```
#### File: tests/sims/test_spec.py
```python
import pytest
import simulacra as si
def test_spec_absorbs_extra_kwargs():
s = si.Specification("foobar", extra="joe", bing="baz")
assert s._extra_attr_keys == {"extra", "bing"}
assert s.extra == "joe"
assert s.bing == "baz"
```
#### File: simulacra/tests/test_units.py
```python
import simulacra.units as u
def test_get_unit_value():
assert u.get_unit_value("km") == u.km
def test_get_unit_values():
assert u.get_unit_values("m", "km", "mA") == (u.m, u.km, u.mA)
def test_get_unit_value_and_latex():
assert u.get_unit_value_and_latex("m") == (u.m, r"\mathrm{m}")
```
#### File: tests/utils/test_watched_memoize.py
```python
import pytest
import simulacra as si
class Foo:
def __init__(self):
self.w = True
@si.utils.watched_memoize(lambda s: s.w)
def memoized(self, x):
return self.inner(x)
def inner(self, x):
raise NotImplementedError
@pytest.fixture(scope="function")
def watched_mock(mocker):
foo = Foo()
foo.inner = mocker.MagicMock()
return foo
def test_watched_mock_is_only_called_once_for_repeated_args(watched_mock):
watched_mock.memoized(1)
watched_mock.memoized(1)
watched_mock.memoized(1)
watched_mock.memoized(1)
watched_mock.memoized(1)
assert watched_mock.inner.call_count == 1
def test_watched_mock_is_called_multiple_times_for_different_args(watched_mock):
watched_mock.memoized(1)
watched_mock.memoized(1)
watched_mock.memoized(2)
watched_mock.memoized(2)
watched_mock.memoized(3)
watched_mock.memoized(3)
watched_mock.memoized(4)
watched_mock.memoized(4)
watched_mock.memoized(5)
watched_mock.memoized(5)
assert watched_mock.inner.call_count == 5
def test_watched_mock_resets_if_watched_value_changes(watched_mock):
watched_mock.memoized(1)
watched_mock.w = not watched_mock.w
watched_mock.memoized(1)
watched_mock.memoized(2)
watched_mock.w = not watched_mock.w
watched_mock.memoized(2)
watched_mock.memoized(3)
watched_mock.w = not watched_mock.w
watched_mock.memoized(3)
watched_mock.memoized(4)
watched_mock.memoized(4)
watched_mock.memoized(5)
watched_mock.memoized(5)
assert (
watched_mock.inner.call_count == 6 + 2
) # 6 from first three pairs, two from last two pairs
def test_watched_memoize(mocker):
func = mocker.MagicMock(return_value="foo")
class Foo:
def __init__(self):
self.a = True
self.counter = 0
@si.utils.watched_memoize(lambda self: self.a)
def method(self, x):
func()
self.counter += 1
return self.counter
f = Foo()
assert f.method(0) == 1
assert f.method(10) == 2
assert f.method(10) == 2
assert f.method(10) == 2
assert f.method(10) == 2
assert func.call_count == 2
assert f.method(0) == 1
assert f.method(3) == 3
assert func.call_count == 3
f.a = not f.a # resets memo
assert f.method(0) == 4
assert func.call_count == 4
``` |
{
"source": "JoshKarpel/snarl",
"score": 3
} |
#### File: snarl/examples/decorator.py
```python
from snarl import snarled
@snarled
def a():
b()
recurse(2)
def b():
for _ in range(10):
c()
recurse(3)
f = Foo()
f.method(5)
def c():
def inner():
pass
inner()
def recurse(n):
if n == 0:
return
recurse(n - 1)
class Foo:
def method(self, x):
pass
@classmethod
def clsmethod(cls):
return cls()
a()
recurse(5)
Foo.clsmethod()
dot = a.snarl.dot(name = 'decorator', format = 'png')
dot.render(view = True)
```
#### File: snarl/snarl/snarl.py
```python
from typing import Optional
import collections
import os
import sys
import time
from graphviz import Digraph
from .tracer import Tracer
class Snarl:
def __init__(self, whitelist = None, blacklist = None, timer = time.perf_counter_ns):
self.whitelist = whitelist
self.blacklist = blacklist
self.was_called_by = collections.defaultdict(lambda: collections.defaultdict(int))
self.timer = timer
self.total_time = collections.defaultdict(int)
self.own_time = collections.defaultdict(int)
self.func_start_times = {}
self.trace_function = Tracer(self)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self):
sys.settrace(self.trace_function)
def stop(self):
sys.settrace(None)
def dot(
self,
name: Optional[str] = None,
call_counts: bool = True,
timing: bool = True,
format: str = 'png',
dpi: int = 600,
):
g = Digraph(
name or 'snarl',
graph_attr = {
'dpi': str(dpi),
},
node_attr = {
'fontname': 'Courier New',
'shape': 'box',
},
edge_attr = {
'fontname': 'Courier New',
},
)
g.format = format
func_names = set(k.name for k in self.was_called_by)
try:
common = os.path.dirname(os.path.commonpath(k.file_name for k in self.was_called_by))
paths = (os.path.relpath(k.file_name, start = common).replace(r'\ '[0], r'\\') for k in self.was_called_by.keys())
except ValueError:
paths = (k.file_name.replace(r'\ '[0], r'\\') for k in self.was_called_by.keys())
for k, path in zip(self.was_called_by.keys(), paths):
label_lines = [
rf'{k.name}',
rf'{path}:{k.line_number}',
]
if timing:
label_lines.append(rf'Total: {fmt_ns(self.total_time[k])} | Own: {fmt_ns(self.own_time[k])}')
g.node(
k.name,
label = r'\n'.join(label_lines),
)
for k, v in self.was_called_by.items():
for func, count in v.items():
if func in func_names:
g.edge(func, k.name, label = str(f' {count:,}') if call_counts else None)
return g
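# Hedged usage sketch (editor addition): the context-manager form of the tracer; the
# @snarled decorator in examples/decorator.py wraps this same machinery. Names below
# are illustrative.
#     with Snarl() as s:
#         some_function_to_trace()
#     s.dot(name='trace', format='png').render(view=True)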
units = ('ns', 'us', 'ms', 's')
def fmt_ns(t):
for unit in units[:-1]:
if t < 1000:
return f'{t:.3f} {unit}'
t /= 1000
return f'{t:.3f} {units[-1]}'
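# Hedged worked example (editor addition): fmt_ns(1_234_567) walks ns -> us -> ms and
# returns '1.235 ms'; any duration of one second or more falls through to the final
# 's' return.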
``` |
{
"source": "JoshKarpel/spiel",
"score": 3
} |
#### File: spiel/spiel/deck.py
```python
from __future__ import annotations
import dis
import inspect
import sys
from collections.abc import Collection
from dataclasses import dataclass, field
from textwrap import dedent
from typing import Callable, Iterator, List, Sequence
from spiel.example import Example
from spiel.presentable import Presentable
from spiel.slide import MakeRenderable, Slide
@dataclass
class Deck(Collection[Presentable]):
name: str
slides: List[Presentable] = field(default_factory=list)
def __getitem__(self, idx: int) -> Presentable:
return self.slides[idx]
def __len__(self) -> int:
return len(self.slides)
def __iter__(self) -> Iterator[Presentable]:
return iter(self.slides)
def __contains__(self, obj: object) -> bool:
return obj in self.slides
def add_slides(self, *slides: Presentable) -> Deck:
self.slides.extend(slides)
return self
def slide(
self,
title: str = "",
) -> Callable[[MakeRenderable], Slide]:
def slideify(content: MakeRenderable) -> Slide:
slide = Slide(
title=title,
content=content,
)
self.add_slides(slide)
return slide
return slideify
def example(
self,
title: str = "",
command: Sequence[str] = (sys.executable,),
name: str = "example.py",
language: str = "python",
) -> Callable[[Callable[[], None]], Example]:
def exampleify(example: Callable[[], None]) -> Example:
ex = Example(
title=title,
source=get_function_body(example),
command=command,
name=name,
language=language,
)
self.add_slides(ex)
return ex
return exampleify
def get_function_body(function: Callable[[], None]) -> str:
lines, line_of_def_start = inspect.getsourcelines(function)
line_of_first_instruction = list(dis.Bytecode(function))[0].starts_line or line_of_def_start
offset = line_of_first_instruction - line_of_def_start
return dedent("".join(lines[offset:]))
def count_leading_whitespace(s: str) -> int:
return len(s) - len(s.lstrip())
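# Hedged usage sketch (editor addition): registering a slide with the decorator API
# above; the deck name and slide content are illustrative.
#     deck = Deck(name="My Talk")
#
#     @deck.slide(title="Hello")
#     def hello():
#         return "Hello, world!"  # the decorated callable supplies the slide's content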
```
#### File: spiel/spiel/example.py
```python
from __future__ import annotations
import shlex
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
from subprocess import PIPE, STDOUT, run
from typing import Callable, Optional, Sequence
from rich.align import Align
from rich.console import ConsoleRenderable
from rich.layout import Layout
from rich.panel import Panel
from rich.syntax import Syntax
from rich.text import Text
from .presentable import Presentable
from .triggers import Triggers
@dataclass
class CachedExample:
trigger_number: int
input: str
output: Optional[str]
def example_panels(example: Example) -> ConsoleRenderable:
root = Layout()
root.split_column(
Layout(
Align.center(
Panel(
example.input,
title=example.name,
title_align="left",
expand=False,
)
)
),
Layout(
Align.center(
Panel(
example.output,
title=f"$ {example.display_command}",
title_align="left",
expand=False,
)
if example.output is not None
else Text(" ")
)
),
)
return root
ExampleLayout = Callable[["Example"], ConsoleRenderable]
@dataclass
class Example(Presentable):
source: str = ""
command: Sequence[str] = (sys.executable,)
name: str = "example.py"
language: str = "python"
_layout: ExampleLayout = example_panels
_cache: Optional[CachedExample] = None
def layout(self, function: ExampleLayout) -> ExampleLayout:
self._layout = function
return function
@property
def display_command(self) -> str:
return shlex.join([Path(self.command[0]).stem, *self.command[1:], self.name])
def execute(self) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
dir = Path(tmpdir)
file = dir / self.name
file.write_text(self.source)
result = run([*self.command, file], stdout=PIPE, stderr=STDOUT, text=True)
return result.stdout
@property
def input(self) -> Syntax:
input = (self._cache.input or "") if self._cache is not None else ""
return Syntax(
input.strip(),
lexer_name=self.language,
code_width=max(len(line) for line in input.splitlines()),
)
@property
def output(self) -> Optional[Text]:
return (
Text(self._cache.output)
if (self._cache is not None and self._cache.output is not None)
else None
)
def clear_cache(self) -> None:
self._cache = None
def render(self, triggers: Triggers) -> ConsoleRenderable:
if self._cache is None:
self._cache = CachedExample(len(triggers), self.source, None)
elif self._cache.trigger_number != len(triggers):
self._cache = CachedExample(len(triggers), self.source, self.execute())
return self._layout(
self, **self.get_render_kwargs(function=self._layout, triggers=triggers)
)
```
#### File: spiel/spiel/image.py
```python
from __future__ import annotations
from dataclasses import dataclass
from functools import lru_cache
from math import floor
from pathlib import Path
from typing import Iterable, List, NamedTuple, Tuple, Union
from PIL import Image as Img
from rich.color import Color
from rich.console import Console, ConsoleOptions
from rich.segment import Segment
from rich.style import Style
from spiel.utils import chunks
class ImageSize(NamedTuple):
width: int
height: int
Pixels = Tuple[Union[Tuple[int, int, int], None], ...]
@lru_cache(maxsize=2 ** 8)
def _pixels_to_segments(pixels: Pixels, size: ImageSize) -> List[Segment]:
line = Segment.line()
segments = []
pixel_row_pairs = chunks(chunks(pixels, size.width), 2, fill_value=[None] * size.width)
for top_pixel_row, bottom_pixel_row in pixel_row_pairs:
for top_pixel, bottom_pixel in zip(top_pixel_row, bottom_pixel_row):
# use upper-half-blocks for the top pixel row and the background color for the bottom pixel row
segments.append(
Segment(
text="▀",
style=Style.from_color(
color=Color.from_rgb(*top_pixel) if top_pixel else None,
bgcolor=Color.from_rgb(*bottom_pixel) if bottom_pixel else None,
),
)
)
segments.append(line)
return list(Segment.simplify(segments))
@lru_cache(maxsize=2 ** 4)
def _load_image(path: Path) -> Image:
return Img.open(path)
@dataclass(frozen=True)
class Image:
img: Img
@classmethod
def from_file(cls, path: Path) -> Image:
return cls(img=_load_image(path))
def _determine_size(self, options: ConsoleOptions) -> ImageSize:
width, height = self.img.size
# multiply the max height by 2, because we're going to print 2 "pixels" per row
max_height = options.height * 2 if options.height else None
if max_height:
width, height = width * max_height / self.img.height, max_height
if width > options.max_width:
width, height = options.max_width, height * options.max_width / width
return ImageSize(floor(width), floor(height))
def _resize(self, size: ImageSize) -> Img:
return self.img.resize(
size=size,
resample=Img.LANCZOS,
)
def __rich_console__(self, console: Console, options: ConsoleOptions) -> Iterable[Segment]:
size = self._determine_size(options)
resized = self._resize(size)
pixels = tuple(resized.getdata())
yield from _pixels_to_segments(pixels, size)
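# Hedged usage sketch (editor addition): because Image implements __rich_console__, it
# can be printed directly by a rich Console; the file path below is illustrative.
#     from rich.console import Console
#     Console().print(Image.from_file(Path("assets/photo.png")))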
```
#### File: spiel/spiel/main.py
```python
import shutil
from contextlib import nullcontext
from pathlib import Path
from textwrap import dedent
from rich.console import Console
from rich.control import Control
from rich.style import Style
from rich.syntax import Syntax
from rich.text import Text
from typer import Argument, Exit, Option, Typer
from spiel.constants import PACKAGE_NAME, __version__
from spiel.help import version_details
from spiel.load import DeckWatcher
from spiel.modes import Mode
from spiel.present import present_deck
from spiel.reloader import DeckReloader
from spiel.state import State
THIS_DIR = Path(__file__).resolve().parent
app = Typer(
help=dedent(
f"""\
Display richly-styled presentations using your terminal.
To see what {PACKAGE_NAME.capitalize()} can do, take a look at the demo deck:
$ spiel demo present
A {PACKAGE_NAME.capitalize()} presentation (a "deck [of slides]") is defined programmatically using a Python script.
"""
),
no_args_is_help=True,
)
@app.command()
def present(
path: Path = Argument(
...,
dir_okay=False,
help="The path to the slide deck file.",
),
mode: Mode = Option(
default=Mode.SLIDE,
help="The mode to start presenting in.",
),
slide: int = Option(
default=1,
help="The slide number to start the presentation on.",
),
watch: bool = Option(
default=False,
help="If enabled, reload the deck when the slide deck file changes.",
),
poll: bool = Option(
default=False,
help="If enabled, poll the filesystem for changes (implies --watch). Use this option on systems that don't support file modification notifications.",
),
) -> None:
"""
Present a deck.
"""
_present(path=path, mode=mode, slide=slide, watch=watch, poll=poll)
def _present(path: Path, mode: Mode, slide: int, watch: bool, poll: bool) -> None:
console = Console()
try:
state = State.from_file(path)
except FileNotFoundError as e:
console.print(Text(f"Error: {e}", style=Style(color="red")))
raise Exit(code=1)
state.mode = mode
state.jump_to_slide(slide - 1)
watcher = (
DeckWatcher(event_handler=DeckReloader(state, path), path=path, poll=poll)
if (watch or poll)
else nullcontext()
)
try:
with state, watcher:
present_deck(state)
except KeyboardInterrupt:
raise Exit(code=0)
finally:
state.console.print(Control.clear())
state.console.print(Control.move_to(0, 0))
@app.command()
def init(
path: Path = Argument(
...,
writable=True,
resolve_path=True,
help="The path to create a new deck script at.",
)
) -> None:
"""
Create a new deck script at the given path from a basic template.
This is a good starting point if you already know what you want to do.
If you're not so sure, consider taking a look at the demo deck to see what's possible:
$ spiel demo --help
"""
console = Console()
if path.exists():
console.print(
Text(f"Error: {path} already exists, refusing to overwrite.", style=Style(color="red"))
)
raise Exit(code=1)
name = path.stem.replace("_", " ").title()
try:
path.parent.mkdir(parents=True, exist_ok=True)
except Exception as e:
console.print(
Text(
f"Error: was not able to ensure that the parent directory {path.parent} exists due to: {e}.",
style=Style(color="red"),
)
)
raise Exit(code=1)
try:
path.write_text(
dedent(
f"""\
from textwrap import dedent
                from rich.markdown import Markdown
                from spiel import Deck, Options
deck = Deck(name="{name}")
options = Options()
@deck.slide(title="Title")
def title():
markup = dedent(
\"""\\
# {name}
This is your title slide!
\"""
)
return Markdown(markup, justify="center")
"""
)
)
except Exception as e:
console.print(
Text(
f"Error: was not able to write template to {path} due to: {e}",
style=Style(color="red"),
)
)
raise Exit(code=1)
console.print(Text(f"Wrote deck template to {path}", style=Style(color="green")))
@app.command()
def version(
plain: bool = Option(
default=False,
help=f"Print only {PACKAGE_NAME}'s version.",
)
) -> None:
"""
Display version and debugging information.
"""
console = Console()
if plain:
print(__version__)
else:
console.print(version_details(console))
demo = Typer(
name="demo",
help=dedent(
"""\
Use the demonstration deck (present it, display source, etc.).
"""
),
)
DEMO_DIR = THIS_DIR / "demo"
DEMO_SOURCE = THIS_DIR / "demo" / "demo.py"
@demo.command(name="present")
def present_demo() -> None:
"""
Present the demo deck.
"""
_present(path=DEMO_SOURCE, mode=Mode.SLIDE, slide=0, watch=False, poll=False)
@demo.command()
def source() -> None:
"""
Display the source code for the demo deck in your PAGER.
"""
console = Console()
with console.pager(styles=True):
console.print(Syntax(DEMO_SOURCE.read_text(), lexer_name="python"))
@demo.command()
def copy(
path: Path = Argument(
default=...,
writable=True,
help="The path to copy the demo deck source code and assets to.",
)
) -> None:
"""
Copy the demo deck source code and assets to a new directory.
If you're looking for a more stripped-down starting point, try the init command:
$ spiel init --help
"""
console = Console()
if path.exists():
console.print(Text(f"Error: {path} already exists!", style=Style(color="red")))
raise Exit(code=2)
try:
shutil.copytree(DEMO_DIR, path)
except Exception as e:
console.print(Text(f"Failed to copy demo deck directory: {e}", style=Style(color="red")))
raise Exit(code=1)
console.print(
Text(f"Wrote demo deck source code and assets to {path}", style=Style(color="green"))
)
app.add_typer(demo)
```
#### File: spiel/spiel/present.py
```python
import sys
from itertools import islice
from math import ceil
from time import monotonic
from rich.console import ConsoleRenderable
from rich.layout import Layout
from rich.live import Live
from rich.padding import Padding
from rich.panel import Panel
from rich.style import Style
from spiel.constants import TARGET_RPS
from spiel.exceptions import UnknownModeError
from spiel.footer import Footer
from spiel.help import Help
from spiel.input import handle_input, no_echo
from spiel.modes import Mode
from spiel.presentable import Presentable
from spiel.rps import RPSCounter
from spiel.state import State
from spiel.triggers import Triggers
from spiel.utils import clamp, filter_join
def render_slide(state: State, slide: Presentable) -> ConsoleRenderable:
return Padding(
slide.render(triggers=Triggers(times=tuple(state.trigger_times))),
pad=1,
)
def split_layout_into_deck_grid(root: Layout, state: State) -> Layout:
grid_width = state.deck_grid_width
row_of_current_slide = state.current_slide_idx // grid_width
num_rows = ceil(len(state.deck) / grid_width)
start_row = clamp(
value=row_of_current_slide - (grid_width // 2),
lower=0,
upper=max(num_rows - grid_width, 0),
)
start_slide_idx = grid_width * start_row
slides = islice(enumerate(state.deck.slides, start=1), start_slide_idx, None)
rows = [Layout(name=str(r)) for r in range(grid_width)]
cols = [[Layout(name=f"{r}-{c}") for c in range(grid_width)] for r, _ in enumerate(rows)]
root.split_column(*rows)
for row, layouts in zip(rows, cols):
for layout in layouts:
slide_number, slide = next(slides, (None, None))
if slide is None:
layout.update("")
else:
is_active_slide = slide is state.current_slide
layout.update(
Panel(
slide.render(triggers=Triggers(times=(monotonic(),))),
title=filter_join(" | ", [slide_number, slide.title]),
border_style=Style(
color="bright_cyan" if is_active_slide else None,
dim=not is_active_slide,
),
)
)
row.split_row(*layouts)
return root
def present_deck(state: State) -> None:
rps_counter = RPSCounter()
footer = Layout(Footer(state, rps_counter), name="footer", size=1)
help = Layout(Help(state), name="help")
def get_renderable() -> Layout:
current_slide = state.deck[state.current_slide_idx]
body = Layout(name="body", ratio=1)
if state.mode is Mode.SLIDE:
body.update(render_slide(state, current_slide))
elif state.mode is Mode.DECK:
split_layout_into_deck_grid(body, state)
elif state.mode is Mode.HELP:
body.update(help)
elif state.mode is Mode.OPTIONS:
body.update(state.options)
else: # pragma: unreachable
raise UnknownModeError(f"Unrecognized mode: {state.mode!r}")
root = Layout(name="root")
root.split_column(body, footer)
rps_counter.mark()
return root
with no_echo(), Live(
get_renderable=get_renderable,
console=state.console,
screen=True,
auto_refresh=True,
refresh_per_second=TARGET_RPS,
vertical_overflow="visible",
) as live:
while True:
handle_input(state, sys.stdin)
live.refresh()
```
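The deck-grid view above windows the grid so the row containing the current slide stays near the middle of the screen, clamping the first visible row to the valid range. A minimal sketch of that windowing arithmetic, with `clamp` inlined as `max(lower, min(value, upper))` (an assumption about `spiel.utils.clamp`, whose source is not shown here):

```python
from math import ceil

def first_visible_row(current_slide_idx: int, num_slides: int, grid_width: int) -> int:
    # Row that holds the current slide, and total number of grid rows.
    row_of_current = current_slide_idx // grid_width
    num_rows = ceil(num_slides / grid_width)
    # Clamp so the window never scrolls past the top or bottom of the grid.
    value = row_of_current - (grid_width // 2)
    lower, upper = 0, max(num_rows - grid_width, 0)
    return max(lower, min(value, upper))

# e.g. 20 slides, 3 columns, current slide #10 sits in row 3 -> window starts at row 2
print(first_visible_row(10, 20, 3))
```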
#### File: spiel/spiel/state.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
from tempfile import TemporaryDirectory
from time import monotonic
from types import TracebackType
from typing import Callable, ContextManager, List, Optional, Type, Union
from rich.console import Console
from rich.style import Style
from rich.text import Text
from spiel.constants import PACKAGE_NAME
from spiel.deck import Deck
from spiel.load import load_deck_and_options
from spiel.modes import Mode
from spiel.options import Options
from spiel.presentable import Presentable
TextLike = Union[Text, Callable[[], Text]]
@dataclass
class State(ContextManager["State"]):
console: Console
deck: Deck
options: Options
_current_slide_idx: int = 0
_mode: Mode = Mode.SLIDE
_message: TextLike = Text("")
trigger_times: List[float] = field(default_factory=list)
@classmethod
def from_file(cls, path: Path, console: Optional[Console] = None) -> State:
deck, options = load_deck_and_options(path)
return cls(console=console or Console(), deck=deck, options=options)
@property
def mode(self) -> Mode:
return self._mode
@mode.setter
def mode(self, mode: Mode) -> None:
self._mode = mode
self.reset_trigger()
@property
def current_slide_idx(self) -> int:
return self._current_slide_idx
@current_slide_idx.setter
def current_slide_idx(self, idx: int) -> None:
self._current_slide_idx = max(0, min(len(self.deck) - 1, idx))
self.reset_trigger()
def next_slide(self, move: int = 1) -> None:
if self.current_slide_idx == len(self.deck) - 1:
return
self.current_slide_idx += move
def previous_slide(self, move: int = 1) -> None:
if self.current_slide_idx == 0:
return
self.current_slide_idx -= move
def jump_to_slide(self, idx: int) -> None:
self.current_slide_idx = idx
@property
def current_slide(self) -> Presentable:
return self.deck[self.current_slide_idx]
@property
def message(self) -> Text:
if callable(self._message):
try:
return self._message()
except Exception:
return Text(
"Internal Error: failed to display message.",
style=Style(color="bright_red"),
)
else:
return self._message
def set_message(self, message: TextLike) -> None:
self._message = message
def clear_message(self) -> None:
self.set_message(Text(""))
@property
def deck_grid_width(self) -> int:
return max(self.console.size.width // 30, 1)
def trigger(self) -> None:
self.trigger_times.append(monotonic())
def reset_trigger(self) -> None:
self.trigger_times.clear()
self.trigger()
@cached_property
def _tmp_dir(self) -> TemporaryDirectory[str]:
return TemporaryDirectory(prefix=f"{PACKAGE_NAME}-")
@cached_property
def tmp_dir(self) -> Path:
return Path(self._tmp_dir.name)
def __enter__(self) -> State:
return self
def __exit__(
self,
exctype: Optional[Type[BaseException]],
excinst: Optional[BaseException],
exctb: Optional[TracebackType],
) -> None:
self._tmp_dir.cleanup()
```
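`State` is a context manager: entering it is a no-op, and exiting cleans up the temporary directory it lazily creates. A hedged usage sketch (the `deck.py` path is hypothetical):

```python
from pathlib import Path

from spiel.state import State

# The `with` block guarantees the lazily-created temporary directory is
# removed on exit, even if presenting raises.
with State.from_file(Path("deck.py")) as state:
    print(state.deck_grid_width)   # columns available for the deck-grid view
    print(state.tmp_dir)           # per-session scratch directory
```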
#### File: spiel/tests/test_cli.py
```python
import subprocess
import sys
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from pytest_mock import MockFixture
from typer.testing import CliRunner
from spiel.constants import PACKAGE_NAME, __version__
from spiel.main import DEMO_SOURCE, app
from spiel.modes import Mode
def test_help(runner: CliRunner) -> None:
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
def test_help_via_main() -> None:
result = subprocess.run([sys.executable, "-m", PACKAGE_NAME, "--help"])
print(result.stdout)
assert result.returncode == 0
def test_version(runner: CliRunner) -> None:
result = runner.invoke(app, ["version"])
assert result.exit_code == 0
assert __version__ in result.stdout
def test_plain_version(runner: CliRunner) -> None:
result = runner.invoke(app, ["version", "--plain"])
assert result.exit_code == 0
assert __version__ in result.stdout
def test_clean_keyboard_interrupt(runner: CliRunner, mocker: MockFixture) -> None:
mock = mocker.patch("spiel.main.present_deck", MagicMock(side_effect=KeyboardInterrupt()))
result = runner.invoke(app, ["present", str(DEMO_SOURCE)])
assert mock.called
assert result.exit_code == 0
def test_present_deck_on_missing_file(runner: CliRunner, tmp_path: Path) -> None:
result = runner.invoke(app, ["present", str(tmp_path / "missing.py")])
assert result.exit_code == 1
@pytest.mark.parametrize("mode", list(Mode))
@pytest.mark.parametrize("stdin", ["", "s", "d", "h", "p"])
def test_display_demo_deck(runner: CliRunner, mode: Mode, stdin: str) -> None:
result = runner.invoke(app, ["present", str(DEMO_SOURCE), "--mode", mode], input=stdin)
assert result.exit_code == 0
def test_demo_display(runner: CliRunner) -> None:
result = runner.invoke(app, ["demo", "present"])
assert result.exit_code == 0
def test_demo_source(runner: CliRunner) -> None:
result = runner.invoke(app, ["demo", "source"])
assert result.exit_code == 0
def test_demo_copy_to_new_path(runner: CliRunner, tmp_path: Path) -> None:
target = tmp_path / "new"
result = runner.invoke(app, ["demo", "copy", str(target)])
print(result.stdout)
assert result.exit_code == 0
def test_demo_copy_to_existing_file(runner: CliRunner, tmp_path: Path) -> None:
target = tmp_path / "new"
target.touch()
result = runner.invoke(app, ["demo", "copy", str(target)])
assert result.exit_code == 2
def test_demo_copy_to_existing_dir(runner: CliRunner, tmp_path: Path) -> None:
target = tmp_path / "new"
target.mkdir(parents=True)
result = runner.invoke(app, ["demo", "copy", str(target)])
assert result.exit_code == 2
def test_demo_copy_error_during_copytree(
runner: CliRunner,
tmp_path: Path,
mocker: MockFixture,
) -> None:
mock = mocker.patch("shutil.copytree", MagicMock(side_effect=Exception("foobar")))
target = tmp_path / "new"
result = runner.invoke(app, ["demo", "copy", str(target)])
assert mock.called
assert "foobar" in result.stdout
assert result.exit_code == 1
```
#### File: spiel/tests/test_demo.py
```python
import pytest
from spiel.main import DEMO_SOURCE
from spiel.present import render_slide
from spiel.state import State
@pytest.fixture
def state() -> State:
return State.from_file(DEMO_SOURCE)
def test_can_render_every_demo_slide(state: State) -> None:
deck = state.deck
for slide in deck:
for _ in range(10):
state.console.print(render_slide(state, slide))
state.trigger()
state.reset_trigger()
```
#### File: spiel/tests/test_init.py
```python
from pathlib import Path
import pytest
from typer.testing import CliRunner
from spiel import Options
from spiel.load import load_deck_and_options
from spiel.main import app
def test_init_cli_command_fails_if_file_exists(runner: CliRunner, tmp_path: Path) -> None:
target = tmp_path / "foo_bar.py"
target.touch()
result = runner.invoke(app, ["init", str(target)])
assert result.exit_code == 1
@pytest.fixture
def init_file(runner: CliRunner, tmp_path: Path) -> Path:
target = tmp_path / "foo_bar.py"
runner.invoke(app, ["init", str(target)])
return target
def test_title_slide_header_injection(init_file: Path) -> None:
assert "# Foo Bar" in init_file.read_text()
def test_can_load_init_file(init_file: Path) -> None:
deck, options = load_deck_and_options(init_file)
assert deck.name == "Foo Bar"
assert options == Options()
```
#### File: spiel/tests/test_load.py
```python
from pathlib import Path
from textwrap import dedent
import pytest
from spiel import Deck, Options
from spiel.constants import DECK
from spiel.exceptions import NoDeckFound
from spiel.load import load_deck_and_options
def test_loading_from_empty_file_fails(empty_file: Path) -> None:
with pytest.raises(NoDeckFound, match=DECK):
load_deck_and_options(empty_file)
def test_loading_from_missing_file_fails(tmp_path: Path) -> None:
missing_file = tmp_path / "no-such-path"
with pytest.raises(FileNotFoundError, match="no-such-path"):
load_deck_and_options(missing_file)
def test_can_load_deck_from_valid_file(file_with_empty_deck: Path) -> None:
deck, options = load_deck_and_options(file_with_empty_deck)
assert isinstance(deck, Deck)
assert isinstance(options, Options)
def test_can_load_custom_options(empty_file: Path) -> None:
empty_file.write_text(
dedent(
"""\
from spiel import Deck, Options
deck = Deck(name="deck")
options = Options(footer_time_format="foobar")
"""
)
)
_, options = load_deck_and_options(empty_file)
assert options.footer_time_format == "foobar"
def test_fails_to_load_not_deck(empty_file: Path) -> None:
empty_file.write_text(
dedent(
"""\
from spiel import Deck
deck = "not a Deck"
"""
)
)
with pytest.raises(NoDeckFound):
load_deck_and_options(empty_file)
def test_can_load_not_options(empty_file: Path) -> None:
empty_file.write_text(
dedent(
"""\
from spiel import Deck
deck = Deck(name="deck")
options = "not an Options"
"""
)
)
_, options = load_deck_and_options(empty_file)
assert isinstance(options, Options)
```
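The load tests above pin down the contract of `load_deck_and_options`: the module must expose a `Deck` under the expected variable name, while a missing or wrong-typed `options` silently falls back to defaults. A minimal deck module that satisfies the loader, sketched from the fixtures used in these tests (the filename is illustrative):

```python
# deck.py -- smallest module the loader accepts, mirroring the test fixtures above
from spiel import Deck, Options

deck = Deck(name="deck")
options = Options()   # optional; omitting it (or mistyping it) yields Options()
```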
#### File: spiel/tests/test_options.py
```python
from typing import Any
import pytest
from _pytest.tmpdir import TempPathFactory
from hypothesis import given, infer
from hypothesis import strategies as st
from hypothesis.strategies import SearchStrategy
from rich.console import Console
from spiel import Options
from spiel.exceptions import InvalidOptionValue
from spiel.repls import REPLS
def valid_options() -> SearchStrategy[Options]:
return st.builds(
Options,
profiling=infer,
repl=st.sampled_from(list(REPLS.keys())),
)
@given(o=valid_options())
def test_round_trip_to_dict(o: Options) -> None:
assert o == Options.from_dict(o.as_dict())
@given(o=valid_options())
def test_round_trip_to_toml(o: Options) -> None:
assert o == Options.from_toml(o.as_toml())
@given(o=valid_options())
def test_round_trip_to_file(o: Options, tmp_path_factory: TempPathFactory) -> None:
dir = tmp_path_factory.mktemp(basename="options-roundtrip")
path = dir / "options.toml"
assert o == Options.load(o.save(path))
def test_can_render_options(console: Console, three_slide_options: Options) -> None:
console.print(three_slide_options)
@pytest.mark.parametrize(
"key, value",
[
("repl", "foobar"),
],
)
def test_reject_invalid_option_values(key: str, value: Any) -> None:
with pytest.raises(InvalidOptionValue):
Options(**{key: value})
```
#### File: spiel/tests/test_reloader.py
```python
from io import StringIO
from pathlib import Path
from textwrap import dedent
from time import sleep
from rich.console import Console
from spiel.constants import DECK
from spiel.load import DeckWatcher
from spiel.reloader import DeckReloader
from spiel.state import State
def test_reloader_triggers_when_file_modified(
file_with_empty_deck: Path,
console: Console,
output: StringIO,
) -> None:
state = State.from_file(file_with_empty_deck)
reloader = DeckReloader(state=state, deck_path=file_with_empty_deck)
with DeckWatcher(event_handler=reloader, path=file_with_empty_deck, poll=True):
sleep(0.01)
file_with_empty_deck.write_text(
dedent(
f"""\
from spiel import Deck
{DECK} = Deck(name="modified")
"""
)
)
sleep(0.01)
for attempt in range(10):
console.print(state.message)
result = output.getvalue()
if state.deck.name == "modified" and "Reloaded deck" in result:
return # test succeeded
sleep(0.1)
assert (
False
), f"Reloader never triggered, current file contents:\n{file_with_empty_deck.read_text()}" # pragma: debugging
def test_reloader_captures_error_in_message(
file_with_empty_deck: Path,
console: Console,
output: StringIO,
) -> None:
state = State.from_file(file_with_empty_deck)
reloader = DeckReloader(state=state, deck_path=file_with_empty_deck)
with DeckWatcher(event_handler=reloader, path=file_with_empty_deck, poll=True):
sleep(0.01)
file_with_empty_deck.write_text(
dedent(
f"""\
from spiel import Deck
{DECK} = Deck(name="modified")
foobar
"""
)
)
sleep(0.01)
for attempt in range(10):
console.print(state.message)
result = output.getvalue()
if "NameError" in result and "foobar" in result:
return # test succeeded
sleep(0.1)
assert (
False
), f"Reloader never triggered, current file contents:\n{file_with_empty_deck.read_text()}" # pragma: debugging
``` |
{
"source": "JoshKarpel/test-threaded-bindings",
"score": 2
} |
#### File: test-threaded-bindings/tests/single_threaded_submit.py
```python
from __future__ import print_function
import sys
import htcondor
import utils
def test_single_threaded_submit(num_jobs):
submit = utils.held_submit()
schedd = htcondor.Schedd()
with schedd.transaction() as txn:
for _ in range(num_jobs):
result = submit.queue(txn, 1)
print("submit result", result)
utils.condor_q()
if __name__ == "__main__":
num_jobs = int(sys.argv[1])
test_single_threaded_submit(num_jobs)
```
#### File: test-threaded-bindings/tests/utils.py
```python
import os
import htcondor
def held_submit():
    return htcondor.Submit({"executable": "fubar", "hold": "true"})
def short_sleep_submit():
    return htcondor.Submit({"executable": "/bin/sleep", "arguments": "1"})
def condor_q():
os.system("condor_q")
``` |
{
"source": "JoshKarpel/when",
"score": 3
} |
#### File: when/when/utils.py
```python
from typing import Callable, Iterable, List, Tuple, TypeVar
T = TypeVar("T")
def partition(items: Iterable[T], is_left: Callable[[T], bool]) -> Tuple[List[T], List[T]]:
left = []
right = []
for item in items:
if is_left(item):
left.append(item)
else:
right.append(item)
return left, right
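# Usage sketch (not part of the original module): split an iterable by a
# predicate in a single pass.
#
#   evens, odds = partition([1, 2, 3, 4], lambda n: n % 2 == 0)
#   assert evens == [2, 4] and odds == [1, 3]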
``` |
{
"source": "joshkay/twinkly",
"score": 2
} |
#### File: twinkly/tests/test_twinkly.py
```python
import pytest
from twinkly.twinkly import Twinkly
def test_device_exists():
twinkly = Twinkly("192.168.30.29")
assert twinkly.login_auth()
``` |
{
"source": "JoshKCarroll/carbonwa",
"score": 3
} |
#### File: carbonwa/i732-signatures/app.py
```python
import cgi
import webapp2
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users
import MySQLdb
import os
import jinja2
# Configure the Jinja2 environment.
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True,
extensions=['jinja2.ext.autoescape','jinja2.ext.loopcontrols'])
# Define your production Cloud SQL instance information.
_INSTANCE_NAME = 'i732-signatures:voterdb'
class MainPage(webapp2.RequestHandler):
def get(self):
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
#Check Authentication
user = users.get_current_user()
useremail = user.email()
cursor.execute('select 1 from users where email = %s', (useremail))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render())
return
# Create a list of voter entries to render with the HTML.
voterlist = [];
variables = {'voterlist': voterlist,
'fname':'',
'lname':'',
'city':'',
'county':''}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(variables))
class Results(webapp2.RequestHandler):
def post(self):
# Handle the post to get voter results.
fname = self.request.get('fname')
lname = self.request.get('lname')
city = self.request.get('city')
county = self.request.get('county')
address = self.request.get('address')
dob = self.request.get('dob')
fname = fname.lower()
lname = lname.lower()
city = city.lower()
county = county.lower()
address = address.lower()
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
cursor2 = db.cursor()
#Check Authentication
user = users.get_current_user()
useremail = user.email()
cursor.execute('select 1 from users where email = %s', (useremail))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render())
return
voterlist = [];
# Note that the only format string supported is %s
##cursor2.execute('select 1 from counties where 1=0;')
cursor.execute('select 1 from counties where 1=0;')
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (city!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where fname like %s and lname like %s and city like %s limit 300;', (fname, lname, city))
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (county!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where fname like %s and lname like %s and county like %s limit 300;', (fname, lname, county))
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (city!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where mname like %s and lname like %s and city like %s limit 300;', (fname, lname, city))
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (county!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where mname like %s and lname like %s and county like %s limit 300;', (fname, lname, county))
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (address!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where fname like %s and lname like %s and address like %s limit 300;', (fname, lname, address))
if ((cursor.rowcount == 0) and (fname != '') and (lname!='') and (dob!='')):
cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where fname like %s and lname like %s and birthdate=%s limit 300;', (fname, lname, dob))
## if ((cursor.rowcount == 0) and (((fname != '') and (lname!='')) or ((city!='') and (lname!='')))):
## cursor.execute('SELECT name, address, city, county, statevoterid from vw_voter where fname like %s and lname like %s limit 300;', (fname, lname))
## if ((city!='') and (lname!='')):
## cursor2.execute('SELECT name, address, city, county, statevoterid from vw_voter where city like %s and lname like %s limit 300;', (city, lname))
## elif ((county!='') and (lname!='')):
## cursor2.execute('SELECT name, address, city, county, statevoterid from vw_voter where county like %s and lname like %s limit 300;', (county, lname))
## for row in cursor2.fetchall():
## voterlist.append(dict([('name',cgi.escape(row[0])),
## ('address',cgi.escape(row[1])),
## ('city',cgi.escape(row[2])),
## ('county',cgi.escape(row[3])),
## ('statevoterid',cgi.escape(row[4])),
## ]))
# Create a list of voter entries to render with the HTML.
for row in cursor.fetchall():
voterlist.append(dict([('name',cgi.escape(row[0])),
('address',cgi.escape(row[1])),
('city',cgi.escape(row[2])),
('county',cgi.escape(row[3])),
('statevoterid',cgi.escape(row[4])),
]))
variables = {'voterlist': voterlist,
'fname':fname,
'lname':lname,
'city':city,
'county':county,
'address':address,
'dob':dob,
'results':'true'}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(variables))
db.close()
class Signed(webapp2.RequestHandler):
def post(self):
# Handle the post to add a signer.
statevoterid = self.request.get('signed')
statevoterid = statevoterid.lower()
voterlist = [];
variables = {'voterlist': voterlist,
'fname':'',
'lname':'',
'city':'',
'county':'',
'statevoterid':statevoterid,
'sig':'true'}
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
#Check Authentication
user = users.get_current_user()
useremail = user.email()
cursor.execute('select 1 from users where email = %s', (useremail))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render())
return
if(statevoterid != ''):
cursor.execute('select 1 from signers where statevoterid=%s;', (statevoterid))
if(cursor.fetchone()):
variables['dup'] = 'true'
else:
cursor.execute('insert into signers (statevoterid, signed, createdby, createddate) VALUES (%s, TRUE, %s, NOW());', (statevoterid, user.email()))
cursor.execute('select v.name from signers s join vw_voter v on s.statevoterid=v.statevoterid where s.statevoterid=%s limit 1;', (statevoterid))
for row in cursor.fetchall():
variables['name'] = cgi.escape(row[0])
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(variables))
db.commit()
db.close()
class Invalid(webapp2.RequestHandler):
def post(self):
# Handle the post to add an invalid signature.
fname = self.request.get('fname')
lname = self.request.get('lname')
city = self.request.get('city')
county = self.request.get('county')
fname = fname.lower()
lname = lname.lower()
city = city.lower()
county = county.lower()
variables = {'inv_fname':fname,
'inv_lname':lname,
'invalid':'true'}
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
#Check Authentication
user = users.get_current_user()
useremail = user.email()
cursor.execute('select 1 from users where email = %s', (useremail))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render())
return
if(fname != '' and lname != ''):
cursor.execute('select 1 from signers where fname=%s and lname=%s and city=%s and county=%s;', (fname, lname, city, county))
if(cursor.fetchone()):
variables['dup'] = 'true'
else:
cursor.execute('insert into signers (fname, lname, city, county, signed, createdby, createddate) VALUES (%s, %s, %s, %s, FALSE, %s, NOW());', (fname, lname, city, county, user.email()))
else:
variables['no_name'] = 'true'
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(variables))
db.commit()
db.close()
class Logins(webapp2.RequestHandler):
def get(self):
#Check Authentication
user = users.get_current_user()
userid = user.user_id()
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
cursor.execute('select 1 from admins where userid = %s', (userid))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render({'userid':user.user_id()}))
return
template = JINJA_ENVIRONMENT.get_template('users.html')
self.response.write(template.render())
db.close()
class AddUser(webapp2.RequestHandler):
def post(self):
#Check Authentication
user = users.get_current_user()
userid = user.user_id()
emailtoadd = self.request.get('email')
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
cursor.execute('select 1 from admins where userid = %s', (userid))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render({'userid':userid}))
return
#usertoadd = users.User("<EMAIL>")
#idtoadd = usertoadd.user_id()
cursor.execute('insert into users value ( %s );', (emailtoadd))
template = JINJA_ENVIRONMENT.get_template('users.html')
self.response.write(template.render({'emailtoadd':emailtoadd}))
db.commit()
db.close()
class Signatures(webapp2.RequestHandler):
def get(self):
#Check Authentication
user = users.get_current_user()
userid = user.user_id()
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
db = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db='voters', user='root', charset='utf8')
else:
db = MySQLdb.connect(host='127.0.0.1', port=3306, db='voters', user='root', charset='utf8')
cursor = db.cursor()
cursor.execute('select 1 from admins where userid = %s', (userid))
if not (cursor.fetchone()):
template = JINJA_ENVIRONMENT.get_template('unauth.html')
self.response.write(template.render({'userid':user.user_id()}))
return
siglist = []
# Start with the valid signatures
cursor.execute('SELECT v.name, v.address, v.city, v.county, s.signed, s.createdby, s.createddate from signers s inner join vw_voter v on s.statevoterid=v.statevoterid where signed=TRUE order by s.createddate desc;')
# Create a list of voter entries to render with the HTML.
for row in cursor.fetchall():
siglist.append(dict([('name',cgi.escape(row[0])),
('address',cgi.escape(row[1])),
('city',cgi.escape(row[2])),
('county',cgi.escape(row[3])),
('signed',row[4]),
('recordedby',cgi.escape(row[5])),
('recordedon',row[6])
]))
# Then append the invalid signatures
cursor.execute('SELECT concat(fname, " ", lname) as name, "" as address, city, county, signed, createdby, createddate from signers s where signed=FALSE order by s.createddate desc;')
# Create a list of voter entries to render with the HTML.
for row in cursor.fetchall():
siglist.append(dict([('name',cgi.escape(row[0])),
('address',cgi.escape(row[1])),
('city',cgi.escape(row[2])),
('county',cgi.escape(row[3])),
('signed',row[4]),
('recordedby',cgi.escape(row[5])),
('recordedon',row[6])
]))
variables = {'siglist': siglist}
template = JINJA_ENVIRONMENT.get_template('sigs.html')
self.response.write(template.render(variables))
db.close()
application = webapp2.WSGIApplication([('/', MainPage),
('/results', Results),
('/signed', Signed),
('/invalid', Invalid),
('/logins', Logins),
('/adduser', AddUser),
('/signatures', Signatures)],
debug=True)
def main():
application = webapp2.WSGIApplication([('/', MainPage),
('/results', Results),
('/signed', Signed),
('/invalid', Invalid),
('/logins', Logins),
('/adduser', AddUser),
('/signatures', Signatures)],
debug=True)
run_wsgi_app(application)
if __name__ == "__main__":
main()
``` |
{
"source": "joshkeating/letterbox-wl-overlap",
"score": 3
} |
#### File: joshkeating/letterbox-wl-overlap/letterbox-wl-scape.py
```python
import math
from selenium import webdriver
from collections import defaultdict
from bs4 import BeautifulSoup as bs
def pull_url(url):
STANDARD_PAGE_SIZE = 28
browser = webdriver.Chrome()
target_url = url
browser.get(target_url)
html = browser.execute_script("return document.body.innerHTML")
page = bs(html, 'html.parser')
num_films = page.find('h1', attrs={'class': 'section-heading'}).get_text()[-8:]
total_count = int(num_films[0:2])
page_count = math.ceil(total_count / STANDARD_PAGE_SIZE)
movie_list = []
for movie in page.find_all('span', attrs={'class': 'frame-title'}):
movie_list.append(movie.get_text())
    if page_count > 1:
        for i in range(2, page_count + 1):
            browser.get(url + '/page/' + str(i))
            html = browser.execute_script("return document.body.innerHTML")
            page = bs(html, 'html.parser')
            for movie in page.find_all('span', attrs={'class': 'frame-title'}):
                movie_list.append(movie.get_text())
    # Close the browser before returning so each call releases its WebDriver.
    browser.quit()
    return movie_list
def process_friends(friend_list):
movie_bag = []
for name in friend_list:
search_path = 'https://letterboxd.com/' + name + '/watchlist/'
movie_bag.append(pull_url(search_path))
return movie_bag
def find_films_in_common(film_bag):
film_dict = defaultdict(int)
for wishlist in film_bag:
for film in wishlist:
film_dict[film] += 1
    desc_dict = sorted(film_dict.items(), key=lambda t: t[1], reverse=True)
    with open("movies-in-common.txt", "w") as output_file:
        for k, v in desc_dict:
            output_file.write(k + ' ' + str(v) + '\n')
return
def main():
FRIENDS = ['joshkeating', 'ekatnic', 'paquinn', 'cjp123']
bag = process_friends(FRIENDS)
find_films_in_common(bag)
if __name__ == "__main__":
main()
``` |
{
"source": "joshkel/automated-testing-with-pytest",
"score": 3
} |
#### File: demo/failed_assertions/test_failed_assertions.py
```python
def test_math():
a = 1
b = 2
assert a + b == 4
def test_employee():
actual = {'first_name': 'John', 'last_name': 'Doe',
'city': 'Nashville', 'state': 'TN'}
expected = {'first_name': 'John', 'last_name': 'Doe',
'city': 'Memphis', 'state': 'TN'}
assert actual == expected
``` |
{
"source": "joshkel/pep8-naming",
"score": 2
} |
#### File: pep8-naming/testsuite/N807_py35.py
```python
class C:
async def γ(self):
pass
#: N807
async def __β(self):
pass
```
#### File: pep8-naming/testsuite/N807_py3.py
```python
class C:
def γ(self):
pass
#: N807
def __β(self):
pass
#: N807
def __β6(self):
pass
#: Okay
class C:
def γ1(self):
pass
``` |
{
"source": "JoshKing56/medusa",
"score": 2
} |
#### File: log_parser/src/parse_file.py
```python
import os, time
from influxdb import InfluxDBClient
from src.process_line import parse_line
from conf.constants import CONSTANTS
from conf.constants import INFLUX_INFO
def check_error(line):
return False
def write_to_db(json, influx_client):
if CONSTANTS["NO_DB"]:
print(f"Writing to db: {json}")
return None
try:
influx_client.ping()
except:
report_error("PYTHON ERROR: CONNECTION TO INFLUX LOST")
if not influx_client.write_points(json):
report_error("PYTHON ERROR: COULD NOT WRITE TO DATABASE")
def report_error(line):
# TODO: handle errors separately
print(f"ERROR: {line}")
def proces_file():
filename = CONSTANTS["LOG_FILEPATH"]
log_file = open(filename,'r')
file_size = os.stat(filename)[6]
log_file.seek(file_size)
influx = InfluxDBClient(host=INFLUX_INFO['HOST'],
port=INFLUX_INFO['PORT'],
username=INFLUX_INFO['USERNAME'],
                            password=INFLUX_INFO['PASSWORD'])
dbname = INFLUX_INFO['DBNAME']
print(f"ping: {influx.ping()}")
influx.create_database(dbname) #going to assume this gets ignored if it already exists for now
influx.switch_database(dbname)
while True:
where = log_file.tell()
line = log_file.readline()
if not line:
time.sleep(CONSTANTS["FILE_READ_RATE_IN_SEC"])
log_file.seek(where)
else:
json_data = parse_line(line.strip())
if json_data:
write_to_db(json_data, influx)
``` |
{
"source": "joshklos/pyIDML",
"score": 4
} |
#### File: pyidml/Text/Paragraphs.py
```python
from .Styles import TextStyleRange
from .BaseText import CommonText
class Paragraph(CommonText):
"""Paragraph class
Takes three arguments upon creation:
index: The index for the position of the paragraph within the story
para_style: the name of the paragraph style that applies to this paragraph
parent_story: the parent story that this paragraph is a part of"""
def __init__(self, index, para_style, parent_story):
super().__init__()
self.index = index
self.appliedParagraphStyle = para_style
self.characters = []
self.content = ""
self.footnotes = []
self.parentStory = parent_story
self.textStyleRanges = []
def print_contents(self, with_style_prefixes=False):
output = ""
if self.content == "":
self.generate_contents()
if with_style_prefixes:
output += "<"
output += self.appliedParagraphStyle.name
output += ">"
output += self.content
print(output)
def generate_contents(self):
self.content = ""
for char in self.characters:
self.content += char.content
    def generate_text_style_ranges(self):
        new_range = TextStyleRange()
        new_range.appliedCharacterStyle = self.characters[0].appliedCharacterStyle
        for char in self.characters:
            if char.appliedCharacterStyle == new_range.appliedCharacterStyle:
                new_range.characters.append(char)
            else:
                # Close off the current range before starting a new one
                new_range.generate_contents()
                self.textStyleRanges.append(new_range)
                new_range = TextStyleRange()
                new_range.appliedCharacterStyle = char.appliedCharacterStyle
                new_range.characters.append(char)
        # Flush the final range after the loop
        new_range.generate_contents()
        self.textStyleRanges.append(new_range)
def update(self):
self.generate_contents()
self.textStyleRanges = []
self.generate_text_style_ranges()
``` |
{
"source": "joshkmartinez/pyotp",
"score": 4
} |
#### File: joshkmartinez/pyotp/otp.py
```python
import sys
import itertools
import binascii
s = "hello world" # message or ciphertext
password = "<PASSWORD>"
switch = 0 # 0 to encrypt - 1 to decrypt
p = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' '] # no numbers for you boi
def xor(msg, key):
    # itertools.cycle repeats the key, so this also works when the key is shorter than the message.
return ''.join([chr(ord(x) ^ ord(y)) for x, y in zip(msg, itertools.cycle(key))])
def encrypt(msg, key):
out = xor(msg, key)
return (binascii.hexlify(out.encode())).decode()
def decrypt(cipher, key):
out = (binascii.unhexlify(cipher.encode())).decode()
return xor(out, key)
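# Round-trip sketch (illustrative, not in the original script): because xor()
# cycles the key, decrypt(encrypt(m, k), k) returns m for any key length.
#
#   assert decrypt(encrypt("hello world", "key"), "key") == "hello world"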
if (len(s) != len(password) and switch == 0):
print("The password must be the same length as the message.")
sys.exit()
split = list(s)
if switch == 0:
for x in split:
if x in p:
pass
else:
print("The only supported characters are uppercase and lowercase letters. Dont complain, I even included support for spaces, just for you!")
sys.exit()
if(switch == 1):
print("Here is the plaintext: \n" + decrypt(s, password))
else:
print("Here is the ciphertext: \n" + encrypt(s, password))
``` |
{
"source": "joshkunz/iTunesControl",
"score": 2
} |
#### File: joshkunz/iTunesControl/iTunesControl.py
```python
import win32com.client
if win32com.client.gencache.is_readonly == True:
win32com.client.gencache.is_readonly = False
win32com.client.gencache.Rebuild()
from win32com.client.gencache import EnsureDispatch
import pythoncom
import pyHook
import sys
from time import sleep
import threading
import Queue
import multiprocessing
global Kque
global Mque
global sendInfo
global LastAlt
LastAlt = 0
sendInfo = True
Kque = Queue.Queue()
Mque = Queue.Queue()
class Actor(threading.Thread):
def run(self):
global Kque
global LastAlt
global sendInfo
pythoncom.CoInitialize()
iTunes = EnsureDispatch("iTunes.Application")
self.To_Delete = []
print "Ready"
while 1:
command = Kque.get()
if command[2] > 0:
LastAlt = command[0]
sendInfo = False
if command[0]-LastAlt > 200:
sendInfo = True
try:
if command[1] == "P" and command[2] > 0:
iTunes.PlayPause()
elif command[1] == "Right" and command[2] > 0:
iTunes.NextTrack()
elif command[1] == "Left" and command[2] > 0:
iTunes.BackTrack()
elif command[1] == "Up" and command[2] > 0:
iTunes.SoundVolume += 5
elif command[1] == "Down" and command[2] > 0:
iTunes.SoundVolume -= 5
elif command[1] == "Oem_Minus" and command[2] > 0:
iTunes.SoundVolume = 0
elif command[1] == "Oem_Plus" and command[2] > 0:
iTunes.SoundVolume = 100
elif command[1] == "S" and command[2] > 0:
MainPlaylist = iTunes.CurrentPlaylist
if MainPlaylist.Shuffle == 1:
MainPlaylist.Shuffle = 0
elif MainPlaylist.Shuffle == 0:
MainPlaylist.Shuffle = 1
else:
pass
elif command[1] == "Finish" and command[2] > 0:
while len(self.To_Delete) > 0:
temp_l = iTunes.LibrarySource.Playlists.ItemByName(self.To_Delete.pop())
temp_l.Delete()
elif command[1] == "R" and command[2] > 0:
MainPlaylist = iTunes.CurrentPlaylist
Kque.task_done()
repeat = Kque.get()
if repeat[1] == "1" and repeat[2] > 0:
MainPlaylist.SongRepeat = 1
elif repeat[1] == "A" and repeat[2] > 0:
MainPlaylist.SongRepeat = 2
elif repeat[1] == "N" and repeat[2] > 0:
MainPlaylist.SongRepeat = 0
else:
pass
elif command[1] == "H" and command[2] > 0:
print "Enter Playlist Name:"
char_list = []
Kque.task_done()
pressed_key = Kque.get()
while pressed_key[2] > 0:
char_list.append(pressed_key[1])
Kque.task_done()
pressed_key = Kque.get()
ret_string = ""
Caps = False
Shift = False
for x in char_list:
val = x.lower()
if val not in ["space", "lshift", "rshift", "capital"]:
if Shift == True:
val = val.upper()
Shift = False
elif Caps == True:
val = val.upper()
else:
pass
ret_string += val
elif val == "space":
ret_string += " "
elif val in ["lshift", "rshift"]:
Shift = True
elif val == "capital":
if Caps == True:
Caps = False
elif Caps == False:
Caps = True
else:
pass
try:
gotoPlaylist = iTunes.LibrarySource.Playlists.ItemByName(ret_string)
gotoPlaylist.PlayFirstTrack()
print "Playing Playlist: %s"% ret_string
except:
print "Playlist %s Not Found"% ret_string
elif command[1] == "O" and command[2] > 0:
Kque.task_done()
repeat = Kque.get()
Op = None
if repeat[1] == "1" and repeat[2] > 0:
Op = "1"
elif repeat[1] == "2" and repeat[2] > 0:
Op = "2"
elif repeat[1] == "3" and repeat[2] > 0:
Op = "3"
else:
pass
print "Enter Char String"
char_list = []
Kque.task_done()
pressed_key = Kque.get()
while pressed_key[2] > 0:
char_list.append(pressed_key[1])
Kque.task_done()
pressed_key = Kque.get()
ret_string = ""
Caps = False
Shift = False
for x in char_list:
val = x.lower()
if val not in ["space", "lshift", "rshift", "capital"]:
if Shift == True:
val = val.upper()
Shift = False
elif Caps == True:
val = val.upper()
else:
pass
ret_string += val
elif val == "space":
ret_string += " "
elif val in ["lshift", "rshift"]:
Shift = True
elif val == "capital":
if Caps == True:
Caps = False
elif Caps == False:
Caps = True
else:
pass
Liby = iTunes.LibraryPlaylist
Tracks = Liby.Tracks
if Op == "1":
print "Scaning for artist: %s"% ret_string
track_list = []
for track in Tracks:
if track.Artist.lower() == ret_string.lower():
track_list.append(track)
elif Op == "2":
print "Scaning for album: %s"% ret_string
track_list = []
for track in Tracks:
if track.Album.lower() == ret_string.lower():
track_list.append(track)
elif Op == "3":
print "Scaning for Song Name: %s"% ret_string
track_list = []
for track in Tracks:
if track.Name.lower() == ret_string.lower():
track_list.append(track)
else:
pass
if len(track_list) > 0:
temp_list = iTunes.CreatePlaylist(ret_string)
self.To_Delete.append(ret_string)
temp_list = win32com.client.CastTo(temp_list, 'IITUserPlaylist')
for track in track_list:
temp_list.AddTrack(track)
temp_list.PlayFirstTrack()
print "Done"
else:
print "No Tracks Found"
else:
pass
except pythoncom.com_error, e:
print e
Kque.task_done()
class Actor2(threading.Thread):
def run(self):
global LastAlt
global Mque
global sendInfo
pythoncom.CoInitialize()
iTunes = EnsureDispatch("iTunes.Application")
self.RMouseDown = False
self.PosStart = None
self.PosEnd = None
print "Ready"
while 1:
command = Mque.get()
if sendInfo == False and command[1] == 513:
self.RMouseDown = True
if sendInfo == False and self.RMouseDown == True and self.PosStart == None and command[1] == 512:
self.PosStart = command[2]
if sendInfo == False and self.RMouseDown == True and command[1] == 512:
self.PosEnd = command[2]
try:
if sendInfo == False and self.RMouseDown == True and command[1] == 514:
self.RMouseDown = False
if self.PosStart != None and self.PosEnd != None:
if self.PosStart[0] < self.PosEnd[0]:
iTunes.NextTrack()
elif self.PosStart[0] > self.PosEnd[0]:
iTunes.BackTrack()
else:
pass
else:
iTunes.PlayPause()
self.PosStart = None
self.PosEnd = None
if sendInfo == False and command[3] != 0:
if command[3] > 0:
iTunes.SoundVolume += 2
elif command[3] < 0:
iTunes.SoundVolume -= 2
else:
pass
except pythoncom.com_error, e:
print e
Mque.task_done()
thread = Actor2()
thread.setDaemon(True)
thread.start()
thread = Actor()
thread.setDaemon(True)
thread.start()
def OnKeyboardEvent(event):
global Kque
global sendInfo
if event.Key == "Q" and event.Alt > 0:
Kque.put((0, "Finish", 32))
while len(thread.To_Delete) > 0:
sleep(0.2)
print "Thanks!"
sys.exit(0)
Kque.put([event.Time, event.Key, event.Alt])
if sendInfo == True:
return True
else:
return False
def OnMouseEvent(event):
global Mque
global LastAlt
global sendInfo
# called when mouse events are received
Mque.put([event.Time, event.Message, event.Position, event.Wheel])
if sendInfo != True:
if event.Message == 513 or event.Message == 514:
if event.Time-LastAlt > 150:
sendInfo = True
return True
else:
return False
elif event.Message == 522:
if event.Time-LastAlt > 150:
sendInfo = True
return True
else:
return False
else:
return True
else:
return True
# create a hook manager
hm = pyHook.HookManager()
# watch for all keyboard events
hm.KeyDown = OnKeyboardEvent
# set the hook
hm.HookKeyboard()
hm.MouseAll = OnMouseEvent
# set the hook
hm.HookMouse()
# wait forever
pythoncom.PumpMessages()
if __name__ == '__main__':
print "paapy nama"
``` |
{
"source": "JoshLabs/django-allauth",
"score": 2
} |
#### File: providers/amazon/tests.py
```python
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import AmazonProvider
class AmazonTests(create_oauth2_tests(registry.by_id(AmazonProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"Profile":{
"CustomerId":"amzn1.account.K2LI23KL2LK2",
"Name":"<NAME>",
"PrimaryEmail":"<EMAIL>"
}
}""")
```
#### File: providers/angellist/provider.py
```python
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AngelListAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('angellist_url')
def get_avatar_url(self):
return self.account.extra_data.get('image')
def to_str(self):
dflt = super(AngelListAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class AngelListProvider(OAuth2Provider):
id = 'angellist'
name = 'AngelList'
package = 'allauth.socialaccount.providers.angellist'
account_class = AngelListAccount
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(email=data.get('email'),
username=data.get('angellist_url').split('/')[-1],
name=data.get('name'))
providers.registry.register(AngelListProvider)
```
#### File: providers/angellist/tests.py
```python
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import AngelListProvider
class AngelListTests(create_oauth2_tests(registry
.by_id(AngelListProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{"name":"pennersr","id":424732,"bio":"","follower_count":0,
"angellist_url":"https://angel.co/dsxtst",
"image":"https://angel.co/images/shared/nopic.png",
"email":"<EMAIL>","blog_url":null,
"online_bio_url":null,"twitter_url":"https://twitter.com/dsxtst",
"facebook_url":null,"linkedin_url":null,"aboutme_url":null,
"github_url":null,"dribbble_url":null,"behance_url":null,
"what_ive_built":null,"locations":[],"roles":[],"skills":[],
"investor":false,"scopes":["message","talent","dealflow","comment",
"email"]}
""")
```
#### File: providers/openid/models.py
```python
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class OpenIDStore(models.Model):
server_url = models.CharField(max_length=255)
handle = models.CharField(max_length=255)
secret = models.TextField()
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField()
def __str__(self):
return self.server_url
@python_2_unicode_compatible
class OpenIDNonce(models.Model):
server_url = models.CharField(max_length=255)
timestamp = models.IntegerField()
salt = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.server_url
```
#### File: providers/openid/tests.py
```python
try:
from mock import Mock, patch
except ImportError:
from unittest.mock import Mock, patch
from openid.consumer import consumer
from django.test import TestCase
from django.core.urlresolvers import reverse
from allauth.utils import get_user_model
from . import views
from .utils import AXAttribute
class OpenIDTests(TestCase):
def test_discovery_failure(self):
"""
This used to generate a server 500:
DiscoveryFailure: No usable OpenID services found
for http://www.google.com/
"""
resp = self.client.post(reverse('openid_login'),
dict(openid='http://www.google.com'))
self.assertTrue('openid' in resp.context['form'].errors)
def test_login(self):
resp = self.client.post(reverse(views.login),
dict(openid='http://me.yahoo.com'))
assert 'login.yahooapis' in resp['location']
with patch('allauth.socialaccount.providers'
'.openid.views._openid_consumer') as consumer_mock:
client = Mock()
complete = Mock()
consumer_mock.return_value = client
client.complete = complete
complete_response = Mock()
complete.return_value = complete_response
complete_response.status = consumer.SUCCESS
complete_response.identity_url = 'http://dummy/john/'
with patch('allauth.socialaccount.providers'
'.openid.utils.SRegResponse') as sr_mock:
with patch('allauth.socialaccount.providers'
'.openid.utils.FetchResponse') as fr_mock:
sreg_mock = Mock()
ax_mock = Mock()
sr_mock.fromSuccessResponse = sreg_mock
fr_mock.fromSuccessResponse = ax_mock
sreg_mock.return_value = {}
ax_mock.return_value = {AXAttribute.PERSON_FIRST_NAME:
['raymond']}
resp = self.client.post(reverse('openid_callback'))
self.assertEqual('http://testserver/accounts/profile/',
resp['location'])
get_user_model().objects.get(first_name='raymond')
```
#### File: providers/orcid/tests.py
```python
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import OrcidProvider
class OrcidTests(create_oauth2_tests(registry.by_id(OrcidProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"message-version": "1.1",
"orcid-profile": {
"orcid-bio": {
"personal-details": {
"given-names": {
"value": "myname"
},
"other-names": {
"other-name": [
{
"value": "myself"
}
],
"visibility": "PUBLIC"
},
"family-name": {
"value": "mylastname"
}
},
"delegation": null,
"applications": null,
"contact-details": {
"email": [],
"address": {
"country": {
"value": "AR",
"visibility": "PUBLIC"
}
}
},
"keywords": {
"keyword": [
{
"value": "basil"
},
{
"value": "pizza"
}
],
"visibility": "PUBLIC"
},
"scope": null,
"biography": {
"value": "mybio",
"visibility": "PUBLIC"
}
},
"group-type": null,
"orcid-activities": {
"affiliations": null,
"orcid-works": {
"scope": null,
"orcid-work": [
{
"put-code": "394644",
"work-title": {
"subtitle": null,
"title": {
"value": "titlepaper"
}
},
"visibility": "PUBLIC",
"work-type": "CONFERENCE_PAPER",
"url": null,
"work-contributors": {
"contributor": [
{
"contributor-attributes": {},
"credit-name": {
"value": "myname",
"visibility": "PUBLIC"
}
}
]
},
"work-source": {
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org",
"uri": "http://sandbox.orcid.org/...98X",
"value": null
}
}
]
}
},
"orcid": null,
"client-type": null,
"orcid-history": {
"last-modified-date": {
"value": 1406058219693
},
"creation-method": "WEBSITE",
"submission-date": {
"value": 1405935036511
},
"visibility": null,
"source": null,
"claimed": {
"value": true
}
},
"type": "USER",
"orcid-preferences": {
"locale": "EN"
},
"orcid-identifier": {
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org",
"uri": "http://sandbox.orcid.org/0000-0001-6796-198X",
"value": null
}
}
}""")
def get_login_response_json(self, with_refresh_token=True):
# FIXME: This is not an actual response. I added this in order
# to get the test suite going but did not verify to check the
# exact response being returned.
return """
{
"access_token": "<PASSWORD>",
"expires_in": 631138026,
"token_type": "bearer",
"orcid": "0000-0001-6796-198X",
"scope": "/orcid-profile/read-limited",
"refresh_token": "<PASSWORD>"
}"""
```
#### File: providers/paypal/tests.py
```python
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import PaypalProvider
class PaypalTests(create_oauth2_tests(registry.by_id(PaypalProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"user_id": "https://www.paypal.com/webapps/auth/server/64ghr894040044",
"name": "<NAME>",
"given_name": "Jane",
"family_name": "Doe",
"email": "<EMAIL>"
}
""")
```
#### File: providers/stackexchange/provider.py
```python
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class StackExchangeAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('html_url')
def get_avatar_url(self):
return self.account.extra_data.get('avatar_url')
def to_str(self):
dflt = super(StackExchangeAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class StackExchangeProvider(OAuth2Provider):
id = 'stackexchange'
name = 'Stack Exchange'
package = 'allauth.socialaccount.providers.stackexchange'
account_class = StackExchangeAccount
def get_site(self):
settings = self.get_settings()
return settings.get('SITE', 'stackoverflow')
def extract_uid(self, data):
# `user_id` varies if you use the same account for
# e.g. StackOverflow and ServerFault. Therefore, we pick
# `account_id`.
uid = str(data['account_id'])
return uid
def extract_common_fields(self, data):
return dict(username=data.get('display_name'))
providers.registry.register(StackExchangeProvider)
```
#### File: providers/windowslive/tests.py
```python
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import WindowsLiveProvider
class WindowsLiveTests(create_oauth2_tests(
registry.by_id(WindowsLiveProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"first_name": "James",
"last_name": "Smith",
"name": "<NAME>",
"locale": "en_US",
"gender": null,
"emails": {
"personal": null,
"account": "<EMAIL>",
"business": null,
"preferred": "<EMAIL>"
},
"link": "https://profile.live.com/",
"updated_time": "2014-02-07T00:35:27+0000",
"id": "83605e110af6ff98"
}
""")
```
#### File: providers/windowslive/views.py
```python
from __future__ import unicode_literals
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import WindowsLiveProvider
class WindowsLiveOAuth2Adapter(OAuth2Adapter):
provider_id = WindowsLiveProvider.id
access_token_url = 'https://login.live.com/oauth20_token.srf'
authorize_url = 'https://login.live.com/oauth20_authorize.srf'
profile_url = 'https://apis.live.net/v5.0/me'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
resp = requests.get(self.profile_url, headers=headers)
#example of whats returned (in python format):
#{u'first_name': u'James', u'last_name': u'Smith',
# u'name': u'<NAME>', u'locale': u'en_US', u'gender': None,
# u'emails': {u'personal': None, u'account': u'<EMAIL>',
# u'business': None, u'preferred': u'<EMAIL>'},
# u'link': u'https://profile.live.com/',
# u'updated_time': u'2014-02-07T00:35:27+0000',
# u'id': u'83605e110af6ff98'}
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(WindowsLiveOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(WindowsLiveOAuth2Adapter)
```
#### File: example/demo/models.py
```python
import sys
from django.db.models.signals import post_syncdb
from django.contrib.sites.models import Site
from allauth.socialaccount.providers import registry
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
def setup_dummy_social_apps(sender, **kwargs):
"""
`allauth` needs tokens for OAuth based providers. So let's
setup some dummy tokens
"""
site = Site.objects.get_current()
for provider in registry.get_list():
if (isinstance(provider, OAuth2Provider)
or isinstance(provider, OAuthProvider)):
try:
SocialApp.objects.get(provider=provider.id,
sites=site)
except SocialApp.DoesNotExist:
print ("Installing dummy application credentials for %s."
" Authentication via this provider will not work"
" until you configure proper credentials via the"
" Django admin (`SocialApp` models)" % provider.id)
app = SocialApp.objects.create(provider=provider.id,
secret='secret',
client_id='client-id',
name='Dummy %s app' % provider.id)
app.sites.add(site)
# We don't want to interfere with unittests et al
if 'syncdb' in sys.argv:
post_syncdb.connect(setup_dummy_social_apps, sender=sys.modules[__name__])
``` |
{
"source": "joshlam123/optimization_algorithms",
"score": 2
} |
#### File: joshlam123/optimization_algorithms/ComputePartition.py
```python
import numpy as np
import logging
import time
import json
import pandas as pd
logging.basicConfig(filename='app.log', filemode='w', format = '%(asctime)s %(levelname)-10s %(processName)s %(name)s %(message)s')
logging.debug("debug")
logging.info("info")
logging.warning("warning")
logging.error("error")
logging.critical("critical")
class ComputePartition():
def __init__(self, i1=[-10.0, 10.0], i2=[-10.0, 10.0], temp_step=5000):
self.generate_range(i1, i2)
self.Tmax = temp_step
self.partition = dict()
def generate_range(self, i1, i2):
        self.i1 = np.arange(min(i1), max(i1), 0.01)
        self.i2 = np.arange(min(i2), max(i2), 0.01)
return i1, i2
def cost_function(self, x):
x1 = x[0]
x2 = x[1]
# function 1, levy function
obj = np.sin(3 * np.pi * x[0]) ** 2 + (x[0] - 1) ** 2 * (1
+ np.sin(3 * np.pi * x[1]) ** 2) + (x[1] - 1) ** 2 * (1
+ np.sin(2 * np.pi * x[1]) ** 2)
return obj
def calculate_partition(self):
energy = list()
exploration_space = [(i,j) for i in self.i1 for j in self.i2]
super_energies = [self.cost_function(i) for i in exploration_space]
        for i in range(1, self.Tmax + 1):
energies = super_energies.copy()
Beta = 1/i
energies = -Beta * np.array(energies)
partition_function = np.sum(np.exp(energies))
self.partition[i] = partition_function
energy.append(energies)
with open('partition.json', 'w') as fp:
json.dump(self.partition, fp)
pd.DataFrame.from_records(energy).T.to_csv("energies.csv")
if __name__ == '__main__':
partition = ComputePartition()
try:
partition.calculate_partition()
except Exception as e:
logging.error("Exception occurred", exc_info=True)
```
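For each inverse temperature β = 1/T the script above accumulates the Boltzmann partition function Z(β) = Σ_i exp(-β E_i) over the sampled grid of energies. A toy check of that accumulation on a hand-made energy list (illustrative only, independent of the Levy-function grid used above):

```python
import numpy as np

energies = np.array([0.5, 1.0, 2.0])   # toy energies E_i
beta = 1.0 / 2.0                       # inverse temperature at T = 2

# Z(beta) = sum_i exp(-beta * E_i)
Z = np.sum(np.exp(-beta * energies))
print(Z)  # ~1.753: exp(-0.25) + exp(-0.5) + exp(-1.0)
```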
#### File: optimization_algorithms/Continuous/Anneal_cont.py
```python
import math
# %matplotlib qt5
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import seaborn as sns
# Display all floats rounded off to 1 decimal place
pd.options.display.float_format = '{:,.1f}'.format
sns.set()
# Width = 16, Height = 6
DIMS=(16, 6)
import os
import random
import re
#!/usr/bin/python
# -*- coding: utf-8 -*-
# note if you want to change the function, remember to change the boundaries at which the function is evaluated!
class Annealer(object):
'''
Pass the max steps you want to take to the annealer function
'''
def __init__(
self,
maxsteps=500,
multiplier=1,
control_t=1,
acceptrate=0.5,
explore=30,
lams=1,
i1=np.arange(-15.0, 10., 0.01),
i2=np.arange(-10., 10.02, 0.01),
):
'''
inputs:
maxsteps - total number of temperature steps to anneal for (default = 500)
multiplier - eometric multiplier for annealing schedule (default = 1 OFF)
control_t - whether you want to turn on or off the geometric cooling schedule (default = 1 OFF)
acceptrate - generic lam's acceptance rate (default = 0.5)
explore - number of steps to explore at every iteration (default = 30 steps per iteration)
lams - whether to turn on or off lam's annealing schedule (default = 1 OFF)
Initialize parameters
output: none
'''
self.Tmax = maxsteps
self.threshold = multiplier
self.interval = list()
self.over_count = 0
# self.states = {"x":list(), "y":list()}
self.acceptrate = acceptrate
self.control = control_t
self.exploration_space = explore
self.trig_lams = lams
self.real_answer = -1.8013
self.lams = dict()
self.accepts = dict()
self.i1 = i1
self.i2 = i2
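    # Usage sketch (illustrative; the return values follow the anneal()
    # docstring below, which reports the final cost, final state and the
    # cost trace):
    #
    #   annealer = Annealer(maxsteps=1000, explore=30)
    #   result = annealer.anneal()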
def get_range(self):
'''
function to get range from the user
'''
i1 = input('Please input desired x1 range in the form x1,y1: \n'
)
i2 = input('Please input desired x1 range in the form x1,y1: \n'
)
special_chars = r'[`\=~!@#$%^&*()_+\[\]{};\'\\:"|<,/<>?]'
(i1, i2) = (re.split(special_chars, i1),
re.split(special_chars, i2))
        (i1, i2) = ([float(i) for i in i1], [float(i) for i in i2])
i1 = np.arange(min(i1), max(i1), 0.01)
i2 = np.arange(min(i2), max(i2), 0.01)
return (i1, i2)
def random_start(self):
"""
input: none
Randomly choose a random starting point within the boundary
output: a pair of starting point coordinates (x1, x2)
"""
self.interval.append([random.uniform(self.i1[0], self.i1[-1]),
random.uniform(self.i2[0], self.i2[-1])])
return self.interval
def f(self, x):
'''
input: x (a 2D array)
Function that evaluates the cost of a given x1, x2
output: single cost
'''
x1 = x[0]
x2 = x[1]
# function 1, levy function
obj = np.sin(3 * np.pi * x[0]) ** 2 + (x[0] - 1) ** 2 * (1
+ np.sin(3 * np.pi * x[1]) ** 2) + (x[1] - 1) ** 2 * (1
+ np.sin(2 * np.pi * x[1]) ** 2)
# self.i1 = np.arange(-10.0, 10., 0.01)
# self.i2 = np.arange(-10.0, 10., 0.01)
# obj = 100 * np.sqrt(abs(x[1] - 0.01*(-x[0])**2)) + 0.01 * abs(x[0] + 10)
# self.i1 = np.arange(-15.0, 10., 0.01)
# self.i2 = np.arange(-15.0, 10., 0.01)
#obj = - ((np.sin(x[1])* (np.sin((x[1]**2) / (np.pi))**20 )) + (np.sin(x[1])*(np.sin(2*(x[1]**2) / (np.pi))**20 )))
# self.i1 = np.arange(0, np.pi, 0.01)
# self.i2 = np.arange(0, np.pi, 0.01)
return obj
def random_neighbour(self, x):
"""
input: x (a 2D array)
Move a little bit x1 and x2, from the left or the right and then check whether it's within
the boundary. (normalized by the min and max)
if it's within the boundary, return the new coordinates, otherwise find new ones.
output: (newx, newy)
"""
# normalized
deltax = random.uniform(self.i1[0], self.i1[-1])
deltay = random.uniform(self.i2[0], self.i2[-1])
newx = x[0] + deltax
newy = x[1] + deltay
return [newx, newy]
def acceptance_probability(
self,
cost,
new_cost,
temperature,
):
'''
inputs: old cost, new cost, current temperature
calculate probability of acceptance and return it using the metropolis algorithm
output: probability (0 to 1)
'''
return np.exp(-(new_cost - cost) / temperature)
def restart(self):
'''
reinitializes at a random point
'''
state = self.random_start()[0]
cost = self.f(state)
return (state, cost)
def anneal(self):
'''
inputs: none
function performs annealing and calls random start to kickstart the annealing process. iteratively
calculates the new cost.
output: final cost, final state (list of x1 and x2), all costs (list of costs at every timestep)
'''
best_cost = list()
current_cost = list()
deviation = list()
T_list = list()
acceptrate = self.acceptrate
(states, costs) = self.restart()
LamRate = 0
best_cost.append(costs)
for temp_step in range(self.Tmax):
fraction = temp_step / float(self.Tmax)
# T = max((1-self.trig_lams) * max(fraction*(1-self.control), (1 - fraction) * self.control) * self.threshold, (1-fraction)*self.trig_lams)
# if you want to trigger lam's, self.control == 1
            if self.control == 0 and temp_step > 0:
T = self.threshold * (1 - fraction)
else:
T = 1 - fraction
T_list.append(T)
for step in range(self.exploration_space):
new_cost = costs
new_state = states
gen_new_state = self.random_neighbour(new_state)
gen_new_cost = self.f(gen_new_state)
if gen_new_cost < new_cost:
new_state = self.random_neighbour(states)
new_cost = self.f(new_state)
current_cost.append(new_cost)
if new_cost < costs or self.acceptance_probability(costs,
new_cost, T) >= random.uniform(0, 1):
states, costs = new_state, new_cost
if self.trig_lams == 1:
                        acceptrate = 1 / 500 * (499 * acceptrate + 1)
else:
if self.trig_lams == 1:
acceptrate = 1 / 500 * (499 * acceptrate)
# check conditions
if fraction < 0.15:
LamRate = 0.44 + 0.56 * 560 ** (-temp_step
/ (self.Tmax * 0.15))
elif fraction < 0.65:
LamRate = 0.44
else:
LamRate = 0.44 * 440 ** ((-temp_step
/ self.Tmax - 0.65) / 0.35)
if LamRate < acceptrate:
T *= 0.99
else:
T *= 1 / 0.999
deviation.append(abs(costs - self.real_answer))
if best_cost[-1] > costs:
best_cost.append(costs)
else:
best_cost.append(best_cost[-1])
if self.trig_lams == 1:
if temp_step not in list(self.lams.keys()):
self.lams[temp_step] = list()
if temp_step not in list(self.accepts.keys()):
self.accepts[temp_step] = list()
self.lams[temp_step].append(LamRate)
self.accepts[temp_step].append(acceptrate)
return (
current_cost,
best_cost[1:],
deviation,
self.accepts,
self.lams,
T_list,
)
def get_range():
Bukin = '100 * np.sqrt(abs(x[1] - 0.01*(-x[0])**2)) + 0.01 * abs(x[0] + 10)'
function_choice = input('Please input your desired function, e.g. Bukin Function n.6, 100 * np.sqrt(abs(x[1] - 0.01*(-x[0])**2)) + 0.01 * abs(x[0] + 10) \n')
i1 = input('Please input desired x1 range in the form x1,y1: \n')
    i2 = input('Please input desired x2 range in the form x2,y2: \n')
if function_choice == "":
function_choice = Bukin
i1 = [15.0, -10.0]
i2 = [15.0, -10.0]
else:
special_chars = r'[`\=~!@#$%^&*()_+\[\]{};\'\\:"|<,/<>?]'
i1, i2 = re.split(special_chars, i1), re.split(special_chars, i2)
        i1, i2 = [float(i) for i in i1], [float(i) for i in i2]
i1 = np.arange(min(i1), max(i1), 0.01)
i2 = np.arange(min(i2), max(i2), 0.01)
return function_choice, i1, i2
if __name__ == '__main__':
# add annealing code here
''' THIS IS SAMPLE CODE '''
function_choice, r1, r2 = get_range()
tries1 = {"run":list(), "temp":list(), "current_cost":list(), "best_cost":list(), "deviations":list()}
for i in tqdm(range(0, 100, 1)):
        # Annealer's cost function is hardcoded in f(); it does not take a custom function argument
        a = Annealer(maxsteps=5000, multiplier=1, control_t=1, i1=r1, i2=r2)
current_cost, best_cost, deviations, accepts, lams, T = a.anneal()
cost_keys = len(list(current_cost))
for k in range(cost_keys):
tries1['run'].append(i)
tries1['temp'].append(T[k])
tries1['current_cost'].append(current_cost[k])
tries1['best_cost'].append(best_cost[k])
tries1['deviations'].append(deviations[k])
''' converts the dictionary into a pandas dataframe for easy data manipulation'''
df_case1 = pd.DataFrame.from_dict(tries1)
#df_case1 = df_case1.reindex(index=df_case1.index[::-1])
df_case1.head(20)
df_case1_group_mean = df_case1.groupby(['temp']).mean().reset_index()
df_case1_group_mean.to_csv("case1_func3.csv")
# TO PLOT TEMPERATURE V. COST
fig, ax1 = plt.subplots(1, 1)
plt.xlabel("Temperature")
plt.ylabel("Cost", fontsize=12)
#Add the legend
plt.title("Temperature v. Cost (1 - Ti / Tmax)")
plt.xlim(1.0, 0)
#plt.ylim(0,100)
plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['current_cost'].tolist(), label='current_cost')
plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['best_cost'].tolist(), label='best_cost')
plt.legend(fontsize=12)
plt.savefig('case_1_costs.png')
# TO PLOT DEVIATIONS
fig, ax1 = plt.subplots(1, 1)
plt.xlabel("Temperature")
plt.ylabel("Deviations", fontsize=12)
#Add the legend
plt.title("Temperature v. Deviation (1 - Ti / Tmax)")
plt.xlim(1.0, 0)
plt.plot(df_case1_group_mean['temp'].tolist(), df_case1_group_mean['deviations'].tolist(), label='mean')
plt.savefig('case_1_deviations.png')
plt.show()
''' END OF SAMPLE CODE '''
```
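The `acceptance_probability` method above is the Metropolis criterion: a move that raises the cost by ΔE is still accepted with probability exp(−ΔE/T), so the walker can escape local minima at high temperature and becomes effectively greedy as T → 0. A small standalone sketch with illustrative numbers (not taken from the repository):
```python
# Standalone sketch of the Metropolis acceptance rule used by Annealer.acceptance_probability.
import numpy as np

delta = 0.5  # hypothetical cost increase of a candidate move
for temperature in (1.0, 0.5, 0.1):
    print(temperature, np.exp(-delta / temperature))
# roughly 0.61, 0.37 and 0.007: uphill moves become very unlikely as T falls
```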
#### File: optimization_algorithms/Discrete/brute.py
```python
import numpy as np
#!/usr/bin/python
# -*- coding: utf-8 -*-
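# NOTE: this module expects an `Ncity` helper object (providing cities, start_city,
# init_tour and table_distances) to be defined or imported by the surrounding
# project; it is not created here.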
class BruteTSP:
def __init__(self):
self.cities = Ncity.cities
self.start_city = Ncity.start_city
self.init_tour = Ncity.init_tour
self.interval = list()
self.visited_cities = list()
self.greedy_tour = list()
self.distance = lambda x, y: np.sqrt((x[0] - y[0]) ** 2 + (x[1]
- y[1]) ** 2)
self.table_distances = Ncity.table_distances
self.shortest_distance = list() # tour: distance
self.all_distance = list()
def f(self, tour):
'''
input: tour (list)
Function that evaluates the cost of every single remaining node
output: distance
'''
distances = [self.table_distances[tour[i]][tour[i + 1]]
for i in range(len(tour) - 1)]
# for i in range(len(tour) - 1):
# total_distance += self.table_distances[tour[i]][tour[i+1]]
total_distance = sum(distances)
average_tour_len = total_distance / len(tour)
return (total_distance, average_tour_len)
def heap_perm(self, A):
''' instantiate the heap algorithm '''
n = len(A)
Alist = [el for el in A]
for hp in self._heap_perm_(n, Alist):
yield hp
def _heap_perm_(self, n, A):
''' implement the heap algorithm for generating permutations '''
if n == 1:
yield A
else:
for i in range(n - 1):
for hp in self._heap_perm_(n - 1, A):
yield hp
j = (0 if n % 2 == 1 else i)
(A[j], A[n - 1]) = (A[n - 1], A[j])
for hp in self._heap_perm_(n - 1, A):
yield hp
def brute_this(self):
'''
generates a tour and adds the shortest distance. instead of generating many permutations,
how do we know that we have explored all permutations??
'''
minimum_distance = (self.init_tour, self.f(self.init_tour)) # initial tour, total, average length
# perms = list(permutations(self.init_tour[1:][:-1]))
        for item in self.heap_perm(self.init_tour[1:][:-1]):
            self.shortest_distance.append(minimum_distance[1])
            perm = list(item)  # copy: heap_perm yields the same list object each time
            new_tour = [self.start_city] + perm + [self.start_city]
            cost = self.f(new_tour)
            self.all_distance.append((perm, cost))
            if minimum_distance[1] > cost: # if new tour cost is lesser than the cost of the old tour
                minimum_distance = (new_tour, cost)
# for i in perms:
# tours.append([self.start_city] + list(i) + [self.start_city])
# tours = [[self.start_city] + i + [self.start_city] for i in perms]
# distances = [self.f(i) for i in tours]
# total_distance, average_distance = zip(*distances)
# lowest = min(enumerate(average_distance), key=itemgetter(1))[0]
return minimum_distance
```
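`BruteTSP.heap_perm` is Heap's algorithm: it yields every ordering of the interior cities, giving (n−1)! candidate tours for the exhaustive search. Because the class itself needs the external `Ncity` object, here is the same recursion lifted out as a free function so it can be tried on its own (a standalone sketch, not part of the repository):
```python
# Standalone sketch of the Heap's-algorithm recursion used by BruteTSP.heap_perm.
def heap_perm(items):
    a = list(items)
    def _gen(n):
        if n == 1:
            yield a
        else:
            for i in range(n - 1):
                yield from _gen(n - 1)
                j = 0 if n % 2 == 1 else i  # swap A[0] when n is odd, A[i] when even
                a[j], a[n - 1] = a[n - 1], a[j]
            yield from _gen(n - 1)
    yield from _gen(len(a))

print([tuple(p) for p in heap_perm([1, 2, 3])])
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
```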
#### File: optimization_algorithms/Discrete/tsp_anneal.py
```python
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import seaborn as sns
import random
# Display all floats rounded off to 1 decimal place
pd.options.display.float_format = '{:,.1f}'.format
# Plot inline in Jupyter notebook
#%matplotlib inline
# Settings throughout the notebook
sns.set()
# Width = 16, Height = 6
DIMS=(16, 6)
import os
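# NOTE: like brute.py above, this module expects an `Ncity` helper object (providing
# n, cities, start_city, generate_initial_tour() and table_distances) to be defined
# or imported by the surrounding project; it is not created here.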
class TSPAnneal:
'''
Pass the max steps you want to take to the annealer function
'''
def __init__(
self,
# maxsteps=500,
# multiplier=1,
# control_t=1,
# acceptrate=0.5,
# lams=0,
# swaps=round((Ncity.n)**0.5),
# explore=30,
accs = [500, 1, 1, 0.5, 0, round((Ncity.n)**0.5), 30]
):
'''
inputs: total number of steps to try, geometric multiplier for annealing schedule
Initialize parameters
output: none
'''
self.cities = Ncity.cities # self.cities needs to be a
self.start_city = Ncity.start_city
self.init_tour = Ncity.generate_initial_tour()
self.Tmax = accs[0]
self.threshold = accs[1] # for geometric scaling
self.interval = list()
self.exploration_space = accs[6]
self.control = accs[2]
self.trig_lams = accs[4]
self.acceptrate = accs[3]
self.swaps = accs[5]
self.distance = lambda x, y: np.sqrt((x[0] - y[0]) ** 2 + (x[1]
- y[1]) ** 2)
self.table_distances = Ncity.table_distances
def f(self, tour):
'''
input: tour (list)
Function that evaluates the cost of a given x1, x2 (euclidean distance)
output: single cost
'''
distances = [self.table_distances[tour[i]][tour[i + 1]] for i in range(len(tour) - 1)]
total_distance = sum(distances)
average_tour_len = total_distance / len(tour)
return (total_distance, average_tour_len)
def acceptance_probability(
self,
cost,
new_cost,
temperature,
):
'''
inputs: old cost, new cost, current temperature
calculate probability of acceptance and return it using the metropolis algorithm
output: probability (0 to 1)
'''
return np.exp(-(new_cost - cost) / temperature)
def swap_random(self, tour):
'''
randomly swaps 2 tours
'''
tour = tour[1:][:-1]
idx = range(Ncity.n - 1)
for i in range(self.swaps):
(i1, i2) = random.sample(idx, 2)
(tour[i1], tour[i2]) = (tour[i2], tour[i1])
tour = [self.start_city] + tour + [self.start_city]
(cost, average_tour_len) = self.f(tour)
return (tour, cost, average_tour_len)
def anneal(self):
'''
inputs: none
function performs annealing and calls random start to kickstart the annealing process. iteratively
calculates the new cost.
output: final cost, final state (list of x1 and x2), all costs (list of costs at every timestep)
'''
# params related to returning the cost and deviation from the optimal objective function
# deviation = list()
# params related to Lam's Annealing Schedule
best_tour = list()
current_tour = list()
best_tour_total = list()
current_tour_total = list()
tours = list()
T_list = list()
acceptrate = self.acceptrate
LamRate = 0
tours.append(self.init_tour)
(costs, average_tour_length) = self.f(self.init_tour)
states = self.init_tour
best_tour_total.append(costs)
best_tour.append(average_tour_length)
for temp_step in range(self.Tmax):
# for each temperature step
fraction = temp_step / float(self.Tmax)
            if self.control == 0 and temp_step > 0:
T = self.threshold * (1 - fraction)
else:
T = 1 - fraction
T_list.append(T)
# exploration space
(new_tour, new_cost, new_average_tour_length) = self.swap_random(states)
if new_tour not in tours:
tours.append(new_tour)
current_tour_total.append(new_cost)
current_tour.append(new_average_tour_length)
if (new_cost < costs) or (self.acceptance_probability(costs,
new_cost, T) >= random.uniform(0, 1)):
(states, costs, average_tour_length) = (new_tour, new_cost, new_average_tour_length)
if self.trig_lams == 1:
acceptrate = 1 / 500 * (499 * acceptrate + 1)
else:
if self.trig_lams == 1:
acceptrate = 1 / 500 * (499 * acceptrate)
# check conditions
if fraction < 0.15:
LamRate = 0.44 + 0.56 * 560 ** (-temp_step
/ (self.Tmax * 0.15))
elif fraction < 0.65:
LamRate = 0.44
else:
LamRate = 0.44 * 440 ** ((-temp_step / self.Tmax
- 0.65) / 0.35)
if LamRate < acceptrate:
T *= 0.99
else:
T *= 1 / 0.999
if best_tour_total[-1] > costs:
best_tour_total.append(costs)
best_tour.append(average_tour_length)
else:
best_tour_total.append(best_tour_total[-1])
best_tour.append(best_tour[-1])
return (best_tour[1:], current_tour, best_tour_total[1:], current_tour_total, T_list, tours)
``` |
{
"source": "joshland/wgmesh",
"score": 2
} |
#### File: wgmesh/wgmesh/endpointdb.py
```python
import os
import re
import sys
import ast
import click
import base64
import loguru
import pprint
import socket
import ipaddress
import nacl.utils
import attr, inspect
import hashlib, uuid
import dns.resolver
from loguru import logger
from ruamel import yaml
from typing import Union
from nacl.public import PrivateKey, Box, PublicKey
from .core import loadkey, keyimport
def nonone(arg):
''' eliminate the None and blanks '''
if arg == None:
return ''
return arg
def validateHostname(value):
if value == None:
return socket.gethostname()
return value
def validateUuid(value):
if value == None:
return str( uuid.uuid4() )
return value
@attr.s
class Endpoint(object):
hostname = attr.ib(default=None, kw_only=True, converter=validateHostname)
uuid = attr.ib(default=None, kw_only=True, converter=validateUuid)
SSK = attr.ib(default='', kw_only=True)
PPK = attr.ib(default='', kw_only=True)
cmdfping = attr.ib(default="/usr/sbin/fping", kw_only=True, converter=str)
private_key_file = attr.ib(default='', kw_only=True, converter=nonone)
public_key_file = attr.ib(default='', kw_only=True, converter=nonone)
interface_public = attr.ib(default='', kw_only=True, converter=nonone)
interface_trust = attr.ib(default='', kw_only=True, converter=nonone)
interface_trust_ip = attr.ib(default='', kw_only=True, converter=nonone)
interface_outbound = attr.ib(default='', kw_only=True, converter=nonone)
def publish(self):
m2 = {attr: str(getattr(self, attr)) for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")}
logger.trace(f'publish dict: {m2}')
del m2['SSK']
del m2['PPK']
return m2
pass
@attr.s
class SiteDetail(object):
locus = attr.ib(default='', kw_only=True, converter=nonone)
public_key = attr.ib(default='', kw_only=True, converter=nonone)
PPK = attr.ib(default='', kw_only=True)
def publish(self):
m2 = {attr: str(getattr(self, attr)) for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")}
logger.trace(f'publish dict: {m2}')
del m2['PPK']
return m2
pass
class HostDB(object):
def __init__(self, filename, **kwargs):
self.filename = filename
self.host = Endpoint(**kwargs.get('host', {}))
self.site = SiteDetail(**kwargs.get('site', {}))
pass
def publish(self):
retval = {
'host': self.host.publish(),
'site': self.site.publish(),
}
logger.trace(f'publish dict: {retval}')
return retval
pass
def load_host_config(domain: str, locus: str, pubkey: str) -> str:
''' Load/Generate local site-base config
opens /etc/wireguard/{locus}.yaml
return
'''
fn = f'/etc/wireguard/{domain}.yaml'
try:
with open(fn) as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.RoundTripLoader )
baseconfig = False
except FileNotFoundError:
baseconfig = True
config = {
'site': {
'locus': locus,
'public_key': pubkey,
},
}
pass
retval = HostDB(fn, **config)
return retval
def save_host_config(config: HostDB):
''' commit hostdb to disk '''
filename = config.filename
data = config.publish()
##leftoff - leave a way to update the file
with open(filename, 'w') as yamlfile:
yamlfile.write( yaml.dump(data, Dumper=yaml.RoundTripDumper) )
pass
pass
def check_update_route_table(rt_id: int, name: str) -> bool:
''' check that rt_table {number} exists in /etc/iproute2/rt_tables '''
rt_id = str(rt_id)
with open('/etc/iproute2/rt_tables', 'r') as rtfile:
content = rtfile.read().split('\n')
pass
decoded = [ x.split('\t') for x in content ]
tables = [ x[0] for x in decoded ]
if rt_id in tables:
idx = tables.index(rt_id)
if decoded[idx][1] == name:
logger.trace(f'Located {rt_id}=>{name} in rt_tables.')
return False
else:
logger.trace(f'Updating name for {rt_id} - {decoded[idx][1]}=>{name}')
decoded[idx][1] = name
pass
else:
logger.trace(f'Adding route table: {rt_id} ({name})')
decoded.insert(-1, (rt_id, name))
pass
content = [ '\t'.join(line) for line in decoded ]
with open('/etc/iproute2/rt_tables', 'w') as rtfile:
rtfile.write("\n".join(content))
pass
return True
def CheckLocalHostConfig(domain: str, locus: str, pubkey: str,
public: str = '', asn: str = '',
trust: str = '', outbound: str = '',
trustip: str = '') -> str:
''' Load/Generate local site-base config
Validate and update the settings.
return
'''
config = load_host_config(domain, locus, pubkey)
if outbound: config.host.interface_outbound = outbound
if public: config.host.interface_public = public
if trustip: config.host.interface_trust_ip = trustip
if trust: config.host.interface_trust = trust
if config.host.private_key_file == '':
config.host.private_key_file = f'/etc/wireguard/{locus}_priv'
pass
if config.host.public_key_file == '':
config.host.public_key_file = f'/etc/wireguard/{locus}_pub'
pass
try:
SSK = loadkey(config.host.private_key_file, PrivateKey)
except FileNotFoundError:
logger.debug(f'Private key does not exist. {config.host.private_key_file}')
SSK = None
pass
try:
PPK = loadkey(config.host.public_key_file, PublicKey)
except FileNotFoundError:
logger.debug(f'Public key does not exist. {config.host.public_key_file}')
PPK = None
pass
#config.host.asn =
config.host.SSK = SSK
config.host.PPK = PPK
config.site.PPK = keyimport(config.site.public_key, PublicKey)
save_host_config(config)
return config
if __name__ == "__main__":
from loguru import logger
    testkey = '<KEY>'
hostdata = load_host_config('test.local.example', 'exampletest', testkey)
save_host_config(hostdata)
``` |
{
"source": "josh-lang/where-cycle",
"score": 3
} |
#### File: src/preparation/extract.py
```python
import io
import os
import requests
import time
import zipfile
import boto3
import geopandas as gpd
import pandas as pd
s3 = boto3.resource('s3')
def get_taxi_zones():
    '''Pull taxi zone shapefile and convert to WGS 84 (EPSG:4326)'''
s3.meta.client.download_file(
'nyc-tlc',
'misc/taxi_zones.zip',
'taxi_zones.zip'
)
taxi_zones = gpd.read_file('zip://taxi_zones.zip') \
.to_crs('EPSG:4326') \
.filter(
[
'OBJECTID',
'zone',
'borough',
'geometry'
],
axis = 1
).rename(
columns = {
'OBJECTID': 'zone_id',
'zone': 'zone_name'
}
)
os.remove('taxi_zones.zip')
return taxi_zones
def get_businesses(**kwargs):
'''For each taxi zone, query Yelp API for businesses closest to centroid'''
ti = kwargs['ti']
centroids = ti.xcom_pull(task_ids = 'calculate_centroids')
api_key = 'Bearer ' + os.environ['YELP_API_KEY']
head = {'Authorization': api_key}
url = 'https://api.yelp.com/v3/businesses/search'
businesses = pd.DataFrame()
for _, row in centroids.iterrows():
query = {
'latitude': row['latitude'],
'longitude': row['longitude'],
'radius': 3000,
'limit': 50,
'sort_by': 'distance'
}
response = requests.get(url, headers = head, params = query)
json = response.json()
retries = 0
while retries <= 10 and 'error' in json:
retries += 1
time.sleep(1)
response = requests.get(url, headers = head, params = query)
json = response.json()
matches = json['businesses']
businesses = businesses.append(matches, ignore_index = True)
return businesses
def unzip_csvs():
'''Iterate over relevant zipped files, unzip, and upload to private S3'''
source = s3.Bucket('tripdata')
for obj in source.objects.all():
key = obj.key
if not key.startswith('201307-201402') and key.endswith('.zip'):
buffer = io.BytesIO(obj.get()['Body'].read())
zipped = zipfile.ZipFile(buffer)
for name in zipped.namelist():
if not name.startswith('_') and name.endswith('.csv'):
s3.meta.client.upload_fileobj(
zipped.open(name),
Bucket = 'jlang-20b-de-ny',
Key = 'citibike/' + name
)
```
#### File: src/spark_reduction/transform.py
```python
from pyspark.sql import SparkSession
from config.geometries import \
TAXI_ZONE_LAT_MIN, TAXI_ZONE_LAT_MAX, \
TAXI_ZONE_LON_MIN, TAXI_ZONE_LON_MAX
spark = SparkSession.builder \
.appName('where-cycle') \
.getOrCreate()
def distill_citibike_stations():
'''Create list of unique Citibike stations across all trip endpoints'''
stations_df = spark.sql(f'''
SELECT
start_id AS station_id,
start_latitude AS latitude,
start_longitude AS longitude
FROM citibike
WHERE
start_latitude BETWEEN
{TAXI_ZONE_LAT_MIN} AND {TAXI_ZONE_LAT_MAX}
AND
start_longitude BETWEEN
{TAXI_ZONE_LON_MIN} AND {TAXI_ZONE_LON_MAX}
GROUP BY 1, 2, 3
UNION
SELECT
end_id AS station_id,
end_latitude AS latitude,
end_longitude AS longitude
FROM citibike
WHERE
end_latitude BETWEEN
{TAXI_ZONE_LAT_MIN} AND {TAXI_ZONE_LAT_MAX}
AND
end_longitude BETWEEN
{TAXI_ZONE_LON_MIN} AND {TAXI_ZONE_LON_MAX}
GROUP BY 1, 2, 3'''.translate({ord(c): ' ' for c in '\n\t'})
)
return stations_df
def aggregate_citibike_visits():
'''Convert Citibike trips to visits and sum by station_id'''
visits_df = spark.sql('''
SELECT
month,
station_id,
SUM(visits) AS visits
FROM (
SELECT
start_month AS month,
start_id AS station_id,
COUNT(*) AS visits
FROM citibike
GROUP BY 1, 2
UNION ALL
SELECT
end_month AS month,
end_id AS station_id,
COUNT(*) AS visits
FROM citibike
GROUP BY 1, 2
)
GROUP BY 1, 2
''')
return visits_df
def aggregate_past_tlc_visits():
'''
Convert past TLC trips to visits,
round lat-lon precision to street level,
and sum by lat-lon
'''
past_df = spark.sql(f'''
SELECT
month,
longitude,
latitude,
SUM(visits) AS visits
FROM (
SELECT
month,
ROUND(pickup_longitude, 3) AS longitude,
ROUND(pickup_latitude, 3) AS latitude,
COUNT(*) AS visits
FROM past
WHERE
pickup_longitude BETWEEN
{TAXI_ZONE_LON_MIN} AND {TAXI_ZONE_LON_MAX}
AND
pickup_latitude BETWEEN
{TAXI_ZONE_LAT_MIN} AND {TAXI_ZONE_LAT_MAX}
GROUP BY 1, 2, 3
UNION ALL
SELECT
month,
ROUND(dropoff_longitude, 3) AS longitude,
ROUND(dropoff_latitude, 3) AS latitude,
COUNT(*) AS visits
FROM past
WHERE
dropoff_longitude BETWEEN
{TAXI_ZONE_LON_MIN} AND {TAXI_ZONE_LON_MAX}
AND
dropoff_latitude BETWEEN
{TAXI_ZONE_LAT_MIN} AND {TAXI_ZONE_LAT_MAX}
GROUP BY 1, 2, 3
)
GROUP BY 1, 2, 3'''.translate({ord(c): ' ' for c in '\n\t'})
)
return past_df
def aggregate_modern_tlc_visits():
'''
Convert modern TLC trips to visits,
ignoring unknown taxi zone IDs,
and sum by taxi zone ID
'''
modern_df = spark.sql('''
SELECT
month,
zone_id,
SUM(visits) AS visits
FROM (
SELECT
month,
locationID AS zone_id,
COUNT(*) AS visits
FROM fhv_15_16
WHERE locationID BETWEEN 1 AND 263
GROUP BY 1, 2
UNION ALL
SELECT
month,
PULocationID AS zone_id,
COUNT(*) as visits
FROM modern
WHERE PUlocationID BETWEEN 1 AND 263
GROUP BY 1, 2
UNION ALL
SELECT
month,
DOLocationID AS zone_id,
COUNT(*) as visits
FROM modern
WHERE DOlocationID BETWEEN 1 AND 263
GROUP BY 1, 2
)
GROUP BY 1, 2
''')
return modern_df
``` |
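These queries assume that the trip data has already been registered on the module's SparkSession as temporary views named `citibike`, `past`, `modern` and `fhv_15_16`. A minimal sketch of wiring up one of them (the column values below are illustrative placeholders, not project data):
```python
# Standalone sketch: register a tiny `citibike` temp view so the functions above can run.
from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.appName('where-cycle').getOrCreate()
spark.createDataFrame([
    Row(start_id=1, start_latitude=40.7, start_longitude=-74.0, start_month='2019-06',
        end_id=2, end_latitude=40.8, end_longitude=-73.9, end_month='2019-06'),
]).createOrReplaceTempView('citibike')
# distill_citibike_stations() and aggregate_citibike_visits() can now be called
```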
{
"source": "joshlapham/py-misc-scripts",
"score": 3
} |
#### File: joshlapham/py-misc-scripts/helpers.py
```python
from subprocess import call, STDOUT
from os import devnull
from datetime import datetime
from time import strftime
def tprint(message):
""" Prints a given `message` with a timestamp prepended. """
print strftime("%c") + " - %s" % message
def file_timestamp():
""" Returns a timestamp for use in a filename. """
return datetime.now().strftime("%Y-%m-%d-%H%M%S")
def call_cli(cli_commands):
""" Calls command-line commands but supresses output. """
try:
FNULL = open(devnull, 'w')
call(cli_commands, stdout=FNULL, stderr=STDOUT)
except Exception as e:
raise Exception(e)
```
#### File: joshlapham/py-misc-scripts/make_vlc_playlist.py
```python
from glob import glob
from os import path
from subprocess import Popen, PIPE
from argparse import ArgumentParser
from logger import Logger
VLC_EXE = '/Applications/VLC.app/Contents/MacOS/VLC'
logger = Logger()
def _generate_playlist(results, playlist_path):
results_ordered = list(enumerate(results, start=1))
with open(playlist_path, 'w') as playlist_file:
playlist_file.write("[playlist]\n")
playlist_file.write("NumberOfEntries=%s\n" % str(len(results)))
for video in results_ordered:
number = video[0]
filepath = video[1]
playlist_file.write("File%s=%s\n" % (str(number), filepath))
playlist_file.write("Title%s=%s\n" % (str(number), filepath))
playlist_file.close()
def _open_in_vlc(playlist_path):
cli = [
'%s' % VLC_EXE,
'%s' % playlist_path
]
p = Popen(cli, shell=False, stdout=PIPE, stderr=PIPE)
return p
def _search_video_files(parent_dir):
    # '[...]' in a glob pattern is a character class, not an extension list, so glob per extension
    results = []
    for ext in ('.mp4', '.flv', '.mkv'):
        results.extend(glob(path.join(parent_dir, '*/*' + ext)))
    return results
if __name__ == '__main__':
""" Creates a playlist of all video files from a given directory (including all subdirectories) and opens in VLC for playback. """
args = ArgumentParser()
args.add_argument("--parent-dir", required=True)
args.add_argument("--playlist-path", required=True)
args = args.parse_args()
vlc_process = None
try:
results = _search_video_files(args.parent_dir)
logger.info("Found %s results" % str(len(results)))
_generate_playlist(results, args.playlist_path)
logger.info("Generated playlist")
logger.info("Opening playlist in VLC")
vlc_process = _open_in_vlc(args.playlist_path)
stdout, stderr = vlc_process.communicate()
except StandardError as e:
logger.error(e)
vlc_process.kill()
exit()
except KeyboardInterrupt:
vlc_process.kill()
logger.info("User aborted script execution")
exit()
``` |
{
"source": "JoshLarouche/BattleSnake2019",
"score": 4
} |
#### File: BattleSnake2019/app/aStar.py
```python
import numpy as np
import math
import queue
#finds the direction to go after the aStar algorithm finds a path
def backPedal(cameFrom, start, goal):
path = []
curr = goal
while curr[0] != -1:
path.append(tuple(curr))
curr = cameFrom[curr[0]][curr[1]]
npPath = np.array(path[-2])
npStart = np.array(start)
direction = npPath - npStart
print(path)
return direction
#finds the shortest paths to a goal node from a start node
def aStar(board, start, goal): #combine with bfs for efficiency
#set of checked nodes on the board
closedSet = np.zeros(board.shape, dtype=int) #possibly change to np.copy for consistency
for (x,y), value in np.ndenumerate(board):
if value == -1:
closedSet[x][y] = 1
closedSet[start[0]][start[1]] = 0
closedSet[goal[0]][goal[1]] = 0
#priority check for the next node to check
openSet = queue.PriorityQueue()
#node we came from
cameFrom = np.zeros((board.shape[0], board.shape[1], 2), dtype=int)
cameFrom[start[0]][start[1]] = (-1, -1)
#score the the weight it takes to travel to a node
gScore = np.full(board.shape, -1, dtype=int)
gScore[start[0]][start[1]] = 0
#heuristic for the weight it will take to get to the goal
fScore = np.full(board.shape, -1, dtype=int)
fScore[start[0]][start[1]] = math.fabs(goal[0] - start[0]) + math.fabs(goal[1] - start[1])
openSet.put((fScore[start[0]][start[1]], start))
#checking for the goal node
while not openSet.empty():
current = openSet.get()[1]
if current == goal:
return backPedal(cameFrom, start, goal)
if closedSet[current[0]][current[1]] == 1:
continue
closedSet[current[0]][current[1]] = 1
for x in range(0, 4):
neighbour = (-1, -1)
#left
if x is 0:
if current[0] == 0:
continue
neighbour = (current[0] - 1, current[1])
#up
if x is 1:
if current[1] == 0:
continue
neighbour = (current[0], current[1] - 1)
#right
if x is 2:
if current[0] == board.shape[0] - 1:
continue
neighbour = (current[0] + 1, current[1])
#down
if x is 3:
if current[1] == board.shape[1] - 1:
continue
neighbour = (current[0], current[1] + 1)
#if coordinate has already been checked
if closedSet[neighbour[0]][neighbour[1]] == 1:
continue
#assigning gScore to neighbour of current node
tentativeGScore = gScore[current[0]][current[1]] + 1
neighbourGScore = gScore[neighbour[0]][neighbour[1]]
#adding neighbour to openSet if shortest path
if tentativeGScore < neighbourGScore or neighbourGScore == -1:
gScore[neighbour[0]][neighbour[1]] = tentativeGScore
cameFrom[neighbour[0]][neighbour[1]] = current
fScore[neighbour[0]][neighbour[1]] = gScore[neighbour[0]][neighbour[1]] + math.fabs(goal[0] - neighbour[0]) + math.fabs(goal[1] - neighbour[1])
openSet.put((fScore[neighbour[0]][neighbour[1]], neighbour))
#error code for couldn't find path to goal
return (2, 2)
#tester main
def main():
board = np.zeros((11, 11), dtype=int)
goal = (8, 3)
start = (9, 0)
board[9, 1] = -1
board[8, 3] = 3
print(board)
path = aStar(board, start, goal)
print(path)
if __name__ == '__main__':
main()
```
#### File: BattleSnake2019/app/bfs.py
```python
from collections import deque
import numpy as np
'''
OBJECTIVE LEGEND:
0 = looking for food
1 = count the available space
2 = check if we can see our tail (unused)
'''
#breadth first search floodfill to find a goal
def bfs(board, start, objective, tail):
count = 1
queue = deque()
queue.append(start)
closed = np.copy(board)
#priority queue for bfs loop
while queue:
count = count + 1
current = queue.popleft()
closed[current[0]][current[1]] = -1
#if found food
if board[current[0]][current[1]] == 2 and objective == 0:
return current
#if found tail
if current[0] == tail[0] and current[1] == tail[1] and objective == 2:
return True
#move through board for algorithm
for x in range(0, 4):
# left
if x is 0:
if current[0] == 0:
continue
neighbour = (current[0] - 1, current[1])
# up
if x is 1:
if current[1] == 0:
continue
neighbour = (current[0], current[1] - 1)
# right
if x is 2:
if current[0] == board.shape[0] - 1:
continue
neighbour = (current[0] + 1, current[1])
# down
if x is 3:
if current[1] == board.shape[1] - 1:
continue
neighbour = (current[0], current[1] + 1)
#if not wall
if closed[neighbour[0]][neighbour[1]] != -1:
queue.append(neighbour)
closed[neighbour[0]][neighbour[1]] = -1
#return count of available space
if objective == 1:
return count
return False
#tester main
if __name__ == '__main__':
board = np.zeros((19, 19), dtype=int)
board[10][10] = 2
    test = bfs(board, (3, 3), 0, (0, 0))  # objective 0: look for food; tail is only used for objective 2
#print(test)
``` |
{
"source": "joshleeb/8005Simulator",
"score": 3
} |
#### File: 8005Simulator/simulator/emulator.py
```python
class Emulator:
def __init__(self, iP=0, r0=0, r1=0):
self.running = False
self.address = iP # instruction pointer
self.register0 = r0 # register 0
self.register1 = r1 # register 1
self.breakpoints = [] # address locations of breakpoints
self.memory = [0 for i in range(255)]
self.registerSize = 256
self.code = [
self._halt, # 0
self._inc_register0, # 1
self._dec_register0, # 2
self._inc_register1, # 3
self._dec_register1, # 4
self._register0_add_register1, # 5
self._register1_add_register0, # 6
self._print_register0_uint, # 7
self._addr_if_register0_zero, # 8 (addr)
self._addr_if_register0_not_zero, # 9 (addr)
self._addr_to_register0, # 10 (addr)
self._addr_to_register1, # 11 (addr)
self._swap_register0_addr, # 12 (addr)
self._swap_register1_addr, # 13 (addr)
self._ring_bell, # 14
self._print_register0_char # 15
]
def load_instructions(self, seq):
"""Loads instructions into the emulator's memory."""
mem_index = 0
for i in range(len(seq)):
if seq[i] == '|':
self.breakpoints.append(i)
else:
self.memory[mem_index] = int(seq[i])
mem_index += 1
def execute(self):
"""Executes a microprocessor instruction."""
self.running = True
while self.running:
code = self.memory[self.address]
if self.address in self.breakpoints:
self._execute_breakpoint()
if 8 <= code <= 13:
self.code[code](self.memory[self.address + 1])
self._next_address(increment=2)
else:
self.code[code]()
self._next_address()
def _execute_breakpoint(self):
print('-- breakpoint -------------')
print('IP\tIS\tR0\tR1')
print('{}\t{}\t{}\t{}'.format(self.address, self.memory[self.address],
self.register0, self.register1))
input('Press enter to continue')
print('\x1b[1A\r---------------------------')
def _next_address(self, increment=1):
self.address += increment
self.address %= 255
def _halt(self):
"""Halts the emulator."""
self.running = False
def _inc_register0(self):
"""Increment register 0 by 1."""
self.register0 = (self.register0 + 1) % self.registerSize
def _dec_register0(self):
"""Decrements register 0 by 1."""
self.register0 = (self.register0 - 1) % self.registerSize
def _inc_register1(self):
"""Increments register 1 by 1."""
self.register1 = (self.register1 + 1) % self.registerSize
def _dec_register1(self):
"""Decrements register 1 by 1."""
self.register1 = (self.register1 - 1) % self.registerSize
def _register0_add_register1(self):
"""Increments register 0 by the value in register 1."""
self.register0 = (self.register0 + self.register1) % self.registerSize
def _register1_add_register0(self):
"""Increments register 1 by the value in register 0."""
self.register1 = (self.register1 + self.register0) % self.registerSize
def _print_register0_uint(self):
"""Prints the value in register 0 as an integer."""
print(self.register0)
def _addr_if_register0_zero(self, addr):
"""Jumps to address if register 0 is 0."""
if self.register0 is 0:
self.address = addr
def _addr_if_register0_not_zero(self, addr):
"""Jumps to address if register 1 is 0."""
if self.register0 is not 0:
self.address = addr
def _addr_to_register0(self, addr):
"""Reads the value at the address into register 0."""
self.register0 = self.memory[addr]
def _addr_to_register1(self, addr):
"""Reads the value at the address into register 1."""
self.register1 = self.memory[addr]
def _swap_register0_addr(self, addr):
"""Swaps the values in the address and register 0."""
temp = self.memory[addr]
self.memory[addr] = self.register0
self.register0 = temp
def _swap_register1_addr(self, addr):
"""Swaps the values in the address and register 1."""
temp = self.memory[addr]
self.memory[addr] = self.register1
self.register1 = temp
def _ring_bell(self):
"""Rings the bell."""
pass
def _print_register0_char(self):
"""Prints the value in register 0 as an ASCII character."""
print(chr(self.register0))
``` |
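A minimal sketch of driving the emulator: opcode 1 increments register 0, opcode 7 prints it as an unsigned integer, and opcode 0 halts (the instruction sequence below is illustrative, not from the repository):
```python
# Standalone sketch: increment R0 three times, print it, then halt.
em = Emulator()
em.load_instructions(['1', '1', '1', '7', '0'])
em.execute()  # prints 3
```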
{
"source": "joshleeb/PerceptronVis",
"score": 4
} |
#### File: PerceptronVis/percept/plot.py
```python
import matplotlib.lines as lines
import matplotlib.pyplot as plt
COLOR_CLASSIFICATIONS = [
'black', # Unclassified
'blue', # Classified True (1)
'red' # Classified False (0)
]
def generate_line(ax, p0, p1, color='black', style='-'):
'''
Generates a line between points p0 and p1 which extends to be the width of
the plot.
'''
x0, y0 = p0
x1, y1 = p1
gradient = (y0 - y1) / (x0 - x1)
intercept = y1 - gradient * x1
x = ax.get_xlim()
data_y = [x[0] * gradient + intercept, x[1] * gradient + intercept]
return lines.Line2D(x, data_y, color=color, linestyle=style)
def get_boundary_plot_fn(weights):
'''
Gets the function used to represent and plot the line representative by the
perceptron's weights. The equation is: f(x) = -(w1/w2)x - w0/w2.
'''
def fn(x):
return -weights[1] / weights[2] * x - weights[0] / weights[2]
return fn
def get_point_color(point, colors):
'''
    Gets the color of the point to be displayed.
'''
if point.classification is None:
return colors[0]
return colors[1] if point.classification else colors[2]
def generate(title, class_boundary, weights, points, bounds):
'''
    Generates a scatter plot of points with the actual classification boundary
and the perceptron's classification boundary drawn in.
'''
boundary_fn = get_boundary_plot_fn(weights)
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_xlim(bounds[0])
ax.set_ylim(bounds[1])
ax.set_title(title)
ax.add_line(generate_line(
ax, class_boundary[0], class_boundary[1], 'cyan', '--'
))
ax.add_line(generate_line(ax, (0, boundary_fn(0)), (1, boundary_fn(1))))
ax.scatter(
[pt.x for pt in points], [pt.y for pt in points],
c=[get_point_color(pt, COLOR_CLASSIFICATIONS) for pt in points], s=30
)
return fig
``` |
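`generate` only needs point objects exposing `.x`, `.y` and `.classification`, a three-element weight vector `[w0, w1, w2]`, the true boundary as a pair of points, and `(xlim, ylim)` bounds. A sketch of a call, where the `Point` namedtuple is a stand-in assumption for whatever point class the rest of the project defines:
```python
# Standalone sketch with illustrative points, weights and boundary.
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y', 'classification'])
points = [Point(0.2, 0.8, True), Point(0.7, 0.3, False), Point(0.5, 0.5, None)]
fig = generate(
    title='Perceptron boundary (sketch)',
    class_boundary=((0.0, 0.0), (1.0, 1.0)),  # hypothetical true boundary y = x
    weights=[-0.5, 1.0, 1.0],                 # hypothetical perceptron weights
    points=points,
    bounds=((0.0, 1.0), (0.0, 1.0)),
)
fig.savefig('perceptron_sketch.png')
```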
{
"source": "joshleejosh/censusbuddy",
"score": 4
} |
#### File: censusbuddy/censusbuddy/census.py
```python
import os
import json
import collections
import re
import requests
import pandas as pd
from .util import check_response
CENSUS_BASE_URL = 'https://api.census.gov/data.json'
class CensusQuery(object):
"""
Download data from the Census API.
Requires an API key to run queries; see: http://api.census.gov/data/key_signup.html
Usage looks something like:
q = CensusQuery('cachedir', 'ACSProfile5Y2015', api_key)
results = q.query(['DP04_0045E', 'DP04_0045M'],
{'place':'*'},
{'state':'06', 'county':'037'})
"""
def __init__(self, cache_dir, dataset_name, api_key, verbose=False):
self.cache_dir = cache_dir
self.dataset_name = dataset_name
self.api_key = api_key
self.verbose = verbose
self.level = ''
self.inclause = ''
self.dataset = None
self.vars = None
if not self.cache_dir:
raise ValueError('Must specify valid cache_dir')
elif not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
if self.verbose:
print('Create dir [{}]'.format(self.cache_dir))
elif not os.path.isdir(self.cache_dir):
raise OSError('Invalid cache dir [{}]'.format(self.cache_dir))
self.query_cache = _QueryCache(self.cache_dir, verbose=self.verbose)
self._load_cache()
if not self.dataset:
self._fetch_dataset()
if not self.vars:
self._fetch_vars()
def search_vars(self, pat):
"""
Find census variables that match a pattern.
Args:
pat: regular expression
Returns:
list of strings
"""
rev = re.compile(pat)
return [k for k in self.vars.keys() if rev.search(k)]
def get_vars(self, ids=[]):
"""
Get info for the given variables.
Args:
            ids: list of variable names
Returns:
dict of variable names to info dicts
"""
return {
k:self.vars[k]
for k in self.vars.keys()
if k in ids
}
# https://api.census.gov/data/2015/acs5/examples.html
def query(self, get_clause=[], for_clause={}, in_clause={}, parameters={}, cache=True):
"""
Query the Census API.
Args:
get_clause (list): names of variables to fetch.
for_clause (dict): geo filter criteria.
in_clause (dict): specifiers for the for clause.
parameters (dict): other parameters.
Returns:
DataFrame
Example:
cendf = cenquery.query(['DP05_0001E', 'DP05_0001M'],
{'place':'*'},
{'state':'06'})
"""
# transform predicates to formatted strings
if for_clause and in_clause:
if not self.validate_predicate(for_clause, in_clause):
return pd.DataFrame()
_for = ''
if for_clause:
_for = ' '.join(
'{}:{}'.format(k, v)
for k, v in list(for_clause.items()))
_in = ''
if in_clause:
_in = ' '.join(
'{}:{}'.format(k, v)
for k, v in list(in_clause.items()))
# make sure we always get name and geoid (if appropriate)
if 'GEOID' not in get_clause and self.get_vars(['GEOID']):
get_clause.append('GEOID')
if 'NAME' not in get_clause and self.get_vars(['NAME']):
get_clause.append('NAME')
_get = ','.join(sorted(get_clause))
# build the parameter set
parms = {}
parms.update(parameters)
if _get:
parms['get'] = _get
if _for:
parms['for'] = _for
if _in:
parms['in'] = _in
parms['key'] = self.api_key
if self.verbose:
print('\n'.join('\t{} [{}]'.format(k, parms[k])
for k in sorted(parms.keys())
if k != 'key'))
url = self.dataset['distribution'][0]['accessURL']
# check the cache
if cache:
cachekey = self.query_cache.make_key(url, parms)
df, cachefn = self.query_cache.load(cachekey)
if df is not None:
if self.verbose:
print('Query results from cache [{}] [{}]'.format(cachefn, df.shape))
return df
# run the query already!
resp = requests.get(url, parms)
check_response(resp)
if self.verbose:
print('Query request ok')
# transform output to a DataFrame
ja = resp.json()
df = pd.DataFrame(ja[1:], columns=ja[0])
# convert columns to numeric where indicated by the variable spec
varlist = self.get_vars(get_clause)
for vid, var in varlist.items():
if 'predicateType' in var and var['predicateType'] == 'int':
try:
df[vid] = pd.to_numeric(df[vid])
except Exception as err:
if self.verbose:
print('Can\'t convert column [{}] to numeric: [{}]'.format(vid, err))
if cache:
self.query_cache.save(cachekey, df)
return df
def validate_predicate(self, for_clause, in_clause):
"""
Make sure that the given in_clause is valid for the for_clause.
Returns:
bool
"""
resp = requests.get(self.dataset['c_geographyLink'])
check_response(resp)
ins = in_clause.keys()
db = resp.json()['fips']
for fori in for_clause.keys():
# Gather requirement options for the 'for' clause.
reqs = [
'requires' in rec and rec['requires'] or []
for rec in db
if rec['name'] == fori
]
# Check each combination of requirements; if any of them
# match our "in" clause, we're ok.
forok = False
for j in reqs:
if sorted(j) == sorted(ins):
forok = True
break
if not forok:
print('ERROR: for clause [{}] won\'t work with in clause [{}]'.format(for_clause, in_clause))
print(' Try one of these combinations for the in clause:')
for j in reqs:
print(' {}'.format(', '.join(j)))
return False
return True
# ------------------------------------------------------
def _load_cache(self):
fn = os.path.join(self.cache_dir, '%s_dataset.json'%self.dataset_name)
if os.path.exists(fn):
with open(fn) as fp:
self.dataset = json.load(fp)
if self.dataset and self.verbose:
print('Dataset from cache [{}] [{}]'.format(fn, self.dataset['title']))
fn = os.path.join(self.cache_dir, '%s_vars.json'%self.dataset_name)
if os.path.exists(fn):
with open(fn) as fp:
self.vars = json.load(fp)
if self.vars and self.verbose:
print('Vars from cache [{}] [{}]'.format(fn, len(self.vars)))
def _fetch_dataset(self):
resp = requests.get(CENSUS_BASE_URL)
check_response(resp)
self.dataset = [
d for d in resp.json()['dataset']
if self.dataset_name in d['identifier']
][0]
if self.verbose:
print('Query dataset ok [{}]'.format(self.dataset['title']))
fn = os.path.join(self.cache_dir, '%s_dataset.json'%self.dataset_name)
with open(fn, 'w') as fp:
json.dump(self.dataset, fp, indent=1)
if self.verbose:
print('Saved dataset to cache [{}]'.format(fn))
def _fetch_vars(self):
url = self.dataset['c_variablesLink']
resp = requests.get(url)
check_response(resp)
j = resp.json()
self.vars = j['variables']
if self.verbose:
print('Query vars ok [{}]'.format(len(self.vars)))
fn = os.path.join(self.cache_dir, '%s_vars.json'%self.dataset_name)
with open(fn, 'w') as fp:
json.dump(self.vars, fp, indent=1)
if self.verbose:
print('Saved vars to cache [{}]'.format(fn))
class _QueryCache(object):
"""
Manage the cache of previous query results.
"""
def __init__(self, cache_dir, verbose=False):
self.verbose = verbose
self.cache_dir = cache_dir
self.index = {}
self.index_fn = os.path.join(self.cache_dir, 'query_index.json')
if os.path.exists(self.index_fn):
with open(self.index_fn) as fp:
self.index = json.load(fp)
if self.index and self.verbose:
print('Query index from cache [{}] [{}]'.format(self.index_fn, len(self.index)))
self.last_fni = 1001
if self.index:
self.last_fni = max(self.index.values())
def make_key(self, url, parms):
"""
Make an index key from a requests url+parameters.
"""
# sort keys to ensure consistency
# and be sure not to include the api key!
p2 = collections.OrderedDict([(k, parms[k]) for k in sorted(parms.keys()) if k != 'key'])
req = requests.Request(method='GET', url=url, params=p2)
pr = req.prepare()
return pr.url
def _itofn(self, i):
return os.path.join(self.cache_dir, 'qc{:09d}.csv'.format(i))
def _fntoi(self, fn):
bn = os.path.basename(fn)
i = re.sub(r'\D', '', bn)
return int(i)
def load(self, key):
"""
Args:
            key: a query url produced by make_key()
Returns:
DataFrame, string: the cached data and the filename it was stored
at. If the cache misses, DataFrame will be null.
"""
if key not in self.index:
return None, ''
fni = self.index[key]
fn = self._itofn(fni)
if not os.path.exists(fn):
if self.verbose:
print('WARNING: Query index cache file [{}] missing for key [{}]'.format(fn, key))
del self.index[key]
return None, fn
df = pd.read_csv(fn)
return df, fn
def save(self, key, df):
"""
Args:
            key: a query url produced by make_key()
df: a DataFrame to be saved
"""
fni = self.last_fni + 1
if key in self.index:
# overwrite existing cache entry
fni = self.index[key]
fn = self._itofn(fni)
df.to_csv(fn)
self.index[key] = fni
self.last_fni = max(self.last_fni, fni)
with open(self.index_fn, 'w') as fp:
json.dump(self.index, fp, indent=1)
if self.verbose:
print('Query results cached to [{}]'.format(fn))
``` |
{
"source": "joshleejosh/tmplr",
"score": 2
} |
#### File: tmplr/tmplr/template.py
```python
from __future__ import unicode_literals
from builtins import open
import os
import cgi
import datetime
import re
from . import consts, entry
from .util import d2s_rfc3339, d2s
RE_STRIP = re.compile(r'<[^>]*>')
RE_TEMPLATE_TAG = re.compile(r'\<@([^@]+)@\>')
G_TEMPLATES = {}
def setup():
"""
Load templates for later access.
"""
G_TEMPLATES.clear()
for fn in os.listdir(consts.TEMPLATEDIR):
tm = _read_template(os.path.join(consts.TEMPLATEDIR, fn))
G_TEMPLATES[tm['id']] = tm
# ############################################################# #
def _linkify_tag(tag):
return '<a href="tag/%s.html">%s</a>'%(tag, tag)
def _format_time(dt, fmt):
dolower = doupper = False
if fmt.find('%!l') != -1:
dolower = True
fmt = fmt.replace('%!l', '')
if fmt.find('%!u') != -1:
doupper = True
fmt = fmt.replace('%!u', '')
rv = datetime.datetime.strftime(dt, fmt)
if dolower:
rv = rv.lower()
if doupper:
rv = rv.upper()
return rv
def _run_template_tag(key, ent):
out = ''
if key.endswith('-stripped'):
nk = key[:-len('-stripped')]
out = RE_STRIP.sub('', ent[nk])
elif key.endswith('-escaped'):
nk = key[:-len('-escaped')]
out = cgi.escape(ent[nk])
elif key.endswith('-rfc3339'):
nk = key[:-len('-rfc3339')]
out = d2s_rfc3339(ent[nk])
elif key.find('-ftime:') != -1:
nk = key[:key.find('-ftime:')]
fmt = key[len(nk)+len('-ftime:'):]
out = _format_time(ent[nk], fmt)
elif key == 'date' or key == 'siteTimestamp':
out = d2s(ent[key])
elif key == 'tags':
out = ', '.join(map(_linkify_tag, ent[key]))
else:
out = ent[key]
return out
def run_template_entry(tk, en):
"""
Process a template on an entry.
"""
tm = G_TEMPLATES[tk]
s = tm['template']
for i in RE_TEMPLATE_TAG.findall(s):
nv = _run_template_tag(i, en)
s = re.sub(r'\<@' + i + r'@\>', nv, s)
return s
def run_template_loop(tk, baseent, entries, numtodo=-1):
"""
Process a template on a collection of entries.
"""
ekeys = entry.sorted_entry_keys(entries)
if numtodo == -1:
numtodo = len(ekeys)
tm = G_TEMPLATES[tk]
s = tm['template']
for i in RE_TEMPLATE_TAG.findall(s):
nv = ''
if i.startswith('loopentries-'):
k = i[len('loopentries-'):]
for key in ekeys[:numtodo]:
nv += run_template_entry(k, entries[key]) + '\n'
else:
nv = _run_template_tag(i, baseent)
s = re.sub(r'\<@' + i + r'@\>', nv, s)
return s
def _read_template(fn):
s = ''
with open(fn, encoding='utf-8') as fp:
s = fp.read()
tail = os.path.split(fn)[-1]
return {
'id': tail,
'template': s,
}
``` |
{
"source": "joshlemon/plaso",
"score": 2
} |
#### File: plaso/output/shared_json.py
```python
from __future__ import unicode_literals
import abc
import json
from plaso.lib import errors
from plaso.output import interface
from plaso.serializer import json_serializer
class SharedJSONOutputModule(interface.LinearOutputModule):
"""Shared functionality for a JSON output module."""
_JSON_SERIALIZER = json_serializer.JSONAttributeContainerSerializer
def _WriteSerialized(self, event, event_data, event_tag):
"""Writes an event, event data and event tag to serialized form.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_tag (EventTag): event tag.
Returns:
str: A JSON string containing the serialized form.
"""
json_dict = self._WriteSerializedDict(event, event_data, event_tag)
return json.dumps(json_dict, sort_keys=True)
def _WriteSerializedDict(self, event, event_data, event_tag):
"""Writes an event, event data and event tag to serialized form.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_tag (EventTag): event tag.
Returns:
dict[str, object]: JSON serialized objects.
"""
event_data_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event_data)
del event_data_json_dict['__container_type__']
del event_data_json_dict['__type__']
inode = event_data_json_dict.get('inode', None)
if inode is None:
event_data_json_dict['inode'] = 0
try:
message, _ = self._output_mediator.GetFormattedMessages(event_data)
event_data_json_dict['message'] = message
except errors.WrongFormatter:
pass
event_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)
event_json_dict['__container_type__'] = 'event'
event_json_dict.update(event_data_json_dict)
if event_tag:
event_tag_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event_tag)
event_json_dict['tag'] = event_tag_json_dict
return event_json_dict
@abc.abstractmethod
def WriteEventBody(self, event, event_data, event_tag):
"""Writes event values to the output.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_tag (EventTag): event tag.
"""
```
#### File: plaso/parsers/ntfs.py
```python
from __future__ import unicode_literals
import uuid
import pyfsntfs # pylint: disable=wrong-import-order
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfdatetime import uuid_time as dfdatetime_uuid_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.containers import windows_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import dtfabric_parser
from plaso.parsers import interface
from plaso.parsers import manager
class NTFSFileStatEventData(events.EventData):
"""NTFS file system stat event data.
Attributes:
attribute_type (int): attribute type for example "0x00000030", which
represents "$FILE_NAME".
file_attribute_flags (int): NTFS file attribute flags.
file_reference (int): NTFS file reference.
file_system_type (str): file system type.
is_allocated (bool): True if the MFT entry is allocated (marked as in use).
name (str): name associated with the stat event, for example that of
a $FILE_NAME attribute or None if not available.
parent_file_reference (int): NTFS file reference of the parent.
path_hints (list[str]): hints about the full path of the file.
"""
DATA_TYPE = 'fs:stat:ntfs'
def __init__(self):
"""Initializes event data."""
super(NTFSFileStatEventData, self).__init__(data_type=self.DATA_TYPE)
self.attribute_type = None
self.file_attribute_flags = None
self.file_reference = None
self.file_system_type = 'NTFS'
self.is_allocated = None
self.name = None
self.parent_file_reference = None
self.path_hints = None
class NTFSUSNChangeEventData(events.EventData):
"""NTFS USN change event data.
Attributes:
file_attribute_flags (int): NTFS file attribute flags.
filename (str): name of the file associated with the event.
file_reference (int): NTFS file reference.
file_system_type (str): file system type.
parent_file_reference (int): NTFS file reference of the parent.
update_reason_flags (int): update reason flags.
update_sequence_number (int): update sequence number.
update_source_flags (int): update source flags.
"""
DATA_TYPE = 'fs:ntfs:usn_change'
def __init__(self):
"""Initializes event data."""
super(NTFSUSNChangeEventData, self).__init__(data_type=self.DATA_TYPE)
self.file_attribute_flags = None
self.filename = None
self.file_reference = None
self.parent_file_reference = None
self.update_reason_flags = None
self.update_sequence_number = None
self.update_source_flags = None
class NTFSMFTParser(interface.FileObjectParser):
"""Parses a NTFS $MFT metadata file."""
_INITIAL_FILE_OFFSET = None
NAME = 'mft'
DESCRIPTION = 'Parser for NTFS $MFT metadata files.'
_MFT_ATTRIBUTE_STANDARD_INFORMATION = 0x00000010
_MFT_ATTRIBUTE_FILE_NAME = 0x00000030
_MFT_ATTRIBUTE_OBJECT_ID = 0x00000040
_NAMESPACE_DOS = 2
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'BAAD', offset=0)
format_specification.AddNewSignature(b'FILE', offset=0)
return format_specification
def _GetDateTime(self, filetime):
"""Retrieves the date and time from a FILETIME timestamp.
Args:
filetime (int): FILETIME timestamp.
Returns:
dfdatetime.DateTimeValues: date and time.
"""
if filetime == 0:
return dfdatetime_semantic_time.SemanticTime('Not set')
return dfdatetime_filetime.Filetime(timestamp=filetime)
def _ParseDistributedTrackingIdentifier(
self, parser_mediator, uuid_string, origin):
"""Extracts data from a Distributed Tracking identifier.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
uuid_string (str): UUID string of the Distributed Tracking identifier.
origin (str): origin of the event (event source).
"""
uuid_object = uuid.UUID(uuid_string)
if uuid_object.version == 1:
event_data = windows_events.WindowsDistributedLinkTrackingEventData(
uuid_object, origin)
date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseFileStatAttribute(
self, parser_mediator, mft_entry, mft_attribute, path_hints):
"""Extract data from a NFTS $STANDARD_INFORMATION or $FILE_NAME attribute.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
mft_attribute (pyfsntfs.attribute): MFT attribute.
path_hints (list[str]): hints about the full path of the file.
"""
event_data = NTFSFileStatEventData()
event_data.attribute_type = mft_attribute.attribute_type
event_data.file_reference = mft_entry.file_reference
event_data.is_allocated = mft_entry.is_allocated()
event_data.path_hints = path_hints
if mft_attribute.attribute_type == self._MFT_ATTRIBUTE_FILE_NAME:
event_data.file_attribute_flags = mft_attribute.file_attribute_flags
event_data.name = mft_attribute.name
event_data.parent_file_reference = mft_attribute.parent_file_reference
try:
creation_time = mft_attribute.get_creation_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the creation timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
creation_time = None
if creation_time is not None:
date_time = self._GetDateTime(creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
modification_time = mft_attribute.get_modification_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the modification timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
modification_time = None
if modification_time is not None:
date_time = self._GetDateTime(modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
access_time = mft_attribute.get_access_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the access timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
              mft_attribute.attribute_type, exception))
access_time = None
if access_time is not None:
date_time = self._GetDateTime(access_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
entry_modification_time = (
mft_attribute.get_entry_modification_time_as_integer())
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the entry modification timestamp from MFT '
'attribute: 0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
entry_modification_time = None
if entry_modification_time is not None:
date_time = self._GetDateTime(entry_modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseObjectIDAttribute(
self, parser_mediator, mft_entry, mft_attribute):
"""Extract data from a NFTS $OBJECT_ID attribute.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
mft_attribute (pyfsntfs.attribute): MFT attribute.
"""
display_name = '$MFT: {0:d}-{1:d}'.format(
mft_entry.file_reference & 0xffffffffffff,
mft_entry.file_reference >> 48)
if mft_attribute.droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
parser_mediator, mft_attribute.droid_file_identifier,
display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read droid file identifier from attribute: 0x{0:08x} '
'with error: {1!s}').format(
mft_attribute.attribute_type, exception))
if mft_attribute.birth_droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
parser_mediator, mft_attribute.birth_droid_file_identifier,
display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read birth droid file identifier from attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
def _ParseMFTEntry(self, parser_mediator, mft_entry):
"""Extracts data from a NFTS $MFT entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
"""
path_hints = []
standard_information_attribute = None
standard_information_attribute_index = None
for attribute_index in range(0, mft_entry.number_of_attributes):
try:
mft_attribute = mft_entry.get_attribute(attribute_index)
if mft_attribute.attribute_type == (
self._MFT_ATTRIBUTE_STANDARD_INFORMATION):
standard_information_attribute = mft_attribute
standard_information_attribute_index = attribute_index
elif mft_attribute.attribute_type == self._MFT_ATTRIBUTE_FILE_NAME:
path_hint = mft_entry.get_path_hint(attribute_index)
self._ParseFileStatAttribute(
parser_mediator, mft_entry, mft_attribute, [path_hint])
if mft_attribute.name_space != self._NAMESPACE_DOS:
path_hints.append(path_hint)
elif mft_attribute.attribute_type == self._MFT_ATTRIBUTE_OBJECT_ID:
self._ParseObjectIDAttribute(
parser_mediator, mft_entry, mft_attribute)
except IOError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MFT attribute: {0:d} with error: {1!s}').format(
attribute_index, exception))
if standard_information_attribute:
try:
self._ParseFileStatAttribute(
parser_mediator, mft_entry, standard_information_attribute,
path_hints)
except IOError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MFT attribute: {0:d} with error: {1!s}').format(
standard_information_attribute_index, exception))
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a NTFS $MFT metadata file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
mft_metadata_file = pyfsntfs.mft_metadata_file()
try:
mft_metadata_file.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open $MFT file with error: {0!s}'.format(exception))
return
for entry_index in range(0, mft_metadata_file.number_of_file_entries):
try:
mft_entry = mft_metadata_file.get_file_entry(entry_index)
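# Skip empty MFT entries and extension entries; extension entries refer to
# their base record via a non-zero base record file reference.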
if (not mft_entry.is_empty() and
mft_entry.base_record_file_reference == 0):
self._ParseMFTEntry(parser_mediator, mft_entry)
except IOError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MFT entry: {0:d} with error: {1!s}').format(
entry_index, exception))
mft_metadata_file.close()
class NTFSUsnJrnlParser(dtfabric_parser.DtFabricBaseParser):
"""Parses a NTFS USN change journal."""
_INITIAL_FILE_OFFSET = None
NAME = 'usnjrnl'
DESCRIPTION = 'Parser for NTFS USN change journal ($UsnJrnl).'
_DEFINITION_FILE = 'ntfs.yaml'
# TODO: add support for USN_RECORD_V3 and USN_RECORD_V4 when actually
# seen to be used.
def _ParseUSNChangeJournal(self, parser_mediator, usn_change_journal):
"""Parses an USN change journal.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
usn_change_journal (pyfsntfs.usn_change_journal): USN change journal.
Raises:
ParseError: if an USN change journal record cannot be parsed.
"""
if not usn_change_journal:
return
usn_record_map = self._GetDataTypeMap('usn_record_v2')
usn_record_data = usn_change_journal.read_usn_record()
while usn_record_data:
current_offset = usn_change_journal.get_offset()
try:
usn_record = self._ReadStructureFromByteStream(
usn_record_data, current_offset, usn_record_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse USN record at offset: 0x{0:08x} with error: '
'{1!s}').format(current_offset, exception))
# Per MSDN we need to use name offset for forward compatibility.
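# The fixed-size portion of a USN_RECORD_V2 structure is 60 bytes;
# name_offset is relative to the start of the record, while the mapped name
# member only contains the data that follows the fixed-size portion, so the
# offset is rebased here.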
name_offset = usn_record.name_offset - 60
utf16_stream = usn_record.name[name_offset:usn_record.name_size]
try:
name_string = utf16_stream.decode('utf-16-le')
except (UnicodeDecodeError, UnicodeEncodeError) as exception:
name_string = utf16_stream.decode('utf-16-le', errors='replace')
parser_mediator.ProduceExtractionWarning((
'unable to decode USN record name string with error: '
'{0:s}. Characters that cannot be decoded will be replaced '
'with "?" or "\\ufffd".').format(exception))
event_data = NTFSUSNChangeEventData()
event_data.file_attribute_flags = usn_record.file_attribute_flags
event_data.file_reference = usn_record.file_reference
event_data.filename = name_string
event_data.offset = current_offset
event_data.parent_file_reference = usn_record.parent_file_reference
event_data.update_reason_flags = usn_record.update_reason_flags
event_data.update_sequence_number = usn_record.update_sequence_number
event_data.update_source_flags = usn_record.update_source_flags
if not usn_record.update_date_time:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=usn_record.update_date_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
usn_record_data = usn_change_journal.read_usn_record()
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a NTFS $UsnJrnl metadata file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
volume = pyfsntfs.volume()
try:
volume.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open NTFS volume with error: {0!s}'.format(exception))
return
try:
usn_change_journal = volume.get_usn_change_journal()
self._ParseUSNChangeJournal(parser_mediator, usn_change_journal)
finally:
volume.close()
manager.ParsersManager.RegisterParsers([NTFSMFTParser, NTFSUsnJrnlParser])
```
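
As a brief aside on the Distributed Link Tracking handling in the parser above: a minimal, standalone sketch of how a version 1 UUID maps to a creation timestamp, using only the Python standard library and a made-up UUID value:

```python
import datetime
import uuid

# Hypothetical version 1 UUID, similar to the droid file identifiers found
# in NTFS $OBJECT_ID attributes.
uuid_string = '5e6a0420-4bf6-11e4-a3e6-005056c00008'

uuid_object = uuid.UUID(uuid_string)
if uuid_object.version == 1:
  # uuid.UUID.time is a 60-bit count of 100-nanosecond intervals since the
  # RFC 4122 epoch: 1582-10-15 00:00:00.
  rfc4122_epoch = datetime.datetime(1582, 10, 15)
  creation_time = rfc4122_epoch + datetime.timedelta(
      microseconds=uuid_object.time // 10)
  print('Creation time: {0!s}'.format(creation_time))
```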
#### File: plaso/tests/end-to-end.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import abc
import argparse
import difflib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
try:
import ConfigParser as configparser
except ImportError:
import configparser # pylint: disable=import-error
if sys.version_info[0] < 3:
PY2 = True
PY3 = False
BYTES_TYPE = str
else:
PY2 = False
PY3 = True
BYTES_TYPE = bytes
# Since os.path.abspath() uses the current working directory (cwd)
# os.path.abspath(__file__) will point to a different location if
# cwd has been changed. Hence we preserve the absolute location of __file__.
__file__ = os.path.abspath(__file__)
class TempDirectory(object):
"""Temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exception_type, value, traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
class TestCase(object):
"""Test case interface.
The test case defines what aspect of the plaso tools to test.
A test definition is used to provide parameters for the test
case so it can be easily run on different input files.
"""
NAME = None
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestCase, self).__init__()
self._debug_output = debug_output
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def _RunCommand(self, command, stdout=None, stderr=None):
"""Runs a command.
Args:
command (list[str]): full command to run, as expected by the Popen()
constructor (see the documentation:
https://docs.python.org/2/library/subprocess.html#popen-constructor)
stdout (Optional[str]): path to file to send stdout to.
stderr (Optional[str]): path to file to send stderr to.
Returns:
bool: True if the command ran successfully.
"""
if command[0].endswith('.py'):
command.insert(0, sys.executable)
command_string = ' '.join(command)
logging.info('Running: {0:s}'.format(command_string))
child = subprocess.Popen(command, stdout=stdout, stderr=stderr)
child.communicate()
exit_code = child.returncode
if exit_code != 0:
logging.error('Running: "{0:s}" failed (exit code {1:d}).'.format(
command_string, exit_code))
return False
return True
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
class TestCasesManager(object):
"""Test cases manager."""
_test_case_classes = {}
_test_case_objects = {}
@classmethod
def DeregisterTestCase(cls, test_case_class):
"""Deregisters a test case class.
The test case classes are identified based on their lower case name.
Args:
test_case_class (type): test case class.
Raises:
KeyError: if test case class is not set for the corresponding name.
"""
test_case_name = test_case_class.NAME.lower()
if test_case_name not in cls._test_case_classes:
raise KeyError(
'Test case class not set for name: {0:s}.'.format(
test_case_class.NAME))
del cls._test_case_classes[test_case_name]
@classmethod
def GetTestCaseObject(
cls, name, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Retrieves the test case object for a specific name.
Args:
name (str): name of the test case.
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
Returns:
TestCase: test case or None if not available.
"""
name = name.lower()
if name not in cls._test_case_objects:
test_case_object = None
if name in cls._test_case_classes:
test_case_class = cls._test_case_classes[name]
test_case_object = test_case_class(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
if not test_case_object:
return None
cls._test_case_objects[name] = test_case_object
return cls._test_case_objects[name]
@classmethod
def RegisterTestCase(cls, test_case_class):
"""Registers a test case class.
The test case classes are identified based on their lower case name.
Args:
test_case_class (type): test case class.
Raises:
KeyError: if test case class is already set for the corresponding
name.
"""
test_case_name = test_case_class.NAME.lower()
if test_case_name in cls._test_case_classes:
raise KeyError((
'Test case class already set for name: {0:s}.').format(
test_case_class.NAME))
cls._test_case_classes[test_case_name] = test_case_class
@classmethod
def RegisterTestCases(cls, test_case_classes):
"""Registers test case classes.
The test case classes are identified based on their lower case name.
Args:
test_case_classes (list[type]): test case classes.
Raises:
KeyError: if test case class is already set for the corresponding
name.
"""
for test_case_class in test_case_classes:
cls.RegisterTestCase(test_case_class)
class TestDefinition(object):
"""Test definition.
Attributes:
case (str): name of test case.
name (str): name of the test.
"""
def __init__(self, name):
"""Initializes a test definition.
Args:
name (str): name of the test.
"""
super(TestDefinition, self).__init__()
self.case = ''
self.name = name
class TestDefinitionReader(object):
"""Test definition reader.
The test definition reader reads test definitions from a configuration
file.
"""
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test definition reader.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestDefinitionReader, self).__init__()
self._config_parser = None
self._debug_output = debug_output
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def GetConfigValue(
self, section_name, value_name, default=None, split_string=False):
"""Retrieves a value from the config parser.
Args:
section_name (str): name of the section that contains the value.
value_name (str): the name of the value.
default (Optional[object]): default value to return if no value is set
in the config parser.
split_string (Optional[bool]): if True, the value will be split into a
list of strings, suitable for passing to subprocess.Popen().
Returns:
object: value or the default if the value does not exist.
Raises:
RuntimeError: if the configuration parser is not set.
"""
if not self._config_parser:
raise RuntimeError('Missing configuration parser.')
try:
value = self._config_parser.get(section_name, value_name)
except configparser.NoOptionError:
value = None
if isinstance(value, BYTES_TYPE):
value = value.decode('utf-8')
if split_string and value:
options = []
for flag_and_setting in value.split(' '):
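# Split 'option=value' pairs into separate list elements so that both parts
# end up as individual command line arguments.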
if flag_and_setting.find('=') > 0:
options.extend(flag_and_setting.split('='))
else:
options.append(flag_and_setting)
value = options
if value is None:
value = default
return value
def Read(self, file_object):
"""Reads test definitions.
Args:
file_object (file): a file-like object to read from.
Yields:
TestDefinition: end-to-end test definition.
"""
# TODO: replace by:
# self._config_parser = configparser.ConfigParser(interpolation=None)
self._config_parser = configparser.RawConfigParser()
try:
self._config_parser.read_file(file_object)
for section_name in self._config_parser.sections():
test_definition = TestDefinition(section_name)
test_definition.case = self.GetConfigValue(section_name, 'case')
if not test_definition.case:
logging.warning(
'Test case missing in test definition: {0:s}.'.format(
section_name))
continue
test_case = TestCasesManager.GetTestCaseObject(
test_definition.case, self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path,
debug_output=self._debug_output)
if not test_case:
logging.warning('Undefined test case: {0:s}'.format(
test_definition.case))
continue
if not test_case.ReadAttributes(self, test_definition):
logging.warning(
'Unable to read attributes of test case: {0:s}'.format(
test_definition.case))
continue
yield test_definition
finally:
self._config_parser = None
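# A minimal sketch of a test definition section as consumed by
# TestDefinitionReader.Read(); the section name and values below are made up,
# the option names match those read by the test cases further down:
#
# [registry_extract_and_output]
# case=extract_and_output
# source=registry_image.raw
# output_format=dynamic
# output_file=registry_events.csv
# reference_output_file=registry_events.csv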
class TestLauncher(object):
"""Test launcher.
The test launcher reads the test definitions from a file, looks up
the corresponding test cases in the test case manager and then runs
the test case with the parameters specified in the test definition.
"""
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test launcher.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestLauncher, self).__init__()
self._debug_output = debug_output
self._test_definitions = []
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def _RunTest(self, test_definition):
"""Runs the test.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
test_case = TestCasesManager.GetTestCaseObject(
test_definition.case, self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path)
if not test_case:
logging.error('Unsupported test case: {0:s}'.format(
test_definition.case))
return False
return test_case.Run(test_definition)
def ReadDefinitions(self, configuration_file):
"""Reads the test definitions from the configuration file.
Args:
configuration_file (str): path of the configuration file.
"""
self._test_definitions = []
with open(configuration_file) as file_object:
test_definition_reader = TestDefinitionReader(
self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path)
for test_definition in test_definition_reader.Read(file_object):
self._test_definitions.append(test_definition)
def RunTests(self):
"""Runs the tests.
Returns:
list[str]: names of the failed tests.
"""
# TODO: set up test environment
failed_tests = []
for test_definition in self._test_definitions:
if not self._RunTest(test_definition):
failed_tests.append(test_definition.name)
return failed_tests
class StorageFileTestCase(TestCase):
"""Shared functionality for plaso test cases that involve storage files."""
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(StorageFileTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._pinfo_path = None
self._psort_path = None
def _CompareOutputFile(self, test_definition, temp_directory):
"""Compares the output file with a reference output file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
Returns:
bool: True if the output files are identical.
"""
output_file_path = os.path.join(temp_directory, test_definition.output_file)
# TODO: add support to compare output by SHA-256.
result = False
if test_definition.reference_output_file:
reference_output_file_path = test_definition.reference_output_file
if self._test_references_path:
reference_output_file_path = os.path.join(
self._test_references_path, reference_output_file_path)
if not os.path.exists(reference_output_file_path):
logging.error('No such reference output file: {0:s}'.format(
reference_output_file_path))
return False
with open(reference_output_file_path, 'r') as reference_output_file:
with open(output_file_path, 'r') as output_file:
# Hack to remove paths in the output that are different when running
# the tests under UNIX and Windows.
reference_output_list = []
for line in reference_output_file.readlines():
if PY2:
line = line.decode('utf-8')
line = line.replace('/tmp/test/test_data/', '')
reference_output_list.append(line)
output_list = []
for line in output_file:
if PY2:
line = line.decode('utf-8')
line = line.replace('/tmp/test/test_data/', '')
line = line.replace('C:\\tmp\\test\\test_data\\', '')
line = line.replace('C:\\\\tmp\\\\test\\\\test_data\\\\', '')
output_list.append(line)
differences = list(difflib.unified_diff(
reference_output_list, output_list,
fromfile=reference_output_file_path, tofile=output_file_path))
if differences:
differences_output = []
for difference in differences:
differences_output.append(difference)
differences_output = '\n'.join(differences_output)
logging.error('Differences: {0:s}'.format(differences_output))
if not differences:
result = True
return result
def _InitializePinfoPath(self):
"""Initializes the location of pinfo."""
for filename in ('pinfo.exe', 'pinfo.sh', 'pinfo.py'):
self._pinfo_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._pinfo_path):
break
def _InitializePsortPath(self):
"""Initializes the location of psort."""
for filename in ('psort.exe', 'psort.sh', 'psort.py'):
self._psort_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._psort_path):
break
def _RunPinfo(self, test_definition, temp_directory, storage_file):
"""Runs pinfo on the storage file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if pinfo ran successfully.
"""
stdout_file = os.path.join(
temp_directory, '{0:s}-pinfo.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-pinfo.err'.format(test_definition.name))
command = [self._pinfo_path, '--output-format', 'json', storage_file]
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def _RunPinfoCompare(self, test_definition, temp_directory, storage_file):
"""Runs pinfo --compare on the storage file and a reference storage file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if pinfo ran successfully.
"""
reference_storage_file = test_definition.reference_storage_file
if self._test_references_path:
reference_storage_file = os.path.join(
self._test_references_path, reference_storage_file)
if not os.path.exists(reference_storage_file):
logging.error('No such reference storage file: {0:s}'.format(
reference_storage_file))
return False
stdout_file = os.path.join(
temp_directory, '{0:s}-compare-pinfo.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-compare-pinfo.err'.format(test_definition.name))
command = [
self._pinfo_path, '--compare', reference_storage_file, storage_file]
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def _RunPsort(
self, test_definition, temp_directory, storage_file,
analysis_options=None, output_options=None):
"""Runs psort with the output options specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
analysis_options (Optional[str]): analysis options.
output_options (Optional[str]): output options.
Returns:
bool: True if psort ran successfully.
"""
analysis_options = analysis_options or []
output_options = output_options or []
output_format = test_definition.output_format or 'null'
if '-o' not in output_options and '--output-format' not in output_options:
output_options.extend(['--output-format', output_format])
output_file_path = None
if output_format != 'null':
output_file = getattr(test_definition, 'output_file', None)
if output_file:
output_file_path = os.path.join(temp_directory, output_file)
output_options.extend(['-w', output_file_path])
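# psort takes the storage file as a positional argument; it is appended to
# the output options so that it appears after them on the command line.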
output_options.append(storage_file)
output_filter = getattr(test_definition, 'output_filter', None)
if output_filter:
output_options.append(output_filter)
logging_options = [
option.replace('%command%', 'psort')
for option in test_definition.logging_options]
stdout_file = os.path.join(
temp_directory, '{0:s}-psort.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-psort.err'.format(test_definition.name))
command = [self._psort_path]
command.extend(analysis_options)
command.extend(output_options)
command.extend(logging_options)
command.extend(['--status-view', 'none'])
command.extend(test_definition.profiling_options)
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if output_file_path and os.path.exists(output_file_path):
shutil.copy(output_file_path, self._test_results_path)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
class ExtractAndOutputTestCase(StorageFileTestCase):
"""Extract and output test case.
The extract and output test case runs log2timeline to extract data
from a source, specified by the test definition. After the data has been
extracted, pinfo and psort are run to read from the resulting storage file.
"""
NAME = 'extract_and_output'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(ExtractAndOutputTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._log2timeline_path = None
self._InitializeLog2TimelinePath()
self._InitializePinfoPath()
self._InitializePsortPath()
def _InitializeLog2TimelinePath(self):
"""Initializes the location of log2timeline."""
for filename in (
'log2timeline.exe', 'log2timeline.sh', 'log2timeline.py'):
self._log2timeline_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._log2timeline_path):
break
def _RunLog2Timeline(
self, test_definition, temp_directory, storage_file, source_path):
"""Runs log2timeline with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
source_path (str): path of the source.
Returns:
bool: True if log2timeline ran successfully.
"""
extract_options = ['--status-view=none']
extract_options.extend(test_definition.extract_options)
logging_options = [
option.replace('%command%', 'log2timeline')
for option in test_definition.logging_options]
stdout_file = os.path.join(
temp_directory, '{0:s}-log2timeline.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-log2timeline.err'.format(test_definition.name))
command = [self._log2timeline_path]
command.extend(extract_options)
command.extend(logging_options)
command.extend(test_definition.profiling_options)
command.extend([storage_file, source_path])
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(storage_file):
shutil.copy(storage_file, self._test_results_path)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.extract_options = test_definition_reader.GetConfigValue(
test_definition.name, 'extract_options', default=[], split_string=True)
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.reference_storage_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_storage_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, '{0:s}.plaso'.format(test_definition.name))
# Extract events with log2timeline.
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source_path):
return False
# Check if the resulting storage file can be read with pinfo.
if not self._RunPinfo(
test_definition, temp_directory, storage_file):
return False
# Compare storage file with a reference storage file.
if test_definition.reference_storage_file:
if not self._RunPinfoCompare(
test_definition, temp_directory, storage_file):
return False
# Check if the resulting storage file can be read with psort.
if not self._RunPsort(
test_definition, temp_directory, storage_file,
output_options=test_definition.output_options):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
class ExtractAndOutputWithPstealTestCase(StorageFileTestCase):
"""Extract and output with psteal test case.
The extract and output with psteal test case runs psteal to extract data from
a source, specified by the test definition, and outputs the extracted events.
"""
NAME = 'extract_and_output_with_psteal'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(ExtractAndOutputWithPstealTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._psteal_path = None
self._InitializePstealPath()
def _InitializePstealPath(self):
"""Initializes the location of psteal."""
for filename in ('psteal.exe', 'psteal.sh', 'psteal.py'):
self._psteal_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._psteal_path):
break
def _RunPsteal(
self, test_definition, temp_directory, storage_file, source_path):
"""Runs psteal with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
source_path (str): path of the source.
Returns:
bool: True if psteal ran successfully.
"""
psteal_options = [
'--source={0:s}'.format(source_path),
'--status-view=none',
'--storage-file={0:s}'.format(storage_file)]
psteal_options.extend(test_definition.extract_options)
if test_definition.output_format:
psteal_options.extend(['-o', test_definition.output_format])
output_file_path = None
if test_definition.output_file:
output_file_path = os.path.join(
temp_directory, test_definition.output_file)
psteal_options.extend(['-w', output_file_path])
logging_options = [
option.replace('%command%', 'psteal')
for option in test_definition.logging_options]
stdout_file = os.path.join(
temp_directory, '{0:s}-psteal.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-psteal.err'.format(test_definition.name))
command = [self._psteal_path]
command.extend(psteal_options)
command.extend(logging_options)
command.extend(test_definition.profiling_options)
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(storage_file):
shutil.copy(storage_file, self._test_results_path)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.extract_options = test_definition_reader.GetConfigValue(
test_definition.name, 'extract_options', default=[], split_string=True)
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.reference_storage_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_storage_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, '{0:s}.plaso'.format(test_definition.name))
# Extract and output events with psteal.
if not self._RunPsteal(
test_definition, temp_directory, storage_file, source_path):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
class ExtractAndTagTestCase(ExtractAndOutputTestCase):
"""Extract and tag test case.
The extract and tag test case runs log2timeline to extract data
from a source, specified by the test definition. After the data has been
extracted, psort is run to tag events in the resulting storage file.
"""
NAME = 'extract_and_tag'
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
if not super(ExtractAndTagTestCase, self).ReadAttributes(
test_definition_reader, test_definition):
return False
test_definition.tagging_file = test_definition_reader.GetConfigValue(
test_definition.name, 'tagging_file')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, '{0:s}.plaso'.format(test_definition.name))
# Extract events with log2timeline.
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source_path):
return False
# Add tags to the resulting storage file with psort.
tagging_file_path = test_definition.tagging_file
if self._test_sources_path:
tagging_file_path = os.path.join(
self._test_sources_path, tagging_file_path)
analysis_options = [
'--analysis', 'tagging', '--tagging-file', tagging_file_path]
output_options = ['--output-format', 'null']
if not self._RunPsort(
test_definition, temp_directory, storage_file,
analysis_options=analysis_options, output_options=output_options):
return False
# Check if the resulting storage file can be read with psort.
if not self._RunPsort(
test_definition, temp_directory, storage_file,
output_options=test_definition.output_options):
return False
return True
class ImageExportTestCase(TestCase):
"""Image export test case.
The image export test case runs image_export to extract files from a storage
media image, specified by the test definition.
"""
NAME = 'image_export'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(ImageExportTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._image_export_path = None
self._InitializeImageExportPath()
def _InitializeImageExportPath(self):
"""Initializes the location of image_export."""
for filename in (
'image_export.exe', 'image_export.sh', 'image_export.py'):
self._image_export_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._image_export_path):
break
def _RunImageExport(self, test_definition, temp_directory, source_path):
"""Runs image_export on a storage media image.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
source_path (str): path of the source.
Returns:
bool: True if image_export ran successfully.
"""
output_file_path = os.path.join(temp_directory, 'export')
output_options = ['-w', output_file_path]
logging_options = [
option.replace('%command%', 'image_export')
for option in test_definition.logging_options]
stdout_file = os.path.join(
temp_directory, '{0:s}-image_export.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, '{0:s}-image_export.err'.format(test_definition.name))
command = [self._image_export_path]
command.extend(output_options)
command.extend(logging_options)
command.extend(test_definition.profiling_options)
command.append(source_path)
with open(stdout_file, 'w') as stdout:
with open(stderr_file, 'w') as stderr:
result = self._RunCommand(command, stdout=stdout, stderr=stderr)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
# TODO: hash the files.
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.filter_file = test_definition_reader.GetConfigValue(
test_definition.name, 'filter_file')
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
# Extract files with image_export.
if not self._RunImageExport(
test_definition, temp_directory, source_path):
return False
return True
class MultiExtractAndOutputTestCase(ExtractAndOutputTestCase):
"""Extract multiple times with the same storage file and output test case.
The multi extract and output test case runs log2timeline to extract data
from a source, specified by the test definition, multiple times with the
same storage file. After the data has been extracted, pinfo and psort are
run to read from the resulting storage file.
"""
NAME = 'multi_extract_and_output'
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.extract_options = test_definition_reader.GetConfigValue(
test_definition.name, 'extract_options', default=[], split_string=True)
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.reference_storage_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_storage_file'))
test_definition.source1 = test_definition_reader.GetConfigValue(
test_definition.name, 'source1')
test_definition.source2 = test_definition_reader.GetConfigValue(
test_definition.name, 'source2')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source1_path = test_definition.source1
if self._test_sources_path:
source1_path = os.path.join(self._test_sources_path, source1_path)
if not os.path.exists(source1_path):
logging.error('No such source: {0:s}'.format(source1_path))
return False
source2_path = test_definition.source2
if self._test_sources_path:
source2_path = os.path.join(self._test_sources_path, source2_path)
if not os.path.exists(source2_path):
logging.error('No such source: {0:s}'.format(source2_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, '{0:s}.plaso'.format(test_definition.name))
# Extract events with log2timeline.
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source1_path):
return False
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source2_path):
return False
# Check if the resulting storage file can be read with pinfo.
if not self._RunPinfo(
test_definition, temp_directory, storage_file):
return False
# Compare storage file with a reference storage file.
if test_definition.reference_storage_file:
if not self._RunPinfoCompare(
test_definition, temp_directory, storage_file):
return False
# Check if the resulting storage file can be read with psort.
if not self._RunPsort(
test_definition, temp_directory, storage_file,
output_options=test_definition.output_options):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
class AnalyzeAndOutputTestCase(StorageFileTestCase):
"""Analyze and output test case.
The analyze and output test case runs psort on a storage file with specific
analysis and output options.
"""
NAME = 'analyze_and_output'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(AnalyzeAndOutputTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._InitializePsortPath()
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.analysis_options = test_definition_reader.GetConfigValue(
test_definition.name, 'analysis_options', default=[], split_string=True)
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_filter = test_definition_reader.GetConfigValue(
test_definition.name, 'output_filter', default='')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
# Run psort with both analysis and output options.
if not self._RunPsort(
test_definition, temp_directory, source_path,
analysis_options=test_definition.analysis_options,
output_options=test_definition.output_options):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
class MultiAnalyzeAndOutputTestCase(AnalyzeAndOutputTestCase):
"""Analyzes multiple times with the same storage file and output test case.
The multi analysis and output test case runs psort analysis modules multiple
times with the same storage file. After the analysis modules have run, psort is
run to read from the resulting storage file.
"""
NAME = 'multi_analyze_and_output'
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.analysis_options1 = test_definition_reader.GetConfigValue(
test_definition.name, 'analysis_options1', default=[],
split_string=True)
test_definition.analysis_options2 = test_definition_reader.GetConfigValue(
test_definition.name, 'analysis_options2', default=[],
split_string=True)
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_filter = test_definition_reader.GetConfigValue(
test_definition.name, 'output_filter', default='')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
# Run psort with the first set of analysis options.
if not self._RunPsort(
test_definition, temp_directory, source_path,
analysis_options=test_definition.analysis_options1):
return False
# Run psort with the second set of analysis options.
if not self._RunPsort(
test_definition, temp_directory, source_path,
analysis_options=test_definition.analysis_options2):
return False
# Run psort with the output options.
if not self._RunPsort(
test_definition, temp_directory, source_path,
output_options=test_definition.output_options):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
# TODO: This class is kept for backwards compatibility. For new tests use
# AnalyzeAndOutputTestCase instead.
class OutputTestCase(StorageFileTestCase):
"""Output test case.
The output test case runs psort on a storage file to produce its various
output formats.
"""
NAME = 'output'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(OutputTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._InitializePsortPath()
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.logging_options = test_definition_reader.GetConfigValue(
test_definition.name, 'logging_options', default=[], split_string=True)
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, 'output_file')
test_definition.output_filter = test_definition_reader.GetConfigValue(
test_definition.name, 'output_filter', default='')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, 'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, 'output_options', default=[], split_string=True)
test_definition.profiling_options = test_definition_reader.GetConfigValue(
test_definition.name, 'profiling_options', default=[],
split_string=True)
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, 'reference_output_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, 'source')
return True
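  # Illustrative sketch (section name and values are hypothetical): a test
  # definition section in the end-to-end configuration file supplies the
  # attributes read above, for example:
  #
  #   [output_example]
  #   source=example.plaso
  #   output_format=dynamic
  #   output_file=output.csv
  #   reference_output_file=output.csv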
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error('No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
# Run psort with the output options.
if not self._RunPsort(
test_definition, temp_directory, source_path,
output_options=test_definition.output_options):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
TestCasesManager.RegisterTestCases([
AnalyzeAndOutputTestCase, ExtractAndOutputTestCase,
ExtractAndOutputWithPstealTestCase, ExtractAndTagTestCase,
ImageExportTestCase, MultiAnalyzeAndOutputTestCase,
MultiExtractAndOutputTestCase, OutputTestCase])
def Main():
"""The main function."""
argument_parser = argparse.ArgumentParser(
description='End-to-end test launcher.', add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
argument_parser.add_argument(
'-c', '--config', dest='config_file', action='store',
metavar='CONFIG_FILE', default=None,
help='path of the test configuration file.')
argument_parser.add_argument(
'--debug', dest='debug_output', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'-h', '--help', action='help',
help='show this help message and exit.')
argument_parser.add_argument(
'--references-directory', '--references_directory', action='store',
metavar='DIRECTORY', dest='references_directory', type=str,
default=None, help=(
'The location of the directory where the test references are '
'stored.'))
argument_parser.add_argument(
'--results-directory', '--results_directory', action='store',
metavar='DIRECTORY', dest='results_directory', type=str,
default=None, help=(
'The location of the directory where to store the test results.'))
argument_parser.add_argument(
'--sources-directory', '--sources_directory', action='store',
metavar='DIRECTORY', dest='sources_directory', type=str,
default=None, help=(
'The location of the directory where the test sources are stored.'))
argument_parser.add_argument(
'--tools-directory', '--tools_directory', action='store',
metavar='DIRECTORY', dest='tools_directory', type=str,
default=None, help='The location of the plaso tools directory.')
options = argument_parser.parse_args()
if not options.config_file:
options.config_file = os.path.dirname(__file__)
options.config_file = os.path.dirname(options.config_file)
options.config_file = os.path.join(
options.config_file, 'config', 'end-to-end.ini')
if not os.path.exists(options.config_file):
print('No such config file: {0:s}.'.format(options.config_file))
print('')
return False
logging.basicConfig(
format='[%(levelname)s] %(message)s', level=logging.INFO)
tools_path = options.tools_directory
if not tools_path:
tools_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'tools')
test_sources_path = options.sources_directory
if test_sources_path and not os.path.isdir(test_sources_path):
print('No such sources directory: {0:s}.'.format(test_sources_path))
print('')
return False
test_references_path = options.references_directory
if test_references_path and not os.path.isdir(test_references_path):
print('No such references directory: {0:s}.'.format(test_references_path))
print('')
return False
test_results_path = options.results_directory
if not test_results_path:
test_results_path = os.getcwd()
if not os.path.isdir(test_results_path):
print('No such results directory: {0:s}.'.format(test_results_path))
print('')
return False
tests = []
with open(options.config_file) as file_object:
test_definition_reader = TestDefinitionReader(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=options.debug_output)
for test_definition in test_definition_reader.Read(file_object):
tests.append(test_definition)
test_launcher = TestLauncher(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=options.debug_output)
test_launcher.ReadDefinitions(options.config_file)
failed_tests = test_launcher.RunTests()
if failed_tests:
print('Failed tests:')
for failed_test in failed_tests:
print(' {0:s}'.format(failed_test))
print('')
return False
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
```
#### File: tests/output/dynamic.py
```python
from __future__ import unicode_literals
import unittest
from plaso.containers import events
from plaso.formatters import interface as formatters_interface
from plaso.formatters import manager as formatters_manager
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.output import dynamic
from tests.cli import test_lib as cli_test_lib
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class TestEventFormatter(formatters_interface.EventFormatter):
"""Test event formatter."""
DATA_TYPE = 'test:dynamic'
FORMAT_STRING = '{text}'
SOURCE_SHORT = 'LOG'
SOURCE_LONG = 'Syslog'
class DynamicFieldsHelperTest(test_lib.OutputModuleTestCase):
"""Test the dynamic fields helper."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'data_type': 'test:dynamic',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': timelib.Timestamp.CopyFromString('2012-06-27 18:17:01'),
'timestamp_desc': definitions.TIME_DESCRIPTION_CHANGE}]
def testFormatDate(self):
"""Tests the _FormatDate function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
date_string = dynamic_fields_helper._FormatDate(event, event_data)
self.assertEqual(date_string, '2012-06-27')
event.timestamp = -9223372036854775808
date_string = dynamic_fields_helper._FormatDate(event, event_data)
self.assertEqual(date_string, '0000-00-00')
def testFormatDateTime(self):
"""Tests the _FormatDateTime function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
date_time_string = dynamic_fields_helper._FormatDateTime(event, event_data)
self.assertEqual(date_time_string, '2012-06-27T18:17:01+00:00')
event.timestamp = -9223372036854775808
date_time_string = dynamic_fields_helper._FormatDateTime(event, event_data)
self.assertEqual(date_time_string, '0000-00-00T00:00:00')
def testFormatHostname(self):
"""Tests the _FormatHostname function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
hostname_string = dynamic_fields_helper._FormatHostname(event, event_data)
self.assertEqual(hostname_string, 'ubuntu')
def testFormatInode(self):
"""Tests the _FormatInode function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
inode_string = dynamic_fields_helper._FormatInode(event, event_data)
self.assertEqual(inode_string, '-')
def testFormatMACB(self):
"""Tests the _FormatMACB function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
macb_string = dynamic_fields_helper._FormatMACB(event, event_data)
self.assertEqual(macb_string, '..C.')
def testFormatMessage(self):
"""Tests the _FormatMessage function."""
formatters_manager.FormattersManager.RegisterFormatter(
TestEventFormatter)
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
message_string = dynamic_fields_helper._FormatMessage(event, event_data)
expected_message_string = (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
'for user root)')
self.assertEqual(message_string, expected_message_string)
formatters_manager.FormattersManager.DeregisterFormatter(
TestEventFormatter)
def testFormatMessageShort(self):
"""Tests the _FormatMessageShort function."""
formatters_manager.FormattersManager.RegisterFormatter(
TestEventFormatter)
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
message_short_string = dynamic_fields_helper._FormatMessageShort(
event, event_data)
expected_message_short_string = (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
'for user root)')
self.assertEqual(message_short_string, expected_message_short_string)
formatters_manager.FormattersManager.DeregisterFormatter(
TestEventFormatter)
def testFormatSource(self):
"""Tests the _FormatSource function."""
formatters_manager.FormattersManager.RegisterFormatter(
TestEventFormatter)
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
source_string = dynamic_fields_helper._FormatSource(event, event_data)
self.assertEqual(source_string, 'Syslog')
formatters_manager.FormattersManager.DeregisterFormatter(
TestEventFormatter)
def testFormatSourceShort(self):
"""Tests the _FormatSourceShort function."""
formatters_manager.FormattersManager.RegisterFormatter(
TestEventFormatter)
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
source_short_string = dynamic_fields_helper._FormatSourceShort(
event, event_data)
self.assertEqual(source_short_string, 'LOG')
formatters_manager.FormattersManager.DeregisterFormatter(
TestEventFormatter)
def testFormatTag(self):
"""Tests the _FormatTag function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
tag_string = dynamic_fields_helper._FormatTag(None)
self.assertEqual(tag_string, '-')
event_tag = events.EventTag()
event_tag.AddLabel('one')
event_tag.AddLabel('two')
tag_string = dynamic_fields_helper._FormatTag(event_tag)
self.assertEqual(tag_string, 'one two')
def testFormatTime(self):
"""Tests the _FormatTime function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
time_string = dynamic_fields_helper._FormatTime(event, event_data)
self.assertEqual(time_string, '18:17:01')
event.timestamp = -9223372036854775808
time_string = dynamic_fields_helper._FormatTime(event, event_data)
self.assertEqual(time_string, '--:--:--')
def testFormatTimestampDescription(self):
"""Tests the _FormatTimestampDescription function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
timestamp_description_string = (
dynamic_fields_helper._FormatTimestampDescription(event, event_data))
self.assertEqual(timestamp_description_string, 'Metadata Modification Time')
def testFormatUsername(self):
"""Tests the _FormatUsername function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
username_string = dynamic_fields_helper._FormatUsername(event, event_data)
self.assertEqual(username_string, '-')
def testFormatZone(self):
"""Tests the _FormatZone function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
zone_string = dynamic_fields_helper._FormatZone(event, event_data)
self.assertEqual(zone_string, 'UTC')
# TODO: add coverage for _ReportEventError
def testGetFormattedField(self):
"""Tests the GetFormattedField function."""
output_mediator = self._CreateOutputMediator()
dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
zone_string = dynamic_fields_helper.GetFormattedField(
event, event_data, None, 'zone')
self.assertEqual(zone_string, 'UTC')
class DynamicOutputModuleTest(test_lib.OutputModuleTestCase):
"""Test the dynamic output module."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'data_type': 'test:dynamic',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': timelib.Timestamp.CopyFromString('2012-06-27 18:17:01'),
'timestamp_desc': definitions.TIME_DESCRIPTION_CHANGE}]
# TODO: add coverage for _SanitizeField
# TODO: add coverage for SetFieldDelimiter
# TODO: add coverage for SetFields
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
formatters_manager.FormattersManager.RegisterFormatter(
TestEventFormatter)
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = dynamic.DynamicOutputModule(output_mediator)
output_module.SetFields([
'date', 'time', 'timezone', 'macb', 'source', 'sourcetype',
'type', 'user', 'host', 'message_short', 'message',
'filename', 'inode', 'notes', 'format', 'extra'])
output_module.SetOutputWriter(output_writer)
output_module.WriteHeader()
expected_header = (
'date,time,timezone,macb,source,sourcetype,type,user,host,'
'message_short,message,filename,inode,notes,format,extra\n')
header = output_writer.ReadOutput()
self.assertEqual(header, expected_header)
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
output_module.WriteEventBody(event, event_data, None)
expected_event_body = (
'2012-06-27,18:17:01,UTC,..C.,LOG,Syslog,Metadata Modification Time,-,'
'ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '
'closed for user root),Reporter <CRON> PID: 8442 '
'(pam_unix(cron:session): session closed for user root),log/syslog.1'
',-,-,-,-\n')
event_body = output_writer.ReadOutput()
self.assertEqual(event_body, expected_event_body)
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = dynamic.DynamicOutputModule(output_mediator)
output_module.SetFields([
'datetime', 'nonsense', 'hostname', 'message'])
output_module.SetOutputWriter(output_writer)
expected_header = 'datetime,nonsense,hostname,message\n'
output_module.WriteHeader()
header = output_writer.ReadOutput()
self.assertEqual(header, expected_header)
expected_event_body = (
'2012-06-27T18:17:01+00:00,-,ubuntu,Reporter <CRON> PID: 8442'
' (pam_unix(cron:session): session closed for user root)\n')
event, event_data = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
output_module.WriteEventBody(event, event_data, None)
event_body = output_writer.ReadOutput()
self.assertEqual(event_body, expected_event_body)
formatters_manager.FormattersManager.DeregisterFormatter(
TestEventFormatter)
def testHeader(self):
"""Tests the WriteHeader function."""
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = dynamic.DynamicOutputModule(output_mediator)
output_module.SetOutputWriter(output_writer)
expected_header = (
'datetime,timestamp_desc,source,source_long,message,parser,'
'display_name,tag\n')
output_module.WriteHeader()
header = output_writer.ReadOutput()
self.assertEqual(header, expected_header)
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = dynamic.DynamicOutputModule(output_mediator)
output_module.SetFields([
'date', 'time', 'message', 'hostname', 'filename', 'some_stuff'])
output_module.SetOutputWriter(output_writer)
expected_header = 'date,time,message,hostname,filename,some_stuff\n'
output_module.WriteHeader()
header = output_writer.ReadOutput()
self.assertEqual(header, expected_header)
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = dynamic.DynamicOutputModule(output_mediator)
output_module.SetFields([
'date', 'time', 'message', 'hostname', 'filename', 'some_stuff'])
output_module.SetFieldDelimiter('@')
output_module.SetOutputWriter(output_writer)
expected_header = 'date@time@message@hostname@filename@some_stuff\n'
output_module.WriteHeader()
header = output_writer.ReadOutput()
self.assertEqual(header, expected_header)
if __name__ == '__main__':
unittest.main()
```
#### File: storage/sqlite/merge_reader.py
```python
from __future__ import unicode_literals
import os
import unittest
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.storage.sqlite import merge_reader
from plaso.storage.sqlite import writer
from tests import test_lib as shared_test_lib
from tests.containers import test_lib as containers_test_lib
from tests.storage import test_lib
class SQLiteStorageMergeReaderTest(test_lib.StorageTestCase):
"""Tests for the SQLite-based storage file reader for merging."""
# pylint: disable=protected-access
_TEST_EVENTS_WITH_DESERIALIZATION_ERROR = [
{'data_type': 'windows:registry:key_value',
'key_path': 'MY AutoRun key',
'parser': 'UNKNOWN',
'regvalue': {'Name1': 'Data1', 'Name2': 'Data2'},
'timestamp': timelib.Timestamp.CopyFromString(
'2012-04-20 22:38:46.929596'),
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN,
'values': 'Value: c:/Temp/evil.exe'}]
def _CreateTaskStorageFile(self, session, path, event_values_list):
"""Creates a task storage file for testing.
Args:
session (Session): session the task storage is part of.
path (str): path to the task storage file that should be merged.
event_values_list (list[dict[str, str]]): list of event values.
"""
task = tasks.Task(session_identifier=session.identifier)
storage_file = writer.SQLiteStorageFileWriter(
session, path, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_file.Open()
for event, event_data in containers_test_lib.CreateEventsFromValues(
event_values_list):
storage_file.AddEventData(event_data)
event.SetEventDataIdentifier(event_data.GetIdentifier())
storage_file.AddEvent(event)
storage_file.Close()
def testReadStorageMetadata(self):
"""Tests the _ReadStorageMetadata function."""
session = sessions.Session()
with shared_test_lib.TempDirectory() as temp_directory:
task_storage_path = os.path.join(temp_directory, 'task.sqlite')
self._CreateTaskStorageFile(session, task_storage_path, self._TEST_EVENTS)
session_storage_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = writer.SQLiteStorageFileWriter(
session, session_storage_path)
test_reader = merge_reader.SQLiteStorageMergeReader(
storage_writer, task_storage_path)
test_reader._Open()
test_reader._ReadStorageMetadata()
test_reader._Close()
def testMergeAttributeContainers(self):
"""Tests the MergeAttributeContainers function."""
session = sessions.Session()
with shared_test_lib.TempDirectory() as temp_directory:
task_storage_path = os.path.join(temp_directory, 'task.sqlite')
self._CreateTaskStorageFile(session, task_storage_path, self._TEST_EVENTS)
session_storage_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = writer.SQLiteStorageFileWriter(
session, session_storage_path)
test_reader = merge_reader.SQLiteStorageMergeReader(
storage_writer, task_storage_path)
storage_writer.Open()
result = test_reader.MergeAttributeContainers()
self.assertTrue(result)
storage_writer.Close()
def testMergeAttributeContainersWithDeserializationError(self):
"""Tests MergeAttributeContainers with a deserialization error."""
session = sessions.Session()
with shared_test_lib.TempDirectory() as temp_directory:
task_storage_path = os.path.join(temp_directory, 'task.sqlite')
self._CreateTaskStorageFile(
session, task_storage_path,
self._TEST_EVENTS_WITH_DESERIALIZATION_ERROR)
session_storage_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = writer.SQLiteStorageFileWriter(
session, session_storage_path)
test_reader = merge_reader.SQLiteStorageMergeReader(
storage_writer, task_storage_path)
storage_writer.Open()
result = test_reader.MergeAttributeContainers()
self.assertTrue(result)
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshli0/cs490",
"score": 2
} |
#### File: cs490/cstermproj/__init__.py
```python
from secrets import token_urlsafe
from flask import Flask
from flask_session import Session
from .frontend import startup as start_front
from .middleend import startup as start_middle
from .backend import startup as start_back
from .backend.dbconn import get_environment_var, get_db_uri
def startup():
flaskapp = Flask(
__name__,
static_url_path = "/",
static_folder = "../static/",
template_folder = "../templates/"
)
key = get_environment_var("SESSION_SECRET_KEY")
if key is None:
print("Warning: Generating RANDOM session key, this WILL NOT WORK in production!")
key = token_urlsafe(16)
flaskapp.secret_key = key
start_back()
flaskapp.config["SESSION_TYPE"] = "sqlalchemy"
flaskapp.config["SESSION_SQLALCHEMY_TABLE"] = "sessions"
flaskapp.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
flaskapp.config["SQLALCHEMY_DATABASE_URI"] = get_db_uri().replace("postgres", "postgresql+psycopg2")
s = Session(flaskapp)
s.app.session_interface.db.create_all()
start_middle(flaskapp)
start_front(flaskapp)
return flaskapp
```
#### File: cstermproj/middleend/login.py
```python
import flask
from ..backend.login import check_credentials
def setup(flaskapp):
@flaskapp.before_request
def check_logged_in():
url = flask.request.path.lower()
if "user" in flask.session:
if url == "/login":
return flask.redirect("/app")
elif url not in ["/login", "/style.css", "/favicon.ico", "/script.js"]:
return flask.redirect("/login")
@flaskapp.route("/login", methods = ["POST"])
def login_process():
data = flask.request.values
success = False
if "username" in data and "password" in data:
username = data["username"]
password = data["password"]
if username is not None and len(username) > 0 and password is not None and len(password) > 0:
teacher_or_student = check_credentials(username, password)
if teacher_or_student is not None:
flask.session["user"] = username
flask.session["teacher"] = teacher_or_student
success = True
flask.session["errmsg"] = None if success else "Invalid username or password!"
return flask.redirect("/app" if success else "/login")
``` |
{
"source": "joshliddi15/CSE210-04",
"score": 2
} |
#### File: game/casting/gem.py
```python
from game.casting.object import Object
class Gem(Object):
"""
This is a basic Gem
"""
def __init__(self):
super().__init__()
self._point_value = 1
self._text = "*"
``` |
{
"source": "joshlin5900/hostlink-python",
"score": 3
} |
#### File: hostlink-python/common/util.py
```python
class Util():
def hex_to_ascii(self, fcs):
hexstr = "%02X" % fcs
fcs_high = hexstr[0]
fcs_low = hexstr[1]
return fcs_high, fcs_low
def assert_fcs(self, frame, high_7_bit_padding=True):
buffer = frame.buffer
buffer_len = len(buffer)
fcs_high_idx = buffer_len-4
read_fcs_high = chr(buffer[fcs_high_idx])
read_fcs_low = chr(buffer[fcs_high_idx+1])
fcs = 0
for i in range(0, fcs_high_idx):
fcs = fcs ^ buffer[i]
if fcs < 0x10 and high_7_bit_padding:
fcs |= 0x40
fcs_high, fcs_low = self.hex_to_ascii(fcs)
if fcs_high != read_fcs_high or fcs_low != read_fcs_low:
raise ValueError("FCS not validated. Got char(%s%s), but should be cahr(%s%s)"
% (read_fcs_high, read_fcs_low, fcs_high, fcs_low))
else:
return
def get_func_name(self, inspect_stack):
return inspect_stack[0][3]
``` |
{
"source": "josh-lind/RLBot-Puddles",
"score": 3
} |
#### File: RLBot-Puddles/src/bot.py
```python
import math
import csv, time # Used for logging
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.orientation import Orientation
from util.vec import Vec3
# from RLUtilities.GameInfo import GameInfo
# from RLUtilities.Simulation import Input
# from RLUtilities.LinearAlgebra import norm
class Maneuver:
pass
class MyBot(BaseAgent):
def get_next_csv_name(self):
# Read in the number we should make it
f = open("./MyBots/RLBot-Puddles/src/nextcsvnumber.txt", "r")
returnValue = int(f.read())
f.close()
# Update the file we just read from to increment it
with open("./MyBots/RLBot-Puddles/src/nextcsvnumber.txt", "w") as f:
f.write(str(int(returnValue) + 1))
# Actually return the value
return returnValue
# This runs once before the bot starts up
def initialize_agent(self):
self.controller_state = SimpleControllerState()
self.maneuver = Maneuver()
self.maneuver.name = None
self.maneuver.prevent_goal_properties = Maneuver()
self.collected_data = []
self.time_since_last_log = time.time() # Don't want to save a log any more than .1 seconds, but also don't want it to be blocking
self.current_csv_name = self.get_next_csv_name()
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
self.set_maneuver(packet, self.get_ball_prediction_struct())
self.exec_maneuver(packet)
my_car = packet.game_cars[self.index]
draw_debug(self.renderer, my_car, packet.game_ball, self.maneuver.name)
# Update log if it's been a tenth of a second since we last did
if time.time() - self.time_since_last_log > .1:
self.update_log(packet, self.get_ball_prediction_struct())
            self.time_since_last_log = time.time()
return self.controller_state
def update_log(self, packet, ball_prediction_struct):
csv_line = []
''' PlayerInfo Object (Bot) '''
# Position (X, Y, Z)
csv_line.insert(0, str(packet.game_cars[0].physics.location.x))
csv_line.insert(1, str(packet.game_cars[0].physics.location.y))
csv_line.insert(2, str(packet.game_cars[0].physics.location.z))
# Rotation (Roll, Pitch, Yaw)
csv_line.insert(3, str(packet.game_cars[0].physics.rotation.roll))
csv_line.insert(4, str(packet.game_cars[0].physics.rotation.pitch))
csv_line.insert(5, str(packet.game_cars[0].physics.rotation.yaw))
# Velocity (X/s, Y/s, Z/s)
csv_line.insert(6, str(packet.game_cars[0].physics.velocity.x))
csv_line.insert(7, str(packet.game_cars[0].physics.velocity.y))
csv_line.insert(8, str(packet.game_cars[0].physics.velocity.z))
# Angular Velocity (Roll/s, Pitch/s, Yaw/s)
csv_line.insert(9, str(packet.game_cars[0].physics.angular_velocity.x))
csv_line.insert(10, str(packet.game_cars[0].physics.angular_velocity.y))
csv_line.insert(11, str(packet.game_cars[0].physics.angular_velocity.z))
# Bot Score, Goals, Own Goals, Saves, Shots, Demolitions
csv_line.insert(12, str(packet.game_cars[0].score_info.score))
csv_line.insert(13, str(packet.game_cars[0].score_info.goals))
csv_line.insert(14, str(packet.game_cars[0].score_info.own_goals))
csv_line.insert(15, str(packet.game_cars[0].score_info.saves))
csv_line.insert(16, str(packet.game_cars[0].score_info.shots))
csv_line.insert(17, str(packet.game_cars[0].score_info.demolitions))
# Has Wheel Contact
csv_line.insert(18, str(packet.game_cars[0].has_wheel_contact))
# Is Currently Supersonic
csv_line.insert(19, str(packet.game_cars[0].is_super_sonic))
# Has Currently Jumped
csv_line.insert(20, str(packet.game_cars[0].jumped))
# Has Currently Used Double Jump
csv_line.insert(21, str(packet.game_cars[0].double_jumped))
''' PlayerInfo Object (Enemy) '''
# Position (X, Y, Z)
csv_line.insert(22, str(packet.game_cars[1].physics.location.x))
csv_line.insert(23, str(packet.game_cars[1].physics.location.y))
csv_line.insert(24, str(packet.game_cars[1].physics.location.z))
# Rotation (Roll, Pitch, Yaw)
csv_line.insert(25, str(packet.game_cars[1].physics.rotation.roll))
csv_line.insert(26, str(packet.game_cars[1].physics.rotation.pitch))
csv_line.insert(27, str(packet.game_cars[1].physics.rotation.yaw))
# Velocity (X/s, Y/s, Z/s)
csv_line.insert(28, str(packet.game_cars[1].physics.velocity.x))
csv_line.insert(29, str(packet.game_cars[1].physics.velocity.y))
csv_line.insert(30, str(packet.game_cars[1].physics.velocity.z))
# Angular Velocity (Roll/s, Pitch/s, Yaw/s)
csv_line.insert(31, str(packet.game_cars[1].physics.angular_velocity.x))
csv_line.insert(32, str(packet.game_cars[1].physics.angular_velocity.y))
csv_line.insert(33, str(packet.game_cars[1].physics.angular_velocity.z))
# Bot Score
csv_line.insert(34, str(packet.game_cars[1].score_info.score))
csv_line.insert(35, str(packet.game_cars[1].score_info.goals))
csv_line.insert(36, str(packet.game_cars[1].score_info.own_goals))
csv_line.insert(37, str(packet.game_cars[1].score_info.saves))
csv_line.insert(38, str(packet.game_cars[1].score_info.shots))
csv_line.insert(39, str(packet.game_cars[1].score_info.demolitions))
# Has Wheel Contact
csv_line.insert(40, str(packet.game_cars[1].has_wheel_contact))
# Is Currently Supersonic
csv_line.insert(41, str(packet.game_cars[1].is_super_sonic))
# Has Currently Jumped
csv_line.insert(42, str(packet.game_cars[1].jumped))
# Has Currently Used Double Jump
csv_line.insert(43, str(packet.game_cars[1].double_jumped))
''' BallInfo Object '''
# Ball X, Y, Z
csv_line.insert(44, str(packet.game_ball.physics.location.x))
csv_line.insert(45, str(packet.game_ball.physics.location.y))
csv_line.insert(46, str(packet.game_ball.physics.location.z))
# Ball Velocity (X/s, Y/s, Z/s)
csv_line.insert(47, str(packet.game_ball.physics.velocity.x))
csv_line.insert(48, str(packet.game_ball.physics.velocity.y))
csv_line.insert(49, str(packet.game_ball.physics.velocity.z))
# Ball Angular Velocity (Roll/s, Pitch/s, Yaw/s)
csv_line.insert(50, str(packet.game_ball.physics.angular_velocity.x))
csv_line.insert(51, str(packet.game_ball.physics.angular_velocity.y))
csv_line.insert(52, str(packet.game_ball.physics.angular_velocity.z))
''' Latest Touch Object '''
# Last Player To Touch (True if Bot, False Otherwise)
        csv_line.insert(53, str(packet.game_ball.latest_touch.player_index == self.team))
# Point of contact for touch X, Y, Z
csv_line.insert(54, str(packet.game_ball.latest_touch.hit_location.x))
csv_line.insert(55, str(packet.game_ball.latest_touch.hit_location.y))
csv_line.insert(56, str(packet.game_ball.latest_touch.hit_location.z))
# Direction of the touch X, Y, Z
csv_line.insert(57, str(packet.game_ball.latest_touch.hit_normal.x))
csv_line.insert(58, str(packet.game_ball.latest_touch.hit_normal.y))
csv_line.insert(59, str(packet.game_ball.latest_touch.hit_normal.z))
''' GameInfo Object '''
# Total seconds elapsed (seconds, I assume)
# Estimating high here, will revise once I see the CSV
csv_line.insert(60, str(packet.game_info.seconds_elapsed))
# Total game time remaining (seconds, I assume)
csv_line.insert(61, str(packet.game_info.game_time_remaining))
# Is Overtime (True if not, False otherwise)
csv_line.insert(62, str(packet.game_info.is_overtime))
''' Predicted Ball Position '''
# 1-Second Slice Values
csv_line.insert(63, str(ball_prediction_struct.slices[60].physics.location.x))
csv_line.insert(64, str(ball_prediction_struct.slices[60].physics.location.y))
csv_line.insert(65, str(ball_prediction_struct.slices[60].physics.location.z))
# 2-Second Slice Values
csv_line.insert(66, str(ball_prediction_struct.slices[120].physics.location.x))
csv_line.insert(67, str(ball_prediction_struct.slices[120].physics.location.y))
csv_line.insert(68, str(ball_prediction_struct.slices[120].physics.location.z))
''' BoostPadState Object '''
# Activation state for each of the 34 boost pads
index = 69
for boost_pad in packet.game_boosts:
csv_line.insert(index, str(boost_pad.is_active))
csv_line.insert(index + 1, str(boost_pad.timer))
index += 2
index = 69
# If the ball is projected to be in the goal at ANY point within the next six seconds (True if so, False if not)
# TODO: This
# Deciding what the "correct state" in this position
# Append onto the variable that holds all our data
self.collected_data.append(csv_line)
# Writing freshest iteration to file
# For WHATEVER F*CKING REASON the file itself is run in the RLBot home
# directory and not FROM THIS F*CKING FILE... there's 2.5 hours down the drain
with open("./MyBots/RLBot-Puddles/src/output/" + str(self.current_csv_name) + ".csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.collected_data)
def set_maneuver(self, packet: GameTickPacket, prediction_slices):
my_car = packet.game_cars[self.index]
car_location = Vec3(my_car.physics.location)
opponent_goal_y = -7000.0 if self.team == 1 else 7000.0
ball_loc = packet.game_ball.physics.location
impending_goal = self.will_enter_goal(packet)
if self.maneuver.name == None:
self.maneuver.name = "Attack"
        elif self.maneuver.name != "PreventGoal" and impending_goal:
self.maneuver.name = "PreventGoal"
self.maneuver.prevent_goal_properties.chasing_ball = False
elif isbetween(car_location.y, ball_loc.y, opponent_goal_y) and not impending_goal and abs(ball_loc.y) < 3500:
self.maneuver.name = "GetHomeBoost"
# This method calls the correct functions dependant on what maneuver we are executing
def exec_maneuver(self, packet: GameTickPacket):
if self.maneuver.name == "PreventGoal":
self.prevent_goal(packet)
elif self.maneuver.name == "GetHomeBoost":
self.get_home_boost(packet)
else:
self.hit_ball_to_goal(packet)
def prevent_goal(self, packet: GameTickPacket):
if not self.will_enter_goal(packet):
self.maneuver.name = None
self.maneuver.prevent_goal_properties.chasing_ball = False
return
if self.maneuver.prevent_goal_properties.chasing_ball is not True:
self.get_to_goal_post(packet)
else:
self.go_to_ball(packet)
def will_enter_goal(self, packet: GameTickPacket) -> bool:
goalYVal = -5200.0 if self.team == 0 else 5200.0
ball_prediction = self.get_ball_prediction_struct()
if ball_prediction is not None:
for i in range(0, ball_prediction.num_slices):
prediction_slice = ball_prediction.slices[i]
location = prediction_slice.physics.location
if abs(location.y - goalYVal) < 700 and abs(location.x) < 900.0:
return True
return False
def get_to_goal_post(self, packet: GameTickPacket):
my_car = packet.game_cars[self.index]
car_location = Vec3(my_car.physics.location)
goToX = 1000.0 if car_location.x > 0 else -1000.0
goToY = -4500.0 if self.team == 0 else 4500.0
target: Vec3 = Vec3(goToX, goToY, 0.0)
self.go_to_position(packet, target)
if target.dist(packet.game_cars[self.index].physics.location) < 200:
self.maneuver.prevent_goal_properties.chasing_ball = True
def go_to_ball(self, packet: GameTickPacket):
self.go_to_position(packet, packet.game_ball.physics.location)
def hit_ball_to_goal(self, packet: GameTickPacket):
ball_prediction = self.get_ball_prediction_struct()
my_car = packet.game_cars[self.index]
speed = Vec3(my_car.physics.velocity).length()
info = self.get_field_info()
their_goal = info.goals[1 - self.team].location
starting_time = ball_prediction.slices[0].game_seconds
if ball_prediction is not None:
for i in range(0, ball_prediction.num_slices):
prediction_slice = ball_prediction.slices[i]
location = prediction_slice.physics.location
ball_difference: Vec3 = Vec3(location) - Vec3(my_car.physics.location)
distance_to_ball = ball_difference.length()
if distance_to_ball < speed * (prediction_slice.game_seconds - starting_time):
# self.go_to_position(packet, location)
self.go_to_position(packet, self.get_vector_90_away_from_goal(location, their_goal))
return
self.go_to_ball(packet)
def get_vector_90_away_from_goal(self, ball_location: Vec3, goal_location: Vec3) -> Vec3:
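        # Target a point past the ball along the goal-to-ball direction, i.e. on
        # the side of the ball facing away from the goal (rescale presumably
        # fixes the offset length at 90 units).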
goal_to_ball: Vec3 = Vec3(ball_location) - Vec3(goal_location)
return Vec3(ball_location) + goal_to_ball.rescale(90.0)
def go_to_position(self, packet: GameTickPacket, ideal: Vec3):
my_car = packet.game_cars[self.index]
car_orientation = Orientation(my_car.physics.rotation)
car_direction = car_orientation.forward
steer_correction_radians = find_correction(car_direction, Vec3(ideal) - Vec3(my_car.physics.location))
# Turn left if steer correction is positive in radians
turn_direction_multiplier = -1.0 if steer_correction_radians > 0 else 1.0
abs_correction = abs(steer_correction_radians)
if abs_correction >= .5:
turn = 1 * turn_direction_multiplier
else:
turn = abs_correction * 2 * turn_direction_multiplier
        # Powerslide if the angle of correction is more than 1.3 radians
if abs_correction > 1.3:
self.controller_state.handbrake = True
else:
self.controller_state.handbrake = False
# TODO: change. always boost
self.controller_state.boost = not self.controller_state.handbrake
self.controller_state.throttle = 1.0
self.controller_state.steer = turn
def get_boost(self, packet: GameTickPacket):
info = self.get_field_info()
# Manually construct a list of all big boost pads
min_dist = 50000
min_dist_index = 0
# info.boost_pads has a fixed size but info.num_boosts is how many pads there actually are
for i in range(info.num_boosts):
pad = info.boost_pads[i]
if pad.is_full_boost:
dist = abs((pad.location - packet.game_cars[self.index].physics.location).length())
if min_dist > dist:
min_dist=dist
min_dist_index = i
self.go_to_position(packet,info.boost_pads[min_dist_index].location)
def get_home_boost(self, packet: GameTickPacket):
boost_Yval = -4100.0 if self.team == 0 else 4100.0
info = self.get_field_info()
# Manually construct a list of all big boost pads
min_dist = 50000
min_dist_index = 0
# info.boost_pads has a fixed size but info.num_boosts is how many pads there actually are
for i in range(info.num_boosts):
pad = info.boost_pads[i]
if pad.is_full_boost and abs(pad.location.y - boost_Yval) < 100:
dist = abs((Vec3(pad.location) - Vec3(packet.game_cars[self.index].physics.location)).length())
if min_dist > dist:
min_dist=dist
min_dist_index = i
self.go_to_position(packet, info.boost_pads[min_dist_index].location)
if min_dist < 500:
self.maneuver.name = None
def find_correction(current: Vec3, ideal: Vec3) -> float:
# Finds the angle from current to ideal vector in the xy-plane. Angle will be between -pi and +pi.
# The in-game axes are left handed, so use -x
current_in_radians = math.atan2(current.y, -current.x)
ideal_in_radians = math.atan2(ideal.y, -ideal.x)
diff = ideal_in_radians - current_in_radians
# Make sure that diff is between -pi and +pi.
if abs(diff) > math.pi:
if diff < 0:
diff += 2 * math.pi
else:
diff -= 2 * math.pi
return diff
# Check whether x lies between y1 and y2 (used for comparing position coordinates)
def isbetween(x: float, y1: float, y2: float) -> bool:
if y1 <= x <= y2:
return True
elif y1 >= x >= y2:
return True
return False
def draw_debug(renderer, car, ball, action_display):
renderer.begin_rendering()
# draw a line from the car to the ball
renderer.draw_line_3d(car.physics.location, ball.physics.location, renderer.white())
# print the action that the bot is taking
renderer.draw_string_3d(car.physics.location, 2, 2, action_display, renderer.white())
renderer.end_rendering()
``` |
{
"source": "joshliu11/DSGN",
"score": 3
} |
#### File: dsgn/dataloader/KITTILoader3D.py
```python
import numpy as np
from dsgn.dataloader.kitti_dataset import kitti_dataset as kittidataset
def get_kitti_annos(labels,
# ignore_van_and_personsitting=False,
# ignore_smaller=True,
# ignore_occlusion=True,
ignore_van_and_personsitting=False,
ignore_smaller=False,
ignore_occlusion=False,
ignore_truncation=True,
valid_classes=[1,2,3,4]):
assert not ignore_occlusion # 6150 occlusion should be induced
boxes = []
box3ds = []
ori_classes = []
for i, label in enumerate(labels):
# 4 will be ignored.
if label.type == 'Pedestrian' or label.type == 'Person_sitting': typ = 1
elif label.type == 'Car' or label.type == 'Van': typ = 2
elif label.type == 'Cyclist': typ = 3
elif label.type == 'DontCare': typ = 4
elif label.type in ['Misc', 'Tram', 'Truck']: continue
else:
raise ValueError('Invalid Label.')
# only train Car or Person
if typ != 4 and typ not in set(valid_classes) - set([4]):
continue
if ignore_van_and_personsitting and (label.type == 'Van' or label.type == 'Person_sitting'):
typ = 4
if ignore_smaller and label.box2d[3] - label.box2d[1] <= 10.:
typ = 4
if ignore_occlusion and label.occlusion >= 3:
typ = 4
if ignore_truncation and label.truncation >= 0.98:
typ = 4
if typ not in valid_classes:
continue
boxes.append( np.array(label.box2d) )
box3ds.append( np.array(label.box3d[[3,4,5, 0,1,2, 6]]) )
ori_classes.append( typ )
boxes[-1][2:4] = boxes[-1][2:4] - boxes[-1][0:2]
# if typ == 4:
# box3ds[-1] = np.zeros((7,))
boxes = np.asarray(boxes, dtype=np.float32)
box3ds = np.asarray(box3ds, dtype=np.float32)
ori_classes = np.asarray(ori_classes, dtype=np.int32)
# inds = ori_classes.argsort()
# boxes = boxes[inds]
# box3ds = box3ds[inds]
# ori_classes = ori_classes[inds]
return boxes, box3ds, ori_classes
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath, train_file, depth_disp=False, cfg=None, is_train=False, generate_target=False):
kitti_dataset = kittidataset('trainval').train_dataset
left_fold = 'image_2/'
right_fold = 'image_3/'
if depth_disp:
disp_L = 'depth/'
else:
disp_L = 'disparity/'
with open(train_file, 'r') as f:
train_idx = [x.strip() for x in f.readlines()]
if is_train or generate_target:
filter_idx = []
if cfg.RPN3D_ENABLE:
for image_index in train_idx:
labels = kitti_dataset.get_label_objects(int(image_index))
boxes, box3ds, ori_classes = get_kitti_annos(labels,
valid_classes = cfg.valid_classes)
if len(box3ds) > 0:
filter_idx.append(image_index)
train_idx = filter_idx
left_train = [filepath + '/' + left_fold + img + '.png' for img in train_idx]
right_train = [filepath + '/' + right_fold + img + '.png' for img in train_idx]
disp_train_L = [filepath + '/' + disp_L + img + '.npy' for img in train_idx]
return left_train, right_train, disp_train_L
``` |
{
"source": "joshlk/asyn_requests",
"score": 3
} |
#### File: asyn_requests/async_requests/async_requests_scrapy.py
```python
import multiprocessing
from multiprocessing import Queue
from queue import Full, Empty  # the module was named `Queue` on Python 2
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.utils.spider import DefaultSpider
def _spawn_spider(queue, urls):
responses = []
class FetchSpider(DefaultSpider):
name = 'fetch_spider'
start_urls = urls
def parse(self, response):
responses.append(response)
spider = FetchSpider()
settings = Settings()
settings.set('DOWNLOAD_HANDLERS', {'s3': None})
crawler_process = CrawlerProcess(settings)
crawler_process.crawl(spider)
crawler_process.start()
# Put into queue a bit at a time to stop deadlock due to the queue being full
for response in responses:
while True:
try:
queue.put_nowait(response)
break
except Full:
pass
def fetch(urls):
"""
Fetch a list of URLs asynchronously
:param urls: List of URLs
:return: List of scrapy.http.response.html.HtmlResponse
"""
# Start in separate process as Twisted reactor cannot be started once stopped previously
queue = Queue()
p = multiprocessing.Process(target=_spawn_spider, args=(queue, urls))
p.start()
# Collect data while process is still running to prevent the queue becoming full and creating a deadlock
responses = []
while True:
queue_empty = False
try:
response = queue.get_nowait()
responses.append(response)
except Empty:
queue_empty = True
is_dead = not p.is_alive()
if queue_empty and is_dead:
break
return responses
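# Illustrative usage (the URLs are hypothetical):
#   responses = fetch(['https://example.org', 'https://example.com'])
#   for response in responses:
#       print(response.url, len(response.body))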
``` |
{
"source": "joshlk/jenkins-githubaction",
"score": 2
} |
#### File: joshlk/jenkins-githubaction/main.py
```python
import os
from api4jenkins import Jenkins
import logging
import json
from time import time, sleep
log_level = os.environ.get('INPUT_LOG_LEVEL', 'INFO')
logging.basicConfig(format='JENKINS_ACTION: %(message)s', level=log_level)
def main():
# Required
url = os.environ["INPUT_URL"]
job_name = os.environ["INPUT_JOB_NAME"]
# Optional
username = os.environ.get("INPUT_USERNAME")
api_token = os.environ.get("INPUT_API_TOKEN")
parameters = os.environ.get("INPUT_PARAMETERS")
cookies = os.environ.get("INPUT_COOKIES")
wait = bool(os.environ.get("INPUT_WAIT"))
timeout = int(os.environ.get("INPUT_TIMEOUT"))
start_timeout = int(os.environ.get("INPUT_START_TIMEOUT"))
interval = int(os.environ.get("INPUT_INTERVAL"))
if username and api_token:
auth = (username, api_token)
else:
auth = None
logging.info('Username or token not provided. Connecting without authentication.')
if parameters:
try:
parameters = json.loads(parameters)
except json.JSONDecodeError as e:
raise Exception('`parameters` is not valid JSON.') from e
else:
parameters = {}
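    # INPUT_PARAMETERS is expected to hold a JSON object of build parameters,
    # e.g. '{"BRANCH": "main"}' (the parameter name here is hypothetical).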
if cookies:
try:
cookies = json.loads(cookies)
except json.JSONDecodeError as e:
raise Exception('`cookies` is not valid JSON.') from e
else:
cookies = {}
jenkins = Jenkins(url, auth=auth, cookies=cookies)
try:
jenkins.version
except Exception as e:
raise Exception('Could not connect to Jenkins.') from e
logging.info('Successfully connected to Jenkins.')
queue_item = jenkins.build_job(job_name, **parameters)
logging.info('Requested to build job.')
t0 = time()
sleep(interval)
while time() - t0 < start_timeout:
build = queue_item.get_build()
if build:
break
logging.info(f'Build not started yet. Waiting {interval} seconds.')
sleep(interval)
else:
raise Exception(f"Could not obtain build and timed out. Waited for {start_timeout} seconds.")
build_url = build.url
logging.info(f"Build URL: {build_url}")
print(f"::set-output name=build_url::{build_url}")
print(f"::notice title=build_url::{build_url}")
if not wait:
logging.info("Not waiting for build to finish.")
return
t0 = time()
sleep(interval)
while time() - t0 < timeout:
result = build.result
if result == 'SUCCESS':
logging.info(f'Build successful 🎉')
return
elif result in ('FAILURE', 'ABORTED', 'UNSTABLE'):
raise Exception(f'Build status returned "{result}". Build has failed ☹️.')
logging.info(f'Build not finished yet. Waiting {interval} seconds. {build_url}')
sleep(interval)
else:
raise Exception(f"Build has not finished and timed out. Waited for {timeout} seconds.")
if __name__ == "__main__":
main()
``` |
{
"source": "joshlk/many_requests",
"score": 3
} |
#### File: many_requests/many_requests/easy_async.py
```python
from itertools import repeat, count
from typing import Iterable, Optional, Any, Coroutine, Callable
import trio
from tqdm.auto import tqdm
from many_requests.common import N_WORKERS_DEFAULT, is_collection
def delayed(func: Callable[..., Coroutine]):
"""
Decorator used to capture an async function with arguments and delay its execution.
Examples:
>>> import asks
>>> func_, args_, kwargs_ = delayed(asks.request)('GET', url='https://example.org')
>>> assert func_ == asks.request
>>> assert args_ == ('GET',)
>>> assert kwargs_ == {'url': 'https://example.org'}
"""
def delayed_func(*args, **kwargs):
return func, args, kwargs
return delayed_func
def zip_kw(**kwargs):
"""
    Return an iterator of dictionaries, where the Nth dictionary pairs together the Nth item of each
    iterable specified in `kwargs`. Like `zip`, but yielding dicts instead of tuples.
    Keyword arguments that are strings or non-iterables are automatically repeated for every dictionary.
    `zip_kw` stops when the shortest iterable has been exhausted.
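    Examples:
        Illustrative sketch of the pairing described above (the values are made up);
        `a` is the only collection, so `b` and `c` are repeated for each item:
        >>> list(zip_kw(a=[1, 2], b='x', c=0))
        [{'a': 1, 'b': 'x', 'c': 0}, {'a': 2, 'b': 'x', 'c': 0}]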
"""
has_collection = False
for k, v in kwargs.items():
if not is_collection(v):
kwargs[k] = repeat(v)
else:
has_collection = True
kwargs[k] = iter(v)
counter = count() if has_collection else range(1)
try:
for i in counter:
dict_item = {k: next(v) for k, v in kwargs.items()}
yield dict_item
except StopIteration:
pass
class EasyAsync:
def __init__(self, n_workers: int = N_WORKERS_DEFAULT):
"""
Dead simple parallel execution of async coroutines.
        `n_workers` workers are dispatched and asynchronously process each of the given tasks.
Args:
n_workers: Number of workers to use
Examples:
Each of the 10 tasks sleeps for `i` seconds asyncronosly:
>>> EasyAsync(n_workers = 4)(delayed(trio.sleep)(i) for i in range(10))
Each task calculates `isclose` with a different `a` and `b` paramter. `abs_tol` is set to 4 for all tasks:
>>> from math import isclose
>>> async def isclose_(a, b, abs_tol): return isclose(a=a, b=b, abs_tol=abs_tol)
>>> EasyAsync(n_workers = 4)(
>>> delayed(isclose_)(**kwargs)
>>> for kwargs in zip_kw(a=range(10), b=range(10)[::-1], abs_tol=4))
"""
self.n_workers = n_workers
self.progress_bar = None
def __call__(self, tasks: Iterable[delayed], length: Optional[int] = None):
"""
Execute given coroutine `tasks` using workers. The order of output will match the order of input.
Args:
tasks: A sequence of coroutines to execute
            length: The number of tasks. If not specified it will try to obtain the length automatically.
                Used for the progress bar.
Returns:
A list of outputs from each task. The order of items is determined by the input.
"""
if length is None:
try:
length = len(tasks)
except TypeError:
pass
self.progress_bar = tqdm(total=length, smoothing=0)
        self.tasks = enumerate(iter(tasks))  # Force the sequence to be an iterator
self.outputs = []
trio.run(self._worker_nursery)
# Order output and remove idx
self.outputs = sorted(self.outputs, key=lambda e: e[0])
self.outputs = [e[1] for e in self.outputs]
return self.outputs
async def _worker_nursery(self):
"""Start a trio nursery with n workers"""
async with trio.open_nursery() as nursery:
for i in range(self.n_workers):
nursery.start_soon(self._worker)
async def _worker(self):
"""Execute tasks until exhausted"""
for idx, task in self.tasks:
function, args, kwargs = task
output = await function(*args, **kwargs)
self.outputs.append((idx, output))
self._progress_update(1)
def _progress_update(self, n: int):
"""Increment progress bar"""
if self.progress_bar is not None:
self.progress_bar.update(n)
``` |
{
"source": "joshlk/vtext",
"score": 2
} |
#### File: vtext/benchmarks/bench_tokenizers.py
```python
from time import time
from glob import glob
from pathlib import Path
import re
from vtext.tokenize import RegexpTokenizer
from vtext.tokenize import UnicodeWordTokenizer
from vtext.tokenize import VTextTokenizer
from vtext.tokenize import CharacterTokenizer
try:
import sacremoses
except ImportError:
sacremoses = None
try:
import spacy
except ImportError:
spacy = None
try:
import blingfire
except ImportError:
blingfire = None
base_dir = Path(__file__).parent.parent.resolve()
if __name__ == "__main__":
input_files = list(glob(str(base_dir / "data" / "*" / "*")))
data = []
for file_path in input_files:
with open(file_path, "rt") as fh:
data.append(fh.read())
assert len(data) > 0
token_regexp = r"\b\w\w+\b"
dataset_size = 91 # MB for 20 newsgroup dataset
print("# Tokenizing {} documents".format(len(data)))
def pyre_tokenizer(txt):
return list(re.compile(token_regexp).findall(txt))
db = [
(r"Python re.findall(r'\b\w\w+\b', ...)", pyre_tokenizer),
(
r"RegexpTokenizer(r'\b\w\w+\b')",
RegexpTokenizer(pattern=token_regexp).tokenize,
),
(
"UnicodeWordTokenizer(word_bounds=False)",
UnicodeWordTokenizer(word_bounds=False).tokenize,
),
(
"UnicodeWordTokenizer(word_bounds=True)",
UnicodeWordTokenizer(word_bounds=True).tokenize,
),
("VTextTokenizer('en')", VTextTokenizer("en").tokenize),
("CharacterTokenizer(4)", CharacterTokenizer(4).tokenize),
]
if sacremoses is not None:
db.append(("MosesTokenizer()", sacremoses.MosesTokenizer().tokenize))
if spacy is not None:
from spacy.lang.en import English
db.append(("Spacy en", English().tokenizer))
if blingfire is not None:
db.append(("BlingFire en", lambda x: blingfire.text_to_words(x).split(" ")))
for label, func in db:
t0 = time()
out = []
for idx, doc in enumerate(data):
out.append(func(doc))
dt = time() - t0
n_tokens = sum(len(tok) for tok in out)
print(
"{:>45}: {:.2f}s [{:.1f} MB/s, {:.0f} kWPS]".format(
label, dt, dataset_size / dt, n_tokens * 1e-3 / dt
)
)
```
#### File: vtext/tests/test_vectorize.py
```python
import pickle
from numpy.testing import assert_array_equal
import pytest
from vtext.vectorize import HashingVectorizer, CountVectorizer
def test_count_vectorizer():
text = ["some sentence", "a different sentence"]
vect = CountVectorizer()
vect.fit(text)
X2 = vect.transform(text)
vect = CountVectorizer()
X = vect.fit_transform(text)
assert X.nnz == 4
assert_array_equal(X.indices, X2.indices)
def test_hashing_vectorizer():
text = ["some sentence", "a different sentence"]
vect = HashingVectorizer()
vect.fit(text)
X2 = vect.transform(text)
vect = HashingVectorizer()
X = vect.fit_transform(text)
assert X.nnz == 4
assert_array_equal(X.indices, X2.indices)
@pytest.mark.parametrize("Estimator", [HashingVectorizer])
def test_pickle_vectorizers(Estimator):
vect = Estimator()
out = pickle.dumps(vect)
pickle.loads(out)
@pytest.mark.parametrize("Estimator", [HashingVectorizer, CountVectorizer])
def test_vectorizers_n_jobs(Estimator):
"""Check that parallel feature ingestion works"""
text = ["Εν οίδα ότι ουδέν οίδα"]
vect = Estimator(n_jobs=2)
vect.fit(text)
vect.transform(text)
with pytest.raises(ValueError, match="n_jobs=0 must be a integer >= 1"):
Estimator(n_jobs=0).fit(text)
``` |
{
"source": "joshlong/joshlong-calendar-processor",
"score": 3
} |
#### File: joshlong-calendar-processor/google/sheets.py
```python
import os.path
import pickle
import googleapiclient.discovery
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
class GoogleSheet(object):
USER_ENTERED = 'USER_ENTERED'
INPUT_VALUE_OPTION_UNSPECIFIED = 'INPUT_VALUE_OPTION_UNSPECIFIED'
RAW = 'RAW'
    # TODO: can this logic for obtaining a token be extracted
    # out across the two different clients?
@staticmethod
def _obtain_token(credentials_config_str: str, pickle_path_fn: str) -> Credentials:
scopes = ['https://www.googleapis.com/auth/drive']
credentials: Credentials = None
if os.path.exists(pickle_path_fn):
with open(pickle_path_fn, 'rb') as token:
credentials = pickle.load(token)
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
flow = InstalledAppFlow.from_client_config(credentials_config_str, scopes)
credentials = flow.run_local_server(port=0)
with open(pickle_path_fn, 'wb') as token:
pickle.dump(credentials, token)
return credentials
def write_values(self, spreadsheet_range: str, input_option: str, values: list):
body = {'values': values}
result = self.service.spreadsheets().values().update(
spreadsheetId=self.id,
range=spreadsheet_range,
valueInputOption=input_option,
body=body) \
.execute()
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
def read_values(self, spreadsheet_range: str) -> list:
sheet = self.service.spreadsheets()
result = sheet.values().get(
spreadsheetId=self.id, range=spreadsheet_range).execute()
return result.get('values', [])
def __init__(self, credentials: Credentials, spreadsheet_id: str):
assert credentials is not None, 'the credentials must be valid'
assert spreadsheet_id is not None, 'the spreadsheet_id must be valid'
self.service: googleapiclient.discovery.Resource = build('sheets', 'v4',
credentials=credentials)
self.id = spreadsheet_id
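# Illustrative usage (spreadsheet id, range and pickle path are hypothetical):
#   credentials = GoogleSheet._obtain_token(credentials_config_str, 'token.pickle')
#   sheet = GoogleSheet(credentials, spreadsheet_id='1AbC...')
#   rows = sheet.read_values('Sheet1!A1:C10')
#   sheet.write_values('Sheet1!A1:B1', GoogleSheet.USER_ENTERED, [['name', 'value']])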
``` |
{
"source": "joshlong/screencapture-gif-maker",
"score": 3
} |
#### File: joshlong/screencapture-gif-maker/pc.py
```python
import logging
import threading
import random
import concurrent.futures
import time
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-9s) %(message)s', )
if __name__ == '__main__':
cv = threading.Condition()
max = 10
executor = concurrent.futures.ThreadPoolExecutor(max_workers=max)
finished = []
def handler(ctr):
logging.debug('entering %s' % ctr)
seconds = random.randint(1, 10)
time.sleep(seconds)
logging.debug('slept %s!' % seconds)
with cv:
finished.append(ctr)
if len(finished) == max:
logging.debug('finished length=%s - notifying all' % len(finished))
cv.notify_all()
    for i in range(0, max):
        # pass i as an argument: a bare lambda would late-bind i and the task
        # could run with a different counter value than intended
        executor.submit(handler, i)
    with cv:
        # wait_for re-checks the predicate, so a notify_all that fires before the
        # main thread starts waiting cannot leave it blocked forever
        cv.wait_for(lambda: len(finished) == max)
logging.debug('finished all threads!')
``` |
{
"source": "joshlong/simple-python-github-client",
"score": 3
} |
#### File: simple-python-github-client/github/github.py
```python
import typing
import requests
import json
# This uses the Github v3 API
# * https://developer.github.com/v3/#current-version
# * all dates are in ISO 8601 format
# * uses requests Python package: https://www.edureka.co/blog/python-requests-tutorial/#z3
# * for more on pagination and client errors: https://developer.github.com/v3/#pagination
class ActionsClient(object):
'''
This client supports the endpoints described in
https://developer.github.com/v3/actions/workflow-runs
'''
def __init__(self, parent: 'SimpleGithubClient') -> None:
self.parent = parent
def list_workflow_runs(self,
owner: str,
repo: str,
workflow_file_name_or_id: str,
actor: str = None,
branch: str = None,
event: str = None,
status: str = None) -> typing.Dict:
params = {}
if actor is not None:
params['actor'] = actor
if branch is not None:
params['branch'] = branch
if event is not None:
params['event'] = event
if status is not None:
params['status'] = status
reply = requests.get(
f'{SimpleGithubClient.GH_ROOT}/repos/{owner}/{repo}/actions/workflows/{workflow_file_name_or_id}/runs',
params=params, headers=self.parent.build_headers())
return reply.json()
class ReposClient(object):
def __init__(self, parent: 'SimpleGithubClient'):
self.parent = parent
    def create_repository_dispatch_event(self, owner: str, repo: str, event_type: str,
                                         client_payload: typing.Dict = {}):
        # the dispatch endpoint reads the event type and payload from the JSON body;
        # json.dumps builds that body safely even when values contain quotes or newlines
        json_data = json.dumps({'event_type': event_type, 'client_payload': client_payload})
        response = requests.post(f'{SimpleGithubClient.GH_ROOT}/repos/{owner}/{repo}/dispatches',
                                 data=json_data,
                                 headers=self.parent.build_headers(
                                     {'Accept': 'application/vnd.github.everest-preview+json'}))
        return response
class UsersClient(object):
def __init__(self, parent: 'SimpleGithubClient'):
self.parent = parent
def get_events_for_authenticated_user(self, username: str) -> typing.List[typing.Dict]:
r = requests.get(f'{SimpleGithubClient.GH_ROOT}/users/{username}/events '.strip(),
headers=self.parent.build_headers())
return r.json()
class SimpleGithubClient(object):
GH_ROOT = ' https://api.github.com '.strip()
def repositories(self) -> ReposClient:
return ReposClient(self)
def actions(self) -> ActionsClient:
return ActionsClient(self)
def users(self) -> UsersClient:
return UsersClient(self)
def __init__(self, personal_access_token: str):
self.personal_access_token = personal_access_token
self.default_headers = {'Authorization': f'token {self.personal_access_token}'}
    def build_headers(self, custom_headers: typing.Dict = {}) -> typing.Dict:
        # default headers first, then any custom headers override them
        n_dict = dict(self.default_headers)
        n_dict.update(custom_headers)
        return n_dict
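# Usage sketch (not from the original repo): the token, owner, repo, and workflow file
# name are placeholders; substitute real values before running against the GitHub API.
if __name__ == '__main__':
    client = SimpleGithubClient(personal_access_token='<token>')
    runs = client.actions().list_workflow_runs(
        owner='<owner>', repo='<repo>', workflow_file_name_or_id='ci.yml', branch='main')
    print(runs.get('total_count'))
    client.repositories().create_repository_dispatch_event(
        owner='<owner>', repo='<repo>', event_type='deploy',
        client_payload={'environment': 'staging'})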
``` |
{
"source": "JoshLove-msft/azure-sdk-tools",
"score": 3
} |
#### File: doc-warden/warden/warden_common.py
```python
import os
import fnmatch
import re
import xml.etree.ElementTree as ET
# python 3 transitioned StringIO to be part of `io` module.
# python 2 needs the old version however
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
JS_PACKAGE_DISCOVERY_PATTERN = '*/package.json'
PYTHON_PACKAGE_DISCOVERY_PATTERN = '*/setup.py'
NET_PACKAGE_ROOT_DISCOVERY_PATTERN = '*.sln'
NET_PACKAGE_DISCOVERY_PATTERN = '*.csproj'
JAVA_PACKAGE_DISCOVERY_PATTERN = '*/pom.xml'
# we want to walk the files as few times as possible. as such, for omitted_files, we provide a SET
# of patterns that we want to omit. This function simply checks whether a file path matches any of
# the normalized target patterns.
def check_match(file_path, normalized_target_patterns):
return any([fnmatch.fnmatch(file_path, normalized_target_pattern)
for normalized_target_pattern in normalized_target_patterns])
def get_java_package_roots(configuration):
file_set = get_file_sets(configuration, JAVA_PACKAGE_DISCOVERY_PATTERN, is_java_pom_package_pom)
if configuration.verbose_output:
print(file_set)
return file_set
def get_net_package_roots(configuration):
file_set = get_file_sets(configuration, NET_PACKAGE_ROOT_DISCOVERY_PATTERN)
if configuration.verbose_output:
print(file_set)
return file_set
def get_net_packages(configuration):
file_set = get_file_sets(configuration, NET_PACKAGE_DISCOVERY_PATTERN, is_net_csproj_package)
if configuration.verbose_output:
        print(file_set)
    return file_set
def get_python_package_roots(configuration):
file_set = get_file_sets(configuration, PYTHON_PACKAGE_DISCOVERY_PATTERN)
if configuration.verbose_output:
print(file_set)
return file_set
def get_js_package_roots(configuration):
file_set = get_file_sets(configuration, JS_PACKAGE_DISCOVERY_PATTERN)
if configuration.verbose_output:
print(file_set)
return file_set
# returns the two sets:
# the set of files where we expect a readme to be present
# and the set of files that we expect a readme to be present that have been explicitly omitted
def get_file_sets(configuration, target_pattern, lambda_check = None):
expected_locations = walk_directory_for_pattern(configuration.target_directory, [target_pattern], configuration, lambda_check)
omitted_files = get_omitted_files(configuration)
return list(set(expected_locations) - set(omitted_files)), list(set(omitted_files).intersection(expected_locations))
# gets the set of files in the target directory that have explicitly been omitted in the config settings
def get_omitted_files(configuration):
target_directory = configuration.target_directory
omitted_paths = []
dirs = configuration.omitted_paths or []
# single special case here. if wildcard match at the beginning, do not join, use the pattern as is
adjusted_dirs = [pattern if pattern.startswith("*") else os.path.join(target_directory, pattern) for pattern in dirs]
omitted_paths.extend(walk_directory_for_pattern(target_directory, adjusted_dirs, configuration, None))
return omitted_paths
# convention. omit test projects
def is_net_csproj_package(file_path):
test_proj_exclude = re.compile('.*tests.csproj|.*test.csproj', re.IGNORECASE)
sample_project_exclude = re.compile('.*TestProject.csproj', re.IGNORECASE)
if test_proj_exclude.match(file_path) or sample_project_exclude.match(file_path):
return False
return True
# Returns a list of files under a target directory. The files included will match any of the
# target_patterns AND the lambda_check function.
def walk_directory_for_pattern(target_directory, target_patterns, configuration, lambda_check = None):
expected_locations = []
target_directory = os.path.normpath(target_directory)
normalized_target_patterns = [os.path.normpath(pattern) for pattern in target_patterns]
return_true = lambda x: True
check_function = lambda_check or return_true
# walk the folders, filter to the patterns established
for folder, subfolders, files in os.walk(target_directory):
for file in files:
file_path = os.path.join(folder, file)
if check_match(file_path, normalized_target_patterns):
if configuration.verbose_output:
print('Pattern matched {}. Running Check Function.'.format(file_path))
if check_function(file_path):
expected_locations.append(file_path)
return expected_locations
# given a file location or folder, check within or alongside for a target file
# case insensitive
def find_alongside_file(file_location, target):
if not os.path.exists(file_location) or not target:
return False
rule = re.compile(fnmatch.translate(target), re.IGNORECASE)
containing_folder = ''
if os.path.isdir(file_location):
        # we're already looking at a directory. just check for the presence of target in listdir
containing_folder = file_location
else:
# os.path.listdir(os.path.dirname(file_location))
containing_folder = os.path.dirname(file_location)
for file in os.listdir(containing_folder):
if file.lower() == target.lower():
return os.path.normpath(os.path.join(containing_folder, file))
return False
# finds the first file that matches a glob pattern under a target file's location
# case insensitive
def find_below_file(glob_pattern, file):
if not os.path.exists(file) or not glob_pattern or os.path.isdir(file):
return None
rule = re.compile(fnmatch.translate(glob_pattern), re.IGNORECASE)
target_directory = os.path.dirname(file)
for folder, subfolders, files in os.walk(target_directory):
for file in files:
file_path = os.path.join(folder, file)
if rule.match(file):
return file_path
# searches upwards from a specified file for a pattern
# glob_pattern is the pattern we're matching against. often just a filename
# file is the file we're starting from
# path_exclusion_list is the list of paths we should hard stop traversing up on if we haven't already exited
# early_exit_lambda_check is a specific check that isn't only based on the file. for .net we check to see if a .sln is present in the directory
def find_above_file(glob_pattern, file, path_exclusion_list, early_exit_lambda_check, root_directory):
if not os.path.exists(file) or not glob_pattern or os.path.isdir(file):
return None
if (path_exclusion_list is None or len(path_exclusion_list) == 0) and early_exit_lambda_check is None:
print('Using find_above_file without at least one member set for package_indexing_traversal_stops in .docsettings OR setting an early_exit_lambda_check is disallowed. Exiting.')
exit(1)
complete_exclusion_list = path_exclusion_list + [root_directory]
if early_exit_lambda_check is None:
early_exit_lambda_check = lambda path: True
target_rule = re.compile(fnmatch.translate(glob_pattern), re.IGNORECASE)
file_dir = os.path.dirname(file)
while not check_folder_against_exclusion_list(file_dir, complete_exclusion_list):
for file in os.listdir(file_dir):
if target_rule.match(file):
return os.path.normpath(os.path.join(file_dir, file))
# the early_exit_lambda check runs after we're done scanning the current directory for matches
if early_exit_lambda_check(file_dir):
return None
file_dir = os.path.abspath(os.path.join(file_dir, '../'))
return None
# True if folder matches anything in the exclusion list
# False if not
def check_folder_against_exclusion_list(folder, path_exclusion_list):
if not os.path.isdir(folder):
return True
return os.path.normpath(folder) in path_exclusion_list
# given a pom.xml, crack it open and ensure that it is actually a package pom (versus a parent pom)
def is_java_pom_package_pom(file_path):
root = parse_pom(file_path)
jar_tag = root.find('packaging')
if jar_tag is not None:
return jar_tag.text == 'jar'
return False
# namespaces in xml really mess with xmlTree: https://bugs.python.org/issue18304
# this function provides a workaround for both parsing an xml file as well as REMOVING said namespaces
def parse_pom(file_path):
try:
with open(file_path, 'r', encoding='utf-8') as f:
xml = f.read()
except Exception as ex:
print('Invalid XML in {}'.format(file_path))
raise ex
it = ET.iterparse(StringIO(xml))
for _, el in it:
if '}' in el.tag:
el.tag = el.tag.split('}', 1)[1]
return it.root
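# Usage sketch (not from the original repo): warden normally builds its configuration
# from a .docsettings file; the tiny stand-in object below only carries the attributes
# these helpers actually read (target_directory, omitted_paths, verbose_output).
if __name__ == '__main__':
    class _DemoConfig(object):
        target_directory = '.'
        omitted_paths = ['*/samples/*']
        verbose_output = False
    expected, omitted = get_python_package_roots(_DemoConfig())
    print('{} python packages found, {} explicitly omitted'.format(len(expected), len(omitted)))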
``` |
{
"source": "joshloyal/Aeolus",
"score": 3
} |
#### File: baumkuchen/models/autoencoder.py
```python
import keras.backend as K
from keras.layers import Input, Dense, Lambda, Dropout
from keras.layers.noise import GaussianNoise
from keras.models import Model
from keras import regularizers
import numpy as np
def noise_output_shape(input_shape):
return tuple(input_shape)
def gaussian_noise(x, mean=0.0, std=0.1, random_state=1234):
return x + K.random_normal(K.shape(x), mean=mean, std=std, seed=random_state)
def AutoEncoder(input_dim, encoding_dim, add_noise=None, dropout_proba=None, l1=1e-4):
model_input = Input(shape=(input_dim,))
if add_noise is not None:
x = Lambda(add_noise, output_shape=noise_output_shape)(model_input)
else:
x = model_input
if l1 is not None:
encoded = Dense(encoding_dim, activation='relu',
activity_regularizer=regularizers.activity_l1(l1))(x)
else:
encoded = Dense(encoding_dim, activation='relu')(x)
if dropout_proba:
encoded = Dropout(dropout_proba)(encoded)
decoded = Dense(input_dim, activation='sigmoid')(encoded)
autoencoder = Model(input=model_input, output=decoded)
autoencoder.compile(optimizer='adadelta',
loss='binary_crossentropy',
metrics=['accuracy'])
return autoencoder
if __name__ == '__main__':
model = AutoEncoder(input_dim=784, encoding_dim=32)
``` |
{
"source": "joshloyal/ClumPy",
"score": 2
} |
#### File: ClumPy/clumpy/analysis.py
```python
import numpy as np
from sklearn.decomposition import RandomizedPCA, MiniBatchSparsePCA
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize, MinMaxScaler
import clumpy
class Cluster(object):
def __init__(self, numeric_columns=[], categorical_columns=[]):
self.numeric_columns = numeric_columns
self.categorical_columns = categorical_columns
self.clusterer_ = None
self.importances_ = None
    @property
    def feature_names(self):
        return self.numeric_columns + self.categorical_columns
@property
def n_clusters(self):
return self.clusterer_.n_clusters
def find_clusters(self, df):
X = np.hstack([X for X in clumpy.preprocessing.process_data(df) if X is not None])
# reduction using pca
#pca = RandomizedPCA(n_components=50, random_state=123, iterated_power=7)
pca = TruncatedSVD(n_components=50, random_state=123)
scaled_X = pca.fit_transform(X)
scaled_X = MinMaxScaler().fit_transform(scaled_X)
#pca = MiniBatchSparsePCA(n_components=50, alpha=0.8, n_iter=100, random_state=123)
#scaled_X = np.hstack((X[:, :len(num_columns)], pca_X))
#scaled_X = scaled_X - np.mean(scaled_X, axis=0)
#max_x = np.max(np.abs(scaled_X), axis=0)
#max_x[max_x == 0] = 1.
#scaled_X = scaled_X / max_x
#ptp_scale = np.ptp(scaled_X, axis=0)
#ptp_scale[ptp_scale == 0] = 1.
#scaled_X /= ptp_scale
#scaled_X = normalize(scaled_X, norm='l2', axis=1, copy=False)
#self.clusterer_ = clumpy.cluster.auto_kmeans(scaled_X, n_clusters=[2, 3, 4])
#self.find_rules(X)
#self.rules_ = clumpy.rules.prim_descriptions(
# data[self.numeric_columns + self.categorical_columns], self.clusterer_.labels_, feature_names=self.importances_)
##self.rules_ = clumpy.rules.tree_descriptions(
# data[self.feature_names], self.clusterer_.labels_,
# categorical_columns=self.categorical_columns,
# feature_names=self.importances_)
tsne = TSNE(n_components=2, random_state=1234, verbose=True)
self.embedding_ = tsne.fit_transform(scaled_X)
self.embedding_ -= np.mean(self.embedding_, axis=0)
#self.clusterer_ = clumpy.cluster.auto_kmeans(self.embedding_, n_clusters=[2, 3, 4])
def find_rules(self, X):
self.importances_ = clumpy.importance.anova_importance(
X,
self.clusterer_.labels_,
feature_names=self.feature_names,
n_features=5)
def cluster(X, numeric_columns=None, categorical_columns=None):
clusterer = Cluster(numeric_columns=numeric_columns,
categorical_columns=categorical_columns)
clusterer.find_clusters(X)
return clusterer
def plot(clusterer, data, cluster_id):
cluster_importances = clusterer.importances_[cluster_id]
cat_vars = [var for var in cluster_importances if var in clusterer.categorical_columns]
num_vars = [var for var in cluster_importances if var in clusterer.numeric_columns]
return clumpy.plots.plot_cluster_statistics(
cluster_labels=clusterer.clusterer_.labels_,
cluster_id=cluster_id,
data=data,
scale=True,
quant_var=num_vars,
qual_var=cat_vars,
figsize=(15,15))
```
#### File: ClumPy/clumpy/importance.py
```python
from __future__ import division
import collections
import numpy as np
import scipy.stats as stats
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
def top_k_features(estimator, features=None, top_k=None):
"""top_k features from a forest ensemble."""
importances = estimator.feature_importances_
sorted_features = np.argsort(importances)[::-1]
if top_k is not None:
sorted_features = sorted_features[:top_k]
if features is not None:
return [features[index] for index in sorted_features]
return sorted_features
def ova_forest_importance(X, cluster_labels, features=None, top_k=None):
"""Determine distinguishing cluster features based on
RandomForest feature importances.
"""
# fit a One-Vs-Rest classifier to distinguish clusters
cluster_classifier = OneVsRestClassifier(
estimator=RandomForestClassifier(n_estimators=100, n_jobs=-1))
cluster_classifier.fit(X, cluster_labels)
feature_importance = [top_k_features(estimator,
features=features,
top_k=top_k) for estimator in
cluster_classifier.estimators_]
return feature_importance
def ttest_importance(X,
cluster_labels,
feature_names=None,
n_features=5):
"""ttest_importance
t-test takes clusters vs. overall statistics and looks at
deviations of each variable between there means.
Returns
-------
importances: dict
Returns a dict mapping cluster_id to list of top n features.
"""
    importances = {}
    n_columns = X.shape[1]
    cluster_ids = np.unique(cluster_labels)
    for cluster_id in cluster_ids:
        cluster_mask = (cluster_labels == cluster_id)
        in_cluster = X[cluster_mask]
        out_cluster = X  # [~cluster_mask]
        pvalues = np.empty(n_columns)
        for col in xrange(n_columns):
            pvalues[col] = stats.ttest_ind(
                in_cluster[:, col], out_cluster[:, col])[1]
        # apply Bonferroni correction and keep the n_features lowest p-values
        sig_indices = np.where(pvalues < (0.05 / n_columns))[0]
        top_k = sig_indices[np.argsort(pvalues[sig_indices])][:n_features]
        importances[cluster_id] = [
            feature for idx, feature in enumerate(feature_names) if
            idx in top_k]
    return importances
def anova_importance(X, cluster_labels, feature_names=None, n_features=5):
"""anova_importance
Use the cluster ids as the dependent variables and do a one-way anova
or t-test to determine significant deviations. May need to do (cluster
not cluster) to do this on a per cluster basis.
ANOVA takes variable vs. cluster id and determines significance.
Returns
-------
importances: dict
Returns a dict mapping cluster_id to list of top n features.
"""
importances = {}
cluster_ids = np.unique(cluster_labels)
for cluster_id in cluster_ids:
selector = SelectKBest(score_func=f_classif, k=n_features)
selector.fit(X, cluster_labels == cluster_id)
if feature_names:
importances[cluster_id] = [feature_names[support_id] for
support_id in
selector.get_support(indices=True)]
else:
importances[cluster_id] = selector.get_support(indices=True)
return importances
def relevance_score(cluster_proba, marginal_proba, alpha):
if cluster_proba == 0.0 and marginal_proba == 0.0:
return np.nan
else:
return (alpha * np.log(cluster_proba) +
(1 - alpha) * np.log(cluster_proba / marginal_proba))
def single_cluster_relevance(
column, cluster_labels, cluster_id, data,
marginal_probas=None, n_features=5, alpha=0.3):
X = data[column].values
levels = np.unique(X)
if marginal_probas is None:
n_samples = X.shape[0]
levels = np.unique(X)
marginal_probas = {}
for level in levels:
marginal_probas[level] = X[X == level].shape[0] / n_samples
cluster_X = X[cluster_labels == cluster_id]
n_samples_cluster = cluster_X.shape[0]
rel = {}
for level in levels:
cluster_proba = cluster_X[cluster_X == level].shape[0] / n_samples_cluster
rel[level] = relevance_score(
cluster_proba, marginal_probas[level], alpha)
rel = sorted(rel.items(), key=lambda x: x[1])[::-1]
return [r[0] for r in rel if np.isfinite(r[1])][:n_features]
def categorical_relevance(column, cluster_labels, data, n_features=5, alpha=0.3):
X = data[column].values
cluster_ids = np.unique(cluster_labels)
# calculate marginal statistics
n_samples = X.shape[0]
levels = np.unique(X)
marginal_probas = {}
for level in levels:
marginal_probas[level] = X[X == level].shape[0] / n_samples
relevance = {cluster_id: single_cluster_relevance(
column, cluster_labels, cluster_id, data,
marginal_probas=marginal_probas) for
cluster_id in cluster_ids}
return relevance
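# Usage sketch (not from the original repo): random data and random cluster labels stand
# in for a real clustering here, purely to show the call signatures and return values.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 6)
    labels_demo = rng.randint(0, 3, size=200)
    names = ['feat_{}'.format(i) for i in range(6)]
    print(anova_importance(X_demo, labels_demo, feature_names=names, n_features=3))
    print(ova_forest_importance(X_demo, labels_demo, features=names, top_k=3))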
```
#### File: ClumPy/clumpy/preprocessing.py
```python
from __future__ import division
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
import pandas as pd
from clumpy.unique_threshold import UniqueThreshold
from clumpy.datasets.utils import ordinal_columns, continous_columns, categorical_columns
def column_atleast_2d(array):
if array.ndim == 1:
return array.reshape(-1, 1)
return array
def fit_encode_1d(y, strategy='frequency'):
y = column_atleast_2d(y)
levels = np.unique(y)
    # FIXME: searchsorted doesn't work here...
if strategy == 'frequency':
frequencies = [np.sum(y == level) for level in levels]
levels = levels[np.argsort(frequencies)]
return levels
def transform_encode_1d(y, fit_levels):
levels = np.unique(y)
if len(np.intersect1d(levels, fit_levels)) < len(levels):
diff = np.setdiff1d(levels, fit_levels)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(fit_levels, y).reshape(-1, 1)
def inverse_encode_1d(y, fit_levels):
"""There is probably a built in numpy method for this..."""
vec_map = np.vectorize(lambda x: fit_levels[int(x)])
return vec_map(y).reshape(-1, 1)
class OrdinalEncoder(BaseEstimator, TransformerMixin):
def __init__(self, strategy='random'):
self.strategy = strategy
self.level_map = []
def fit(self, X):
X = column_atleast_2d(X)
self.n_features_ = X.shape[1]
self.level_map = [
fit_encode_1d(X[:, column_idx], strategy=self.strategy) for
column_idx in xrange(self.n_features_)]
return self
def transform(self, X):
X = column_atleast_2d(X)
if X.shape[1] != self.n_features_:
raise ValueError("Different number of features at transform time.",
" n_features_transform= %d" % X.shape[1],
" and n_features_fit= %d" % self.n_features_)
return np.hstack([transform_encode_1d(X[:, column_idx], levels) for
column_idx, levels in enumerate(self.level_map)])
def inverse_transform(self, X):
X = column_atleast_2d(X)
if X.shape[1] != self.n_features_:
raise ValueError("Different number of features at transform time.",
" n_features_transform= %d" % X.shape[1],
" and n_features_fit= %d" % self.n_features_)
encoding = np.hstack([inverse_encode_1d(X[:, column_idx], levels) for
column_idx, levels in enumerate(self.level_map)])
return encoding
class ArbitraryImputer(BaseEstimator, TransformerMixin):
def __init__(self, impute_value):
self.impute_value = impute_value
def fit(self, X):
return self
def transform(self, X):
mask = np.isfinite(X)
if ~np.all(mask):
np.putmask(X, ~mask, self.impute_value)
return X
def median_impute(X, strategy='median'):
imputer = Imputer(strategy=strategy, missing_values='NaN')
return imputer.fit_transform(X)
def scale_values(X, strategy='standardize'):
if strategy == 'standardize':
scaler = StandardScaler()
elif strategy == 'center':
scaler = StandardScaler(with_mean=True, with_std=False)
elif strategy == 'minmax':
scaler = MinMaxScaler()
else:
raise ValueError('Unrecognized scaling strategy `%s`.' % strategy)
return scaler.fit_transform(X)
def remove_low_variance(X, threshold=0.0):
"""Remove columns with low variance."""
selector = VarianceThreshold(threshold=threshold)
return selector.fit_transform(X)
def remove_low_info(X, max_frequency=0.99):
"""remove columns that have too much variance (a lot of unique values)"""
selector = UniqueThreshold(max_frequency=max_frequency)
return selector.fit_transform(X)
def encode_values(X, strategy='onehot'):
if strategy == 'onehot':
return pd.get_dummies(X, dummy_na=True).values
elif strategy == 'none':
return X.values
else:
raise ValueError('Unrecognized encoding strategy `%s`.' % strategy)
def process_continous(X):
"""Continous numeric value preprocessing."""
# missing value imputation
X = median_impute(X, strategy='median')
# remove low variance variables
X = remove_low_variance(X)
# scaling
X = scale_values(X, strategy='standardize')
return X.astype(np.float64)
def process_ordinal(X):
"""ordinal numeric value preprocessing."""
# missing value imputation
X = median_impute(X, strategy='median')
# remove any low info columns (high variance)
X = remove_low_info(X)
# remove low variance variables
X = remove_low_variance(X)
# scaling
X = scale_values(X, strategy='standardize')
return X.astype(np.float64)
def process_categorical(X):
# encode categoricals as numeric
X = encode_values(X, strategy='onehot')
# remove any low info columns
X = remove_low_info(X)
# remove low variance variables
X = remove_low_variance(X)
# scaling
#X = scale_values(X, strategy='center')
return X.astype(np.float64)
def process_data(df):
# categorize columns
categorical_cols = categorical_columns(df)
ordinal_cols = ordinal_columns(df)
continous_cols = continous_columns(df)
# pre-process
continous_X, ordinal_X, categorical_X = None, None, None
if categorical_cols:
categorical_X = process_categorical(df[categorical_cols])
if ordinal_cols:
ordinal_X = process_ordinal(df[ordinal_cols].values)
if continous_cols:
continous_X = process_continous(df[continous_cols].values)
return continous_X, ordinal_X, categorical_X
#if num_preprocessing == 'standardize':
# scaler = StandardScaler()
#elif num_preprocessing == 'minmax':
# scaler = MinMaxScaler()
#else:
# scaler = None
#if categorical_columns:
# num_columns = [col for col in X.columns if
# col not in categorical_columns]
# if cat_preprocessing == 'ordinal':
# cat_X = OrdinalEncoder().fit_transform(X[categorical_columns].values)
# else:
# dummy_data = pd.get_dummies(X[categorical_columns], columns=categorical_columns, dummy_na=True)
# categorical_columns = dummy_data.columns.tolist()
# cat_X = dummy_data.values
# if num_columns:
# num_X = imputer.fit_transform(X[num_columns].values)
# if scaler:
# num_X = scaler.fit_transform(num_X)
# return np.hstack((num_X, cat_X)), num_columns, categorical_columns
# return cat_X, [], categorical_columns
#else:
# num_X = imputer.fit_transform(X.values)
# if scaler:
# num_X = scaler.fit_transform(num_X)
# return num_X, X.columns.tolist(), []
```
#### File: clumpy/similarity/cluster_graph.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import networkx as nx
import numpy as np
from clumpy.similarity import jaccard_similarity
def get_induced_partitions(clusterer, data):
"""Return the partition of the dataset induced by a clustering algorithm.
Parameters
----------
clusterer : sklearn style clustering algorithms
This clusterer will be used to partition in the input data.
data : array-like of shape [n_samples, n_features]
The data that the clusterer will label.
Returns:
--------
A list of length clusterer.n_clusters. Each element is the indices
of the data points placed in that cluster.
"""
if hasattr(clusterer, 'predict'):
labels = clusterer.predict(data)
else:
labels = clusterer.labels_
if labels.shape[0] != data.shape[0]:
raise ValueError('Could not get predictions')
return [np.where(labels == cluster_id)[0]
for cluster_id in xrange(clusterer.n_clusters)]
def to_similarity_matrix(clusterer_a, clusterer_b, data):
partitions_a = get_induced_partitions(clusterer_a, data)
partitions_b = get_induced_partitions(clusterer_b, data)
n_clusters_a = clusterer_a.n_clusters
n_clusters_b = clusterer_b.n_clusters
S = np.zeros((n_clusters_a, n_clusters_b), dtype=np.float64)
for cluster_id_a, part_a in enumerate(partitions_a):
for cluster_id_b, part_b in enumerate(partitions_b):
S[cluster_id_a, cluster_id_b] = jaccard_similarity(part_a, part_b)
return S
def to_adjacency_matrix(similarity_matrix):
n_vertices_U, n_vertices_V = similarity_matrix.shape
n_vertices = n_vertices_U + n_vertices_V
adjacency_matrix = np.zeros((n_vertices, n_vertices), dtype=np.float64)
# fill the adjacency matrix
adjacency_matrix[:n_vertices_U, n_vertices_U:] = similarity_matrix
adjacency_matrix[n_vertices_U:, :n_vertices_U] = similarity_matrix.T
return adjacency_matrix
def cluster_similarity(clusterer_a, clusterer_b, data):
similarity_matrix = to_similarity_matrix(clusterer_a, clusterer_b, data)
graph = nx.from_numpy_matrix(to_adjacency_matrix(similarity_matrix))
max_matching = nx.max_weight_matching(graph)
return np.mean([graph.edge[vertex_id][max_matching[vertex_id]]['weight']
for vertex_id in graph if vertex_id in max_matching])
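# Usage sketch (not from the original repo): two KMeans fits on random data are assumed
# only to exercise cluster_similarity; note the module targets the legacy networkx 1.x
# API (graph.edge / dict-returning max_weight_matching), so run it in that environment.
if __name__ == '__main__':
    from sklearn.cluster import KMeans
    rng = np.random.RandomState(0)
    X_demo = rng.rand(300, 4)
    km_a = KMeans(n_clusters=3, random_state=1).fit(X_demo)
    km_b = KMeans(n_clusters=3, random_state=2).fit(X_demo)
    print(cluster_similarity(km_a, km_b, X_demo))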
```
#### File: ClumPy/clumpy/summary.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import numpy as np
import pandas as pd
import scipy.stats as stats
from sklearn.utils import check_random_state
from clumpy.datasets import utils as data_utils
from clumpy.cluster_rules import ova_forest_importance
@np.vectorize
def as_factors(x):
factor_map = {1: 'LOW', 2: 'MEDIUM', 3: 'HIGH'}
return factor_map.get(x, 'UNK')
def mode_aggregate(x):
return stats.mode(x)[0].item()
def bin_numeric_column(X, bins=10, random_state=1234):
X = X.values
n_samples = X.shape[0]
rng = check_random_state(random_state)
X = X + rng.rand(n_samples) * 1e-6
percentiles = np.arange(1, bins-1) * 1. / bins
breaks = stats.mstats.mquantiles(X, np.hstack((0, percentiles, 1)))
X_binned = np.digitize(X, breaks)
#return as_factors(X_binned)
return X_binned
def cluster_summary(df, cluster_labels):
data = df.copy()
# calculate overall statistics
    overall_stats = data.median()
#groupby cluster
data['cluster_id'] = cluster_labels
#numeric_cols = data_utils.numeric_columns(data)
#categorical_cols = data_utils.categorical_columns(data)
#data['cluster'] = clusterer.labels_
#if bin_numeric:
# data[numeric_cols] = data[numeric_cols].apply(bin_numeric_column, axis=1)
# numeric_summary = data[
# numeric_cols + ['cluster']].groupby('cluster').agg(
# mode_aggregate)
#else:
# numeric_summary = data[numeric_cols + ['cluster']].groupby('cluster').median()
## use modes for categoricals
#categorical_summary = data[
# categorical_cols + ['cluster']].groupby('cluster').agg(
# mode_aggregate)
#return pd.concat([numeric_summary, categorical_summary], axis=1)
group_stats = data.groupby('cluster_id').median()
    group_stats.loc['overall'] = overall_stats
return group_stats
def flat_bar(frame, feature_name, class_column):
import matplotlib.pyplot as plt
n_samples = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame[feature_name]
ax = plt.gca()
for sample_idx in range(n_samples):
y = df.iloc[sample_idx].values
``` |
{
"source": "joshloyal/DataView",
"score": 3
} |
#### File: DataView/dataview/data_utils.py
```python
import six
import pandas as pd
import numpy as np
def null_filter(x):
return True
def unflatten_list(llist):
return [l for sublist in llist for l in sublist]
def dtype_dict(dataframe, dtype_filter=None):
column_series = dataframe.columns.to_series()
dtype_groups = column_series.groupby(dataframe.dtypes).groups
if dtype_filter is None:
dtype_filter = null_filter
return {k.name: v for k, v in dtype_groups.items() if dtype_filter(k.name)}
def is_numeric(dtype):
if isinstance(dtype, six.string_types):
try:
dtype = np.dtype(dtype)
except TypeError:
return False
return np.issubdtype(dtype, np.number)
def is_categorical(dtype):
return not is_numeric(dtype)
def numeric_columns(dataframe):
return unflatten_list(dtype_dict(dataframe, is_numeric).values())
def categorical_columns(dataframe):
return unflatten_list(dtype_dict(dataframe, is_categorical).values())
```
#### File: DataView/dataview/wrappers.py
```python
import functools
registry = {}
def register_class(target_class):
registry[target_class.__name__] = target_class
class DataViewMeta(type):
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
register_class(cls)
return cls
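# Usage sketch (not from the original repo): creating a class through DataViewMeta
# registers it by name; ExampleView is a made-up class that only demonstrates the
# registry side effect (calling the metaclass directly keeps this working on both
# Python 2 and Python 3).
ExampleView = DataViewMeta('ExampleView', (object,), {})
assert 'ExampleView' in registry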
``` |
{
"source": "joshloyal/drforest",
"score": 3
} |
#### File: drforest/drforest/plots.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
__all__ = ['plot_local_direction', 'plot_variable_importance',
'plot_local_direction_histogram', 'plot_single_direction']
def label(x, color, label):
ax = plt.gca()
ax.text(0, 0.2, label, color='black', #fontweight="bold",
ha="left", va="center", transform=ax.transAxes)
def plot_variable_importance(importances, plot_type='bar', normalize=False,
names=None, xlabel=None, title=None,
figsize=(8, 6), ax=None):
"""Plot global variable importances."""
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
n_features = importances.shape[0]
if normalize:
importances = importances / np.sum(importances)
    # re-order from largest to smallest
order = np.argsort(importances)
if names is None:
names = ['Feature {}'.format(i + 1) for i in order]
else:
names = names[order]
margin = 0.1 if plot_type == 'lollipop' else 0.0
ax.set_xlim(0, importances.max() + margin)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if plot_type == 'bar':
ax.barh(y=np.arange(n_features), width=importances[order],
color='gray', tick_label=names, height=0.5)
elif plot_type == 'lollipop':
for k in range(n_features):
ax.hlines(y=k, xmin=0, xmax=importances[order[k]],
color='k', linewidth=2)
ax.plot(importances[order[k]], k, 'o', color='k')
ax.axvline(x=0, ymin=0, ymax=1, color='k', linestyle='--')
ax.set_yticks(range(n_features))
ax.set_yticklabels(names)
else:
raise ValueError(
"Unrecognized plot_type. Should be 'bar' or 'lollipop'")
if xlabel is not None:
ax.set_xlabel(xlabel)
else:
ax.set_xlabel('Variable Importance')
if title:
ax.set_title(title)
return fig, ax
def plot_local_direction(importances, sort_features=False, feature_names=None,
figsize=(10, 6), palette='Set3', scale='count',
inner='quartile'):
n_features = importances.shape[1]
if feature_names is None:
feature_names = ["Feature {}".format(i + 1) for i in range(n_features)]
feature_names = np.asarray(feature_names)
if sort_features:
order = np.argsort(np.var(importances, axis=0))[::-1]
importances = importances[:, order]
feature_names = feature_names[order]
    fig, ax = plt.subplots(figsize=figsize)
    data = pd.melt(pd.DataFrame(importances, columns=feature_names))
    sns.violinplot(x='variable', y='value', data=data, palette=palette,
                   scale=scale, inner=inner, ax=ax)
ax.set_xlabel('')
ax.set_ylabel('LPD Loadings', fontsize=18)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
return fig, ax
def plot_local_direction_histogram(
directions, importances=None, feature_names=None,
figsize=(10, 6), color='0.3', bins=30):
"""Plot marginal distribution of local subspace variable importances."""
n_samples, n_features = directions.shape
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0),
"figure.figsize": figsize,
"font.family": "serif"})
if feature_names is None:
feature_names = np.asarray(["Feature {}".format(i + 1) for
i in range(n_features)])
data = pd.DataFrame({'x': directions.T.ravel(),
'g': np.repeat(feature_names, n_samples)})
if importances is not None:
sort_ids = np.argsort(importances)[::-1]
else:
sort_ids = np.argsort(np.var(directions, axis=0))[::-1]
pal = sns.cubehelix_palette(n_features, light=0.8, reverse=True)
g = sns.FacetGrid(data, row='g', hue='g', aspect=15, height=0.5,
palette=pal, xlim=(-1.5, 1.1),
row_order=feature_names[sort_ids],
hue_order=feature_names[sort_ids])
# add histograms
g.map(sns.distplot, "x", hist_kws={'color': color, 'alpha': 1},
bins=bins, kde=False)
g.map(sns.distplot, "x", hist_kws={'color': 'w', 'lw': 1.5}, bins=bins,
kde=False)
g.fig.subplots_adjust(hspace=-0.25)
g.map(label, "x")
g.set_titles("")
g.set_xlabels("Loadings", fontsize=14)
g.set(yticks=[])
g.set(xticks=[-1.0, -0.5, 0, 0.5, 1.0])
g.set_xticklabels([-1.0, -0.5, 0, 0.5, 1.0], size=12)
g.despine(bottom=True, left=True)
return g
def plot_single_direction(
imp_x, feature_names=None, figsize=(10, 12), rotation=None, ax=None):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
color = ['tomato' if x > 0 else 'cornflowerblue' for x in imp_x]
n_features = imp_x.shape[0]
ax.bar(np.arange(n_features), imp_x, color=color)
ax.axhline(0, color='black', linestyle='-', lw=1)
ax.set_ylabel('Importance', fontsize=18)
ax.set_ylim(-1, 1)
ax.set_xlabel('')
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.set_xticks(np.arange(n_features))
    if feature_names is not None:
        ax.set_xticklabels(feature_names, rotation=rotation)
    else:
        ax.set_xticklabels(
            ['Feature {}'.format(i + 1) for i in range(n_features)],
            rotation=rotation)
return ax
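# Usage sketch (not from the original repo): random importances and loadings are used
# only to exercise the plotting helpers.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    fake_importances = rng.dirichlet(np.ones(5))
    plot_variable_importance(fake_importances, plot_type='lollipop', normalize=True)
    fake_directions = rng.uniform(-1, 1, size=(200, 5))
    plot_local_direction(fake_directions, sort_features=True)
    plt.show()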
```
#### File: drforest/tests/test_drforest.py
```python
import numpy as np
import pytest
from drforest import datasets
from drforest.ensemble import DimensionReductionForestRegressor
def test_drforest_smoke():
X, y = datasets.make_simulation1()
forest = DimensionReductionForestRegressor(
n_estimators=100, random_state=42, n_jobs=-1).fit(X, y)
y_pred = forest.predict(X)
assert y_pred.shape == (1000,)
imp = forest.local_subspace_importance(np.array([[-1.5, 1.5],
[0.5, -0.5]]))
assert imp.shape == (2, 2)
```
#### File: drforest/examples/random_forest_importances.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from drforest.datasets import make_simulation1
from drforest.ensemble import DimensionReductionForestRegressor
from drforest.ensemble import permutation_importance
plt.rc('font', family='serif')
fontsize = 14
n_samples = 2000
n_features = 5
X, y = make_simulation1(
n_samples=n_samples, noise=1, n_features=n_features, random_state=1234)
forest = DimensionReductionForestRegressor(
n_estimators=500, store_X_y=True, n_jobs=-1,
min_samples_leaf=3, max_features=None,
random_state=42).fit(X, y)
x0 = np.zeros(n_features)
x0[:2] = np.array([-1.5, 1.5])
local_direc_x0 = forest.local_principal_direction(x0)
local_direc_x0 *= np.sign(local_direc_x0[0])
x1 = np.zeros(n_features)
x1[:2] = [0.5, -0.5]
local_direc_x1 = forest.local_principal_direction(x1)
local_direc_x1 *= np.sign(local_direc_x1[0])
#forest = RandomForestRegressor(n_estimators=500,
# min_samples_leaf=3,
# n_jobs=-1, max_features=None,
# oob_score=True,
# random_state=42).fit(X, y)
#
#forest_imp = permutation_importance(
# forest, X, y, random_state=forest.random_state)
#forest_imp /= np.sum(forest_imp)
forest_imp = forest.feature_importances_
#order = np.argsort(forest_imp)
fig, ax = plt.subplots(figsize=(18, 5), ncols=4)
def f(x, y):
r1 = x - y
r2 = x + y
return (20 * np.maximum(
np.maximum(np.exp(-2 * r1 ** 2), np.exp(-r2 ** 2)),
2 * np.exp(-0.5 * (x ** 2 + y ** 2))))
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
ax[0].contour(X, Y, Z, 3, colors='black', linestyles='--', levels=5, linewidths=1.5)
ax[0].imshow(Z, extent=[-3, 3, -3, 3], origin='lower', cmap='YlGnBu_r', alpha=0.5)
ax[0].scatter([-1.5, 0.5], [1.5, -0.5], color=None, edgecolor='black')
ax[0].annotate(r'(-1.5, 1.5)', (-1.5, 1.5), xytext=(-1.4, 1.6), fontname='Sans', weight='bold')
ax[0].annotate(r'(0.5, -0.5)', (0.5, -0.5), xytext=(0.6, -0.4), fontname='Sans', weight='bold')
ax[0].set_aspect('equal')
ax[1].bar(np.arange(1, n_features + 1), forest_imp, color='gray')
ax[1].set_ylabel('Importance', fontsize=fontsize)
#ax[1].set_title('Random Forest', fontsize=fontsize)
ax[1].set_xlabel(None)
ax[1].axhline(0, color='black', linestyle='-')
ax[1].set_ylim(-1, 1)
ax[1].set_xlabel('Variable', fontsize=fontsize)
ax[1].text(3.5, 0.8, 'Global', fontsize=16)
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x0]
ax[2].bar(np.arange(1, n_features + 1), local_direc_x0, color=color)
#ax[2].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[2].axhline(0, color='black', linestyle='-', lw=1)
ax[2].set_ylim(-1, 1)
ax[2].set_xlabel('Variable', fontsize=fontsize)
ax[2].text(2.5, 0.8, '$\mathbf{x}_0 = (-1.5, 1.5, 0, 0, 0)$', fontsize=12)
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x1]
ax[3].bar(np.arange(1, n_features + 1), local_direc_x1, color=color)
#ax[3].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[3].set_xlabel('Variable', fontsize=fontsize)
ax[3].invert_yaxis()
ax[3].axhline(0, color='black', linestyle='-', lw=1)
ax[3].text(2.5, 0.8, '$\mathbf{x}_0 = (0.5, -0.5, 0, 0, 0)$', fontsize=12)
ax[3].set_ylim(-1, 1)
plt.subplots_adjust(wspace=0.3, left=0.03, right=0.985)
fig.savefig('local_lpd.png', dpi=300, bbox_inches='tight')
```
#### File: drforest/examples/real_data.py
```python
import os
import numpy as np
import pandas as pd
import plac
from functools import partial
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from drforest.ensemble import DimensionReductionForestRegressor
from drforest.dimension_reduction import SlicedInverseRegression
from drforest.dimension_reduction import SlicedAverageVarianceEstimation
from drforest.kernel_regression import fit_kernel_smoother_silverman
from drforest.datasets import (
load_abalone,
load_bodyfat,
load_cpu_small,
load_fishcatch,
load_kin8nm,
load_openml
)
DATASETS = {
'abalone': load_abalone,
'bodyfat': load_bodyfat,
'autoprice': partial(load_openml, name='autoPrice'),
'puma8NH': partial(load_openml, name='puma8NH'),
'puma32H': partial(load_openml, name='puma32H'),
'liver': partial(load_openml, name='liver-disorders'),
'mu284': partial(load_openml, name='mu284'),
'wisconsin': partial(load_openml, name='wisconsin'),
'fishcatch': load_fishcatch,
'bank8FM': partial(load_openml, name='bank8FM'),
'cpu' : load_cpu_small,
'kin8nm' : load_kin8nm,
}
OUT_DIR = 'real_data_results'
def benchmark(dataset, n_resamples=15, n_splits=10):
X, y = DATASETS[dataset]()
n_samples, n_features = X.shape
n_estimators = 500
min_samples_leaf_params = [1, 5]
max_feature_params = [2, 4, 6, 1/3., 'sqrt', None]
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
for resample_id in range(n_resamples):
cv = KFold(n_splits=n_splits, shuffle=True,
random_state=resample_id * 42)
results = {
'mean': np.zeros(n_splits),
'kernel_reg': np.zeros(n_splits),
'kernel_reg_sir': np.zeros(n_splits),
'kernel_reg_save': np.zeros(n_splits)
}
for min_samples_leaf in min_samples_leaf_params:
for max_features in max_feature_params:
results['rf (l={},p={})'.format(min_samples_leaf, max_features)] = (
np.zeros(n_splits))
results['drrf (l={},p={})'.format(min_samples_leaf, max_features)] = (
np.zeros(n_splits))
results['sir_rf (l={},p={})'.format(min_samples_leaf, max_features)] = (
np.zeros(n_splits))
results['save_rf (l={},p={})'.format(min_samples_leaf, max_features)] = (
np.zeros(n_splits))
for k, (train, test) in enumerate(cv.split(X)):
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
print('Train: {}'.format(X_train.shape))
print('Test: {}'.format(X_test.shape))
print("Mean Only")
err = np.mean((y_test - np.mean(y_train))**2)
results['mean'][k] = err
print(err)
for min_samples_leaf in min_samples_leaf_params:
for max_features in max_feature_params:
if isinstance(max_features, int) and X.shape[1] < max_features:
continue
print("RandomForest (l={},p={})".format(min_samples_leaf, max_features))
forest = RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=123,
n_jobs=-1).fit(X_train, y_train)
y_pred = forest.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['rf (l={},p={})'.format(min_samples_leaf, max_features)][k] = err
print(err)
for min_samples_leaf in min_samples_leaf_params:
for max_features in max_feature_params:
if isinstance(max_features, int) and X.shape[1] < max_features:
continue
print("DR RandomForest (l={},p={})".format(min_samples_leaf, max_features))
forest = DimensionReductionForestRegressor(
n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=123,
n_jobs=-1).fit(X_train, y_train)
y_pred = forest.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['drrf (l={},p={})'.format(min_samples_leaf, max_features)][k] = err
print(err)
for min_samples_leaf in min_samples_leaf_params:
for max_features in max_feature_params:
if isinstance(max_features, int) and X.shape[1] < max_features:
continue
print("SIR + RF (l={},p={})".format(min_samples_leaf, max_features))
forest = Pipeline([
('sir', SlicedInverseRegression(n_directions=None)),
('rf', RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=123,
n_jobs=-1))
]).fit(X_train, y_train)
y_pred = forest.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['sir_rf (l={},p={})'.format(min_samples_leaf, max_features)][k] = err
print(err)
for min_samples_leaf in min_samples_leaf_params:
for max_features in max_feature_params:
if isinstance(max_features, int) and X.shape[1] < max_features:
continue
print("SAVE + RF (l={},p={})".format(min_samples_leaf, max_features))
forest = Pipeline([
('save', SlicedAverageVarianceEstimation(n_directions=None)),
('rf', RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=123,
n_jobs=-1))
]).fit(X_train, y_train)
y_pred = forest.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['save_rf (l={},p={})'.format(min_samples_leaf, max_features)][k] = err
print(err)
print("Kernel Regression")
ksmooth = fit_kernel_smoother_silverman(
X_train, y_train, feature_type='raw')
y_pred = ksmooth.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['kernel_reg'][k] = err
print(err)
print("SIR Kernel Regression")
ksmooth = fit_kernel_smoother_silverman(
X_train, y_train, feature_type='sir')
y_pred = ksmooth.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['kernel_reg_sir'][k] = err
print(err)
print("SAVE Kernel Regression")
ksmooth = fit_kernel_smoother_silverman(
X_train, y_train, feature_type='save')
y_pred = ksmooth.predict(X_test)
err = np.mean((y_pred - y_test)**2)
results['kernel_reg_save'][k] = err
print(err)
results = pd.DataFrame(results)
results['fold'] = np.arange(n_splits)
output_name = os.path.join(OUT_DIR, "{}_{}n_{}p_{}k_{}r_{}.csv".format(
dataset, n_samples, n_features, n_splits, n_resamples, resample_id))
results.to_csv(output_name, index=False)
if __name__ == '__main__':
plac.call(benchmark)
``` |
{
"source": "joshloyal/fully-differentiable-deep-ndf-tf",
"score": 2
} |
#### File: joshloyal/fully-differentiable-deep-ndf-tf/test_ops.py
```python
import numpy as np
import tensorflow as tf
DEPTH = 4 # Depth of a tree (this includes the leaf probabilities)
N_LEAF = 2 ** (DEPTH - 1) # Number of leaf nodes
N_DECISION_NODES = 2 ** (DEPTH - 1) - 1 # These are all nodes but the leaves
N_BATCH = 3
N_LABELS = 10
rng = np.random.RandomState(1234)
proba = rng.beta(2, 2, N_DECISION_NODES * N_BATCH).reshape(
N_BATCH, N_DECISION_NODES)
print(proba)
proba_var = tf.placeholder('float32', name='proba', shape=[None, proba.shape[1]])
def mu_calc():
"""
\mu = [d_0, d_0, d_0, d_0, 1-d_0, 1-d_0, 1-d_0, 1-d_0]
\mu = \mu * [d_1, d_1, 1-d_1, 1-d_1, d_2, d_2, 1-d_2, 1-d_2]
\mu = \mu * [d_3, 1-d_3, d_4, 1-d_4, d_5, 1-d_5, d_6, 1-d_6]
Tree indexing
0
1 2
3 4 5 6
"""
batch_size = tf.shape(proba_var)[0]
n_decision_nodes = proba_var.get_shape().as_list()[-1]
n_leaves = n_decision_nodes + 1
tree_depth = np.int64(np.log2(n_leaves) + 1)
# decision probabilities.
# The first n_batch * n_decision_nodes values are d(i)
# The second n_batch * n_decision_nodes values are 1-d(i)
decision_p = tf.pack([proba_var, 1 - proba_var])
flat_decision_p = tf.reshape(decision_p, [-1])
# zeroth index of each routing probability in the mini-batch
batch_0_indices = tf.tile(
tf.expand_dims(tf.range(0, batch_size * n_decision_nodes, n_decision_nodes), 1),
[1, n_leaves])
batch_complement_row = tf.concat(1,
[tf.zeros([1, n_leaves/2]),
tf.fill([1, n_leaves/2], tf.to_float(n_decision_nodes * batch_size))]
)
batch_complement_indices = tf.to_int32(tf.tile(batch_complement_row, tf.pack([batch_size, 1])))
# First row of mu
mu = tf.gather(flat_decision_p, tf.add(batch_0_indices, batch_complement_indices))
for d in xrange(2, tree_depth):
indices = tf.range(2 ** (d - 1), 2 ** d) - 1 # [2, 4]
tile_indices = tf.reshape(tf.tile(tf.expand_dims(indices, 1),
[1, 2 ** (tree_depth - d)]), [1, -1])
batch_indices = tf.add(batch_0_indices, tf.tile(tile_indices, tf.pack([batch_size, 1])))
batch_complement_row = tf.tile(
tf.concat(1,
[tf.zeros([1, n_leaves/(2**d)]),
tf.fill([1, n_leaves/(2**d)], tf.to_float(n_decision_nodes * batch_size))]
),
[1, 2 ** (d - 1)]
)
batch_complement_indices = tf.to_int32(tf.tile(batch_complement_row, tf.pack([batch_size, 1])))
mu = tf.mul(mu, tf.gather(flat_decision_p, tf.add(batch_indices, batch_complement_indices)))
return mu
def pyx(mu):
batch_size = tf.shape(mu)[0]
w_l = tf.Variable(tf.random_uniform([N_LEAF, N_LABELS], -2, 2, seed=1))
leaf_p = tf.nn.softmax(w_l)
return tf.reduce_mean(
tf.mul(tf.tile(tf.expand_dims(mu, 2), [1, 1, N_LABELS]),
tf.tile(tf.expand_dims(leaf_p, 0), tf.pack([batch_size, 1, 1]))), 1)
def random_columns(tensor, n_columns, random_state=1234):
rng = np.random.RandomState(random_state)
n_features = tensor.get_shape().as_list()[-1]
column_indices = rng.choice(np.arange(n_features), n_columns)
return tf.concat(1,
[tf.slice(tensor, [0, column_idx], [-1, 1]) for column_idx in column_indices])
sess = tf.Session()
data = random_columns(proba_var, n_columns=2)#pyx(mu_calc())
sess.run(tf.initialize_all_variables())
result = sess.run(data, feed_dict={proba_var: proba})
print(result)
``` |
{
"source": "joshloyal/geneva",
"score": 2
} |
#### File: geneva/tests/test_linalg.py
```python
import numpy as np
import numpy.testing as npt
from neuralnet.linalg import broadcaste
from neuralnet.test_linalg import (scopy_verify,
saxpy_verify,
sgemv_verify,
sgemm_verify)
def test_scopy():
scopy_verify()
def test_saxpy():
saxpy_verify()
def test_sgemv():
sgemv_verify()
def test_sgemm():
sgemm_verify()
def test_broadcaste():
x = np.zeros((100, 10)).astype(np.float32)
y = np.arange(10).astype(np.float32)
expected = x + y
broadcaste(x, y)
npt.assert_almost_equal(expected, x)
``` |
{
"source": "joshloyal/image-cache",
"score": 2
} |
#### File: image_cache/tests/conftest.py
```python
import itertools
import os
import tempfile
import shutil
import numpy as np
import pytest
import PIL.Image as pil_image
rng = np.random.RandomState(123)
@pytest.fixture(scope='session')
def img_w():
return 20
@pytest.fixture(scope='session')
def img_h():
return 20
@pytest.fixture(scope='session')
def image_list(img_w, img_h):
rgb_images = []
gray_images = []
for n in range(8):
bias = rng.rand(img_w, img_h, 1) * 64
variance = rng.rand(img_w, img_h, 1) * (255 - 64)
image_array = rng.rand(img_w, img_h, 3) * variance + bias
image = pil_image.fromarray(image_array.astype('uint8')).convert('RGB')
rgb_images.append(image)
image_array = rng.rand(img_w, img_h, 1) * variance + bias
image = pil_image.fromarray(image_array.astype('uint8').squeeze()).convert('L')
gray_images.append(image)
return [rgb_images, gray_images]
@pytest.fixture(scope='session')
def rgb_images(image_list):
return image_list[0]
@pytest.fixture(scope='session')
def gray_images(image_list):
return image_list[1]
@pytest.fixture(scope='session')
def rgb_image_array(rgb_images):
return np.vstack([np.asarray(img, dtype=np.uint8) for img in rgb_images])
@pytest.fixture(scope='session')
def gray_image_array(gray_images):
return np.vstack([np.asarray(img, dtype=np.uint8) for img in gray_images])
@pytest.fixture(scope='session')
def rgb_image_data(tmpdir_factory, image_list):
temp_dir = tmpdir_factory.mktemp('data')
image_paths = []
for i, image in enumerate(image_list[0]):
image_file = 'image_{}.jpeg'.format(i)
image_path = str(temp_dir.join(image_file))
image.save(image_path)
image_paths.append(image_file)
return str(temp_dir), image_paths
@pytest.fixture(scope='session')
def gray_image_data(tmpdir_factory, image_list):
temp_dir = tmpdir_factory.mktemp('data')
image_paths = []
for i, image in enumerate(image_list[1]):
image_file = 'image_{}.jpeg'.format(i)
image_path = str(temp_dir.join(image_file))
image.save(image_path)
image_paths.append(image_file)
return str(temp_dir), image_paths
@pytest.fixture(scope='session')
def image_data(tmpdir_factory, image_list):
temp_dir = tmpdir_factory.mktemp('data')
image_paths = []
for i, image in enumerate(itertools.chain(*image_list)):
image_file = 'image_{}.jpeg'.format(i)
image_path = str(temp_dir.join(image_file))
image.save(image_path)
image_paths.append(image_file)
return str(temp_dir), image_paths
``` |