{
"source": "jon-rd/jc",
"score": 3
}
#### File: jc/parsers/hosts.py
```python
import jc.utils
class info():
version = '1.2'
description = '/etc/hosts file parser'
author = '<NAME>'
author_email = '<EMAIL>'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data with the following schema:
[
{
"ip": string,
"hostname": [
string
]
}
]
"""
# no additional processing needed
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of Dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
raw_output = []
# Clear any blank lines
cleandata = list(filter(None, data.splitlines()))
if jc.utils.has_data(data):
for line in cleandata:
output_line = {}
# ignore commented lines
if line.strip().startswith('#'):
continue
line_list = line.split(maxsplit=1)
ip = line_list[0]
hosts = line_list[1]
hosts_list = hosts.split()
comment_found = False
for i, item in enumerate(hosts_list):
if '#' in item:
comment_found = True
comment_item = i
break
if comment_found:
hosts_list = hosts_list[:comment_item]
output_line['ip'] = ip
output_line['hostname'] = hosts_list
raw_output.append(output_line)
if raw:
return raw_output
else:
return process(raw_output)
```
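For reference, a minimal usage sketch of the parser above (assuming the module is importable as jc.parsers.hosts; the sample line and the expected output shape follow the docstring schema):
```python
# Hypothetical usage of the hosts parser defined above.
import jc.parsers.hosts

sample = '127.0.0.1   localhost localhost.localdomain   # loopback\n# a comment line\n'
result = jc.parsers.hosts.parse(sample, quiet=True)
# Expected shape per the schema: [{'ip': '127.0.0.1', 'hostname': ['localhost', 'localhost.localdomain']}]
print(result)
```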
#### File: jc/parsers/lsblk.py
```python
import jc.utils
import jc.parsers.universal
class info():
version = '1.5'
description = 'lsblk command parser'
author = '<NAME>'
author_email = '<EMAIL>'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux']
magic_commands = ['lsblk']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data with the following schema:
[
{
"name": string,
"maj_min": string,
"rm": boolean,
"size": string,
"ro": boolean,
"type": string,
"mountpoint": string,
"kname": string,
"fstype": string,
"label": string,
"uuid": string,
"partlabel": string,
"partuuid": string,
"ra": integer,
"model": string,
"serial": string,
"state": string,
"owner": string,
"group": string,
"mode": string,
"alignment": integer,
"min_io": integer,
"opt_io": integer,
"phy_sec": integer,
"log_sec": integer,
"rota": boolean,
"sched": string,
"rq_size": integer,
"disc_aln": integer,
"disc_gran": string,
"disc_max": string,
"disc_zero": boolean,
"wsame": string,
"wwn": string,
"rand": boolean,
"pkname": string,
"hctl": string,
"tran": string,
"rev": string,
"vendor": string
}
]
"""
for entry in proc_data:
# boolean changes
bool_list = ['rm', 'ro', 'rota', 'disc_zero', 'rand']
for key in bool_list:
if key in entry:
try:
key_bool = bool(int(entry[key]))
entry[key] = key_bool
except (ValueError):
entry[key] = None
# integer changes
int_list = ['ra', 'alignment', 'min_io', 'opt_io', 'phy_sec', 'log_sec', 'rq_size', 'disc_aln']
for key in int_list:
if key in entry:
try:
key_int = int(entry[key])
entry[key] = key_int
except (ValueError):
entry[key] = None
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of Dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
# Clear any blank lines
cleandata = list(filter(None, data.splitlines()))
raw_output = []
if jc.utils.has_data(data):
cleandata = data.splitlines()
cleandata[0] = cleandata[0].lower()
cleandata[0] = cleandata[0].replace(':', '_')
cleandata[0] = cleandata[0].replace('-', '_')
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
# clean up non-ascii characters, if any
for entry in raw_output:
entry['name'] = entry['name'].encode('ascii', errors='ignore').decode()
if raw:
return raw_output
else:
return process(raw_output)
```
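The header normalization in parse() above (lowercasing and replacing ':' and '-' with '_') is what turns lsblk column names into the schema keys; a small illustration with a sample header line:
```python
# Illustration of the header cleanup performed in parse() above (header text is illustrative).
header = 'NAME   MAJ:MIN   RM   SIZE   RO   TYPE   MOUNTPOINT'
header = header.lower().replace(':', '_').replace('-', '_')
print(header)  # 'name   maj_min   rm   size   ro   type   mountpoint'
```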
#### File: jc/tests/test_uname.py
```python
import os
import json
import unittest
import jc.parsers.uname
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/uname-a.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_uname_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/uname-a.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_uname_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/uname-a.out'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_uname_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/uname-a.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_uname_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/uname.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_uname = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/uname-a.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_uname_a_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/uname-a.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_uname_a_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/uname-a.json'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_uname_a_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/uname-a.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_uname_a_json = json.loads(f.read())
def test_uname_nodata(self):
"""
Test 'uname -a' with no data
"""
self.assertEqual(jc.parsers.uname.parse('', quiet=True), {})
def test_uname_no_a(self):
"""
Test 'uname' without -a option. Should generate a ParseError exception
"""
self.assertRaises(jc.parsers.uname.ParseError, jc.parsers.uname.parse, self.osx_10_14_6_uname)
def test_uname_centos_7_7(self):
"""
Test 'uname -a' on Centos 7.7
"""
self.assertEqual(jc.parsers.uname.parse(self.centos_7_7_uname_a, quiet=True), self.centos_7_7_uname_a_json)
def test_uname_ubuntu_18_4(self):
"""
Test 'uname -a' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.uname.parse(self.ubuntu_18_4_uname_a, quiet=True), self.ubuntu_18_4_uname_a_json)
def test_uname_osx_10_11_6(self):
"""
Test 'uname -a' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.uname.parse(self.osx_10_11_6_uname_a, quiet=True), self.osx_10_11_6_uname_a_json)
def test_uname_osx_10_14_6(self):
"""
Test 'uname -a' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.uname.parse(self.osx_10_14_6_uname_a, quiet=True), self.osx_10_14_6_uname_a_json)
if __name__ == '__main__':
unittest.main()
```
{
"source": "jonreade/sagemaker-workshop",
"score": 2
}
#### File: lab-0-setup/scripts/createIAMUsers.py
```python
import boto3
iamClient = boto3.client('iam')
iamResource = boto3.resource('iam')
# Define the common parameters for the users we want to create
WORKSHOP_NAME = 'Workshop'
OWNER = '<EMAIL>'
groupName = "sagemaker-users"
group = iamResource.Group(groupName)
userString = "user"
userNumberMin = 1
userNumberMax = 26  # exclusive upper bound: users user01 through user25 are created
def ListUsers() :
print("Listing Users")
paginator = iamClient.get_paginator('list_users')
# For each user
for page in paginator.paginate():
# Print the user
for user in page['Users']:
#pp.pprint(user)
print("User: {0}\nUserID: {1}\nARN: {2}\nCreatedOn: {3}\n".format(
user['UserName'],
user['UserId'],
user['Arn'],
user['CreateDate']
)
)
def CreateUsers() :
print("Creating Users")
# For each user
for userNumber in range (userNumberMin, userNumberMax):
userName= userString + "{0:02d}".format(userNumber)
print("Creating: " + userName)
iamClient.create_user(UserName=userName, Tags=[
{'Key' : 'userNumber', 'Value' : str(userNumber) },
{'Key' : 'workshop', 'Value' : WORKSHOP_NAME },
{'Key' : 'AWSContact', 'Value' : OWNER }
]
)
iamClient.create_login_profile(UserName=userName,
Password=<PASSWORD>,
PasswordResetRequired=True)
group.add_user(UserName=userName)
def DeleteUsers() :
print("Deleting Users")
for userNumber in range (userNumberMin, userNumberMax):
userName= userString + "{0:02d}".format(userNumber)
group.remove_user(UserName=userName)
iamClient.delete_login_profile(UserName=userName)
iamClient.delete_user(UserName=userName)
ListUsers()
CreateUsers()
ListUsers()
#DeleteUsers()
```
{
"source": "jonreal/openWearable",
"score": 3
}
#### File: gui/pyplotter/antagonist_reflex.py
```python
import socket
import sys
import numpy as np
import fcntl, os
import errno
from matplotlib import pyplot as plt
from matplotlib import animation
UDP_IP_ADDRESS = "127.0.0.1"
UDP_PORT_NO = 1500
serverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverSock.bind(('', UDP_PORT_NO))
fcntl.fcntl(serverSock, fcntl.F_SETFL, os.O_NONBLOCK)
x = np.linspace(-3.0, 0, 100)
y1 = np.multiply(x, 0.0)
y2 = np.multiply(x, 0.0)
y3 = np.multiply(x, 0.0)
y4 = np.multiply(x, 0.0)
y5 = np.multiply(x, 0.0)
y6 = np.multiply(x, 0.0)
y7 = np.multiply(x, 0.0)
y8 = np.multiply(x, 0.0)
y9 = np.multiply(x, 0.0)
y10 = np.multiply(x, 0.0)
fig, (ax1, ax2, ax3) = plt.subplots(3,1)
ax1.set_xlim([-3, 0])
ax1.set_ylim([0, 5])
ax2.set_xlim([-3, 0])
ax2.set_ylim([0, 5])
ax3.set_xlim([-3, 0])
ax3.set_ylim([0, 5])
ln1, = ax1.plot([], [], lw=2)
ln2, = ax1.plot([], [], lw=2)
ln3, = ax1.plot([], [], lw=2)
ln4, = ax1.plot([], [], lw=2)
ln5, = ax2.plot([], [], lw=2)
ln6, = ax2.plot([], [], lw=2)
ln7, = ax3.plot([], [], lw=2)
ln8, = ax3.plot([], [], lw=2)
ln9, = ax1.plot([], [], lw=2)
ln10, = ax1.plot([], [], lw=2)
ymin = 0
ymax = 60
def init():
ln1.set_data([], [])
ln2.set_data([], [])
ln3.set_data([], [])
ln4.set_data([], [])
ln5.set_data([], [])
ln6.set_data([], [])
ln7.set_data([], [])
ln8.set_data([], [])
ln9.set_data([], [])
ln10.set_data([], [])
return ln1, ln2, ln3, ln4, ln5, ln6, ln7, ln8, ln9, ln10
def animate(i):
global y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, x, ax1, ax2
while (1) :
try :
n, add = serverSock.recvfrom(128)
val = n.decode("utf-8").split('\t')
# Format mesg
time = float(val[0])
p_res = float(val[1])
p_d1 = float(val[2])
p_m1 = float(val[3])
p_d2 = float(val[4])
p_m2 = float(val[5])
dp_m1 = float(val[6])
dp_m2 = float(val[7])
u1 = float(val[8])
u2 = float(val[9])
p1_max = float(val[10])
p2_max = float(val[11])
#print(data)
print(time,'\t',
p_res,'\t',
p_d1,'\t',
p_m1,'\t',
p_d2,'\t',
p_m2,'\t',
dp_m1,'\t',
dp_m2,'\t',
u1,'\t',
u2,'\t',
p1_max,'\t',
p2_max,'\t',
);
# update buffers
y1 = np.concatenate([y1[1:],np.array([p_d1])])
y2 = np.concatenate([y2[1:],np.array([p_m1])])
y3 = np.concatenate([y3[1:],np.array([p_d2])])
y4 = np.concatenate([y4[1:],np.array([p_m2])])
y5 = np.concatenate([y5[1:],np.array([dp_m1])])
y6 = np.concatenate([y6[1:],np.array([dp_m2])])
y7 = np.concatenate([y7[1:],np.array([u1])])
y8 = np.concatenate([y8[1:],np.array([u2])])
y9 = np.concatenate([y9[1:],np.array([p1_max])])
y10 = np.concatenate([y10[1:],np.array([p2_max])])
ymin = np.amin(np.concatenate([y1,y2,y3,y4,y9,y10]))
ymax = np.amax(np.concatenate([y1,y2,y3,y4,y9,y10]))
ax1.set_ylim([(ymin - 0.1*(ymax-ymin)),(ymax + 0.1*(ymax-ymin))])
ymin = np.amin(np.concatenate([y5,y6]))
ymax = np.amax(np.concatenate([y5,y6]))
ax2.set_ylim([(ymin - 0.1*(ymax-ymin)),(ymax + 0.1*(ymax-ymin))])
ymin = np.amin(np.concatenate([y7,y8]))
ymax = np.amax(np.concatenate([y7,y8]))
ax3.set_ylim([(ymin - 0.1*(ymax-ymin)),(ymax + 0.1*(ymax-ymin))])
        except os.error as e:
            if e.errno == errno.EAGAIN:
                break
            else:
                raise e
ln1.set_data(x,y1)
ln2.set_data(x,y2)
ln3.set_data(x,y3)
ln4.set_data(x,y4)
ln5.set_data(x,y5)
ln6.set_data(x,y6)
ln7.set_data(x,y7)
ln8.set_data(x,y8)
ln9.set_data(x,y9)
ln10.set_data(x,y10)
return ln1, ln2, ln3, ln4, ln5, ln6, ln7, ln8, ln9, ln10, ax1, ax2
# ---
anim = animation.FuncAnimation(fig, animate, init_func=init,
interval=33, blit=False)
plt.show()
```
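For local testing without hardware, a minimal sender sketch that emits the 12 tab-separated fields the animate() loop decodes (time, p_res, p_d1, p_m1, p_d2, p_m2, dp_m1, dp_m2, u1, u2, p1_max, p2_max); the values below are illustrative only:
```python
# Hypothetical test sender: streams fake samples to the plotter above.
import socket
import time

UDP_IP_ADDRESS = "127.0.0.1"
UDP_PORT_NO = 1500

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
t = 0.0
while True:
    # 12 tab-separated fields, matching the order parsed in animate()
    fields = [t, 0.0, 1.0, 1.1, 2.0, 2.1, 0.01, 0.02, 0.5, 0.6, 3.0, 3.5]
    msg = '\t'.join('{:.3f}'.format(f) for f in fields)
    sock.sendto(msg.encode('utf-8'), (UDP_IP_ADDRESS, UDP_PORT_NO))
    t += 0.033
    time.sleep(0.033)
```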
#### File: openWearable/ros/ow_subscriber.py
```python
import rospy
from std_msgs.msg import String
def callback(data):
strdata = str(data)
# hacky split
val = strdata.split(':')
    val = val[1].split('\\t')
    temp = val[0].split('"')
frame = int(temp[1])
state = int(val[1])
buttons = int(val[2])
print(frame, state, buttons)
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber('openwearable', String, callback)
rospy.spin()
if __name__ == '__main__':
listener()
```
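A minimal publisher sketch for exercising the subscriber above; the message layout ("frame\tstate\tbuttons" as a std_msgs String on the openwearable topic) is inferred from the hacky split in callback() and the values are illustrative:
```python
# Hypothetical publisher for local testing of the subscriber above.
import rospy
from std_msgs.msg import String

def talker():
    pub = rospy.Publisher('openwearable', String, queue_size=10)
    rospy.init_node('ow_test_publisher', anonymous=True)
    rate = rospy.Rate(30)  # 30 Hz
    frame = 0
    while not rospy.is_shutdown():
        msg = '{}\t{}\t{}'.format(frame, 0, 0)  # frame, state, buttons
        pub.publish(msg)
        frame += 1
        rate.sleep()

if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
```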
{
"source": "jonreding2010/MAQS.Python",
"score": 3
}
#### File: Framework/base/BaseExtendableTest.py
```python
import Framework.base.BaseTest as BaseTest
class BaseExtendableTest(BaseTest.BaseTest):
    # Gets the test object
    def get_test_object(self):
        return super().get_test_object()
    # Sets the test object
    def set_test_object(self, value):
        super().set_test_object(value)
    # Setup before a test
    # [TestInitialize]
    # [SetUp]
    def setup(self, method, testContext):
        # Do base generic setup
        super().set_up(method, testContext)
    # Create a test object
    def create_new_test_object(self):
        raise NotImplementedError
    # protected override abstract void CreateNewTestObject();
```
#### File: Framework/base/BaseTest.py
```python
from Framework.utilities.logger.constants import LoggingEnabled, MessageType, TestResultType
from Framework.utilities.logger import ConsoleLogger, FileLogger, LoggingConfig, Logger
from Framework.utilities.helper import StringProcessor
from Framework.base import BaseTestObject
from unittest import TestResult
import os
import traceback
import datetime
class BaseTest():
    # All logged exceptions caught and saved to be thrown later.
    loggedExceptions = {}
    # Logging Enabled Setting from Config file.
    loggingEnabledSetting = None
    # The test result object.
    testResult = None
    # The Collection of Base Test Objects to use.
    baseTestObjects = {}
    # The Performance Timer Collection.
    perfTimerCollection = None
    # The TestNG Test Context.
    testContextInstance = ""
    # The Fully Qualified Test Class Name.
    fullyQualifiedTestClassName = ""
    # Gets the Performance Timer Collection.
    # @return Performance Timer Collection
    def get_perf_timer_collection(self):
        return self.perfTimerCollection
    # Sets the Performance Timer Collection.
    # @param perfTimerCollection Performance Timer Collection to use
    def set_perf_timer_collection(self, perfTimerCollection):
        self.perfTimerCollection = perfTimerCollection
# Gets the Logger for this test.
# @return Logger object
def get_logger(self):
return self.get_test_object().getLog()
# Set the Logger for this test.
# @param log The Logger object
def set_logger(self, log):
self.get_test_object().setLog(log)
    # Gets the Logging Enabled setting.
    # @return Logging Enabled setting
    def get_logging_enabled_setting(self):
        return self.loggingEnabledSetting
    # Set the Logging Enabled setting.
    # @param setting The LoggingEnabled enum
    def set_Logging_Enabled(self, setting):
        self.loggingEnabledSetting = setting
    # Get logged exceptions for this test.
    # @return List of logged exceptions for this test
    def get_logged_exceptions(self):
        if self.fullyQualifiedTestClassName not in self.loggedExceptions:
            return []
        return self.loggedExceptions[self.fullyQualifiedTestClassName]
    # Set Logged Exception List - Add/Update entry in the dictionary with test class name as key.
    # @param loggedExceptionList List of logged exceptions to use.
    def set_logged_exceptions(self, loggedExceptionList):
        self.loggedExceptions[self.fullyQualifiedTestClassName] = loggedExceptionList
# Gets the Driver Store.
# @return The Driver Store
def get_manager_store(self):
return self.get_test_object().get_manager_store()
    # Gets the TestNG Test Context.
    # @return The TestNG Test Context
    def get_test_context(self):
        return self.testContextInstance
    # Sets the TestNG Test context.
    # @param testContext The TestNG Test Context to use
    def set_test_context(self, testContext):
        self.testContextInstance = testContext
    # Get the BaseTestObject for this test.
    # @return The BaseTestObject
    def get_test_object(self):
        # if (!baseTestObjects.containsKey(fullyQualifiedTestClassName.get())):
        if self.fullyQualifiedTestClassName not in self.baseTestObjects:
            self.create_new_test_object()
        return self.baseTestObjects[self.fullyQualifiedTestClassName]
    # Sets the Test Object.
    # @param baseTestObject The Base Test Object to use
    def set_test_object(self, baseTestObject):
        self.baseTestObjects[self.fullyQualifiedTestClassName] = baseTestObject
    # Setup before a test.
    # @param method The initial executing Method object
    # @param testContext The initial executing Test Context object
    ## @BeforeMethod(alwaysRun = true)
    def set_up(self, method, testContext):
        self.testContextInstance = testContext
        # Get the Fully Qualified Test Class Name and set it in the object
        testName = method.getDeclaringClass() + "." + method.getName()
        testName = testName.replace("class ", "", 1)
        self.fullyQualifiedTestClassName = testName
        self.create_new_test_object()
    # Cleanup after a test.
    ## @AfterMethod(alwaysRun = true)
    def tear_down(self):
        try:
            self.before_Logging_Teardown(self.testResult)
        except Exception as e:
            self.tryToLog(MessageType.WARNING, "Failed before logging teardown because: %s", str(e))
        # Log the test result
        if self.testResult.getStatus() == "SUCCESS":
            self.tryToLog(MessageType.SUCCESS, "Test Passed")
        elif self.testResult.getStatus() == "FAILURE":
            self.tryToLog(MessageType.ERROR, "Test Failed")
        elif self.testResult.getStatus() == "SKIP":
            self.tryToLog(MessageType.INFORMATION, "Test was skipped")
        else:
            self.tryToLog(MessageType.WARNING, "Test had an unexpected result.")
        # Cleanup log files we don't want
        try:
            if isinstance(self.get_logger(), FileLogger) and self.getResultType() == TestResultType.PASS \
                    and self.loggingEnabledSetting == LoggingEnabled.ONFAIL:
                os.remove(self.get_logger().get_file_path())
        except Exception as e:
            self.tryToLog(MessageType.WARNING, "Failed to cleanup log files because: %s", str(e))
        # Get the Fully Qualified Test Name
        fullyQualifiedTestName = self.fullyQualifiedTestClassName
        try:
            baseTestObject = self.get_test_object()
            # Release logged messages
            self.loggedExceptions.pop(fullyQualifiedTestName, None)
            # Release the Base Test Object
            self.baseTestObjects.pop(fullyQualifiedTestName, None)
        except Exception:
            pass
        # Create console logger to log subsequent messages
        self.set_test_object(BaseTestObject(ConsoleLogger(), fullyQualifiedTestName))
        self.fullyQualifiedTestClassName = ""
    # Set the test result after each test execution.
    # @param testResult The result object
    ##@AfterMethod(alwaysRun = true)
    def set_test_result(self, testResult):
        self.testContextInstance = testResult.getTestContext()
        self.testResult = testResult
    # Steps to do before logging teardown results.
    # @param resultType The test result
    def before_Logging_Teardown(self, resultType):
        pass
    # Setup logging data.
    # @return Logger
    def create_Logger(self):
        self.loggingEnabledSetting = LoggingConfig.get_LoggingEnabled_Setting()
        self.set_logged_exceptions([])
        if self.loggingEnabledSetting != LoggingEnabled.NO:
            return LoggingConfig.getLogger(StringProcessor.safeFormatter(
                "%s - %s", self.fullyQualifiedTestClassName,
                datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S-%f")))
        else:
            return ConsoleLogger()
    # Get the type of test result.
    # @return The type of test result
    def getResultType(self):
        result = self.testResult.getStatus()
        if result == "SUCCESS":
            return TestResultType.PASS
        elif result == "FAILURE":
            return TestResultType.FAIL
        elif result == "SKIP":
            return TestResultType.SKIP
        else:
            return TestResultType.OTHER
    # Get the test result type as text.
    # @return The test result type as text
    def getResultText(self):
        result = self.testResult.getStatus()
        if result == "SUCCESS":
            return "SUCCESS"
        elif result == "FAILURE":
            return "FAILURE"
        elif result == "SKIP":
            return "SKIP"
        else:
            return "OTHER"
    # Get the fully qualified test name.
    # @return The test name including class
    def getFullyQualifiedTestClassName(self):
        return self.fullyQualifiedTestClassName
    # Try to log a message - Do not fail if the message is not logged.
    # @param messageType The type of message
    # @param message The message text
    # @param args String format arguments
    def tryToLog(self, messageType, message, args=None):
        # Get the formatted message
        formattedMessage = StringProcessor.safeFormatter(message, args)
        try:
            # Write to the log
            self.get_logger().logMessage(messageType, formattedMessage)
            # If this was an error and written to a file, add it to the console output as well
            if messageType == MessageType.ERROR and not isinstance(self.get_logger(), ConsoleLogger):
                print(formattedMessage)
        except Exception as e:
            print(formattedMessage)
            print("Logging failed because: " + str(e))
    # Log a verbose message and include the automation specific call stack data.
    # @param message The message text
    # @param args String format arguments
    def logVerbose(self, message, args=None):
        messages = []
        messages.append(StringProcessor.safeFormatter(message, args) + os.linesep)
        for element in traceback.format_stack():
            # If the stack trace element is from the com.magenic package (excluding this method) append the stack trace line
            if element.startswith("com.magenic") and "BaseTest.logVerbose" not in element:
                messages.append(element + os.linesep)
        self.get_logger().logMessage(MessageType.VERBOSE, messages)
    # Create a Base test object.
    def create_new_test_object(self):
        newLogger = self.create_Logger()
        self.set_test_object(BaseTestObject(newLogger, self.fullyQualifiedTestClassName))
```
#### File: Framework/base/ManagerDictionary.py
```python
class ManagerDictionary(dict):
    # Get the driver for the associated driver manager
    # @param driverType Type of driver
    # @param key Key for the manager
    # @returns The managed driver
    def get_Driver(self, driverType, key):
        return self[key].Get()
    # Get a driver
    # @param driverType The type of driver
    # @param driverManager The driver manager
    # @returns The driver
    def GetDriver(self, driverType, driverManager):
        # where U : DriverManager
        return self[type(driverManager).__name__].Get()
    # Add a manager
    # @param manager The manager
    def add(self, manager):
        self[type(manager).__name__] = manager
    # Add or replace a manager
    # @param manager The manager
    # @param key Key for storing the manager (defaults to the manager's type name)
    def add_or_override(self, manager, key=None):
        if key is None:
            key = type(manager).__name__
        self.remove(key)
        self[key] = manager
    # Remove a driver manager
    # @param key Key for the manager you want removed (a key string or a manager type)
    # @returns True if the manager was removed
    def remove(self, key):
        if not isinstance(key, str):
            key = key.__name__
        if key in self:
            self[key].Dispose()
            del self[key]
            return True
        return False
    # Clear the dictionary
    def clear(self):
        for driver in self.values():
            driver.Dispose()
        super().clear()
    # Cleanup the drivers
    # @param disposing Dispose managed objects
    def dispose(self, disposing=True):
        # Only dealing with managed objects
        if disposing:
            self.clear()
```
#### File: Framework/base/SoftAssert.py
```python
from Framework.utilities.logger import ConsoleLogger, Logger
from Framework.base import SoftAssertException
from Framework.utilities.helper import StringProcessor
import os
from Framework.utilities.logger.constants import MessageType
class SoftAssert():
    # Initializes a new instance of the SoftAssert class.
    # Sets up the Logger (defaults to a ConsoleLogger when none is supplied)
    # @param logger Logger to be used
    def __init__(self, logger=None):
        self.Log = logger if logger is not None else ConsoleLogger()
    # List of all asserted exceptions
    listOfExceptions = []
    # Gets a value indicating whether the user checked for failures
    DidUserCheckForFailures = False
    # Gets a count of total number of Asserts
    NumberOfAsserts = 0
    # Gets a count of total number of Passed Asserts
    NumberOfPassedAsserts = 0
    # Gets a count of total number of Failed Asserts
    NumberOfFailedAsserts = 0
    # Gets the logger being used
    ##protected Logger Log { get; private set; }
    Log = None
# Override the logger
# @param log The new logger
def override_Logger(self, log):
self.Log = log
    # Gets a value indicating whether the user checked for failures at the end of the test.
    # @returns If the user checked for failures. If the number of asserts is 0, it returns true.
    def did_user_check(self):
        if self.NumberOfAsserts > 0:
            return self.DidUserCheckForFailures
        else:
            return True
    # Check if there are any failed soft asserts.
    # @returns True if there are failed soft asserts
    def did_soft_asserts_fail(self):
        return self.NumberOfFailedAsserts > 0
# Asserts if two strings are equal
# @param expectedText Expected value of the string
# @param actualText Actual value of the string
# @param message Message to be used when logging</param>
# @returns Boolean if they are equal
def are_equal(self, expectedText, actualText, message = ""):
return are_equal(expectedText, actualText, "", message)
# Asserts if two strings are equal
# @param expectedText Expected value of the string
# @param actualText Actual value of the string
# @param softAssertName Soft assert name
# @param message Message to be used when logging
# @returns Boolean if they are equal
def are_equal(self, expectedText, actualText, softAssertName, message = ""):
#void test()
if expectedText != actualText:
if (message == "" or message == None):
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.AreEqual failed for {0}. Expected '{1}' but got '{2}'", softAssertName, expectedText, actualText))
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.AreEqual failed for {0}. Expected '{1}' but got '{2}'. {3}", softAssertName, expectedText, actualText, message))
return invoke_test(test, expectedText, actualText, message)
# Soft assert for IsTrue
# @param condition">Boolean condition
# @param softAssertName Soft assert name
# @param failureMessage Failure message
# @returns Boolean if condition is met
def is_true(self, condition, softAssertName, failureMessage = ""):
#void test()
if not condition:
if not failureMessage or failureMessage == "":
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.IsTrue failed for: {0}", softAssertName))
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.IsTrue failed for: {0}. {1}", softAssertName, failureMessage))
return invoke_test(test, softAssertName, failureMessage)
# Soft assert for IsFalse
# @param condition Boolean condition
# @param softAssertName Soft assert name
# @param failureMessage Failure message
# @returns Boolean if condition is met
def is_false(self, condition, softAssertName, failureMessage = ""):
# void test()
if condition:
if not failureMessage or failureMessage == "":
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.IsFalse failed for: {0}", softAssertName))
raise SoftAssertException(StringProcessor.SafeFormatter("SoftAssert.IsFalse failed for: {0}. {1}", softAssertName, failureMessage))
return invoke_test(test, softAssertName, failureMessage);
# Log final assert count summary
def log_final_assert_data(self):
message = []
##MessageType type
message.AppendLine(StringProcessor.SafeFormatter(
"Total number of Asserts: {0}. {3}Passed Asserts = {1} {3}Failed Asserts = {2}{3}",
NumberOfAsserts, NumberOfPassedAsserts, NumberOfFailedAsserts, os.linesep))
if listOfExceptions.Count > 0:
type = MessageType.ERROR
message.AppendLine("List of failed exceptions:")
for exception in listOfExceptions:
# Will log all the exceptions that were caught in Asserts to the log file.
message.AppendLine(exception)
else:
# There are no exceptions that were caught in Asserts.
type = MessageType.INFORMATION;
message.AppendLine("There are no failed exceptions in the Asserts.")
Log.LogMessage(type, message.ToString().TrimEnd())
# Fail test if there were one or more failures
def fail_Test_if_assert_failed(self):
fail_Test_if_assert_failed("*See log for more details")
# Fail test if there were one or more failures
# @param message Customer error message
def fail_Test_if_assert_failed(self, message):
log_final_assert_data()
this.DidUserCheckForFailures = True
if (did_soft_asserts_fail()):
errors = ''.Join(os.linesep, listOfExceptions)
raise Exception("Soft Asserts failed:" + os.linesep + errors + os.linesep + message)
# Wrap an assert inside a soft assert
# @param assertFunction The assert function
# @returns True if the asset passed
def Assert(self, assertFunction):
# Resetting every time we invoke a test to verify the user checked for failures
DidUserCheckForFailures = False
result = False
try:
assertFunction.Invoke()
this.NumberOfPassedAsserts = ++NumberOfPassedAsserts
result = True
Log.LogMessage(MessageType.SUCCESS, "SoftAssert passed for: {0}.", assertFunction.Method.Name)
except Exception as ex:
this.NumberOfFailedAsserts = ++NumberOfFailedAsserts
result = False
Log.LogMessage(MessageType.WARNING, "SoftAssert failed for: {0}. {1}", assertFunction.Method.Name, ex.Message)
listOfExceptions.Add(ex.Message)
finally:
this.NumberOfAsserts = ++NumberOfAsserts
return result
# Wrap an assert that is expected to fail and the expected failure
# @param assertFunction The assert function
# @param expectedException The type of expected exception
# @param assertName soft assert name
# @param failureMessage Failure message
# @returns True if the assert failed
def assert_fails(self, assertFunction, expectedException, assertName, failureMessage = ""):
# Resetting every time we invoke a test to verify the user checked for failures
this.DidUserCheckForFailures = False
result = False
try:
assertFunction.Invoke()
this.NumberOfFailedAsserts = ++self.NumberOfFailedAsserts
result = False
Log.LogMessage(MessageType.WARNING, "SoftAssert failed for assert {0}: {1} passed. Expected failure type {2}.", assertName, assertFunction.Method.Name, expectedException)
except Exception as ex:
if ex.GetType().Equals(expectedException):
NumberOfPassedAsserts = ++NumberOfPassedAsserts
result = True
Log.LogMessage(MessageType.SUCCESS, "SoftAssert passed for assert {0}: {1}.", assertName, assertFunction.Method.Name)
else:
NumberOfFailedAsserts = ++NumberOfFailedAsserts
result = False;
Log.LogMessage(MessageType.WARNING, "SoftAssert failed for assert {0}: {1}. Expected failure:{2} Actual failure: {3}", assertName, assertFunction.Method.Name, expectedException, ex.Message)
listOfExceptions.Add(ex.Message)
finally:
NumberOfAsserts = ++NumberOfAsserts
return result;
# Executes the assert type passed as parameter and updates the total assert count
# @param test Test method Action
# @param expectedText Expected value of the string
# @param actualText Actual value of the string
# @param message Test Name or Message
# @returns Boolean if the assert is true
def invoke_test(self, test, expectedText, actualText, message):
# Resetting every time we invoke a test to verify the user checked for failures
this.DidUserCheckForFailures = False
result = False
try:
test.invoke();
this.NumberOfPassedAsserts = ++NumberOfPassedAsserts
result = True
Log.LogMessage(expectedText, actualText, message, result)
except Exception as ex:
this.NumberOfFailedAsserts = ++NumberOfFailedAsserts
result = False;
Log.LogMessage(expectedText, actualText, message, result)
listOfExceptions.Add(ex.Message);
finally:
this.NumberOfAsserts = ++NumberOfAsserts
return result;
# Executes the assert type passed as parameter and updates the total assert count
# @param test Test method Action
# @param softAssertName Soft assert name
# @param message Test Name or Message
# @returns Boolean if the assert is true
def invoke_test(self, test, softAssertName, message):
# Resetting every time we invoke a test to verify the user checked for failures
this.DidUserCheckForFailures = False
result = False
try:
test.Invoke()
this.NumberOfPassedAsserts = ++NumberOfPassedAsserts
result = True
Log.LogMessage(MessageType.SUCCESS, "SoftAssert passed for: {0}.", softAssertName)
except Exception as ex:
NumberOfFailedAsserts = ++NumberOfFailedAsserts
result = False
Log.LogMessage(MessageType.WARNING, "SoftAssert failed for: {0}. {1}", softAssertName, message)
listOfExceptions.Add(ex.Message)
finally:
this.NumberOfAsserts = ++NumberOfAsserts
return result
# Logs the message to the logger
# @param expectedText Expected value of the string
# @param actualText Actual value of the string
# @param message Test Name or Message
# @param result Decides the message type to be logged
def log_message(self, expectedText, actualText, message, result):
if result:
Log.LogMessage(MessageType.SUCCESS, StringProcessor.SafeFormatter("Soft Assert '{0}' passed. Expected Value = '{1}', Actual Value = '{2}'.", message, expectedText, actualText))
else:
Log.LogMessage(MessageType.WARNING, StringProcessor.SafeFormatter("Soft Assert '{0}' failed. Expected Value = '{1}', Actual Value = '{2}'.", message, expectedText, actualText))
```
#### File: utilities/helper/Config.py
```python
import os.path
from os import path
from Framework.utilities.helper import ConfigSection as ConfigSection
from Framework.utilities.helper import StringProcessor as StringProcessor
from binascii import Error
import xml.etree.ElementTree as et
# Configuration class.
class Config:
# The default section MagenicMaqs.
DEFAULT_MAQS_SECTION = ConfigSection.MagenicMaqs
# default config.xml file name.
CONFIG_FILE = "config.xml"
# The configuration containing values loaded in from the config.xml file.
configValues = []
# The configuration containing values that were added to the configuration.
overrideConfig = []
# The base configs object.
configs = []
    @staticmethod
    def config(parameter_list):
        try:
            if path.exists(Config.CONFIG_FILE):
                tree = et.parse(Config.CONFIG_FILE)
                root = tree.getroot()
                for node in root:
                    Config.configValues.append(node)
            '''
            builder = configs.xmlBuilder(CONFIG_FILE)
            configValues = builder.getConfiguration();
            configValues.setSynchronizer(ReadWriteSynchronizer())
            '''
        except Exception as e:
            raise TimeoutError(StringProcessor.safeFormatter(
                "Exception creating the xml configuration object from the file : " + str(e)))
# Gets a section from the configuration.
# @param section The desired section
# @return A HashMap of the values in the section
def get_section(self, section):
sectionValues = []
# first parse the override config
overridePaths = overrideConfig.getKeys(section);
while (overridePaths.hasNext()):
key = overridePaths.next()
sectionValues.put(key.replaceFirst(section + "\\.", ""), overrideConfig.getString(key))
# then parse the base config, ignoring duplicates
configValuePaths = configValues.getKeys(section);
while (configValuePaths.hasNext()):
key = configValuePaths.next();
editedKey = key.replaceFirst(section + "\\.", "");
if (editedKey not in sectionValues):
sectionValues.put(editedKey, configValues.getString(key))
return sectionValues
# Add dictionary of values to maqs section.
# @param configurations Dictionary of configuration values
# @param overrideExisting True to override existing values, False otherwise
def addGeneralTestSettingValues(self, configurations, overrideExisting):
add_test_setting_values(configurations, DEFAULT_MAQS_SECTION, overrideExisting)
# Add dictionary of values to specified section.
# @param configurations Dictionary of configuration values
# @param section Section to add the value to
# @param overrideExisting True to override existing values, False otherwise
def add_test_setting_values(self, configurations, section, overrideExisting):
for entry in configurations:
sectionedKey = section + "." + entry.getKey()
if sectionedKey not in overrideConfig or overrideExisting == True:
overrideConfig.setProperty(sectionedKey, entry.getValue())
# Get the specified value out of the default section.
# @param key The key
# @return The configuration value
def get_general_value(self, key):
return get_value_for_section(DEFAULT_MAQS_SECTION, key)
# Get the specified value out of the default section.
# @param key The key
# @param defaultValue The value to return if the key does not exist
# @return The configuration value
def get_general_value(self, key,defaultValue):
return get_value_for_section(DEFAULT_MAQS_SECTION, key, defaultValue)
# Get the specified value out of the specified section.
# @param section The section to search
# @param key The key
# @return The configuration value
def get_value_for_section(self, section, key):
return get_value_for_section(section, key, "")
# Get the specified value out of the specified section.
# @param section The section to search
# @param key The key
# @param defaultValue The value to return if the key is not found
# @return The configuration value
def get_value_for_section(self, section, key, defaultValue):
return get_value_for_section(section.toString(), key, defaultValue)
# Get the specified value out of the specified section.
# @param section The section to search
# @param key The key
# @param defaultValue The value to return if the key is not found
# @return The configuration value
def get_value_for_section(self, section, key, defaultValue):
keyWithSection = section + "." + key;
return get_value(keyWithSection, defaultValue);
# Get the configuration value for a specific key. Does not assume a section.
# @param key The key
# @return The configuration value - Returns the empty string if the key is not found
def get_value(self, key):
retVal = overrideConfig.getString(key, "");
if len(retVal) == 0:
return configValues.getString(key, "")
else:
return retVal
# Get the configuration value for a specific key. Does not assume a section.
# @param key The key
# @param defaultValue Value to return if the key does not exist
# @return The configuration value - Returns the default string if the key is not found
def get_value(self, key, defaultValue):
retVal = get_value(key);
if len(retVal) == 0:
return defaultValue
else:
return retVal
# Check the config for a specific key. Does not assume a section.
# @param key The key
# @return True if the key exists, false otherwise
def does_key_exist(self, key):
if overrideConfig.containsKey(key):
return True
else:
return key in configValues
# Check the config for a specific key. Searches the specified section.
# @param key The key
# @param section The specified section
# @return True if the key exists, false otherwise
def does_key_exist(self, key, section):
keyWithSection = section + "." + key;
return does_key_exist(keyWithSection)
# Check the config for a specific key. Searches the default section.
# @param key The key
# @return True if the key exists, false otherwise
def does_general_key_exist(self, key):
return does_key_exist(key, DEFAULT_MAQS_SECTION)
```
#### File: utilities/logger/HtmlFileLogger.py
```python
import os
from Framework.utilities.logger import ConsoleLogger, Logger, FileLogger, MessageType
from datetime import datetime
from Framework.utilities.helper import StringProcessor
class HtmlFileLogger(FileLogger):
# The default log name.
DEFAULTLOGNAME = "FileLog.html"
# Default header for the HTML file, this gives us our colored text.
DEFAULTHTMLHEADER = "<!DOCTYPE html><html><header><title>Test Log</title></header><body>"
# Gets the file extension
extension = ".html"
# Gets or sets the FilePath value
filePath = ""
    # Initializes a new instance of the HtmlFileLogger class
    # @param logFolder Where log files should be saved
    # @param name File Name
    # @param messageLevel Messaging level
    # @param append True to append to an existing log file or false to overwrite it - If the file does not exist, this flag will have no effect
    def __init__(self, logFolder, name=DEFAULTLOGNAME, messageLevel=MessageType.INFORMATION, append=False):
        self.filePath = os.path.join(logFolder, name)
        # Write the default HTML header so the log renders as a styled page
        with open(self.filePath, 'a' if append else 'w') as writer:
            writer.write(self.DEFAULTHTMLHEADER)
    # Write the formatted message (one line) to the log file as a generic message
    # @param messageType The type of message
    # @param message The message text
    # @param args String format arguments
    def log_message(self, messageType, message, args=None):
        # If the message level is greater than the current log level then do not log it.
        if Logger.should_message_be_logged(messageType):
            date = datetime.now().strftime(Logger.DEFAULTDATEFORMAT)
            try:
                with open(self.filePath, 'a') as writer:
                    # Set the style
                    writer.write(self.get_text_with_color_flag(messageType))
                    # Add the content
                    writer.write(StringProcessor.SafeFormatter("{0}{1}", os.linesep, date))
                    writer.write(StringProcessor.SafeFormatter("{0}:\t", str(messageType)))
                    writer.write(StringProcessor.SafeFormatter(message, args) + os.linesep)
                    # Close off the style
                    writer.write("</p>")
                    # Close the pre tag when logging Errors
                    if messageType == MessageType.ERROR:
                        writer.write("</pre>")
            except Exception as e:
                # Failed to write to the log file, write error to the console instead
                console = ConsoleLogger()
                console.LogMessage(MessageType.ERROR, StringProcessor.SafeFormatter(
                    "Failed to write to event log because: {0}", str(e)))
                console.LogMessage(messageType, message, args)
    # Dispose the class
    # @param disposing True if you want to release managed resources
    def dispose(self, disposing=True):
        # GC.SuppressFinalize(self)
        if disposing and os.path.isfile(self.filePath):
            with open(self.filePath, 'a') as writer:
                writer.write("</body></html>")
# Get the HTML style key for the given message type
# @param type The message type</param>
# @return string - The HTML style key for the given message type
def get_text_with_color_flag(self, type):
if type == MessageType.VERBOSE:
return "<p style =\"color:purple\">"
elif type == MessageType.ACTION:
return "<p style =\"color:gold\">"
elif type == MessageType.STEP:
return "<p style =\"color:orange\">"
elif type == MessageType.ERROR:
return "<pre><p style=\"color:red\">"
elif type == MessageType.GENERIC:
return "<p style =\"color:black\">"
elif type == MessageType.INFORMATION:
return "<p style =\"color:blue\">"
elif type == MessageType.SUCCESS:
return "<p style=\"color:green\">"
elif type == MessageType.WARNING:
return "<p style=\"color:orange\">"
else:
print(FileLogger.unknown_message_type_message(type));
return "<p style=\"color:hotpink\">"
```
{
"source": "jonreding2010/PythonLogging",
"score": 2
}
#### File: PythonLogging/baseTest/BaseTestObject.py
```python
from baseLogger.Logger import Logger
from baseLogger.constants.MessageType import MessageType
from baseTest.ManagerDictionary import ManagerDictionary
from performance.PerfTimerCollection import PerfTimerCollection
from utilities.StringProcessor import StringProcessor
# The BaseTestObject class.
class BaseTestObject:
# The Logger.
logger = Logger()
# The Performance Timer Collection.
perfTimerCollection = str()
# Concurrent Hash Map of string key value pairs.
values = dict()
# Concurrent Hash Map of string key and object value pairs.
objects = dict()
# Dictionary of String key and driver value pairs.
managerStore = ManagerDictionary
# ArrayList of Strings for associated files.
associatedFiles = list()
# The Fully Qualified Test Name.
fullyQualifiedTestName = str()
# Was the object closed.
isClosed = False
# Check if the object has been closed
# @return True if the object is closed
def getClosed(self):
return self.isClosed
# Initializes a new instance of the BaseTestObject class.
# @param logger The test's logger
# @param fullyQualifiedTestName The test's fully qualified test name
def __init__(self, base_test_object=None, logger=None, fully_qualified_test_name=str()):
if base_test_object is None:
self.set_up_without_base_test_object(logger, fully_qualified_test_name)
else:
self.set_up_base_test_object(base_test_object)
def set_up_without_base_test_object(self, logger, fully_qualified_test_name):
self.logger = logger
self.perfTimerCollection = PerfTimerCollection(logger, fully_qualified_test_name)
self.values = dict()
self.objects = dict()
self.managerStore = dict()
self.associatedFiles = list()
self.fullyQualifiedTestName = fully_qualified_test_name
logger.logMessage(MessageType.INFORMATION, "Setup test object for " + fully_qualified_test_name)
# Initializes a new instance of the BaseTestObject class.
# @param baseTestObject An existing base test object
def set_up_base_test_object(self, base_test_object):
self.logger = base_test_object.get_logger()
self.perfTimerCollection = base_test_object.get_perf_timer_collection()
self.values = base_test_object.get_values()
        self.objects = base_test_object.get_objects()
self.managerStore = base_test_object.get_manager_store()
self.associatedFiles = list()
self.fullyQualifiedTestName = base_test_object.get_fully_qualified_test_name()
base_test_object.get_logger().logMessage(MessageType.INFORMATION, "Setup test object")
# Gets the logger.
# @return The logger
def get_logger(self):
return self.logger
# Sets the logger.
# @param logger The logger to use
def set_logger(self, logger):
self.logger = logger
# Gets the Performance Timer Collection.
# @return Performance Timer Collection
def get_perf_timer_collection(self):
return self.perfTimerCollection
# Sets the Performance Timer Collection.
# @param perfTimerCollection Performance Timer Collection
def set_perf_timer_collection(self, perf_timer_collection):
self.perfTimerCollection = perf_timer_collection
def get_fully_qualified_test_name(self):
return self.fullyQualifiedTestName
# Gets the Concurrent Hash Map of string key value pairs.
# @return Concurrent Hash Map of string key value pairs
def get_values(self):
return self.values
# Sets the Concurrent Hash Map of string key and object value pairs.
# @param values Concurrent Hash Map of string key value pairs to use
# def setValues(self, ConcurrentHashMap<String, String> values) {
def set_values(self, new_values):
self.values = new_values
# Gets the Concurrent Hash Map of string key and object value pairs.
# @return Concurrent Hash Map of string key and object value pairs
# def public ConcurrentMap<String, Object> getObjects() {
def get_objects(self):
return self.objects
# Sets the Concurrent Hash Map of string key and object value pairs.
# @param objects Concurrent Hash Map of string key and object value pairs to use
# def setObjects(self, ConcurrentHashMap<String, Object> objects) {
def set_objects(self, new_objects):
self.objects = new_objects
# Gets the Concurrent Hash Map of string key and driver value pairs.
# @return Concurrent Hash Map of string key and driver value pairs
def get_manager_store(self):
return self.managerStore
# Sets the Concurrent Hash Map of string key and driver value pairs.
# @param managerStore Concurrent Hash Map of string key and driver value pairs to use.
def set_manager_store(self, manager_store):
self.managerStore = manager_store
    # Sets a string value, will replace if the key already exists.
    # @param key The key
    # @param value The value to associate with the key
    def set_value(self, key, value):
        self.values[key] = value
    # Sets an object value, will replace if the key already exists.
    # @param key The key
    # @param value The value to associate with the key
    def set_object(self, key, value):
        self.objects[key] = value
    # Add driver manager.
    # @param <T> the type parameter
    # @param driverManager the driver manager
    # @param overrideIfExists the override if exists
    # public <T extends DriverManager<?>> void addDriverManager(final T driverManager, final boolean overrideIfExists) {
    def add_driver_manager(self, driver_manager, override_if_exists=False):
        key = type(driver_manager).__name__
        if override_if_exists:
            self.override_driver_manager(key, driver_manager)
        else:
            self.managerStore[key] = driver_manager
    # Override driver manager.
    # @param key the key
    # @param driverManager the driver manager
    # def overrideDriverManager(final String key, final DriverManager<?> driverManager) {
    def override_driver_manager(self, key, driver_manager):
        self.managerStore[key] = driver_manager
# Add associated file boolean.
# @param path the path
# @return the boolean
    def add_associated_file(self, path):
        if path.exists():
            self.associatedFiles.append(path)
            return True
        return False
# Dispose of the driver store.
# @param closing the closing
    def close(self, closing=False):
        if not closing:
            if self.managerStore is None:
                return
        self.logger.logMessage(MessageType.VERBOSE, "Start dispose")
        # for (final DriverManager<?> singleDriver : this.managerStore.values()) {
        for singleDriver in self.managerStore.values():
            if singleDriver is not None:
                try:
                    singleDriver.close()
                except Exception as e:
                    # raise DriverDisposalException(StringProcessor.safe_formatter("Unable to properly dispose of
                    # driver"), e)
                    raise Exception(StringProcessor.safe_formatter("Unable to properly dispose of driver"), e)
        self.managerStore = None
        self.logger.logMessage(MessageType.VERBOSE, "End dispose")
        self.isClosed = True
# Remove associated file boolean.
# @param path the path
# @return the boolean
def remove_associated_file(self, path):
return self.associatedFiles.remove(path)
# Get array of associated files string [ ].
# @return the string [ ]
def get_array_of_associated_files(self):
return self.associatedFiles
# Contains associated file boolean.
# @param path the path
# @return the boolean
def contains_associated_file(self, path):
return path in self.associatedFiles
```
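A brief construction sketch for the class above (assuming ConsoleLogger and PerfTimerCollection from this codebase expose the constructors and logMessage method used inside BaseTestObject; names and values are illustrative):
```python
# Hypothetical usage of BaseTestObject defined above.
from baseLogger.ConsoleLogger import ConsoleLogger
from baseTest.BaseTestObject import BaseTestObject

test_object = BaseTestObject(logger=ConsoleLogger(),
                             fully_qualified_test_name="tests.SampleTest.test_example")
test_object.set_value("environment", "qa")   # store a string key/value pair
print(test_object.get_values())              # {'environment': 'qa'}
```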
#### File: PythonLogging/baseTest/BaseTest.py
```python
import os
import traceback
from datetime import datetime
from baseLogger.ConsoleLogger import ConsoleLogger
from baseLogger.FileLogger import FileLogger
from baseLogger.LoggingConfig import LoggingConfig
from baseLogger.constants.LoggingEnabled import LoggingEnabled
from baseLogger.constants.MessageType import MessageType
from baseLogger.constants.TestResultType import TestResultType
from baseTest.BaseTestObject import BaseTestObject
from baseTest.TestResult import TestResult
from utilities.Config import Config
from utilities.StringProcessor import StringProcessor
# Base test class.
class BaseTest:
# All logged exceptions caught and saved to be thrown later.
loggedExceptions = dict()
# Logging Enabled Setting from Config file.
loggingEnabledSetting = str()
# The test result object.
testResult = str()
# The Collection of Base Test Objects to use.
baseTestObjects = dict()
# The Performance Timer Collection.
perfTimerCollection = str()
# The TestNG Test Context.
testContextInstance = str()
# The Fully Qualified Test Class Name.
# ThreadLocal<String> fullyQualifiedTestClassName = new ThreadLocal<>();
fullyQualifiedTestClassName = list()
# Initializes a new instance of the BaseTest class.
def __init__(self):
self.loggedExceptions = dict()
self.baseTestObjects = dict()
# Gets the Performance Timer Collection.
# @return Performance Timer Collection
def get_perf_timer_collection(self):
return self.perfTimerCollection
# Sets the Performance Timer Collection.
# @param perfTimerCollection Performance Timer Collection to use
def set_perf_timer_collection(self, perf_timer_collection):
self.perfTimerCollection = perf_timer_collection
# Gets the Logger for this test.
# @return Logger object
def get_logger(self):
return self.get_test_object().getLogger()
# Set the Logger for this test.
# @param log The Logger object
def set_logger(self, log):
self.get_test_object().setLogger(log)
# Gets the Logging Enabled setting.
# @return Logging Enabled setting
def get_logging_enabled_setting(self):
return self.loggingEnabledSetting
# Set the Logging Enabled setting.
# @param setting The LoggingEnabled enum
def set_logging_enabled(self, setting):
self.loggingEnabledSetting = setting
# Get logged exceptions for this test.
# @return ArrayList of logged exceptions for this test
def get_logged_exceptions(self):
if not (self.fullyQualifiedTestClassName in self.loggedExceptions):
result = list()
else:
result = self.loggedExceptions.get(self.fullyQualifiedTestClassName)
return result
# Set Logged Exception List - Add/Update entry in Hash Map with test class name as key.
# @param loggedExceptionList ArrayList of logged exceptions to use.
def set_logged_exceptions(self, logged_exception_list):
self.loggedExceptions[self.fullyQualifiedTestClassName] = logged_exception_list
# Gets the Driver Store.
# @return The Driver Store
def get_manager_store(self):
return self.get_test_object().getManagerStore()
# Gets the TestNG Test Context.
# @return The TestNG Test Context
def get_test_context(self):
return self.testContextInstance
# Sets the TestNG Test context.
# @param testContext The TestNG Test Context to use
def set_test_context(self, test_context):
self.testContextInstance = test_context
# Get the BaseTestObject for this test.
# @return The BaseTestObject
def get_test_object(self):
if not (self.fullyQualifiedTestClassName in self.baseTestObjects):
self.create_new_test_object()
return self.baseTestObjects.get(self.fullyQualifiedTestClassName)
# Sets the Test Object.
# @param baseTestObject The Base Test Object to use
def set_test_object(self, base_test_object):
key = self.fullyQualifiedTestClassName
# if key in self.baseTestObjects:
# self.baseTestObjects[key] base_test_object)
# else:
self.baseTestObjects[key] = base_test_object
# Setup before a test.
# @param method The initial executing Method object
# @param testContext The initial executing Test Context object
# @BeforeMethod(alwaysRun = true)
def setup(self, method, test_context):
self.testContextInstance = test_context
# Get the Fully Qualified Test Class Name and set it in the object
test_name = method.getDeclaringClass() + "." + method.getName()
        test_name = test_name.replace("class ", "", 1)
self.fullyQualifiedTestClassName.append(test_name)
self.create_new_test_object()
# Cleanup after a test.
# @AfterMethod(alwaysRun = true)
def teardown(self):
try:
self.before_logging_teardown(self.testResult)
except Exception as e:
self.try_to_log(MessageType.WARNING, "Failed before logging teardown because: {}", e.message)
# Log the test result
if self.testResult.getStatus() == TestResult.SUCCESS:
self.try_to_log(MessageType.SUCCESS, "Test Passed")
elif self.testResult.getStatus() == TestResult.FAILURE:
self.try_to_log(MessageType.ERROR, "Test Failed")
elif self.testResult.getStatus() == TestResult.SKIP:
self.try_to_log(MessageType.INFORMATION, "Test was skipped")
else:
self.try_to_log(MessageType.WARNING, "Test had an unexpected result.")
# Cleanup log files we don't want
        try:
            if isinstance(self.get_logger(), FileLogger) and self.testResult.getStatus() == TestResult.SUCCESS \
                    and self.loggingEnabledSetting == LoggingEnabled.ONFAIL:
                # Files.delete(self.getLogger()).getFilePath())
                os.remove(self.get_logger().get_file_path())
        except Exception as e:
            self.try_to_log(MessageType.WARNING, "Failed to cleanup log files because: {}", str(e))
# Get the Fully Qualified Test Name
fully_qualified_test_name = self.fullyQualifiedTestClassName
try:
base_test_object = self.get_test_object()
# Release logged messages
self.loggedExceptions.pop(fully_qualified_test_name)
# Release the Base Test Object
self.baseTestObjects.pop(fully_qualified_test_name, base_test_object)
except Exception:
pass
# Create console logger to log subsequent messages
self.set_test_object(BaseTestObject(ConsoleLogger(), fully_qualified_test_name))
self.fullyQualifiedTestClassName.clear()
# Set the test result after each test execution.
# @param testResult The result object
# @AfterMethod(alwaysRun = true)
def set_test_result(self, test_result):
self.testContextInstance = test_result.get_test_context()
self.testResult = test_result
# Steps to do before logging teardown results.
# @param resultType The test result
def before_logging_teardown(self, result_type):
pass
# Setup logging data.
# @return Logger
def create_logger(self):
self.loggingEnabledSetting = LoggingConfig.get_logging_level_setting(Config())
        self.set_logged_exceptions(list())
if self.loggingEnabledSetting != LoggingEnabled.NO:
log = LoggingConfig().get_logger(StringProcessor.safe_formatter("{} - {}",
[self.fullyQualifiedTestClassName,
str(datetime.now().strftime(
"uuuu-MM-dd-HH-mm-ss-SSSS"))]),
Config())
else:
log = ConsoleLogger()
return log
# Get the type of test result.
# @return The type of test result
def get_result_type(self):
status = self.testResult.getStatus()
if status == TestResult.SUCCESS:
return TestResultType.PASS
elif status == TestResult.FAILURE:
return TestResultType.FAIL
elif status == TestResult.SKIP:
return TestResultType.SKIP
else:
return TestResultType.OTHER
# Get the test result type as text.
# @return The test result type as text
def get_result_text(self):
status = self.testResult.getStatus()
if status == TestResult.SUCCESS:
return "SUCCESS"
elif status == TestResult.FAILURE:
return "FAILURE"
elif status == TestResult.SKIP:
return "SKIP"
else:
return "OTHER"
# Get the fully qualified test name.
# @return The test name including class
def get_fully_qualified_test_class_name(self):
return self.fullyQualifiedTestClassName
# Try to log a message - Do not fail if the message is not logged.
# @param messageType The type of message
# @param message The message text
# @param args String format arguments
def try_to_log(self, message_type, message, args=None):
# Get the formatted message
formatted_message = StringProcessor.safe_formatter(message, args)
try:
# Write to the log
            self.get_logger().log_message(message_type, formatted_message)
            # If this was an error and written to a file, add it to the console
            # output as well
            if message_type == MessageType.ERROR and not isinstance(self.get_logger(), ConsoleLogger):
                print(formatted_message)
        except Exception as e:
            print(formatted_message)
            print("Logging failed because: " + str(e))
# Log a verbose message and include the automation specific call stack data.
# @param message The message text
# @param args String format arguments
def log_verbose(self, message, args=None):
messages = list()
messages.append(StringProcessor.safe_formatter(message, args) + os.linesep)
        for element in traceback.format_stack():
            # If the stack trace element is from the com.magenic package
            # (excluding this method) append the stack trace line
            if element.startswith("com.magenic") and "BaseTest.logVerbose" not in str(element):
                messages.append(element + os.linesep)
        self.get_logger().log_message(MessageType.VERBOSE, messages)
# Create a Base test object.
def create_new_test_object(self):
new_logger = self.create_logger()
self.set_test_object(BaseTestObject(new_logger, self.fullyQualifiedTestClassName))
```
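The ONFAIL cleanup rule in `teardown` above (discard the file log only when the test passed and logging is configured for on-failure) can be hard to see through the transliterated TestNG plumbing. The snippet below is a minimal, standalone sketch of just that rule; the names `log_path`, `test_passed` and `logging_enabled` are placeholders for illustration, not the class's real fields.
```python
import os
import tempfile

# Stand-ins for FileLogger.get_file_path(), the TestNG result and LoggingEnabled.ONFAIL
log_path = tempfile.NamedTemporaryFile(suffix=".log", delete=False).name
test_passed = True
logging_enabled = "ONFAIL"

# Keep the log only when it is useful: a failed run, or a config that always keeps logs.
if test_passed and logging_enabled == "ONFAIL" and os.path.exists(log_path):
    os.remove(log_path)

print(os.path.exists(log_path))  # False - the passing run's log was discarded
```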
#### File: PythonLogging/baseTest/DriverManager.py
```python
from baseTest.BaseTestObject import BaseTestObject
# The type Driver manager.
class DriverManager:
    # Base Test Object.
    baseTestObject = None
    # The Base driver.
    baseDriver = None
    # The Get driver supplier.
    getDriverSupplier = None
# Instantiates a new Driver manager.
# @param getDriverFunction driver function supplier
# @param baseTestObject the base test object
def __init__(self, get_driver_function, base_test_object):
self.baseTestObject = base_test_object
self.getDriverSupplier = get_driver_function
# Gets base driver.
# @return the base driver
def get_base_driver(self):
return self.baseDriver
# Sets base driver.
# @param baseDriver the base driver
def set_base_driver(self, base_driver):
self.baseDriver = base_driver
# Is driver initialized boolean.
# @return the boolean
def is_driver_initialized(self):
return self.baseDriver is not None
# Gets logger.
# @return the logger
def get_logger(self):
return self.baseTestObject.get_logger()
# Get base object.
# @return the object
    def get_base(self):
        if self.baseDriver is None and self.getDriverSupplier is not None:
            # Lazily build the driver the first time it is requested
            self.baseDriver = self.getDriverSupplier()
        return self.baseDriver
# Gets test object.
# @return the test object
def get_test_object(self):
return self.baseTestObject
```
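A short usage sketch for `DriverManager`, assuming the `ConsoleLogger` and `BaseTestObject` classes behave as the unit tests below suggest and that `get_base` lazily invokes the supplier, as in the corrected method above.
```python
from baseLogger.ConsoleLogger import ConsoleLogger
from baseTest.BaseTestObject import BaseTestObject
from baseTest.DriverManager import DriverManager

# Any zero-argument callable can act as the driver supplier
def build_fake_driver():
    return "Fake String driver"

test_object = BaseTestObject(ConsoleLogger(), "DriverManagerSketch")
manager = DriverManager(build_fake_driver, test_object)

print(manager.is_driver_initialized())  # False - nothing built yet
print(manager.get_base())               # supplier is invoked lazily and the result cached
print(manager.is_driver_initialized())  # True after the first get_base() call
```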
#### File: PythonLogging/baseTestUnitTests/BaseTestObjectUnitTests.py
```python
import os
import tempfile
import unittest
from baseTest.BaseGenericTest import BaseGenericTest
from baseTest.BaseTestObject import BaseTestObject
from baseTest.DriverManager import DriverManager
from performance.PerfTimerCollection import PerfTimerCollection
# The type Base test object test.
class BaseTestObjectUnitTest(unittest.TestCase, BaseGenericTest):
# Test Constructor with Log and Method.
# @Test(groups = TestCategories.FRAMEWORK)
def testBaseTestObject1(self):
test_object = self.get_test_object()
# final String methodName = this.method.getName();
base_test_object = BaseTestObject(test_object.get_logger(), "FakeTestName")
self.assertIsNotNone(base_test_object, "Checking that Base Test Object instantiated correctly")
# Test Constructor with Test Object.
# @Test(groups = TestCategories.FRAMEWORK)
def testBaseTestObject2(self):
test_object = self.get_test_object()
base_test_object = BaseTestObject(test_object)
self.assertIsNotNone(base_test_object, "Checking that Base Test Object instantiated correctly")
# Test set value.
# @Test(groups = TestCategories.FRAMEWORK)
def testSetValue(self):
test_object = self.get_test_object()
key = "SetKey"
value = "SetKey Value"
test_object.setValue(key, value)
self.assertTrue(test_object.getValues().containsKey(key), "Checking that key exists in test object dictionary")
self.assertEquals(test_object.getValues().get(key), value, "Checking that value set correctly")
# Test set object.
# @Test(groups = TestCategories.FRAMEWORK)
def testSetObject(self):
        # TODO: check test in java
        test_object = self.get_test_object()
        test_key = "SetObject"
        test_value = dict()
        test_object.setObject(test_key, test_value)
        self.assertTrue(test_key in test_object.getObjects(), "Checking that key exists in test object dictionary")
        self.assertEquals(test_object.getObjects().get(test_key), test_value, "Checking that value set correctly")
# Test get log.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetLog(self):
test_object = self.get_test_object()
self.assertIsNotNone(test_object.getLogger(), "Checking that logger is not null.")
# Test set log.
# @Test(groups = TestCategories.FRAMEWORK)
def testSetLog(self):
test_object = self.get_test_object()
logger = self.get_logger()
test_object.setLogger(logger)
self.assertEquals(test_object.get_logger(), logger, "Checking that logger set correctly.")
# Test Get Perf Collection Timer - Not Null.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetPerfTimerCollectionNotNull(self):
test_object = self.get_test_object()
self.assertIsNotNone(test_object.getPerfTimerCollection(), "Checking that logger is not null.")
# Test Set Perf Collection Timer - Get/Set.
# @Test(groups = TestCategories.FRAMEWORK)
def testSetPerfTimerCollectionGetSet(self):
test_object = self.get_test_object()
perf_timer_collection = PerfTimerCollection(test_object.get_logger(), "FakeTestName")
test_object.setPerfTimerCollection(perf_timer_collection)
self.assertEquals(test_object.getPerfTimerCollection(), perf_timer_collection,
"Checking that perf timer collection set correctly.")
# Test get values.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetValues(self):
test_object = self.get_test_object()
self.assertIsNotNone(test_object.getValues(), "Checking that values is not null.")
# Test get objects.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetObjects(self):
test_object = self.get_test_object()
self.assertIsNotNone(test_object.getObjects(), "Checking that objects is not null.")
# Test Get Manager Store - Not Null.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetManagerStoreNotNull(self):
test_object = self.get_test_object()
self.assertIsNotNone(test_object.getManagerStore(), "Checking that objects is not null.")
# Test add driver manager.
# @Test(groups = TestCategories.FRAMEWORK)
def testAddDriverManager(self):
test_object = self.get_test_object()
# supplier = () -> null;
supplier = None
driver_manager = self.getDriverManager(test_object, supplier)
self.assertEquals(test_object.getManagerStore().size(), 0, "Checking that manager store is empty")
test_object.addDriverManager(driver_manager)
self.assertEquals(test_object.getManagerStore().size(), 1, "Checking that manager store has 1 object added")
# Test add driver manager - Overwrite True.
# @Test(groups = TestCategories.FRAMEWORK)
def testAddDriverManagerTrue(self):
test_object = self.get_test_object()
# supplier = () -> null;
supplier = None
driver_manager = self.getDriverManager(test_object, supplier)
driver_manager2 = self.getDriverManager(test_object, supplier)
self.assertEquals(test_object.getManagerStore().size(), 0, "Checking that manager store is empty")
test_object.addDriverManager(driver_manager, True)
self.assertEquals(test_object.getManagerStore().size(), 1, "Checking that manager store has 1 object added")
test_object.addDriverManager(driver_manager2, True)
self.assertEquals(test_object.getManagerStore().size(), 1, "Checking that manager store has 1 object added")
# Test add driver manager - Overwrite False.
# @Test(groups = TestCategories.FRAMEWORK)
def testAddDriverManagerFalse(self):
test_object = self.get_test_object()
# supplier = () -> null;
supplier = None
driver_manager = self.getDriverManager(test_object, supplier)
self.assertEquals(test_object.getManagerStore().size(), 0, "Checking that manager store is empty")
test_object.addDriverManager(driver_manager, False)
self.assertEquals(test_object.getManagerStore().size(), 1, "Checking that manager store has 1 object added")
# Test add driver manager 2.
# @Test(groups = TestCategories.FRAMEWORK)
def testAddDriverManager2(self):
test_object = self.get_test_object()
# supplier = () -> null;
supplier = None
driver_manager = self.getDriverManager(test_object, supplier)
test_key = "DriverManager1"
self.assertEquals(test_object.getManagerStore().size(), 0, "Checking that manager store is empty")
test_object.addDriverManager(test_key, driver_manager)
self.assertEquals(test_object.getManagerStore().size(), 1, "Checking that manager store has 1 object added")
self.assertTrue(test_object.getManagerStore().containsKey(test_key), "Checking if key exists in Manager Store")
# Test close.
# @Test(groups = TestCategories.FRAMEWORK)
def testClose(self):
test_object = self.get_test_object()
# supplier = () -> null;
supplier = None
driver_manager = self.getDriverManager(test_object, supplier)
test_key = "DriverManager1"
test_object.addDriverManager(test_key, driver_manager)
test_object.close()
        self.assertIsNone(test_object.getManagerStore(), "Checking that manager store has been closed")
        self.assertEquals(test_object.getValues().size(), 0, "Checking if values in manager store are closed")
# Test add associated file.
# @Test(groups = TestCategories.FRAMEWORK)
def testAddAssociatedFile(self):
test_object = self.get_test_object()
        temp = None
        try:
            temp = tempfile.NamedTemporaryFile(prefix="tempfile", suffix=".tmp", delete=False)
            temp.close()
        except IOError as e:
            print(e)
        assert os.path.exists(temp.name)
        self.assertTrue(test_object.addAssociatedFile(temp.name), "Checking that associated file was added")
        self.assertEquals(len(test_object.getArrayOfAssociatedFiles()), 1,
                          "Checking that one file was added to array.")
# Test remove associated file.
# @Test(groups = TestCategories.FRAMEWORK)
def testRemoveAssociatedFile(self):
test_object = self.get_test_object()
        temp = None
        try:
            temp = tempfile.NamedTemporaryFile(prefix="tempfile", suffix=".tmp", delete=False)
            temp.close()
        except IOError as e:
            print(e)
        assert os.path.exists(temp.name)
        path = temp.name
        self.assertTrue(test_object.addAssociatedFile(path), "Checking that associated file was added")
        self.assertTrue(test_object.removeAssociatedFile(path), "Checking that associated file was removed")
# Test get array of associated files.
# @Test(groups = TestCategories.FRAMEWORK)
def testGetArrayOfAssociatedFiles(self):
test_object = self.get_test_object()
        temp = None
        try:
            temp = tempfile.NamedTemporaryFile(prefix="tempfile", suffix=".tmp", delete=False)
            temp.close()
        except IOError as e:
            print(e)
        assert os.path.exists(temp.name)
        path = temp.name
        self.assertTrue(test_object.addAssociatedFile(path), "Checking that associated file was added")
        self.assertIsNotNone(test_object.getArrayOfAssociatedFiles(), "Checking that array is instantiated")
        self.assertEquals(len(test_object.getArrayOfAssociatedFiles()), 1, "Checking that array is not empty")
# Test contains associated file.
# @Test(groups = TestCategories.FRAMEWORK)
def testContainsAssociatedFile(self):
test_object = self.get_test_object()
        temp = None
        try:
            temp = tempfile.NamedTemporaryFile(prefix="tempfile", suffix=".tmp", delete=False)
            temp.close()
        except IOError as e:
            print(e)
        assert os.path.exists(temp.name)
        path = temp.name
        self.assertTrue(test_object.addAssociatedFile(path), "Checking that associated file was added")
        self.assertIsNotNone(test_object.getArrayOfAssociatedFiles(), "Checking that array is instantiated")
        self.assertTrue(test_object.containsAssociatedFile(path), "Checking if array contains file")
    @staticmethod
    def getDriverManager(test_object, supplier):
        return DriverManager(supplier, test_object)
```
#### File: PythonLogging/baseTestUnitTests/DriverManagerUnitTests.py
```python
import unittest
from baseLogger.ConsoleLogger import ConsoleLogger
from baseTest.BaseGenericTest import BaseGenericTest
from baseTest.BaseTestObject import BaseTestObject
from baseTest.DriverManager import DriverManager
from baseTest.ManagerDictionary import ManagerDictionary
class DriverManagerUnitTest(unittest.TestCase, BaseGenericTest):
# @Test(groups = TestCategories.FRAMEWORK)
def testGetBaseDriver(self):
driver_manager = self.getDriverManager()
driver_manager.set_base_driver("Fake String")
self.assertIsNotNone(driver_manager.get_base_driver())
# @Test(groups = TestCategories.FRAMEWORK)
def testSetBaseDriver(self):
driver_manager = self.getDriverManager()
driver_manager.set_base_driver("Fake String")
self.assertIsNotNone(driver_manager.get_base_driver())
# @Test(groups = TestCategories.FRAMEWORK)
def testIsDriverInitializedTrue(self):
driver_manager = self.getDriverManager()
self.assertIsNotNone(driver_manager.get_base())
self.assertTrue(driver_manager.is_driver_initialized())
# @Test(groups = TestCategories.FRAMEWORK)
def testIsDriverInitializedFalse(self):
driver_manager = self.getDriverManager()
self.assertFalse(driver_manager.is_driver_initialized())
# @Test(groups = TestCategories.FRAMEWORK)
def testGetLogger(self):
driver_manager = self.getDriverManager()
self.assertIsNotNone(driver_manager.get_logger())
# @Test(groups = TestCategories.FRAMEWORK)
def testGetBase(self):
driver_manager = self.getDriverManager()
self.assertIsNotNone(driver_manager.get_base())
# Can we add a manager by type
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testAddManagerByType(self):
dictionary = self.getDictionary()
dictionary.Add(self.getDriverManager())
        self.assertTrue(dictionary.ContainsKey(WebServiceDriverManager.__name__))
# Does adding item increment count
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testAddIncrementCount(self):
dictionary = self.getDictionary()
dictionary.Add(self.getDriverManager())
self.assertEquals(1, dictionary.Count)
# Is empty count zero
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testEmptyCountZero(self):
dictionary = self.getDictionary()
self.assertEquals(0, dictionary.Count)
# Does clear remove all item
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testClearRemovesAll(self):
dictionary = self.getDictionary()
dictionary.Add(self.getDriverManager())
dictionary.Add(str(), self.getDriverManager())
dictionary.Clear()
self.assertEquals(0, dictionary.Count)
# Throw exception if we try add on top of an existing manager
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
# [ExpectedException(typeof(ArgumentException))]
def testThrowDriverAlreadyExist(self):
dictionary = self.getDictionary()
dictionary.Add(self.getDriverManager())
dictionary.Add(self.getDriverManager())
self.fail("Previous line should have failed the test.")
# Throw exception if we try add a named manager on top of an existing manager
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
# [ExpectedException(typeof(ArgumentException))]
def testThrowNamedDriverAlreadyExist(self):
dictionary = self.getDictionary()
dictionary.Add(str(), self.getDriverManager())
dictionary.Add(str(), self.getDriverManager())
self.fail("Previous line should have failed the test.")
# Can override existing
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testCanOverrideExisting(self):
dictionary = self.getDictionary()
dictionary.Add(self.getDriverManager())
dictionary.AddOrOverride(self.getDriverManager())
self.assertEquals(1, dictionary.Count)
# Can use override for new manager
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testCanOverrideNonExisting(self):
dictionary = self.getDictionary()
dictionary.AddOrOverride(self.getDriverManager())
self.assertEqual(1, dictionary.Count)
# Can add named and unnamed
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testAddNamedAndUnnamed(self):
dictionary = self.getDictionary()
dictionary.Add(str(), self.getDriverManager())
dictionary.Add(self.getDriverManager())
self.assertEquals(2, dictionary.Count)
# Remove by type
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testRemoveByType(self):
dictionary = self.getDictionary()
manager_to_keep = self.getDriverManager()
dictionary.Add(self.getDriverManager())
dictionary.Add(str(), manager_to_keep)
        dictionary.Remove(WebServiceDriverManager)
self.assertEquals(manager_to_keep, dictionary[str()])
# Remove by name
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testRemoveByName(self):
dictionary = self.getDictionary()
manager_to_keep = self.getDriverManager()
dictionary.Add(manager_to_keep)
dictionary.Add(str(), self.getDriverManager())
dictionary.Remove(str())
        self.assertEquals(manager_to_keep.Get(),
                          dictionary.GetDriver(EventFiringWebServiceDriver, WebServiceDriverManager))
# Managers map correctly
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def testManagersMap(self):
dictionary = self.getDictionary()
manager_to_keep = self.getDriverManager()
manager_to_keep2 = self.getDriverManager()
dictionary.Add(manager_to_keep)
        dictionary.Add(str(), manager_to_keep2)
        self.assertEquals(manager_to_keep.Get(),
                          dictionary.GetDriver(EventFiringWebServiceDriver, WebServiceDriverManager))
self.assertEquals(manager_to_keep2, dictionary[str()])
# Manager dispose
# [TestMethod]
# [TestCategory(TestCategories.Framework)]
def ManagerDispose(self):
manager = ManagerDictionary()
manager.close()
self.assertIsNotNone(manager)
@staticmethod
def getDictionary():
base_test_object = BaseTestObject(ConsoleLogger(), str())
return base_test_object.get_manager_store()
# private DriverManager<String> getDriverManager() {
    def getDriverManager(self):
        base_test_object = BaseTestObject(ConsoleLogger(), str())
        return DriverManager(lambda: "Fake String here", base_test_object)
```
#### File: PythonLogging/loggingUnitTests/HtmlFileLoggerUnitTest.py
```python
import os
from os import path
from baseLogger.HtmlFileLogger import HtmlFileLogger
from baseLogger.LoggingConfig import LoggingConfig
from baseLogger.constants.MessageType import MessageType
import unittest
from utilities.StringProcessor import StringProcessor
# Unit test class for HtmlFileLogger
class HtmlFileLoggerUnitTest(unittest.TestCase):
Test_Message = "Test to ensure that the file in the created directory can be written to."
Log_Message = "Test to ensure LogMessage works as expected."
# Test logging to a new file.
def test_HtmlFileLoggerNoAppendTest(self):
html_logger = HtmlFileLogger("", False, MessageType.INFORMATION, "WriteToHtmlFileLogger")
html_logger.log_message(MessageType.WARNING, "Hello, this is a test.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Test logging to an existing file.
def test_HtmlFileLoggerAppendFileTest(self):
html_logger = HtmlFileLogger("", True, "WriteToExistingHtmlFileLogger")
html_logger.log_message(MessageType.WARNING, "This is a test to write to an existing file.")
html_logger.log_message(MessageType.WARNING, "This is a test to append to current file.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Test Writing to the Html File Logger
def test_WriteToHtmlFileLogger(self):
html_logger = HtmlFileLogger("", False, MessageType.INFORMATION, "WriteToHtmlFileLogger")
html_logger.log_message("Hello, this is a test.", "", MessageType.WARNING)
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Test Writing to an Existing Html File Logger
def test_WriteToExistingHtmlFileLogger(self):
html_logger = HtmlFileLogger("", True, MessageType.GENERIC, "WriteToExistingHtmlFileLogger")
html_logger.log_message(MessageType.WARNING, "This is a test.")
html_logger.log_message(MessageType.WARNING, "This is a test to write to an existing file.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify HtmlFileLogger constructor creates the correct directory if it does not already exist.
# Delete Directory after each run.
def test_HtmlFileLoggerConstructorCreateDirectory(self):
html_logger = HtmlFileLogger(True, LoggingConfig().get_log_directory(),
"HtmlFileLoggerCreateDirectory", MessageType.GENERIC)
html_logger.log_message(MessageType.WARNING,
"Test to ensure that the file in the created directory can be written to.")
file = html_logger.get_file_path()
        self.assertTrue(self.Test_Message in self.readTextFile(html_logger.get_file_path()))
        os.remove(file)
        self.assertFalse(path.exists(html_logger.get_file_path()))
        file = html_logger.get_directory()
        try:
            os.rmdir(file)
        except OSError as e:
            print(e)
# Verify that HtmlFileLogger can log message without defining a Message Type
def test_HtmlFileLoggerLogMessage(self):
html_logger = HtmlFileLogger("", True, MessageType.INFORMATION, "HtmlFileLoggerLogMessage")
html_logger.log_message("Test to ensure LogMessage works as expected.")
html_text = self.readTextFile(html_logger.get_file_path())
# os.remove(html_logger.get_file_path())
self.assertFalse(path.exists(html_logger.get_file_path()))
self.assertTrue(self.Log_Message in html_text, "Expected Log Message to be contained in log.")
# Verify that HTML File Logger can log message and defining a Message Type.
def test_HtmlFileLoggerLogMessageSelectType(self):
html_logger = HtmlFileLogger("", True, MessageType.INFORMATION, "HtmlFileLoggerLogMessageType")
html_logger.log_message("Test to ensure LogMessage works as expected.", None, MessageType.GENERIC)
html_text = self.readTextFile(html_logger.get_file_path())
# os.remove(html_logger.get_file_path())
self.assertFalse(path.exists(html_logger.get_file_path()))
self.assertTrue(self.Test_Message in html_text, "Expected Log Message to be contained in log.")
# Verify that File Path field can be accessed and updated
def test_HtmlFileLoggerSetFilePath(self):
html_logger = HtmlFileLogger("", True, MessageType.GENERIC, "HtmlFileLoggerSetFilePath")
html_logger.set_file_path("test file path")
file_path = html_logger.get_file_path()
# os.remove(html_logger.get_file_path())
self.assertFalse(path.exists(html_logger.get_file_path()))
self.assertEquals(file_path, "test file path", "Expected 'test file path' as file path")
# Verify that HTML File Logger catches and handles errors caused by incorrect file Paths
def test_HtmlFileLoggerCatchThrownException(self):
html_logger = HtmlFileLogger(True, "", "HtmlFileLoggerCatchThrownException", MessageType.GENERIC)
html_logger.set_file_path("<>")
html_logger.log_message(MessageType.GENERIC, "Test throws error as expected.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify that HTML File Logger catches and handles errors caused by incorrect file Paths.
def test_FileLoggerEmptyFileNameException(self):
with self.assertRaises(AttributeError):
html_logger = HtmlFileLogger()
self.assertIsNone(html_logger)
# Verify File Logger with No Parameters assigns the correct default values.
def test_FileLoggerNoParameters(self):
html_logger = HtmlFileLogger("")
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory,
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION, html_logger.get_message_type(),
"Expected Information Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify File Logger with only append parameter assigns the correct default values.
def test_FileLoggerAppendOnly(self):
html_logger = HtmlFileLogger("", True)
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION.name, html_logger.get_message_type(),
"Expected Information Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify File Logger with only File Name parameter assigns the correct default values.
# Verify default extension is added '.html'
def test_FileLoggerNameOnlyAddExtension(self):
html_logger = HtmlFileLogger("", False, MessageType.INFORMATION, "FileNameOnly")
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileNameOnly.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION, html_logger.get_message_type(),
"Expected Information Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify File Logger with only Message Type parameter assigns the correct default values.
def test_FileLoggerMessageTypeOnly(self):
html_logger = HtmlFileLogger("", False, MessageType.WARNING)
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.WARNING, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify File Logger with only Append and File Name parameters assigns the correct default values.
def test_FileLoggerAppendFileName(self):
html_logger = HtmlFileLogger("", True, MessageType.INFORMATION, "AppendFileName")
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("AppendFileName.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION.name, html_logger.get_message_type(),
"Expected Information Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify File Logger with only Log Folder and Append parameters assigns the correct default values.
def test_FileLoggerAppendLogFolder(self):
append_file_directory_path = LoggingConfig.get_log_directory() + "/" + "Append File Directory"
html_logger = HtmlFileLogger(append_file_directory_path, True)
self.assertEquals(append_file_directory_path, html_logger.get_directory(),
"Expected Directory 'Append File Directory'.")
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION.name, html_logger.get_message_type(),
"Expected Information Message Type.")
# Verify File Logger with only Log Folder and File Name parameters assigns the correct default values.
def test_FileLoggerLogFolderFileName(self):
log_folder_file_name_directory = LoggingConfig.get_log_directory() + "/" + "Log Folder File Name Directory"
html_logger = HtmlFileLogger(log_folder_file_name_directory, "LogFolderFileName.html")
self.assertEquals(log_folder_file_name_directory, html_logger.get_directory(),
"Expected Directory 'Log Folder File Name Directory'.")
self.assertEquals("LogFolderFileName.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION.name, html_logger.get_message_type(),
"Expected Information Message Type.")
# Verify File Logger with only Log Folder and Messaging Level parameters assigns the correct default values.
def test_FileLoggerLogFolderMessagingLevel(self):
log_folder_messaging_level_directory_path = LoggingConfig.get_log_directory() + "/" \
+ "Log Folder Messaging Level Directory"
html_logger = HtmlFileLogger(log_folder_messaging_level_directory_path, False, MessageType.WARNING)
self.assertEquals(log_folder_messaging_level_directory_path, html_logger.get_directory(),
"Expected Directory 'Log Folder Messaging Level Directory'.")
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.WARNING.name, html_logger.get_message_type(),
"Expected Warning Message Type.")
# Verify File Logger with only Append and Messaging Level parameters assigns the correct default values.
def test_FileLoggerAppendMessagingLevel(self):
html_logger = HtmlFileLogger("", True, MessageType.WARNING)
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.WARNING.name, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify File Logger with only Messaging Level and file name parameters assigns the correct default values.
def test_FileLoggerMessagingLevelFileName(self):
html_logger = HtmlFileLogger("", False, MessageType.WARNING, "MessagingTypeFile.html")
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("MessagingTypeFile.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.WARNING.name, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify File Logger with only Append, log folder and file name parameters assigns the correct default values.
def test_FileLoggerAppendLogFolderFileName(self):
append_log_folder_file_name_directory_path = LoggingConfig.get_log_directory() + "\\" \
+ "AppendLogFolderFileNameDirectory "
html_logger = HtmlFileLogger(append_log_folder_file_name_directory_path, True, MessageType.INFORMATION,
"AppendLogFolderFileName.html")
self.assertEquals(append_log_folder_file_name_directory_path, html_logger.get_directory(),
"Expected Directory AppendLogFolderFileNameDirectory")
self.assertEquals("AppendLogFolderFileName.html", html_logger.get_file_name(),
"Expected correct File Name.")
self.assertEquals(MessageType.INFORMATION.name, html_logger.get_message_type(),
"Expected Information Message Type.")
# Verify File Logger with only Append, log folder and Messaging Level parameters assigns the correct default values.
def test_FileLoggerAppendLogFolderMessagingLevel(self):
append_log_folder_file_name_directory = LoggingConfig.get_log_directory() + "\\" \
+ "AppendLogFolderFileNameDirectory "
html_logger = HtmlFileLogger(append_log_folder_file_name_directory, True, MessageType.WARNING)
self.assertEquals(append_log_folder_file_name_directory, html_logger.get_directory(),
"Expected Directory AppendLogFolderFileNameDirectory")
self.assertEquals("FileLog.html", html_logger.get_file_name(), "Expected correct File Name.")
self.assertEquals(MessageType.WARNING.name, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertFalse(path.exists(file_path))
# Verify File Logger with only File Name, Append and Messaging Level parameters assigns the correct default values.
def test_FileLoggerFileNameAppendMessagingLevel(self):
html_logger = HtmlFileLogger("FileNameAppendMessagingLevel.html", True, MessageType.WARNING)
self.assertEquals(html_logger.DEFAULT_LOG_FOLDER, html_logger.get_directory(),
StringProcessor.safe_formatter("Expected Directory '{}'.", html_logger.DEFAULT_LOG_FOLDER))
self.assertEquals("FileNameAppendMessagingLevel.html", html_logger.get_file_name(),
"Expected correct File Name.")
self.assertEquals(MessageType.WARNING.name, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify File Logger with only Log Folder,
# File Name and Messaging Level parameters assigns the correct default values.
def test_FileLoggerLogFolderFileNameMessagingLevel(self):
log_folder_file_name_messaging_level_directory_path = LoggingConfig.get_log_directory() \
+ "/" + "LogFolderFileNameMessagingLevelDirectory"
html_logger = HtmlFileLogger(log_folder_file_name_messaging_level_directory_path,
"LogFolderFileNameMessagingLevel.html", MessageType.WARNING)
self.assertEquals(log_folder_file_name_messaging_level_directory_path, html_logger.get_directory(),
"Expected Directory 'LogFolderFileNameMessagingLevelDirectory'")
self.assertEquals("LogFolderFileNameMessagingLevel.html", html_logger.get_file_name(),
"Expected correct File Name.")
self.assertEquals(MessageType.WARNING, html_logger.get_message_type(), "Expected Warning Message Type.")
file_path = html_logger.get_file_path() + "\\" + html_logger.get_file_name()
self.assertTrue(path.exists(file_path))
# Verify that HTML File Logger catches and handles errors caused by empty file name.
def test_HtmlFileLoggerEmptyFileNameException(self):
with self.assertRaises(AttributeError):
html_logger = HtmlFileLogger()
self.assertIsNone(html_logger)
# Read a file and return it as a string
# @ param filePath The file path to read
# @ return The contents of the file
@staticmethod
def readTextFile(file_path):
        text = ""
        try:
            with open(file_path) as file_handle:
                text = file_handle.read()
        except FileNotFoundError as e:
            print(e)
        return text
```
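These tests build the expected log location by concatenating `get_file_path()` and `get_file_name()` with a hard-coded `\\` separator, which only works on Windows. A hedged, platform-neutral alternative (shown with made-up example paths) would be `os.path.join`:
```python
import os

def expected_log_path(directory, file_name):
    # Portable equivalent of: directory + "\\" + file_name
    return os.path.join(directory, file_name)

print(expected_log_path("C:\\logs", "FileLog.html"))
print(expected_log_path("/tmp/logs", "FileLog.html"))
```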
#### File: PythonLogging/utilities/Config.py
```python
import pathlib
from os import path
from utilities.constants.ConfigSection import ConfigSection
from utilities.StringProcessor import StringProcessor
from utilities.MaqsConfigException import MaqsConfigException
import xml.etree.ElementTree as ElementTree
# Configuration class.
class Config:
def __init__(self):
# The default section MagenicMaqs.
self.default_maqs_section = ConfigSection.MagenicMaqs
# The configuration containing values loaded in from the config.xml file.
self.config_values = self.get_file()
# The configuration containing values that were added to the configuration.
self.override_config = dict()
# The base configs object.
self.configs = dict()
# reads the config file and reads its values
@staticmethod
def get_file():
# default config.xml file name.
config_file = pathlib.Path(__file__).with_name("config.xml")
try:
if path.exists(config_file):
tree = ElementTree.parse(config_file)
root = tree.getroot()
new_config_values = dict()
for parent in root:
new_config_values[parent.tag] = dict()
for child in parent:
new_config_values[parent.tag][child.tag] = child.text
return new_config_values
        except Exception as e:
            raise MaqsConfigException(StringProcessor.safe_formatter(
                "Exception creating the xml configuration object from the file: {}", str(e)))
# Validates the app config section by ensuring required values are present
# @param configSection The config section to be validated
    # @param configValidation A list of strings containing the required field names
def validate(self, config_section, config_validation):
if config_validation is None:
raise MaqsConfigException("The value passed in for config_validation"
" (required fields in a config) is null")
config_section_passed = Config.get_section(self, config_section)
# config_section_passed = self.get_section(config_section)
exceptions = []
for requiredField in config_validation.RequiredFields:
if requiredField not in config_section_passed:
exceptions.append("Key missing " + requiredField)
        if len(exceptions) > 0:
message = []
for mess in exceptions:
message.append(mess)
raise MaqsConfigException(message)
# Gets a specific section from the configuration.
# @param section The desired section
# @return A dictionary of the values in the section
def get_section(self, section):
section_values = {}
# first parse the override config
override_paths = self.override_config.get(section)
if override_paths is not None:
for key in override_paths:
                key = str(key)
edited_key = key.replace(section + ".", "")
section_values[edited_key] = override_paths.get(key)
# then parse the base config, ignoring duplicates
config_value_paths = self.config_values.get(section)
if config_value_paths is not None:
for key in config_value_paths:
                key = str(key)
edited_key = key.replace(section + ".", "")
if edited_key not in section_values:
section_values[edited_key] = config_value_paths.get(key)
return section_values
# Add dictionary of values to maqs section.
# @param configurations Dictionary of configuration values
# @param overrideExisting True to override existing values, False otherwise
def add_general_test_setting_values(self, configurations, override_existing):
self.add_test_setting_values(configurations, self.default_maqs_section.value, override_existing)
# Add dictionary of values to specified section.
# @param configurations Dictionary of configuration values
# @param section Section to add the value to
# @param overrideExisting True to override existing values, False otherwise
def add_test_setting_values(self, configurations, section, override_existing):
for key, value in configurations.items():
# Make sure the section exists
if self.look_for_section(section, self.override_config) is None:
                self.override_config[section] = {}
# See if we need to add a new key value pair
if self.contains_key(key, self.override_config) is False:
self.override_config[section][key] = value
elif override_existing:
# We want to override existing values
self.override_config[section][key] = value
# Get the specified value out of the default section.
# @param key The key
# @param defaultValue The value to return if the key does not exist
# @return The configuration value
def get_general_value(self, key, default_value=None):
if default_value is None:
return self.get_value_for_section(self.default_maqs_section.value, key)
else:
return self.get_value_for_section(self.default_maqs_section.value, key, default_value)
# Get the specified value out of the specified section.
# @param section The section to search
# @param key The key
# @param defaultValue The value to return if the key is not found
# @return The configuration value
def get_value_for_section(self, section, key, default_value=None):
key_with_section = section + "." + key
return self.get_value(key_with_section, default_value)
@staticmethod
def get_key(key, dictionary):
if dictionary is not None:
for section in dictionary:
if section in key:
for value in dictionary[section]:
key = key[key.find(".") + 1:len(key)]
if value == key:
return dictionary[section][value]
return None
@staticmethod
def contains_section(section, dictionary):
        if dictionary is not None and len(dictionary) > 0:
for sections in dictionary:
if section in sections:
return True
return False
@staticmethod
def contains_key(key, dictionary):
if dictionary is not None:
for section in dictionary:
for value in dictionary[section]:
if value == key:
return True
return False
@staticmethod
def look_for_section(section, dictionary):
if len(dictionary) > 0:
for sections in dictionary:
if section in sections:
return section
return None
def check_configs_for_key(self, key):
ret_val = self.get_key(key, self.override_config)
if ret_val is None:
return self.get_key(key, self.config_values)
else:
return ret_val
# Get the configuration value for a specific key. Does not assume a section.
# @param key The key
# @param defaultValue Value to return if the key does not exist
# @return The configuration value - Returns the default string if the key is not found
def get_value(self, key, default_value=None):
ret_val = self.check_configs_for_key(key)
if default_value is not None and ret_val != default_value:
return default_value
else:
return ret_val
# Check the config for a specific key. Searches the specified section.
# @param key The key
# @param section The specified section
# @return True if the key exists, false otherwise
def does_key_exist(self, key, section=None):
if section is not None:
if section in self.override_config:
if key in self.override_config[section]:
return True
else:
return False
else:
return key in self.config_values[section]
else:
if self.override_config is not None and len(self.override_config) > 0:
return key in self.override_config
else:
if self.get_key(key, self.config_values):
return True
return False
# Check the config for a specific key. Searches the default section.
# @param key The key
# @return True if the key exists, false otherwise
def does_general_key_exist(self, key):
return self.does_key_exist(key, self.default_maqs_section.value)
```
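The override-before-base lookup in `get_section`/`get_value_for_section` is easiest to see with a small example. This sketch assumes a `config.xml` next to `Config.py` containing a `SeleniumMaqs` section like the one the unit tests below rely on.
```python
from utilities.Config import Config

config = Config()
# Runtime values land in override_config and shadow config.xml entries for the same key...
config.add_test_setting_values({"Browser": "CHROME"}, "SeleniumMaqs", True)
print(config.get_section("SeleniumMaqs").get("Browser"))        # "CHROME" - the override wins
# ...while keys that were never overridden still come from the parsed XML file.
print(config.get_value_for_section("SeleniumMaqs", "TestKey"))  # value from config.xml
```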
#### File: PythonLogging/utilities/GenericWait.py
```python
import time
from utilities.Config import Config
from datetime import datetime
# The type Generic wait.
class GenericWait:
def __init__(self):
        self.retry_time_from_config = int(Config().get_general_value("WaitTime"))
        self.timeout_from_config = int(Config().get_general_value("Timeout"))
# Wait until boolean.
# @param <T> the type parameter
# @param waitForTrue the wait for true
# @param arg the arg
# @return the boolean
# @throws InterruptedException the interrupted exception
# @throws FunctionException the function exception
    def wait_until(self, wait_for_true, arg=None):
        if self.wait_boolean(wait_for_true, self.retry_time_from_config, self.timeout_from_config, False, arg):
            return True
        raise TimeoutError("Timed out waiting for the wait_until method to return true")
# Wait for true.
# @ param waitForTrue the wait for true
# @ param arg the arg
def wait_for_true(self, wait_for_true, arg=None):
        if not self.wait_boolean(wait_for_true, self.retry_time_from_config, self.timeout_from_config, True, arg):
raise TimeoutError("Timed out waiting for the wait_for_true method to return true")
# Wait until match t.
# @param waitForTrue the wait for true
# @param retryTime the retry time
# @param timeout the timeout
# @param comparativeValue the comparative value
# @return the t
# @throws InterruptedException the interrupted exception
# def waitUntilMatch(Supplier<T> waitForTrue, long retryTime, long timeout, T comparativeValue):
def wait_until_match(self, wait_for_true, comparative_value, retry_time=None, timeout=None):
if retry_time is None:
retry_time = self.retry_time_from_config
if timeout is None:
timeout = self.timeout_from_config
        start_time = datetime.now()
        value = wait_for_true()
        # Checks if the two values are equal
        params_are_equal = self.params_equals([value, comparative_value])
        # while the params are not equal and the timeout hasn't been met,
        # running them through another function because we can't use an operator with T
        while not params_are_equal and (datetime.now() - start_time).total_seconds() < timeout:
            # If they aren't, wait
            time.sleep(retry_time)
            value = wait_for_true()
            # Check if they are equal
            # running them through another function because we can't use an operator with T
            params_are_equal = self.params_equals([value, comparative_value])
        return value
# Wait for match.
# @param <T> the type parameter
# @param waitForTrue the wait for true
# @param retryTime the retry time
# @param timeout the timeout
# @param comparativeValue the comparative value
# @throws InterruptedException the interrupted exception
# @throws TimeoutException the timeout exception
# def waitForMatch(Supplier<T> waitForTrue, long retryTime, long timeout,T comparativeValue):
def wait_for_match(self, wait_for_true, comparative_value, retry_time=None, timeout=None):
if retry_time is None:
retry_time = self.retry_time_from_config
if timeout is None:
timeout = self.timeout_from_config
# Set start time and exception holder
        start_time = datetime.now()
        # Checks if the two values are equal
        params_are_equal = self.params_equals([wait_for_true(), comparative_value])
        # While the params are not equal & the timeout hasn't been met, keep checking
        while not params_are_equal and (datetime.now() - start_time).total_seconds() < timeout:
            # If they aren't, wait
            time.sleep(retry_time)
            # Check if they are equal running them through another function because we can't use an operator with T
            params_are_equal = self.params_equals([wait_for_true(), comparative_value])
        if not params_are_equal:
            raise TimeoutError(
                "Timed out waiting for the supplier to return the expected value of " + str(comparative_value))
# Wait for t.
# @param <T> the type parameter
# @param waitFor the wait for
# @return the t
# def waitFor(Supplier<T> waitFor):
def wait_for(self, wait_for, arg=None):
return self.wait(wait_for, self.retry_time_from_config, self.timeout_from_config, arg)
# Wait boolean.
# @param waitForTrue the wait for true
# @param retryTime the retry time
# @param timeout the timeout
# @param throwException the throw exception
# @return the boolean
# public static boolean wait(BooleanSupplier waitForTrue, long retryTime, long timeout, boolean throwException)
@staticmethod
def wait_boolean(wait_for_true, retry_time, timeout, throw_exception, arg=None):
# Set start time and exception holder
start_time = datetime.now()
exception = None
while int((datetime.now() - start_time).total_seconds()) < timeout:
try:
# Clear out old exception
exception = None
                # Check if the function returns true
                result = wait_for_true(arg) if arg is not None else wait_for_true()
                if result:
                    return True
except Exception as e:
# Save of the exception if we want to throw exceptions
exception = ValueError("BooleanSupplier exception caught.", e)
# Give the system a second before checking if the page is updating
time.sleep(retry_time)
# Check if we had an exceptions
if throw_exception and exception is not None:
raise exception
# We timed out waiting for the function to return true
return False
# Wait t.
# @param <T> the type parameter
# @param <U> the type parameter
# @param waitFor the wait for two parameter
# @param retryTime the retry time
# @param timeout the timeout
# @param arg the arg
# @return the t
# def wait(self, Function<U, T> waitFor, long retryTime, long timeout, U arg):
@staticmethod
def wait(wait_for_true, retry_time, timeout, arg=None):
        # Set start time and exception holder
        start_time = datetime.now()
        exception = None
        while int((datetime.now() - start_time).total_seconds()) < timeout:
            try:
                # Invoke the supplier, passing the argument through when one was given
                value = wait_for_true() if arg is None else wait_for_true(arg)
                if value is not None:
                    return value
            except Exception as e:
                exception = e
            # Give the system a second before checking if the page is updating
            time.sleep(retry_time)
        raise TimeoutError("Timed out waiting for the wait method to return: " + str(exception))
@staticmethod
def params_equals(param):
# For each item
for item in param:
# and each item
for item2 in param:
# Compare each item
                if item != item2:
# If any do not match, then they are not equal
return False
# If we get here, then we had no mismatches
return True
``` |
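A small polling sketch using the static `wait_boolean` directly, so no `config.xml` is needed. It assumes the supplier-style callable is invoked on each pass, as in the corrected method above.
```python
import time
from utilities.GenericWait import GenericWait

start = time.time()

def condition_becomes_true():
    # True once roughly half a second has elapsed
    return (time.time() - start) > 0.5

# Poll every 0.1 seconds for up to 2 seconds; returns True as soon as the condition holds
result = GenericWait.wait_boolean(condition_becomes_true, 0.1, 2, True)
print(result)
```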
{
"source": "jonreding2010/PythonUtilities",
"score": 3
} |
#### File: PythonUtilities/utilities/StringProcessor.py
```python
import os
import traceback
import sys
class StringProcessor:
# Creates a string based on the arguments. If no args are applied, then we want to just return the message
# @param message The message being used
# @param args The arguments being used
# @return A final string
@staticmethod
def safe_formatter(message, args=None):
if args is None:
return message
try:
if isinstance(args, str):
return message.format(args)
else:
return message.format(*args)
except Exception:
builder = "Message: " + str(message)
builder += " Arguments: "
for arg in args:
builder += str(arg) + " "
return builder.rstrip()
    # Gets a string of a nested exception list
    # @param e Exception to print as string
    # return A string of the Exceptions with stack trace
@staticmethod
def safe_exception_formatter(exception):
sb = ""
return StringProcessor.get_exception(exception, sb)
# Recursive function to grab the inner exceptions
    # @param ex Exception to look into
    # @param sb String builder to build the string
    # @param level Recursive level for spacing of logs
    # return A string with the exceptions
    @staticmethod
    def get_exception(exception, sb, level=0):
        spaces = " " * level
        sb += os.linesep + spaces + str(exception) + os.linesep + "".join(
            traceback.format_exception(type(exception), exception, exception.__traceback__))
        # Recursively walk any chained (inner) exceptions
        inner = exception.__cause__ or exception.__context__
        if inner is not None:
            sb = StringProcessor.get_exception(inner, sb, level + 2)
        return sb
```
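A quick demonstration of `safe_formatter`'s fallback behaviour: well-formed templates are formatted normally, and a mismatched template degrades to a concatenated message instead of raising.
```python
from utilities.StringProcessor import StringProcessor

# A single string argument is passed straight to str.format
print(StringProcessor.safe_formatter("Expected Directory '{}'.", "C:/logs"))
# A list of arguments is unpacked into the placeholders
print(StringProcessor.safe_formatter("{} - {}", ["MyTest", "2024-01-01"]))
# Too few arguments: no exception, the message and arguments are concatenated instead
print(StringProcessor.safe_formatter("{} and {} and {}", ["only-one"]))
```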
#### File: PythonUtilities/utilitiesUnitTest/test_ConfigUnitTest.py
```python
import unittest
from utilities.Config import Config
from utilities.constants.ConfigSection import ConfigSection
class ConfigUnitTest(unittest.TestCase):
# Test getting an entire section from the config.
# @Test(groups = TestCategories.UTILITIES)
def test_getSectionWithConfigSecEnumTest(self):
config = Config()
test_section = config.get_section(str(ConfigSection.SeleniumMaqs.value))
self.assertEquals(test_section.get("TestKey"), "testValueTwo")
self.assertEquals(test_section.get("Browser"), "Internet Explorer")
# Test adding a list of test settings to the config.
# @Test(groups = TestCategories.UTILITIES)
def test_addTestSettingValuesNewSectionTest(self):
new_value_map = {"BROWSER1": "CHROME1", "DBString2": "Dbstring2222"}
config = Config()
config.add_test_setting_values(new_value_map, "NewSection", False)
test_section = config.get_section("NewSection")
self.assertEquals(test_section.get("BROWSER1"), "CHROME1")
self.assertEquals(test_section.get("DBString2"), "Dbstring2222")
# Test overriding existing values in the config.
# @Test(groups = TestCategories.UTILITIES)
def test_addGeneralTestSettingValuesOverrideValuesTest(self):
new_value_map = {"BrowserOverride": "CHROME", "TimeoutOverride": "13333333"}
config = Config()
config.add_general_test_setting_values(new_value_map, True)
self.assertEquals(config.get_general_value("BrowserOverride"), "CHROME")
self.assertEquals(config.get_general_value("TimeoutOverride"), "13333333")
# Test not overriding existing values in the config.
# @Test(groups = TestCategories.UTILITIES)
def test_addGeneralTestSettingValuesDoNotOverrideValuesTest(self):
new_value_map = {"DontBrowserOverride": "CHROME", "DontTimeoutOverride": "13333333"}
new_value_map_two = {"DontBrowserOverride": "IE", "DontTimeoutOverride": "5555"}
# add values to the override config since the values don't exist in the override config
config = Config()
config.add_general_test_setting_values(new_value_map, False)
self.assertEquals(config.get_general_value("DontBrowserOverride"), "CHROME")
self.assertEquals(config.get_general_value("DontTimeoutOverride"), "13333333")
# don't add the values to the override config since the values do exist in the override config
config.add_general_test_setting_values(new_value_map_two, False)
self.assertEquals(config.get_general_value("DontBrowserOverride"), "CHROME")
self.assertEquals(config.get_general_value("DontTimeoutOverride"), "13333333")
# do add the values because of the override flag
config.add_general_test_setting_values(new_value_map_two, True)
self.assertEquals(config.get_general_value("DontBrowserOverride"), "IE")
self.assertEquals(config.get_general_value("DontTimeoutOverride"), "5555")
# Test getting a value out of the default section of the config.
# @Test(groups = TestCategories.UTILITIES)
def test_getGeneralValueTest(self):
config = Config()
self.assertEquals(config.get_general_value("TestKey"), "testValue")
self.assertEquals(config.get_general_value("nonExistentKey", "defaultValue"), "defaultValue")
# Test getting a value of a specified section of the config.
# @Test(groups = TestCategories.UTILITIES)
def test_getValueForSectionTest(self):
config = Config()
self.assertEquals(config.get_value_for_section("SeleniumMaqs", "TestKey"), "testValueTwo")
self.assertEquals(config.get_value_for_section(ConfigSection.SeleniumMaqs.value, "Browser"), "Internet Explorer")
self.assertEquals(config.get_value_for_section("SeleniumMaqs", "nonExistentKey", "defaultValue"), "defaultValue")
# Test getting a value from the config using the full defined path.
# @Test(groups = TestCategories.UTILITIES)
def test_getValueTest(self):
config = Config()
self.assertEquals(config.get_value("MagenicMaqs.TestKey", "defaultValue"), "defaultValue")
self.assertEquals(config.get_value("SeleniumMaqs.TestKey"), "testValueTwo")
# Test checking if the key exists.
# @Test(groups = TestCategories.UTILITIES)
def test_doesKeyExistTest(self):
config = Config()
self.assertTrue(config.does_key_exist("SeleniumMaqs.TestKey"))
self.assertTrue(config.does_general_key_exist("TimeoutOverride"))
self.assertTrue(config.does_key_exist("HubAddress", ConfigSection.SeleniumMaqs.value))
self.assertFalse(config.does_key_exist("HubAddress", ConfigSection.MagenicMaqs.value))
``` |
{
"source": "jonreiter/python-experiments",
"score": 3
} |
#### File: jonreiter/python-experiments/arg_bool.py
```python
import argparse
def test_arg_spec(args_list, expected_values):
parser = argparse.ArgumentParser()
parser.add_argument('--flag1', type=eval, choices=[True, False], default=True)
parser.add_argument('--flag2', type=eval, choices=[True, False], default=False)
args = parser.parse_args(args_list)
assert(args.flag1 == expected_values[0])
assert(args.flag2 == expected_values[1])
test_arg_spec(['--flag1=True', '--flag2=False'], [True, False])
test_arg_spec(['--flag1=True', '--flag2=True'], [True, True])
test_arg_spec(['--flag1=False', '--flag2=False'], [False, False])
test_arg_spec(['--flag1=False', '--flag2=True'], [False, True])
``` |
{
"source": "jonreyno/AGS",
"score": 2
} |
#### File: AGS/Script/push_version.py
```python
import codecs
import json
import re
from collections import namedtuple
AgsVersion = namedtuple('AgsVersion', ['version', 'version_friendly', 'app_id'])
def load_version(path):
with open(path, "r") as f:
j = json.load(f)
version = j['version'].split('.')
version_friendly = j['versionFriendly'].split('.')
app_id = j['appID']
return AgsVersion(version, version_friendly, app_id)
def read_file(path, encoding):
print path, encoding
with codecs.open(path, "r", encoding=encoding) as f:
return f.read()
def write_file(path, encoding, data):
with codecs.open(path, "w", encoding=encoding) as f:
f.write(data)
def replace_group(match, group, data, replacement):
return data[:match.start(group)] + replacement + data[match.end(group):]
def main():
version = load_version("../version.json")
# -----------------------------------------------------------------------------
path = "../Common/core/def_version.h"
encoding = "utf-8"
data = read_file(path, encoding)
m = re.search(r'\#define ACI_VERSION_STR\s+"(.*)"', data)
data = replace_group(m, 1, data, ".".join(version.version))
m = re.search(r'\#define ACI_VERSION_MSRC_DEF\s+(.*)', data)
data = replace_group(m, 1, data, ",".join(version.version))
write_file(path, encoding, data)
# -----------------------------------------------------------------------------
path = "../Editor/AGS.Types/Properties/AssemblyInfo.cs"
encoding = "utf-8-sig"
data = read_file(path, encoding)
m = re.search(r'AGS_EDITOR_FRIENDLY_VERSION\s*=\s*"([^"]+)', data)
data = replace_group(m, 1, data, ".".join(version.version_friendly))
m = re.search(r'AGS_EDITOR_VERSION\s*=\s*"([^"]+)', data)
data = replace_group(m, 1, data, ".".join(version.version))
write_file(path, encoding, data)
# -----------------------------------------------------------------------------
path = "../Manual/ags.tex"
encoding = "utf-8"
data = read_file(path, encoding)
m = re.search(r'\\title\{Adventure Game Studio\s+(.*)\}', data)
data = replace_group(m, 1, data, ".".join(version.version_friendly))
write_file(path, encoding, data)
# -----------------------------------------------------------------------------
path = "../Android/library/AndroidManifest.xml"
encoding = "utf-8"
data = read_file(path, encoding)
m = re.search(r'android\:versionName\s*=\s*"(.*)"', data)
data = replace_group(m, 1, data, ".".join(version.version_friendly))
write_file(path, encoding, data)
if __name__ == "__main__":
main()
``` |
{
"source": "jonringer/portalocker",
"score": 3
} |
#### File: portalocker/portalocker/utils.py
```python
import os
import time
import atexit
import tempfile
import contextlib
from . import exceptions
from . import constants
from . import portalocker
current_time = getattr(time, "monotonic", time.time)
DEFAULT_TIMEOUT = 5
DEFAULT_CHECK_INTERVAL = 0.25
LOCK_METHOD = constants.LOCK_EX | constants.LOCK_NB
__all__ = [
'Lock',
'open_atomic',
]
@contextlib.contextmanager
def open_atomic(filename, binary=True):
'''Open a file for atomic writing. Instead of locking this method allows
you to write the entire file and move it to the actual location. Note that
this makes the assumption that a rename is atomic on your platform which
is generally the case but not a guarantee.
http://docs.python.org/library/os.html#os.rename
>>> filename = 'test_file.txt'
>>> if os.path.exists(filename):
... os.remove(filename)
>>> with open_atomic(filename) as fh:
... written = fh.write(b'test')
>>> assert os.path.exists(filename)
>>> os.remove(filename)
'''
assert not os.path.exists(filename), '%r exists' % filename
path, name = os.path.split(filename)
# Create the parent directory if it doesn't exist
if path and not os.path.isdir(path): # pragma: no cover
os.makedirs(path)
temp_fh = tempfile.NamedTemporaryFile(
mode=binary and 'wb' or 'w',
dir=path,
delete=False,
)
yield temp_fh
temp_fh.flush()
os.fsync(temp_fh.fileno())
temp_fh.close()
try:
os.rename(temp_fh.name, filename)
finally:
try:
os.remove(temp_fh.name)
except Exception:
pass
class Lock(object):
def __init__(
self, filename, mode='a', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=False,
flags=LOCK_METHOD, **file_open_kwargs):
'''Lock manager with built-in timeout
filename -- path of the file to lock
mode -- the open mode, 'a' or 'ab' should be used for writing
truncate -- use truncate to emulate 'w' mode, None is disabled, 0 is
truncate to 0 bytes
timeout -- timeout when trying to acquire a lock
check_interval -- check interval while waiting
fail_when_locked -- after the initial lock failed, return an error
or lock the file
**file_open_kwargs -- The kwargs for the `open(...)` call
fail_when_locked is useful when multiple threads/processes can race
when creating a file. If set to true, the first failed attempt to
acquire the lock raises an AlreadyLocked exception instead of waiting
for the timeout.
Note that the file is opened first and locked later; 'w' mode is
therefore converted to 'a' internally and the truncate only happens
after the lock has been acquired.
'''
if 'w' in mode:
truncate = True
mode = mode.replace('w', 'a')
else:
truncate = False
self.fh = None
self.filename = filename
self.mode = mode
self.truncate = truncate
self.timeout = timeout
self.check_interval = check_interval
self.fail_when_locked = fail_when_locked
self.flags = flags
self.file_open_kwargs = file_open_kwargs
def acquire(
self, timeout=None, check_interval=None, fail_when_locked=None):
'''Acquire the locked filehandle'''
if timeout is None:
timeout = self.timeout
if timeout is None:
timeout = 0
if check_interval is None:
check_interval = self.check_interval
if fail_when_locked is None:
fail_when_locked = self.fail_when_locked
# If we already have a filehandle, return it
fh = self.fh
if fh:
return fh
# Get a new filehandler
fh = self._get_fh()
try:
# Try to lock
fh = self._get_lock(fh)
except exceptions.LockException as exception:
# Try till the timeout has passed
timeoutend = current_time() + timeout
while timeoutend > current_time():
# Wait a bit
time.sleep(check_interval)
# Try again
try:
# We already tried to get the lock
# If fail_when_locked is true, then stop trying
if fail_when_locked:
raise exceptions.AlreadyLocked(exception)
else: # pragma: no cover
# We've got the lock
fh = self._get_lock(fh)
break
except exceptions.LockException:
pass
else:
# We got a timeout... reraising
raise exceptions.LockException(exception)
# Prepare the filehandle (truncate if needed)
fh = self._prepare_fh(fh)
self.fh = fh
return fh
def release(self):
'''Releases the currently locked file handle'''
if self.fh:
self.fh.close()
self.fh = None
def _get_fh(self):
'''Get a new filehandle'''
return open(self.filename, self.mode, **self.file_open_kwargs)
def _get_lock(self, fh):
'''
Try to lock the given filehandle
raises LockException if it fails'''
portalocker.lock(fh, self.flags)
return fh
def _prepare_fh(self, fh):
'''
Prepare the filehandle for usage
If truncate is a number, the file will be truncated to that amount of
bytes
'''
if self.truncate:
fh.seek(0)
fh.truncate(0)
return fh
def __enter__(self):
return self.acquire()
def __exit__(self, type_, value, tb):
self.release()
def __delete__(self, instance): # pragma: no cover
instance.release()
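# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes 'example.lock' is a writable path in the current directory.
def _lock_usage_example():  # pragma: no cover - illustrative only
    with Lock('example.lock', timeout=10) as fh:
        fh.write('critical section\n')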
class RLock(Lock):
"""
A reentrant lock, functions in a similar way to threading.RLock in that it
can be acquired multiple times. When the corresponding number of release()
calls are made the lock will finally release the underlying file lock.
"""
def __init__(
self, filename, mode='a', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=False,
flags=LOCK_METHOD):
super(RLock, self).__init__(filename, mode, timeout, check_interval,
fail_when_locked, flags)
self._acquire_count = 0
def acquire(
self, timeout=None, check_interval=None, fail_when_locked=None):
if self._acquire_count >= 1:
fh = self.fh
else:
fh = super(RLock, self).acquire(timeout, check_interval,
fail_when_locked)
self._acquire_count += 1
return fh
def release(self):
if self._acquire_count == 0:
raise exceptions.LockException(
"Cannot release more times than acquired")
if self._acquire_count == 1:
super(RLock, self).release()
self._acquire_count -= 1
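# (added note) The same RLock instance can be acquired repeatedly; the
# underlying file lock is only released once release() has been called as many
# times as acquire().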
class TemporaryFileLock(Lock):
def __init__(self, filename='.lock', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=True,
flags=LOCK_METHOD):
Lock.__init__(self, filename=filename, mode='w', timeout=timeout,
check_interval=check_interval,
fail_when_locked=fail_when_locked, flags=flags)
atexit.register(self.release)
def release(self):
Lock.release(self)
if os.path.isfile(self.filename): # pragma: no branch
os.unlink(self.filename)
``` |
{
"source": "jonringer/prawcore",
"score": 3
} |
#### File: prawcore/examples/device_id_auth_trophies.py
```python
import os
import prawcore
import sys
def main():
"""Provide the program's entry point when directly executed."""
if len(sys.argv) != 2:
print("Usage: {} USERNAME".format(sys.argv[0]))
return 1
authenticator = prawcore.UntrustedAuthenticator(
prawcore.Requestor("prawcore_device_id_auth_example"),
os.environ["PRAWCORE_CLIENT_ID"],
)
authorizer = prawcore.DeviceIDAuthorizer(authenticator)
authorizer.refresh()
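# (added note) refresh() obtains an application-only OAuth token for this
# untrusted client via Reddit's installed-client grant, keyed to a device id.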
user = sys.argv[1]
with prawcore.session(authorizer) as session:
data = session.request("GET", "/api/v1/user/{}/trophies".format(user))
for trophy in data["data"]["trophies"]:
description = trophy["data"]["description"]
print(
trophy["data"]["name"]
+ (" ({})".format(description) if description else "")
)
return 0
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jonringer/pymatgen",
"score": 2
} |
#### File: io/lobster/inputs.py
```python
import itertools
import os
import warnings
from typing import Any, Dict, List, Optional
import numpy as np
import spglib
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__maintainer__ = "<NAME>, <NAME> "
__email__ = "<EMAIL>, <EMAIL>"
__date__ = "Dec 13, 2017"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class Lobsterin(dict, MSONable):
"""
This class can handle and generate lobsterin files
Furthermore, it can also modify INCAR files for lobster, generate KPOINT files for fatband calculations in Lobster,
and generate the standard primitive cells in a POSCAR file that are needed for the fatband calculations.
There are also several standard lobsterin files that can be easily generated.
"""
# reminder: lobster is not case sensitive
# keyword + one float can be used in file
FLOATKEYWORDS = [
"COHPstartEnergy",
"COHPendEnergy",
"gaussianSmearingWidth",
"useDecimalPlaces",
"COHPSteps",
]
# one of these keywords +endstring can be used in file
STRINGKEYWORDS = [
"basisSet",
"cohpGenerator",
"realspaceHamiltonian",
"realspaceOverlap",
"printPAWRealSpaceWavefunction",
"printLCAORealSpaceWavefunction",
"kSpaceCOHP",
"EwaldSum",
]
# the keyword alone will turn on or off a function
BOOLEANKEYWORDS = [
"saveProjectionToFile",
"skipdos",
"skipcohp",
"skipcoop",
"skipcobi",
"skipMadelungEnergy",
"loadProjectionFromFile",
"forceEnergyRange",
"DensityOfEnergy",
"BWDF",
"BWDFCOHP",
"skipPopulationAnalysis",
"skipGrossPopulation",
"userecommendedbasisfunctions",
"skipProjection",
"writeBasisFunctions",
"writeMatricesToFile",
"noFFTforVisualization",
"RMSp",
"onlyReadVasprun.xml",
"noMemoryMappedFiles",
"skipPAWOrthonormalityTest",
"doNotIgnoreExcessiveBands",
"doNotUseAbsoluteSpilling",
"skipReOrthonormalization",
"forceV1HMatrix",
"useOriginalTetrahedronMethod",
"forceEnergyRange",
"bandwiseSpilling",
"kpointwiseSpilling",
]
# several of these keywords + ending can be used in a lobsterin file:
LISTKEYWORDS = ["basisfunctions", "cohpbetween", "createFatband"]
# all keywords known to this class so far
AVAILABLEKEYWORDS = FLOATKEYWORDS + STRINGKEYWORDS + BOOLEANKEYWORDS + LISTKEYWORDS
def __init__(self, settingsdict: dict):
"""
Args:
settingsdict: dict to initialize Lobsterin
"""
super().__init__()
# check for duplicates
listkey = [key.lower() for key in settingsdict.keys()]
if len(listkey) != len(list(set(listkey))):
raise OSError("There are duplicates for the keywords! The program will stop here.")
self.update(settingsdict)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Lobsterin. Warns if parameter is not in list of
valid lobsterintags. Also cleans the parameter and val by stripping
leading and trailing white spaces. Similar to INCAR class.
"""
# due to the missing case sensitivity of lobster, the following code is necessary
found = False
for key_here in self.keys():
if key.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = key
if new_key.lower() not in [element.lower() for element in Lobsterin.AVAILABLEKEYWORDS]:
raise ValueError("Key is currently not available")
super().__setitem__(new_key, val.strip() if isinstance(val, str) else val)
def __getitem__(self, item):
"""
implements getitem from dict to avoid problems with cases
"""
found = False
for key_here in self.keys():
if item.strip().lower() == key_here.lower():
new_key = key_here
found = True
if not found:
new_key = item
val = dict.__getitem__(self, new_key)
return val
def diff(self, other):
"""
Diff function for lobsterin. Compares two lobsterin and indicates which parameters are the same.
Similar to the diff in INCAR.
Args:
other (Lobsterin): Lobsterin object to compare to
Returns:
dict with differences and similarities
"""
similar_param = {}
different_param = {}
key_list_others = [element.lower() for element in other.keys()]
for k1, v1 in self.items():
k1lower = k1.lower()
if k1lower not in key_list_others:
different_param[k1.upper()] = {"lobsterin1": v1, "lobsterin2": None}
else:
for key_here in other.keys():
if k1.lower() == key_here.lower():
new_key = key_here
if isinstance(v1, str):
if v1.strip().lower() != other[new_key].strip().lower():
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
similar_param[k1.upper()] = v1
elif isinstance(v1, list):
new_set1 = {element.strip().lower() for element in v1}
new_set2 = {element.strip().lower() for element in other[new_key]}
if new_set1 != new_set2:
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
if v1 != other[new_key]:
different_param[k1.upper()] = {
"lobsterin1": v1,
"lobsterin2": other[new_key],
}
else:
similar_param[k1.upper()] = v1
for k2, v2 in other.items():
if k2.upper() not in similar_param and k2.upper() not in different_param:
for key_here in self.keys():
if k2.lower() == key_here.lower():
new_key = key_here
else:
new_key = k2
if new_key not in self:
different_param[k2.upper()] = {"lobsterin1": None, "lobsterin2": v2}
return {"Same": similar_param, "Different": different_param}
def _get_nbands(self, structure: Structure):
"""
get number of nbands
"""
if self.get("basisfunctions") is None:
raise OSError("No basis functions are provided. The program cannot calculate nbands.")
basis_functions = [] # type: List[str]
for string_basis in self["basisfunctions"]:
# string_basis.lstrip()
string_basis_raw = string_basis.strip().split(" ")
while "" in string_basis_raw:
string_basis_raw.remove("")
for i in range(0, int(structure.composition.element_composition[string_basis_raw[0]])):
basis_functions.extend(string_basis_raw[1:])
no_basis_functions = 0
for basis in basis_functions:
if "s" in basis:
no_basis_functions = no_basis_functions + 1
elif "p" in basis:
no_basis_functions = no_basis_functions + 3
elif "d" in basis:
no_basis_functions = no_basis_functions + 5
elif "f" in basis:
no_basis_functions = no_basis_functions + 7
return int(no_basis_functions)
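# (added worked example) for basisfunctions ["Fe 3d 4s 4p"] on a structure with
# two Fe atoms, each atom contributes 5 + 1 + 3 = 9 basis functions, so
# _get_nbands returns 18.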
def write_lobsterin(self, path="lobsterin", overwritedict=None):
"""
writes a lobsterin file
Args:
path (str): filename of the lobsterin file that will be written
overwritedict (dict): dict that can be used to overwrite lobsterin, e.g. {"skipdos": True}
"""
# will overwrite previous entries
# has to search first if entry is already in Lobsterindict (due to case insensitivity)
if overwritedict is not None:
for key, entry in overwritedict.items():
found = False
for key2 in self.keys():
if key.lower() == key2.lower():
self[key2] = entry
found = True
if not found:
self[key] = entry
filename = path
with open(filename, "w") as f:
for key in Lobsterin.AVAILABLEKEYWORDS:
if key.lower() in [element.lower() for element in self.keys()]:
if key.lower() in [element.lower() for element in Lobsterin.FLOATKEYWORDS]:
f.write(key + " " + str(self.get(key)) + "\n")
elif key.lower() in [element.lower() for element in Lobsterin.BOOLEANKEYWORDS]:
# checks if entry is True or False
for key_here in self.keys():
if key.lower() == key_here.lower():
new_key = key_here
if self.get(new_key):
f.write(key + "\n")
elif key.lower() in [element.lower() for element in Lobsterin.STRINGKEYWORDS]:
f.write(key + " " + str(self.get(key) + "\n"))
elif key.lower() in [element.lower() for element in Lobsterin.LISTKEYWORDS]:
for entry in self.get(key):
f.write(key + " " + str(entry) + "\n")
def as_dict(self):
"""
:return: MSONable dict
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Lobsterin
"""
return Lobsterin({k: v for k, v in d.items() if k not in ["@module", "@class"]})
def write_INCAR(
self,
incar_input: str = "INCAR",
incar_output: str = "INCAR.lobster",
poscar_input: str = "POSCAR",
isym: int = -1,
further_settings: dict = None,
):
"""
Will only make the run static, insert nbands, make ISYM=-1, set LWAVE=True and write a new INCAR.
You have to check for the rest.
Args:
incar_input (str): path to input INCAR
incar_output (str): path to output INCAR
poscar_input (str): path to input POSCAR
isym (int): isym equal to -1 or 0 are possible. Current Lobster version only allow -1.
further_settings (dict): A dict can be used to include further settings, e.g. {"ISMEAR":-5}
"""
# reads old incar from file, this one will be modified
incar = Incar.from_file(incar_input)
warnings.warn("Please check your incar_input before using it. This method only changes three settings!")
if isym == -1:
incar["ISYM"] = -1
elif isym == 0:
incar["ISYM"] = 0
else:
ValueError("isym has to be -1 or 0.")
incar["NSW"] = 0
incar["LWAVE"] = True
# get nbands from _get_nbands (use basis set that is inserted)
incar["NBANDS"] = self._get_nbands(Structure.from_file(poscar_input))
if further_settings is not None:
for key, item in further_settings.items():
incar[key] = further_settings[key]
# print it to file
incar.write_file(incar_output)
@staticmethod
def get_basis(
structure: Structure,
potcar_symbols: list,
address_basis_file: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_standard.yaml"),
):
"""
will get the basis from given potcar_symbols (e.g., ["Fe_pv","Si"])
#include this in lobsterin class
Args:
structure (Structure): Structure object
potcar_symbols: list of potcar symbols
Returns:
returns basis
"""
Potcar_names = list(potcar_symbols)
AtomTypes_Potcar = [name.split("_")[0] for name in Potcar_names]
AtomTypes = structure.symbol_set
if set(AtomTypes) != set(AtomTypes_Potcar):
raise OSError("Your POSCAR does not correspond to your POTCAR!")
BASIS = loadfn(address_basis_file)["BASIS"]
basis_functions = []
list_forin = []
for itype, type in enumerate(Potcar_names):
if type not in BASIS:
raise ValueError(
"You have to provide the basis for"
+ str(type)
+ "manually. We don't have any information on this POTCAR."
)
basis_functions.append(BASIS[type].split())
tojoin = str(AtomTypes_Potcar[itype]) + " "
tojoin2 = "".join(str(str(e) + " ") for e in BASIS[type].split())
list_forin.append(str(tojoin + tojoin2))
return list_forin
@staticmethod
def get_all_possible_basis_functions(
structure: Structure,
potcar_symbols: list,
address_basis_file_min: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_min.yaml"),
address_basis_file_max: str = os.path.join(MODULE_DIR, "lobster_basis/BASIS_PBE_54_max.yaml"),
):
"""
Args:
structure: Structure object
potcar_symbols: list of the potcar symbols
address_basis_file_min: path to file with the minimum required basis by the POTCAR
address_basis_file_max: path to file with the largest possible basis of the POTCAR
Returns: List of dictionaries that can be used to create new Lobsterin objects in
standard_calculations_from_vasp_files as dict_for_basis
"""
max_basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=potcar_symbols,
address_basis_file=address_basis_file_max,
)
min_basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=potcar_symbols,
address_basis_file=address_basis_file_min,
)
all_basis = get_all_possible_basis_combinations(min_basis=min_basis, max_basis=max_basis)
list_basis_dict = []
for ibasis, basis in enumerate(all_basis):
basis_dict = {}
for iel, elba in enumerate(basis):
basplit = elba.split()
basis_dict[basplit[0]] = " ".join(basplit[1:])
list_basis_dict.append(basis_dict)
return list_basis_dict
@staticmethod
def write_POSCAR_with_standard_primitive(POSCAR_input="POSCAR", POSCAR_output="POSCAR.lobster", symprec=0.01):
"""
writes a POSCAR with the standard primitive cell. This is needed to arrive at the correct kpath
Args:
POSCAR_input (str): filename of input POSCAR
POSCAR_output (str): filename of output POSCAR
symprec (float): precision to find symmetry
"""
structure = Structure.from_file(POSCAR_input)
kpath = HighSymmKpath(structure, symprec=symprec)
new_structure = kpath.prim
new_structure.to(fmt="POSCAR", filename=POSCAR_output)
@staticmethod
def write_KPOINTS(
POSCAR_input: str = "POSCAR",
KPOINTS_output="KPOINTS.lobster",
reciprocal_density: int = 100,
isym: int = -1,
from_grid: bool = False,
input_grid: list = [5, 5, 5],
line_mode: bool = True,
kpoints_line_density: int = 20,
symprec: float = 0.01,
):
"""
writes a KPOINT file for lobster (only ISYM=-1 and ISYM=0 are possible), grids are gamma centered
Args:
POSCAR_input (str): path to POSCAR
KPOINTS_output (str): path to output KPOINTS
reciprocal_density (int): Grid density
isym (int): either -1 or 0. Current Lobster versions only allow -1.
from_grid (bool): If True KPOINTS will be generated with the help of a grid given in input_grid. Otherwise,
they will be generated from the reciprocal_density
input_grid (list): grid to generate the KPOINTS file
line_mode (bool): If True, band structure will be generated
kpoints_line_density (int): density of the lines in the band structure
symprec (float): precision to determine symmetry
"""
structure = Structure.from_file(POSCAR_input)
if not from_grid:
kpointgrid = Kpoints.automatic_density_by_vol(structure, reciprocal_density).kpts
mesh = kpointgrid[0]
else:
mesh = input_grid
# The following code is taken from: SpacegroupAnalyzer
# we need to switch off symmetry here
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = [] # type: List[Any]
zs = []
magmoms = []
for species, g in itertools.groupby(structure, key=lambda s: s.species):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, "magmom"):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, "spin"):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
# For now, we are setting magmom to zero. (Taken from INCAR class)
cell = latt, positions, zs, magmoms
# TODO: what about this shift?
mapping, grid = spglib.get_ir_reciprocal_mesh(mesh, cell, is_shift=[0, 0, 0])
# exit()
# get the kpoints for the grid
if isym == -1:
kpts = []
weights = []
all_labels = []
for gp in grid:
kpts.append(gp.astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
elif isym == 0:
# time reversal symmetry: k and -k are equivalent
kpts = []
weights = []
all_labels = []
newlist = [list(gp) for gp in list(grid)]
mapping = []
for gp in newlist:
minusgp = [-k for k in gp]
if minusgp in newlist and minusgp not in [[0, 0, 0]]:
mapping.append(newlist.index(minusgp))
else:
mapping.append(newlist.index(gp))
for igp, gp in enumerate(newlist):
if mapping[igp] > igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(2))
all_labels.append("")
elif mapping[igp] == igp:
kpts.append(np.array(gp).astype(float) / mesh)
weights.append(float(1))
all_labels.append("")
else:
ValueError("Only isym=-1 and isym=0 are allowed.")
# line mode
if line_mode:
kpath = HighSymmKpath(structure, symprec=symprec)
if not np.allclose(kpath.prim.lattice.matrix, structure.lattice.matrix):
raise ValueError(
"You are not using the standard primitive cell. The k-path is not correct. Please generate a "
"standard primitive cell first."
)
frac_k_points, labels = kpath.get_kpoints(line_density=kpoints_line_density, coords_are_cartesian=False)
for k, f in enumerate(frac_k_points):
kpts.append(f)
weights.append(0.0)
all_labels.append(labels[k])
if isym == -1:
comment = (
"ISYM=-1, grid: " + str(mesh) if not line_mode else "ISYM=-1, grid: " + str(mesh) + " plus kpoint path"
)
elif isym == 0:
comment = (
"ISYM=0, grid: " + str(mesh) if not line_mode else "ISYM=0, grid: " + str(mesh) + " plus kpoint path"
)
KpointObject = Kpoints(
comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts),
kpts=kpts,
kpts_weights=weights,
labels=all_labels,
)
KpointObject.write_file(filename=KPOINTS_output)
@classmethod
def from_file(cls, lobsterin: str):
"""
Args:
lobsterin (str): path to lobsterin
Returns:
Lobsterin object
"""
with zopen(lobsterin, "rt") as f:
data = f.read().split("\n")
if len(data) == 0:
raise OSError("lobsterin file contains no data.")
Lobsterindict = {} # type: Dict
for datum in data:
# will remove all comments to avoid complications
raw_datum = datum.split("!")[0]
raw_datum = raw_datum.split("//")[0]
raw_datum = raw_datum.split("#")[0]
raw_datum = raw_datum.split(" ")
while "" in raw_datum:
raw_datum.remove("")
if len(raw_datum) > 1:
# check which type of keyword this is, handle accordingly
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]:
if raw_datum[0].lower() not in [datum2.lower() for datum2 in Lobsterin.FLOATKEYWORDS]:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = " ".join(raw_datum[1:])
else:
raise ValueError("Same keyword " + str(raw_datum[0].lower()) + "twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = float(raw_datum[1])
else:
raise ValueError("Same keyword " + str(raw_datum[0].lower()) + "twice!")
else:
if raw_datum[0].lower() not in Lobsterindict:
Lobsterindict[raw_datum[0].lower()] = [" ".join(raw_datum[1:])]
else:
Lobsterindict[raw_datum[0].lower()].append(" ".join(raw_datum[1:]))
elif len(raw_datum) > 0:
Lobsterindict[raw_datum[0].lower()] = True
return cls(Lobsterindict)
@staticmethod
def _get_potcar_symbols(POTCAR_input: str) -> list:
"""
will return the name of the species in the POTCAR
Args:
POTCAR_input(str): string to potcar file
Returns:
list of the names of the species in string format
"""
potcar = Potcar.from_file(POTCAR_input)
for pot in potcar:
if pot.potential_type != "PAW":
raise OSError("Lobster only works with PAW! Use different POTCARs")
# Warning about a bug in lobster-4.1.0
with zopen(POTCAR_input, "r") as f:
data = f.read()
if isinstance(data, bytes):
data = data.decode("utf-8")
if "SHA256" in data or "COPYR" in data:
warnings.warn(
"These POTCARs are not compatible with "
"Lobster up to version 4.1.0."
"\n The keywords SHA256 and COPYR "
"cannot be handled by Lobster"
" \n and will lead to wrong results."
)
if potcar.functional != "PBE":
raise OSError("We only have BASIS options for PBE so far")
Potcar_names = [name["symbol"] for name in potcar.spec]
return Potcar_names
@classmethod
def standard_calculations_from_vasp_files(
cls,
POSCAR_input: str = "POSCAR",
INCAR_input: str = "INCAR",
POTCAR_input: Optional[str] = None,
dict_for_basis: Optional[dict] = None,
option: str = "standard",
):
"""
will generate Lobsterin with standard settings
Args:
POSCAR_input(str): path to POSCAR
INCAR_input(str): path to INCAR
POTCAR_input (str): path to POTCAR
dict_for_basis (dict): can be provided: it should look the following:
dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'} and will overwrite all settings from POTCAR_input
option (str): 'standard' will start a normal lobster run where COHPs, COOPs, DOS, CHARGE etc. will be
calculated
'standard_from_projection' will start a normal lobster run from a projection
'standard_with_fatband' will do a fatband calculation, run over all orbitals
'onlyprojection' will only do a projection
'onlydos' will only calculate a projected dos
'onlycohp' will only calculate cohp
'onlycoop' will only calculate coop
'onlycohpcoop' will only calculate cohp and coop
Returns:
Lobsterin Object with standard settings
"""
warnings.warn(
"Always check and test the provided basis functions. The spilling of your Lobster calculation might help"
)
# warn that fatband calc cannot be done with tetrahedron method at the moment
if option not in [
"standard",
"standard_from_projection",
"standard_with_fatband",
"onlyprojection",
"onlydos",
"onlycohp",
"onlycoop",
"onlycobi",
"onlycohpcoop",
"onlycohpcoopcobi",
"onlymadelung",
]:
raise ValueError("The option is not valid!")
Lobsterindict = {} # type: Dict[Any,Any]
# this basis set covers most elements
Lobsterindict["basisSet"] = "pbeVaspFit2015"
# energies around e-fermi
Lobsterindict["COHPstartEnergy"] = -15.0
Lobsterindict["COHPendEnergy"] = 5.0
if option in [
"standard",
"onlycohp",
"onlycoop",
"onlycobi",
"onlycohpcoop",
"onlycohpcoopcobi",
"standard_with_fatband",
]:
# every interaction with a distance of 6.0 is checked
Lobsterindict["cohpGenerator"] = "from 0.1 to 6.0 orbitalwise"
# the projection is saved
Lobsterindict["saveProjectionToFile"] = True
if option == "standard_from_projection":
Lobsterindict["cohpGenerator"] = "from 0.1 to 6.0 orbitalwise"
Lobsterindict["loadProjectionFromFile"] = True
# TODO: add cobi here! might be relevant lobster version
if option == "onlycohp":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycoop":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycohpcoop":
Lobsterindict["skipdos"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycohpcoopcobi":
Lobsterindict["skipdos"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlydos":
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlyprojection":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["saveProjectionToFile"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlycobi":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
Lobsterindict["skipMadelungEnergy"] = True
if option == "onlymadelung":
Lobsterindict["skipdos"] = True
Lobsterindict["skipcohp"] = True
Lobsterindict["skipcoop"] = True
Lobsterindict["skipPopulationAnalysis"] = True
Lobsterindict["skipGrossPopulation"] = True
Lobsterindict["saveProjectionToFile"] = True
# lobster-4.1.0
Lobsterindict["skipcobi"] = True
incar = Incar.from_file(INCAR_input)
if incar["ISMEAR"] == 0:
Lobsterindict["gaussianSmearingWidth"] = incar["SIGMA"]
if incar["ISMEAR"] != 0 and option == "standard_with_fatband":
raise ValueError("ISMEAR has to be 0 for a fatband calculation with Lobster")
if dict_for_basis is not None:
# dict_for_basis={"Fe":'3p 3d 4s 4f', "C": '2s 2p'}
# will just insert this basis and not check with poscar
basis = [key + " " + value for key, value in dict_for_basis.items()]
elif POTCAR_input is not None:
# get basis from POTCAR
potcar_names = Lobsterin._get_potcar_symbols(POTCAR_input=POTCAR_input)
basis = Lobsterin.get_basis(structure=Structure.from_file(POSCAR_input), potcar_symbols=potcar_names)
else:
raise ValueError("basis cannot be generated")
Lobsterindict["basisfunctions"] = basis
if option == "standard_with_fatband":
Lobsterindict["createFatband"] = basis
return cls(Lobsterindict)
def get_all_possible_basis_combinations(min_basis: list, max_basis: list) -> list:
"""
Args:
min_basis: list of basis entries: e.g., ['Si 3p 3s ']
max_basis: list of basis entries: e.g., ['Si 3p 3s ']
Returns: all possible combinations of basis functions, e.g. [['Si 3p 3s']]
"""
max_basis_lists = [x.split() for x in max_basis]
min_basis_lists = [x.split() for x in min_basis]
# get all possible basis functions
basis_dict: Dict[str, dict] = {}
for iel, el in enumerate(max_basis_lists):
basis_dict[el[0]] = {"fixed": [], "variable": [], "combinations": []}
for basis in el[1:]:
if basis in min_basis_lists[iel]:
basis_dict[el[0]]["fixed"].append(basis)
if basis not in min_basis_lists[iel]:
basis_dict[el[0]]["variable"].append(basis)
for L in range(0, len(basis_dict[el[0]]["variable"]) + 1):
for subset in itertools.combinations(basis_dict[el[0]]["variable"], L):
basis_dict[el[0]]["combinations"].append(" ".join([el[0]] + basis_dict[el[0]]["fixed"] + list(subset)))
list_basis = []
for el, item in basis_dict.items():
list_basis.append(item["combinations"])
# get all combinations
start_basis = list_basis[0]
if len(list_basis) > 1:
for iel, el in enumerate(list_basis[1:], 1):
new_start_basis = []
for ielbasis, elbasis in enumerate(start_basis):
for ielbasis2, elbasis2 in enumerate(list_basis[iel]):
if not isinstance(elbasis, list):
new_start_basis.append([elbasis, elbasis2])
else:
new_start_basis.append(elbasis.copy() + [elbasis2])
start_basis = new_start_basis
return start_basis
return [[basis] for basis in start_basis]
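# (added worked example) get_all_possible_basis_combinations(["Si 3p 3s"], ["Si 3d 3p 3s"])
# returns [["Si 3p 3s"], ["Si 3p 3s 3d"]]: the minimal basis plus every subset
# of the optional orbitals.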
``` |
{
"source": "jonringer/pyscreenshot",
"score": 3
} |
#### File: pyscreenshot/check/showall.py
```python
import time
from entrypoint2 import entrypoint
import pyscreenshot
from pyscreenshot import backends
@entrypoint
def show():
im = []
blist = []
for x in backends():
try:
print("--> grabbing by " + x)
im.append(pyscreenshot.grab(bbox=(500, 400, 800, 600), backend=x))
blist.append(x)
except Exception as e:
print(e)
print(im)
print(blist)
for x in im:
x.show()
time.sleep(0.5)
```
#### File: pyscreenshot/tests/test_mac_quartz.py
```python
from bt import backend_to_check
from pyscreenshot.util import platform_is_osx
if platform_is_osx():
def test_mac_quartz():
backend_to_check("mac_quartz")
```
#### File: pyscreenshot/tests/test_maim.py
```python
from bt import backend_to_check, prog_check
from pyscreenshot.util import use_x_display
if use_x_display():
if prog_check(["maim", "--version"]):
def test_maim():
backend_to_check("maim")
```
#### File: pyscreenshot/tests/test_pyqt5.py
```python
from bt import backend_to_check, check_import
from pyscreenshot.util import platform_is_osx
# qt color problem on osx
if not platform_is_osx():
if check_import("PyQt5"):
def test_pyqt5():
backend_to_check("pyqt5")
``` |
{
"source": "jonringer/sopel",
"score": 2
} |
#### File: sopel/cli/run.py
```python
from __future__ import unicode_literals, absolute_import, print_function, division
import argparse
import os
import platform
import signal
import sys
import time
import traceback
from sopel import bot, config, logger, tools, __version__
from . import utils
if sys.version_info < (2, 7):
tools.stderr('Error: Requires Python 2.7 or later. Try python2.7 sopel')
sys.exit(1)
if sys.version_info.major == 2:
tools.stderr('Warning: Python 2.x is near end of life. Sopel support at that point is TBD.')
if sys.version_info.major == 3 and sys.version_info.minor < 3:
tools.stderr('Error: When running on Python 3, Python 3.3 is required.')
sys.exit(1)
ERR_CODE = 1
"""Error code: program exited with an error"""
ERR_CODE_NO_RESTART = 2
"""Error code: program exited with an error and should not be restarted
This error code is used to prevent systemd from restarting the bot when it
encounters such an error case.
"""
def run(settings, pid_file, daemon=False):
delay = 20
# Inject ca_certs from config to web for SSL validation of web requests
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
tools.stderr('Got quit signal, shutting down.')
p.quit('Closing')
elif sig == signal.SIGUSR2 or sig == signal.SIGILL:
tools.stderr('Got restart signal.')
p.restart('Restarting')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, 'SIGTERM'):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, signal_handler)
if hasattr(signal, 'SIGUSR2'):
signal.signal(signal.SIGUSR2, signal_handler)
if hasattr(signal, 'SIGILL'):
signal.signal(signal.SIGILL, signal_handler)
logger.setup_logging(p)
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception: # TODO: Be specific
trace = traceback.format_exc()
try:
tools.stderr(trace)
except Exception: # TODO: Be specific
pass
logfile = open(os.path.join(settings.core.logdir, settings.basename + '.exceptions.log'), 'a')
logfile.write('Critical exception in core')
logfile.write(trace)
logfile.write('----------------------------------------\n\n')
logfile.close()
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
tools.stderr(
'Warning: Disconnected. Reconnecting in %s seconds...' % delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
def add_legacy_options(parser):
parser.add_argument("-d", '--fork', action="store_true",
dest="daemonize", help="Daemonize Sopel")
parser.add_argument("-q", '--quit', action="store_true", dest="quit",
help=(
"Gracefully quit Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel stop` instead)"))
parser.add_argument("-k", '--kill', action="store_true", dest="kill",
help=(
"Kill Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel stop --kill` instead)"))
parser.add_argument("-r", '--restart', action="store_true", dest="restart",
help=(
"Restart Sopel "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel restart` instead)"))
parser.add_argument("-l", '--list', action="store_true",
dest="list_configs",
help="List all config files found")
parser.add_argument('--quiet', action="store_true", dest="quiet",
help="Suppress all output")
parser.add_argument('-w', '--configure-all', action='store_true',
dest='wizard',
help=(
"Run the configuration wizard "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel configure` instead)"))
parser.add_argument('--configure-modules', action='store_true',
dest='mod_wizard',
help=(
"Run the configuration wizard, but only for the "
"module configuration options "
"(deprecated, and will be removed in Sopel 8; "
"use `sopel configure --modules` instead)"))
parser.add_argument('-v', action="store_true",
dest='version_legacy',
help=(
"Show version number and exit "
"(deprecated, and will be removed in Sopel 8; "
"use -V/--version instead)"))
parser.add_argument('-V', '--version', action='store_true',
dest='version',
help='Show version number and exit')
def build_parser():
"""Build an ``argparse.ArgumentParser`` for the bot"""
parser = argparse.ArgumentParser(description='Sopel IRC Bot',
usage='%(prog)s [options]')
add_legacy_options(parser)
utils.add_common_arguments(parser)
subparsers = parser.add_subparsers(
title='sub-commands',
description='List of Sopel\'s sub-commands',
dest='action',
metavar='{start,configure,stop,restart}')
# manage `legacy` sub-command
parser_legacy = subparsers.add_parser('legacy')
add_legacy_options(parser_legacy)
utils.add_common_arguments(parser_legacy)
# manage `start` sub-command
parser_start = subparsers.add_parser(
'start',
description='Start a Sopel instance',
help='Start a Sopel instance')
parser_start.add_argument(
'-d', '--fork',
dest='daemonize',
action='store_true',
default=False,
help='Run Sopel as a daemon (fork)')
parser_start.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_start)
# manage `configure` sub-command
parser_configure = subparsers.add_parser(
'configure', help='Sopel\'s Wizard tool')
parser_configure.add_argument(
'--modules',
action='store_true',
default=False,
dest='modules')
utils.add_common_arguments(parser_configure)
# manage `stop` sub-command
parser_stop = subparsers.add_parser(
'stop',
description='Stop a running Sopel instance',
help='Stop a running Sopel instance')
parser_stop.add_argument(
'-k', '--kill',
action='store_true',
default=False,
help='Kill Sopel without a graceful quit')
parser_stop.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_stop)
# manage `restart` sub-command
parser_restart = subparsers.add_parser(
'restart',
description='Restart a running Sopel instance',
help='Restart a running Sopel instance')
parser_restart.add_argument(
'--quiet',
action="store_true",
dest="quiet",
help="Suppress all output")
utils.add_common_arguments(parser_restart)
return parser
def check_not_root():
"""Check if root is running the bot.
It raises a ``RuntimeError`` if the user has root privileges on Linux or
if it is the ``Administrator`` account on Windows.
"""
opersystem = platform.system()
if opersystem in ["Linux", "Darwin"]:
# Linux/Mac
if os.getuid() == 0 or os.geteuid() == 0:
raise RuntimeError('Error: Do not run Sopel with root privileges.')
elif opersystem in ["Windows"]:
# Windows
if os.environ.get("USERNAME") == "Administrator":
raise RuntimeError('Error: Do not run Sopel as Administrator.')
else:
tools.stderr(
"Warning: %s is an uncommon operating system platform. "
"Sopel should still work, but please contact Sopel's developers "
"if you experience issues."
% opersystem)
def print_version():
"""Print Python version and Sopel version on stdout."""
py_ver = '%s.%s.%s' % (sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro)
print('Sopel %s (running on Python %s)' % (__version__, py_ver))
print('https://sopel.chat/')
def print_config():
"""Print list of available configurations from default homedir."""
configs = utils.enumerate_configs(config.DEFAULT_HOMEDIR)
print('Config files in %s:' % config.DEFAULT_HOMEDIR)
configfile = None
for configfile in configs:
print('\t%s' % configfile)
if not configfile:
print('\tNone found')
print('-------------------------')
def get_configuration(options):
"""Get or create a configuration object from ``options``.
:param options: argument parser's options
:type options: ``argparse.Namespace``
:return: a configuration object
:rtype: :class:`sopel.config.Config`
This may raise a :exc:`sopel.config.ConfigurationError` if the
configuration file is invalid.
.. seealso::
The configuration file is loaded by
:func:`~sopel.cli.run.utils.load_settings` or created using the
configuration wizard.
"""
try:
settings = utils.load_settings(options)
except config.ConfigurationNotFound as error:
print(
"Welcome to Sopel!\n"
"I can't seem to find the configuration file, "
"so let's generate it!\n")
settings = utils.wizard(error.filename)
settings._is_daemonized = options.daemonize
return settings
def get_pid_filename(options, pid_dir):
"""Get the pid file name in ``pid_dir`` from the given ``options``.
:param options: command line options
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if a configuration filename is given
in the ``options``, its basename is used to generate the filename, as:
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if options.config:
basename = os.path.basename(options.config)
if basename.endswith('.cfg'):
basename = basename[:-4]
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
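# (added example) with options.config == 'freenode.cfg' and pid_dir == '/var/run',
# this returns '/var/run/sopel-freenode.pid'.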
def get_running_pid(filename):
"""Retrieve the PID number from the given ``filename``.
:param str filename: path to file to read the PID from
:return: the PID number of a Sopel instance if running, ``None`` otherwise
:rtype: integer
This function tries to retrieve a PID number from the given ``filename``,
as an integer, and returns ``None`` if the file is not found or if the
content is not an integer.
"""
if not os.path.isfile(filename):
return
with open(filename, 'r') as pid_file:
try:
return int(pid_file.read())
except ValueError:
pass
def command_start(opts):
"""Start a Sopel instance"""
# Step One: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except config.ConfigurationError as e:
tools.stderr(e)
return ERR_CODE_NO_RESTART
if config_module.core.not_configured:
tools.stderr('Bot is not configured, can\'t start')
return ERR_CODE_NO_RESTART
# Step Two: Manage logfile, stdout and stderr
utils.redirect_outputs(config_module, opts.quiet)
# Step Three: Handle process-lifecycle options and manage the PID file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
pid = get_running_pid(pid_file_path)
if pid is not None and tools.check_pid(pid):
tools.stderr('There\'s already a Sopel instance running '
'with this config file.')
tools.stderr('Try using either the `sopel stop` '
'or the `sopel restart` command.')
return ERR_CODE
if opts.daemonize:
child_pid = os.fork()
if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Four: Run Sopel
ret = run(config_module, pid_file_path)
# Step Five: Shutdown Clean-Up
os.unlink(pid_file_path)
if ret == -1:
# Restart
os.execv(sys.executable, ['python'] + sys.argv)
else:
# Quit
return ret
def command_configure(opts):
"""Sopel Configuration Wizard"""
configpath = utils.find_config(
config.DEFAULT_HOMEDIR, opts.config or 'default')
if getattr(opts, 'modules', False):
utils.plugins_wizard(configpath)
else:
utils.wizard(configpath)
def command_stop(opts):
"""Stop a running Sopel instance"""
# Get Configuration
try:
settings = utils.load_settings(opts)
except config.ConfigurationNotFound as error:
tools.stderr('Configuration "%s" not found' % error.filename)
return ERR_CODE
if settings.core.not_configured:
tools.stderr('Sopel is not configured, can\'t stop')
return ERR_CODE
# Redirect Outputs
utils.redirect_outputs(settings, opts.quiet)
# Get Sopel's PID
filename = get_pid_filename(opts, settings.core.pid_dir)
pid = get_running_pid(filename)
if pid is None or not tools.check_pid(pid):
tools.stderr('Sopel is not running!')
return ERR_CODE
# Stop Sopel
if opts.kill:
tools.stderr('Killing the Sopel')
os.kill(pid, signal.SIGKILL)
return
tools.stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(pid, signal.SIGUSR1)
else:
# Windows will not generate SIGTERM itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(pid, signal.SIGTERM)
def command_restart(opts):
"""Restart a running Sopel instance"""
# Get Configuration
try:
settings = utils.load_settings(opts)
except config.ConfigurationNotFound as error:
tools.stderr('Configuration "%s" not found' % error.filename)
return ERR_CODE
if settings.core.not_configured:
tools.stderr('Sopel is not configured, can\'t stop')
return ERR_CODE
# Redirect Outputs
utils.redirect_outputs(settings, opts.quiet)
# Get Sopel's PID
filename = get_pid_filename(opts, settings.core.pid_dir)
pid = get_running_pid(filename)
if pid is None or not tools.check_pid(pid):
tools.stderr('Sopel is not running!')
return ERR_CODE
tools.stderr('Asking Sopel to restart')
if hasattr(signal, 'SIGUSR2'):
os.kill(pid, signal.SIGUSR2)
else:
# Windows will not generate SIGILL itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(pid, signal.SIGILL)
def command_legacy(opts):
"""Legacy Sopel run script
The ``legacy`` command manages the old-style ``sopel`` command line tool.
Most of its features are replaced by the following commands:
* ``sopel start`` replaces the default behavior (run the bot)
* ``sopel stop`` replaces the ``--quit/--kill`` options
* ``sopel restart`` replaces the ``--restart`` option
* ``sopel configure`` replaces the
``-w/--configure-all/--configure-modules`` options
The ``-v`` option for "version" is deprecated, ``-V/--version`` should be
used instead.
.. seealso::
The github issue `#1471`__ tracks various changes requested for future
versions of Sopel, some of them related to this legacy command.
.. __: https://github.com/sopel-irc/sopel/issues/1471
"""
# Step One: Handle "No config needed" options
if opts.version:
print_version()
return
elif opts.version_legacy:
tools.stderr(
'WARNING: option -v is deprecated; '
'use `sopel -V/--version` instead')
print_version()
return
# TODO: allow to use a different homedir
configpath = utils.find_config(
config.DEFAULT_HOMEDIR, opts.config or 'default')
if opts.wizard:
tools.stderr(
'WARNING: option -w/--configure-all is deprecated; '
'use `sopel configure` instead')
utils.wizard(configpath)
return
if opts.mod_wizard:
tools.stderr(
'WARNING: option --configure-modules is deprecated; '
'use `sopel configure --modules` instead')
utils.plugins_wizard(configpath)
return
if opts.list_configs:
print_config()
return
# Step Two: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except config.ConfigurationError as e:
tools.stderr(e)
return ERR_CODE_NO_RESTART
if config_module.core.not_configured:
tools.stderr('Bot is not configured, can\'t start')
return ERR_CODE_NO_RESTART
# Step Three: Manage logfile, stdout and stderr
utils.redirect_outputs(config_module, opts.quiet)
# Step Four: Handle process-lifecycle options and manage the PID file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill and not opts.restart:
tools.stderr(
'There\'s already a Sopel instance running with this config file')
tools.stderr(
'Try using either the `sopel stop` command or the `sopel restart` command')
return ERR_CODE
elif opts.kill:
tools.stderr(
'WARNING: option -k/--kill is deprecated; '
'use `sopel stop --kill` instead')
tools.stderr('Killing the Sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
tools.stderr(
'WARNING: options -q/--quit is deprecated; '
'use `sopel stop` instead')
tools.stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
# Windows will not generate SIGTERM itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(old_pid, signal.SIGTERM)
return
elif opts.restart:
tools.stderr(
'WARNING: options --restart is deprecated; '
'use `sopel restart` instead')
tools.stderr('Asking Sopel to restart')
if hasattr(signal, 'SIGUSR2'):
os.kill(old_pid, signal.SIGUSR2)
else:
# Windows will not generate SIGILL itself
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/signal
os.kill(old_pid, signal.SIGILL)
return
elif opts.kill or opts.quit or opts.restart:
tools.stderr('Sopel is not running!')
return ERR_CODE
if opts.daemonize:
child_pid = os.fork()
if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Five: Initialize and run Sopel
ret = run(config_module, pid_file_path)
os.unlink(pid_file_path)
if ret == -1:
os.execv(sys.executable, ['python'] + sys.argv)
else:
return ret
def main(argv=None):
"""Sopel run script entry point"""
try:
# Step One: Parse The Command Line
parser = build_parser()
# make sure to have an action first (`legacy` by default)
# TODO: `start` should be the default in Sopel 8
argv = argv or sys.argv[1:]
if not argv:
argv = ['legacy']
elif argv[0].startswith('-') and argv[0] not in ['-h', '--help']:
argv = ['legacy'] + argv
opts = parser.parse_args(argv)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
tools.stderr('%s' % err)
return ERR_CODE
# Step Three: Handle command
action = getattr(opts, 'action', 'legacy')
command = {
'legacy': command_legacy,
'start': command_start,
'configure': command_configure,
'stop': command_stop,
'restart': command_restart,
}.get(action)
return command(opts)
except KeyboardInterrupt:
print("\n\nInterrupted")
return ERR_CODE
if __name__ == '__main__':
sys.exit(main())
```
#### File: sopel/modules/tld.py
```python
from __future__ import unicode_literals, absolute_import, print_function, division
import re
import sys
import requests
from sopel import web
from sopel.module import commands, example
if sys.version_info.major >= 3:
unicode = str
uri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'
r_tag = re.compile(r'<(?!!)[^>]+>')
@commands('tld')
@example('.tld ru')
def gettld(bot, trigger):
"""Show information about the given Top Level Domain."""
page = requests.get(uri).text
tld = trigger.group(2)
if not tld:
bot.reply("You must provide a top-level domain to search.")
return # Stop if no tld argument is provided
if tld[0] == '.':
tld = tld[1:]
search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if not matches:
search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
matches = list(matches[0])
i = 0
while i < len(matches):
matches[i] = r_tag.sub("", matches[i])
i += 1
desc = matches[2]
if len(desc) > 400:
desc = desc[:400] + "..."
reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (
matches[1], desc, matches[3], matches[4]
)
else:
search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
search = search.format(unicode(tld))
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
matches = matches[0]
dict_val = dict()
dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"] = matches
for key in dict_val:
if dict_val[key] == " ":
dict_val[key] = "N/A"
dict_val[key] = r_tag.sub('', dict_val[key])
if len(dict_val["notes"]) > 400:
dict_val["notes"] = dict_val["notes"][:400] + "..."
reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
else:
reply = "No matches found for TLD: {0}".format(unicode(tld))
# Final touches + output
reply = web.decode(reply)
bot.reply(reply)
```
#### File: sopel/test/test_bot.py
```python
from __future__ import unicode_literals, absolute_import, print_function, division
import pytest
from sopel import bot, config, plugins
@pytest.fixture
def tmpconfig(tmpdir):
conf_file = tmpdir.join('conf.ini')
conf_file.write("\n".join([
"[core]",
"owner=testnick",
"nick = TestBot",
"enable = coretasks"
""
]))
return config.Config(conf_file.strpath)
def test_remove_plugin_unknown_plugin(tmpconfig):
sopel = bot.Sopel(tmpconfig, daemon=False)
sopel.scheduler.stop()
sopel.scheduler.join(timeout=10)
plugin = plugins.handlers.PyModulePlugin('admin', 'sopel.modules')
with pytest.raises(plugins.exceptions.PluginNotRegistered):
sopel.remove_plugin(plugin, [], [], [], [])
def test_remove_plugin_unregistered_plugin(tmpconfig):
sopel = bot.Sopel(tmpconfig, daemon=False)
sopel.scheduler.stop()
sopel.scheduler.join(timeout=10)
plugin = sopel._plugins.get('coretasks')
assert plugin is not None, 'coretasks should always be loaded'
# Unregister the plugin
plugin.unregister(sopel)
# And now it must raise an exception
with pytest.raises(plugins.exceptions.PluginNotRegistered):
sopel.remove_plugin(plugin, [], [], [], [])
def test_reload_plugin_unregistered_plugin(tmpconfig):
sopel = bot.Sopel(tmpconfig, daemon=False)
sopel.scheduler.stop()
sopel.scheduler.join(timeout=10)
plugin = sopel._plugins.get('coretasks')
assert plugin is not None, 'coretasks should always be loaded'
# Unregister the plugin
plugin.unregister(sopel)
# And now it must raise an exception
with pytest.raises(plugins.exceptions.PluginNotRegistered):
sopel.reload_plugin(plugin.name)
``` |
{
"source": "jonringer/todoist-python",
"score": 2
} |
#### File: todoist/managers/labels.py
```python
from .. import models
from .generic import AllMixin, GetByIdMixin, Manager, SyncMixin
class LabelsManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
state_name = "labels"
object_type = "label"
def add(self, name, **kwargs):
"""
Creates a local label object.
"""
obj = models.Label({"name": name}, self.api)
obj.temp_id = obj["id"] = self.api.generate_uuid()
obj.data.update(kwargs)
self.state[self.state_name].append(obj)
cmd = {
"type": "label_add",
"temp_id": obj.temp_id,
"uuid": self.api.generate_uuid(),
"args": {key: obj.data[key] for key in obj.data if key != "id"},
}
self.queue.append(cmd)
return obj
def update(self, label_id, **kwargs):
"""
Updates a label remotely.
"""
args = {"id": label_id}
args.update(kwargs)
cmd = {
"type": "label_update",
"uuid": self.api.generate_uuid(),
"args": args,
}
self.queue.append(cmd)
def delete(self, label_id):
"""
Deletes a label remotely.
"""
cmd = {
"type": "label_delete",
"uuid": self.api.generate_uuid(),
"args": {"id": label_id},
}
self.queue.append(cmd)
def update_orders(self, id_order_mapping):
"""
Updates the orders of multiple labels remotely.
"""
cmd = {
"type": "label_update_orders",
"uuid": self.api.generate_uuid(),
"args": {"id_order_mapping": id_order_mapping},
}
self.queue.append(cmd)
def get(self, label_id):
"""
Gets an existing label.
"""
params = {"token": self.token, "label_id": label_id}
obj = self.api._get("labels/get", params=params)
if obj and "error" in obj:
return None
data = {"labels": []}
if obj.get("label"):
data["labels"].append(obj.get("label"))
self.api._update_state(data)
return obj
```
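The manager above only queues Sync API commands locally; nothing reaches the server until the client's `commit()` flushes that queue. A minimal usage sketch, assuming the surrounding todoist-python client where `api.labels` is a `LabelsManager` instance (the token string is a placeholder):

```python
from todoist.api import TodoistAPI

api = TodoistAPI('<api-token>')                # placeholder token
label = api.labels.add('errands')              # queues 'label_add' with a temporary id
api.labels.update(label['id'], name='chores')  # queues 'label_update'
api.commit()                                   # sends the queued commands to the Sync API
print(api.labels.all())                        # local state after the sync
```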
#### File: todoist/managers/reminders.py
```python
from .. import models
from .generic import AllMixin, GetByIdMixin, Manager, SyncMixin
class RemindersManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
state_name = "reminders"
object_type = "reminder"
def add(self, item_id, **kwargs):
"""
Creates a local reminder object.
"""
obj = models.Reminder({"item_id": item_id}, self.api)
obj.temp_id = obj["id"] = self.api.generate_uuid()
obj.data.update(kwargs)
self.state[self.state_name].append(obj)
cmd = {
"type": "reminder_add",
"temp_id": obj.temp_id,
"uuid": self.api.generate_uuid(),
"args": {key: obj.data[key] for key in obj.data if key != "id"},
}
self.queue.append(cmd)
return obj
def update(self, reminder_id, **kwargs):
"""
Updates a reminder remotely.
"""
args = {"id": reminder_id}
args.update(kwargs)
cmd = {
"type": "reminder_update",
"uuid": self.api.generate_uuid(),
"args": args,
}
self.queue.append(cmd)
def delete(self, reminder_id):
"""
Deletes a reminder remotely.
"""
cmd = {
"type": "reminder_delete",
"uuid": self.api.generate_uuid(),
"args": {"id": reminder_id},
}
self.queue.append(cmd)
def get(self, reminder_id):
"""
Gets an existing reminder.
"""
params = {"token": self.token, "reminder_id": reminder_id}
obj = self.api._get("reminders/get", params=params)
if obj and "error" in obj:
return None
data = {"reminders": []}
if obj.get("reminder"):
data["reminders"].append(obj.get("reminder"))
self.api._update_state(data)
return obj
``` |
{
"source": "JonRivera/bridges-to-prosperity-ds-d",
"score": 3
} |
#### File: app/api/database.py
```python
import logging
import json
import os
import psycopg2
import pandas as pd
from dotenv import load_dotenv
from fastapi import APIRouter
from pydantic import BaseModel, Field, validator
log = logging.getLogger(__name__)
router = APIRouter()
# Load environment variables from .env
load_dotenv()
class PostgreSQL:
def __init__(self):
"Add custom fields here"
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")
DB_PORT = os.getenv("DB_PORT")
    connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD,
                                  host=DB_HOST, port='5432')
# Methods can reference this variable
columns = ['bridge_name',
'bridge_opportunity_project_code',
'bridge_opportunity_needs_assessment',
'bridge_opportunity_level1_government',
'bridge_opportunity_level2_government',
'bridge_opportunity_stage',
'bridge_opportunity_gps_latitude',
'bridge_opportunity_gps_longitude',
'bridge_opportunity_bridge_type',
'bridge_opportunity_span_m',
'bridge_opportunity_individuals_directly_served',
'bridge_opportunity_comments',
'form_form_name',
'form_created_by',
'proposed_bridge_location_gps_latitude',
'proposed_bridge_location_gps_longitude',
'current_crossing_method',
'nearest_all_weather_crossing_point',
'days_per_year_river_is_flooded',
'flood_duration_during_rainy_season',
'market_access_blocked_by_river',
'education_access_blocked_by_river',
'health_access_blocked_by_river',
'other_access_blocked_by_river',
'primary_occupations',
'primary_crops_grown',
'river_crossing_deaths_in_last_3_years',
'river_crossing_injuries_in_last_3_years',
'incident_descriptions',
'notes_on_social_information',
'cell_service_quality',
'four_wd _accessibility',
'name_of_nearest_city',
'name_of_nearest_paved_or_sealed_road',
'bridge_classification',
'flag_for_rejection',
'rejection_reason',
'bridge_type',
'estimated_span_m',
'height_differential_between_banks',
'bridge_opportunity_general_project_photos',
'bridge_opportunity_casesafeid',
'senior_engineering_review_conducted',
'country']
def conn_curs(self):
"""
makes a connection to the database
"""
# Establishes connection and cursor
connection = self.connection
cursor = self.connection.cursor()
return connection, cursor
def cursor(self):
self.cursor = self.connection.cursor()
def execute(self, query):
self.cursor.execute(query)
def close(self):
self.connection.close()
def fetch_query_records(self, query: str):
"""This is a custom query, that returns all the records based on customed query"""
# Establishes connection and cursor
conn, cursor = self.conn_curs()
cursor.execute(query)
# fetches all records, we can limit # of records by put LIMIT statement in query
result = cursor.fetchall()
        # closes connection; for now, closing causes a bug that prevents further post/get requests, but in the
        # future closing the connection may be a proper step to implement. At the moment it works fine without it.
# cursor.close()
# conn.close()
return result
def fetch_all_records(self):
"""This is query returns all data/records in json format"""
# Establishes connection and cursor
conn = self.connection
cursor = self.connection.cursor()
query = """SELECT * from public."B2P_Merged_Final";"""
cursor.execute(query)
result = cursor.fetchall()
        # At the moment this code is unnecessary; closing connections adds a bug, but in the future it may
        # be useful to close these connections
# cursor.close()
# conn.close()
df = pd.DataFrame(result, columns=['index'] + self.columns)
df = df.iloc[:, 1:] # skip 1 column, as it corresponds to an index column
df_json = df.to_json(orient='records')
parsed = json.loads(df_json)
return parsed
def fetch_query_given_project(self, project_code):
# Establishes connection and cursor
conn = self.connection
cursor = self.connection.cursor()
query = f"""SELECT * FROM public."B2P_Merged_Final" where "bridge_opportunity_project_code" = '{project_code}';"""
cursor.execute(query)
result = cursor.fetchall()
        # At the moment this code is unnecessary; closing connections adds a bug, but in the future it may
        # be useful to close these connections
# cursor.close()
# conn.close()
df = pd.DataFrame(result, columns=['index'] + self.columns) #
df = df.iloc[:, 1:] # skip 1 column, as it corresponds to an index column
df_json = df.to_json(orient='records')
parsed = json.loads(df_json)
return parsed
class Item(BaseModel):
"""Use this data model to parse the request body JSON."""
project_code: str = Field(..., example='1007374')
def to_df(self):
"""Convert pydantic object to pandas dataframe with 1 row."""
return pd.DataFrame([dict(self)])
@router.post('/data_by_bridge_code')
async def get_record(item: Item):
"""
# Returns all records, based on project_code
# Request Body
- `project_code`: string
# Response All Records Based on Bridge Code in JSON FORMAT
    - 'Bridge_Name': str
    - 'Project_Code': str
    - 'Needs_Assessment': str
"""
PSQL = PostgreSQL()
json_output = PSQL.fetch_query_given_project(item.project_code)
return json_output
@router.get('/all_data')
async def get_all_record():
"""
    # Response All Data/Records in JSON FORMAT
    # Columns are now formatted in lowercase, as shown below
- 'bridge_name'
- 'bridge_opportunity_project_code'
- 'bridge_opportunity_needs_assessment'
- 'bridge_opportunity_level1_government'
- 'bridge_opportunity_level2_government'
- 'bridge_opportunity_stage',
- 'bridge_opportunity_gps_latitude'
- 'bridge_opportunity_gps_longitude'
- 'bridge_opportunity_bridge_type'
- 'bridge_opportunity_span_m'
- 'bridge_opportunity_individuals_directly_served'
- 'bridge_opportunity_comments'
- 'form_form_name'
- 'form_created_by'
- 'proposed_bridge_location_gps_latitude'
- 'proposed_bridge_location_gps_longitude'
- ...
"""
pg = PostgreSQL()
return_json = pg.fetch_all_records()
return return_json
class Item1(BaseModel):
"""Use this data model to parse the request body JSON.."""
input1: str = Field(..., example='output1')
output2: str = Field(..., example='output2')
# @validator('input1')
# def title_must_be_a_string(cls, value):
# """Validate that Title is a string."""
# assert type(value) == str, f'Title == {value}, must be a string'
# return value
#
# @validator('output1')
# def post_must_be_a_string(cls, value):
# """Validate that post is a string."""
# assert type(value) == str, f'Title == {value}, must be a string'
# return value
@router.post('/predict')
async def predict(item: Item1):
"""
Returns Prediction 🔮
    ### Request Body
    - 'input1': str
    - 'output2': str
"""
prediction = item.input1 + '+' + item.output2
return {"prediction": prediction}
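# Hypothetical local test sketch (assumptions: the DB_* environment variables point at a
# reachable Postgres instance and this router is mounted on a FastAPI app without a prefix).
if __name__ == '__main__':
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)

    resp = client.post('/data_by_bridge_code', json={'project_code': '1007374'})
    print(resp.status_code)
    resp = client.get('/all_data')
    print(resp.status_code, len(resp.json()))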
``` |
{
"source": "JonRivera/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/Study_Guide_Resources/Make_Insert_Data.py
```python
import pandas as pd
import sqlite3
# Creating & inserting data with sqlite3
conn = sqlite3.connect('example_db.sqlite')
def create_statement(conn):
curs = conn.cursor()
create_statement = """
    CREATE TABLE if not exists students (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name CHAR(30),
favorite_number INTEGER,
least_favorite_number INTEGER
);
"""
curs.execute(create_statement)
curs.close()
conn.commit()
def insert_data(conn):
# SIMILAR TO GETTING DATA OF ROWS USING SL_CURS.EXECUTE AND THEN
# FETCHING
my_data = [
('Jon', 7, 12),
('Alejandro', 77, 43),
('Rivera', 100, 137)
]
for row in my_data:
curs = conn.cursor()
insert_row = """
INSERT INTO students
(name ,favorite_number, least_favorite_number)
VALUES""" + str(row) + ";"
curs.execute(insert_row)
conn.commit()
# Creates student table
create_statement(conn)
# Insert data from my data into students
insert_data(conn)
# query the table to verify the inserted rows
curs = conn.cursor()
print(curs.execute('SELECT * FROM students LIMIT 10;'))
print(curs.fetchall())
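# A safer alternative sketch using parameter binding instead of string concatenation
# (assumption: same 'students' table and open connection as above):
with conn:  # commits automatically on success
    conn.executemany(
        "INSERT INTO students (name, favorite_number, least_favorite_number) VALUES (?, ?, ?);",
        [('Ana', 3, 8), ('Luis', 21, 5)],
    )
print(conn.execute('SELECT COUNT(*) FROM students;').fetchone())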
``` |
{
"source": "JonRivera/Package_Repo",
"score": 4
} |
#### File: Package_Repo/lambdata_JonRivera/package1.py
```python
class Car:
"""makes a car class"""
def __init__(self, mpg=40, horsepower=170, price=25000, color='red'):
self.mpg = mpg
self.horsepower = horsepower
self.price = price
self.color = color
def sound(self):
return "ROOOM"
class Ferrari(Car):
""" Uses class enheritance to make Ferrari"""
def __init__(self, mpg, horsepower, price, color, edition='sport'):
super().__init__(mpg, horsepower, price, color)
self.edition = edition
def sound(self):
return "boom --VROOM"
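# Hypothetical usage sketch (assumption: the module is run as a script):
if __name__ == '__main__':
    daily_driver = Car()
    track_car = Ferrari(mpg=12, horsepower=700, price=300000, color='red', edition='track')
    print(daily_driver.sound())            # base-class behaviour: "ROOOM"
    print(track_car.sound())               # overridden in the subclass: "boom --VROOM"
    print(track_car.horsepower, track_car.edition)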
``` |
{
"source": "JonRivera/Post_Here_Subreddit_Category_Prediction_Project",
"score": 3
} |
#### File: Post_Here_Subreddit_Category_Prediction_Project/database/reddit.py
```python
import praw
from database.query import insert_post, conn_curs
with open("secrets", "r") as file:
secrets = [i.strip('\n') for i in file.readlines()]
reddit = praw.Reddit(client_id=secrets[0],
client_secret=secrets[1],
username=secrets[2],
password=secrets[3],
user_agent=f'u/{secrets[1]}')
def get_data(subs, n_posts=1):
"""
Fetches then upload a post to postgres
subs - set of sub-reddits you plan to get posts from
n_posts - how many posts to grab per sub
"""
conn, curs = conn_curs() # this one connects to allan
curs.execute("SELECT Distinct(subreddit) FROM posts")
x = [i[0] for i in curs.fetchall()]
for i in subs:
if i not in x:
print(i)
sub = reddit.subreddit(i)
hot = sub.hot(limit=n_posts)
for post in hot:
text = f"{post.title} {post.selftext}".replace("'", "")
which_sub = str(post.subreddit)[:20]
insert_post(text, which_sub)
print('uploaded')
print('Finished sub')
return
if __name__ == "__main__":
reddits = {'learnSQL', 'MovieSuggestions', 'dating_advice', 'philosophy', 'worldnews', 'tifu', 'patientgamers',
'explainlikeimfive', 'OutOfTheLoop', 'books', 'ProRevenge', 'TellMeAFact', 'bestoflegaladvice',
'talesfromtechsupport', 'TalesFromRetail', 'britishproblems', 'whowouldwin', 'WritingPrompts', 'AskMen',
'AskWomen', 'askscience', 'newreddits', 'HailCorporate', 'boringdystopia', 'bestof', 'KarmaCourt',
'AmItheAsshole', 'RedditWritesSeinfeld', 'nosleep', 'pcmasterrace', 'learnpython', 'politics',
'LifeProTips', 'Jokes', 'gaming', 'Showerthoughts', 'teenagers', 'linux', 'television', 'soccer',
'hockey', 'ADHD', 'Games', 'LifeProTips', 'CasualConversation', 'nfl', 'socialanxiety', 'seduction',
'DecidingToBeBetter', 'socialskills', 'godtiersuperpowers', '3amjokes','ShouldIbuythisgame','dadjokes',
'Jokes', 'offmychest', 'PoliticalDiscussion'}
get_data(reddits, n_posts=300)
``` |
{
"source": "JonRivera/TwitOff",
"score": 3
} |
#### File: TwitOff/twitoff/twitter.py
```python
from os import getenv
import tweepy
import basilica
from decouple import config
from .model import DB, Tweet, User
TWITTER_USERS = ['calebhicks', 'elonmusk', 'rrherr', 'SteveMartinToGo',
'alyankovic', 'nasa', 'sadserver', 'jkhowland', 'austen',
'common_squirrel', 'KenJennings', 'conanobrien',
'big_ben_clock', 'IAM_SHAKESPEARE']
TWITTER_AUTH = tweepy.OAuthHandler(config('TWITTER_CONSUMER_KEY'),
config('TWITTER_CONSUMER_SECRET'))
TWITTER_AUTH.set_access_token(config('TWITTER_ACCESS_TOKEN'),
config('TWITTER_ACCESS_TOKEN_SECRET'))
TWITTER = tweepy.API(TWITTER_AUTH)
BASILICA = basilica.Connection(config('BASILICA_KEY'))
def add_or_update_user(username):
"""Add or update a user and their Tweets, error if not a Twitter user."""
try:
twitter_user = TWITTER.get_user(username)
db_user = (User.query.get(twitter_user.id) or
User(id=twitter_user.id, name=username))
DB.session.add(db_user)
# Lets get the tweets - focusing on primary (not retweet/reply)
tweets = twitter_user.timeline(
count=200, exclude_replies=True, include_rts=False,
tweet_mode='extended', since_id=db_user.newest_tweet_id
)
        # If we got new tweets, record the newest tweet's id as the user's
        # newest_tweet_id so later calls only fetch newer tweets.
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
embedding = BASILICA.embed_sentence(tweet.full_text,
model='twitter')
db_tweet = Tweet(id=tweet.id, text=tweet.full_text[:300],
embedding=embedding)
db_user.tweets.append(db_tweet) # memory, ram
DB.session.add(db_tweet) # in data base
except Exception as e:
print('Error processing {}: {}'.format(username, e))
raise e
else:
DB.session.commit()
def insert_example_users():
# Making Users)
add_or_update_user('austen')
add_or_update_user('elonmusk')
add_or_update_user('jonny')
``` |
{
"source": "jonrkarr/paper_2018_curr_opin_biotechnol",
"score": 2
} |
#### File: paper_2018_curr_opin_biotechnol/paper_2018_curr_opin_biotechnol/analyze_biomodels.py
```python
import bioservices
import ete3
import glob
import datanator.data_source.bio_portal
import libsbml
import os
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import subprocess
import wc_utils.util.list
import wc_utils.workbook.core
import wc_utils.workbook.io
RELEASE_NAME = 'BioModels_Database-r30_pub-sbml_files'
SBML_FILES_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/biomodels/releases/2016-05-10/{}.tar.bz2'.format(RELEASE_NAME)
DATA_DIRNAME = os.path.join(os.path.dirname(__file__), 'data')
SBML_FILES_ARCHIVE_FILENAME = os.path.join(DATA_DIRNAME, 'sbml_files.tar.bz2')
SBML_FILES_DIRNAME = os.path.join(DATA_DIRNAME, 'sbml_files')
SBML_FILES_DATABASE_FILENAME = os.path.join(DATA_DIRNAME, 'sbml_files.sqlite')
ANNOTATIONS_EXCEL_FILENAME = os.path.join(DATA_DIRNAME, 'models.xlsx')
SUMMARY_EXCEL_FILENAME = os.path.join(DATA_DIRNAME, 'summary.xlsx')
def create_data_directory():
""" Create directory for files """
if not os.path.isdir(DATA_DIRNAME):
os.makedirs(DATA_DIRNAME)
def download_biomodels():
""" Download BioModels release and extract content """
# download release
if not os.path.isfile(SBML_FILES_ARCHIVE_FILENAME):
print('Downloading BioModels ...')
subprocess.call(['wget', SBML_FILES_URL, '-O', SBML_FILES_ARCHIVE_FILENAME])
print(' done.')
# extract release archive
if not os.path.isdir(SBML_FILES_DIRNAME):
print('Unpacking BioModels ...')
subprocess.call(['tar', '-xvjf', SBML_FILES_ARCHIVE_FILENAME, '-C', DATA_DIRNAME])
os.rename(os.path.join(DATA_DIRNAME, RELEASE_NAME), SBML_FILES_DIRNAME)
print(' done.')
def get_database_engine():
"""
Returns:
:obj:`sqlalchemy.engine.Engine`: database engine
"""
return sqlalchemy.create_engine('sqlite:///' + SBML_FILES_DATABASE_FILENAME)
def get_database_session():
"""
Returns:
:obj:`sqlalchemy.orm.session.Session`: sqlalchemy session
"""
engine = get_database_engine()
return sqlalchemy.orm.sessionmaker(bind=engine)()
def setup_database(clear=False):
if not os.path.isfile(SBML_FILES_DATABASE_FILENAME) or clear:
clear_database()
def clear_database():
engine = get_database_engine()
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
def load_database():
sbml_reader = libsbml.SBMLReader()
session = get_database_session()
if session.query(Model).count() > 0:
return
print('Loading models ...')
curated_models = sorted(glob.glob(os.path.join(SBML_FILES_DIRNAME, 'curated', '*.xml')))
for i_model, filename in enumerate(curated_models):
if i_model % 100 == 0:
print(' Loading curated model {} of {}'.format(i_model + 1, len(curated_models)))
model = load_model_into_database(filename, True, sbml_reader, session)
non_curated_models = sorted(glob.glob(os.path.join(SBML_FILES_DIRNAME, 'non_curated', '*.xml')))
for i_model, filename in enumerate(non_curated_models):
if i_model % 100 == 0:
print(' Loading non-curated model {} of {}'.format(i_model + 1, len(non_curated_models)))
model = load_model_into_database(filename, False, sbml_reader, session)
print(' done.')
session.commit()
def load_model_into_database(filename, curated, sbml_reader, session):
"""
Args:
filename (:obj:`str`): path to a SBML file
curated (:obj:`bool`): :obj:`True`, the model has been curated
sbml_reader (:obj:`libsbml.SBMLReader`): SBML file reader
session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session
Returns:
:obj:`Model`: model
"""
# todo: detect mathematical type (ODE, SSA, logical, FBA, spatial, rule-based)
doc = sbml_reader.readSBMLFromFile(filename)
sbml_model = doc.getModel()
if not sbml_model:
return None
id, _, _ = os.path.basename(filename).partition('.xml')
label = sbml_model.getId()
name = sbml_model.getName()
annotations = parse_model_annotations(sbml_model, session)
type = parse_model_type(doc, annotations)
num_reaction_parameters = 0
reactions_sbml = sbml_model.getListOfReactions()
for i_reaction in range(sbml_model.getNumReactions()):
reaction_sbml = reactions_sbml.get(i_reaction)
kinetic_law_sbml = reaction_sbml.getKineticLaw()
if kinetic_law_sbml:
num_reaction_parameters += kinetic_law_sbml.getNumParameters()
num_reaction_parameters += kinetic_law_sbml.getNumLocalParameters()
model = get_or_create_object(session, Model, id=id)
model.label = label
model.name = name
model.type = type
model.compartments = sbml_model.getNumCompartments()
model.species = sbml_model.getNumSpecies()
model.rules = sbml_model.getNumRules()
model.reactions = sbml_model.getNumReactions()
model.global_parameters = sbml_model.getNumParameters()
model.reaction_parameters = num_reaction_parameters
model.curated = curated
model.annotations.extend(annotations)
session.add(model)
return model
def parse_model_type(doc, annotations):
"""
Args:
doc (:obj:`libsbml.SBMLDocument`): SBML document
annotations (:obj:`list`: of :obj:`Annotation`): list of annotations
Returns:
:obj:`str`: model type
"""
model = doc.getModel()
if doc.getPackageRequired('spatial'):
return 'spatial'
if doc.getPackageRequired('qual'):
return 'logical'
if doc.getPackageRequired('multi'):
return 'rule-based'
for annotation in annotations:
if annotation.namespace == 'mamo' and annotation.id == 'MAMO_0000046':
return 'ordinary differential equation'
if doc.getPackageRequired('fbc'):
return 'flux balance analysis'
reactions = model.getListOfReactions()
for i_reaction in range(model.getNumReactions()):
reaction = reactions.get(i_reaction)
kinetic_law = reaction.getKineticLaw()
if kinetic_law:
has_lower_bound = False
has_upper_bound = False
has_flux_value = False
has_obj_coeff = False
parameters = kinetic_law.getListOfParameters()
for i_parameter in range(kinetic_law.getNumParameters()):
parameter = parameters.get(i_parameter)
id = parameter.getId()
if id == 'LOWER_BOUND':
has_lower_bound = True
elif id == 'UPPER_BOUND':
has_upper_bound = True
elif id == 'FLUX_VALUE':
has_flux_value = True
elif id == 'OBJECTIVE_COEFFICIENT':
has_obj_coeff = True
parameters = kinetic_law.getListOfLocalParameters()
for i_parameter in range(kinetic_law.getNumLocalParameters()):
parameter = parameters.get(i_parameter)
id = parameter.getId()
if id == 'LOWER_BOUND':
has_lower_bound = True
elif id == 'UPPER_BOUND':
has_upper_bound = True
elif id == 'FLUX_VALUE':
has_flux_value = True
elif id == 'OBJECTIVE_COEFFICIENT':
has_obj_coeff = True
if has_lower_bound and has_upper_bound and has_flux_value and has_obj_coeff:
return 'flux balance analysis'
return None
def parse_model_annotations(model, session):
"""
Args:
model (:obj:`libsbml.Model`): model
session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session
Returns:
:obj:`list` of :obj:`Annotation`: list of annotations
"""
if not model.isSetAnnotation():
        return []
    annotations_sbml = model.getAnnotation().getChild('RDF').getChild('Description')
annotations = []
attr = libsbml.XMLTriple('resource', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'rdf')
for i_child in range(annotations_sbml.getNumChildren()):
child = annotations_sbml.getChild(i_child)
relationship = child.getName()
if relationship in ['creator', 'created', 'modified']:
continue
for i_bag in range(child.getNumChildren()):
bag = child.getChild(i_bag)
if bag.getName() != 'Bag':
raise ValueError('Expected Bag, got {0}.{1} for model {2}'.format(child.getName(), bag.getName(), model.getId()))
for i_li in range(bag.getNumChildren()):
li = bag.getChild(i_li)
if li.getName() != 'li':
raise ValueError('Expected {0}.{1}.li, got {0}.{1}.{2} for model {3}'.format(
child.getName(), bag.getName(), li.getName(), model.getId()))
resource = li.getAttrValue(attr)
if resource.startswith('http://identifiers.org/'):
tmp = resource.split('/')
namespace = tmp[3]
id = '/'.join(tmp[4:])
else:
namespace = 'url'
id = resource
annotations.append(get_or_create_object(session, Annotation, namespace=namespace, id=id, relationship=relationship))
return annotations
def get_or_create_object(session, cls, **kwargs):
"""
Args:
session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session
cls (:obj:`type`): class to search or create
**kwargs (:obj:`dict`): dictionary of keyword arguments to pass to filter_by and the class constructor
Returns:
:obj:`Base`
"""
q = session.query(cls).filter_by(**kwargs)
if q.count():
return q.first()
return cls(**kwargs)
def model_to_str(model):
"""
Args:
model (:obj:`Model`): model
Returns:
:obj:`str`: string representation of model
"""
str = model.id
for annotation in model.annotations:
str += '\n {}: {}:{}'.format(annotation.relationship, annotation.namespace, annotation.id)
return str
def export_annotations_to_excel():
if os.path.isfile(ANNOTATIONS_EXCEL_FILENAME):
return
session = get_database_session()
wb = wc_utils.workbook.core.Workbook()
ws_models = wb['Models'] = wc_utils.workbook.core.Worksheet()
ws_model_annotations = wb['Model annotations'] = wc_utils.workbook.core.Worksheet()
ws_annotations = wb['Annotations'] = wc_utils.workbook.core.Worksheet()
ws_namespaces = wb['Namespaces'] = wc_utils.workbook.core.Worksheet()
style = wc_utils.workbook.io.WorkbookStyle()
style['Models'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1,
head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
style['Model annotations'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1,
head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
style['Annotations'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1,
head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
style['Namespaces'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1,
head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
ws_models.append(wc_utils.workbook.core.Row([
'ID', 'Label', 'Name', 'Type',
'Compartments', 'Species', 'Rules', 'Reactions', 'Global parameters', 'Reaction parameters',
'Superkingdom', 'Kingdom', 'Phylum', 'Species',
'Is curated', 'Number annotations']))
ws_model_annotations.append(wc_utils.workbook.core.Row(['Model', 'Relationship', 'Namespace', 'ID', 'Description']))
ws_annotations.append(wc_utils.workbook.core.Row(['Relationship', 'Namespace', 'Frequency']))
ws_namespaces.append(wc_utils.workbook.core.Row(['Namespace', 'Frequency']))
bio_portal = datanator.data_source.bio_portal.BioPortal()
bio_portal_ontologies = bio_portal.get_ontologies()
    del(bio_portal_ontologies['MAMO'])  # remove MAMO because its OWL can't be parsed by pronto
kegg = bioservices.kegg.KEGG()
reactome = bioservices.reactome.Reactome()
loaded_ontologies = {}
ncbi_taxa = ete3.NCBITaxa()
n_model = session.query(Model).count()
print('Annotating models ...')
for i_model, model in enumerate(session.query(Model).order_by(Model.id).all()):
if i_model % 100 == 0:
print(' Annotating model {} of {}'.format(i_model + 1, n_model))
species_name = None
phylum_name = None
kingdom_name = None
superkingdom_name = None
taxon_id = next((int(float(a.id)) for a in model.annotations if a.namespace == 'taxonomy'), None)
if taxon_id:
for taxon_id, rank in ncbi_taxa.get_rank(ncbi_taxa.get_lineage(taxon_id)).items():
if rank == 'species':
species_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'phylum':
phylum_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'kingdom':
kingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'superkingdom':
superkingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]
ws_models.append(wc_utils.workbook.core.Row([
model.id, model.label, model.name, model.type,
model.compartments or None, model.species or None, model.rules or None, model.reactions or None,
model.global_parameters or None, model.reaction_parameters or None,
superkingdom_name, kingdom_name, phylum_name, species_name,
model.curated, len(model.annotations)
]))
for annotation in sorted(model.annotations, key=lambda ann: (ann.relationship, ann.namespace, ann.id)):
onto_id = annotation.namespace.upper()
if onto_id.startswith('OBO.'):
onto_id = onto_id[4:]
term_id = annotation.id
if onto_id in bio_portal_ontologies and term_id.startswith(onto_id + ':'):
if onto_id not in loaded_ontologies:
loaded_ontologies[onto_id] = bio_portal.get_ontology(onto_id)
onto = loaded_ontologies[onto_id]
if term_id in onto:
description = onto[term_id].name
else:
description = None
elif annotation.namespace == 'kegg.pathway':
md = kegg.parse(kegg.get(annotation.id))
if isinstance(md, dict):
description = md['NAME'][0]
else:
description = None
elif annotation.namespace == 'reactome':
md = reactome.query_by_id('Pathway', annotation.id)
if 'displayName' in md:
description = md['displayName']
else:
description = None
elif annotation.namespace == 'taxonomy':
description = ncbi_taxa.translate_to_names([int(float(annotation.id))])[0]
else:
description = None
ws_model_annotations.append(wc_utils.workbook.core.Row(
[model.id, annotation.relationship, annotation.namespace, annotation.id, description]))
print(' done')
q = session \
.query(Annotation.relationship, Annotation.namespace, sqlalchemy.func.count(Model._id)) \
.join(Model, Annotation.models) \
.group_by(Annotation.relationship, Annotation.namespace) \
.order_by(sqlalchemy.func.count(Model._id).desc())
for relationship, namespace, count in q.all():
ws_annotations.append(wc_utils.workbook.core.Row([relationship, namespace, count]))
q = session \
.query(Annotation.namespace, sqlalchemy.func.count(Model._id)) \
.join(Model, Annotation.models) \
.group_by(Annotation.namespace) \
.order_by(sqlalchemy.func.count(Model._id).desc())
for namespace, count in q.all():
ws_namespaces.append(wc_utils.workbook.core.Row([namespace, count]))
wc_utils.workbook.io.ExcelWriter(ANNOTATIONS_EXCEL_FILENAME).run(wb, style=style)
def summarize_models():
wb = wc_utils.workbook.core.Workbook()
style = wc_utils.workbook.io.WorkbookStyle()
ws = wb['Pathways'] = wc_utils.workbook.core.Worksheet()
style['Pathways'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1, head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
summarize_models_by_pathway(ws)
ws_species = wb['Species'] = wc_utils.workbook.core.Worksheet()
ws_phyla = wb['Phyla'] = wc_utils.workbook.core.Worksheet()
style['Species'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1, head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
style['Phyla'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1, head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
summarize_models_by_taxonomy(ws_species, ws_phyla)
ws = wb['Mathematical types'] = wc_utils.workbook.core.Worksheet()
style['Mathematical types'] = wc_utils.workbook.io.WorksheetStyle(
head_rows=1, head_columns=1, head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
summarize_models_by_mathematical_type(ws)
wc_utils.workbook.io.ExcelWriter(SUMMARY_EXCEL_FILENAME).run(wb, style=style)
def summarize_models_by_pathway(ws):
"""
Args:
wc_utils.workbook.core.Worksheet
"""
session = get_database_session()
#bio_portal = datanator.data_source.bio_portal.BioPortal()
#onto = bio_portal.get_ontology('EFO')
#self.assertEqual(onto['SBO:0000001'].name, 'rate law')
def summarize_models_by_taxonomy(ws_species, ws_phyla):
"""
Args:
wc_utils.workbook.core.Worksheet
"""
session = get_database_session()
q_annotated = session \
.query(Annotation.id, Model.curated, sqlalchemy.func.count(Model._id)) \
.join(Model, Annotation.models) \
.filter(Annotation.namespace == 'taxonomy') \
.group_by(Annotation.id, Model.curated)
annotated_model_ids = [m[0] for m in session
.query(Model._id)
.join(Annotation, Model.annotations)
.filter(Annotation.namespace == 'taxonomy')
.group_by(Model._id)
.all()]
q_unannotated = session \
.query(Model.curated, sqlalchemy.func.count(Model._id)) \
.filter(~Model._id.in_(annotated_model_ids)) \
.group_by(Model.curated)
count_unannotated = {}
for curated, count in q_unannotated.all():
count_unannotated[curated] = count
ncbi_taxa = ete3.NCBITaxa()
species = {}
phyla = {}
for model_taxon_id, model_curated, count in q_annotated.all():
model_taxon_id = int(float(model_taxon_id))
species_name = None
phylum_name = None
kingdom_name = None
superkingdom_name = None
for taxon_id, rank in ncbi_taxa.get_rank(ncbi_taxa.get_lineage(model_taxon_id)).items():
if rank == 'species':
species_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'phylum':
phylum_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'kingdom':
kingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if rank == 'superkingdom':
superkingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]
if (superkingdom_name, kingdom_name, phylum_name, species_name) not in species:
species[(superkingdom_name, kingdom_name, phylum_name, species_name)] = {True: 0, False: 0}
species[(superkingdom_name, kingdom_name, phylum_name, species_name)][model_curated] += count
if (superkingdom_name, kingdom_name, phylum_name) not in phyla:
phyla[(superkingdom_name, kingdom_name, phylum_name)] = {True: 0, False: 0}
phyla[(superkingdom_name, kingdom_name, phylum_name)][model_curated] += count
for (superkingdom_name, kingdom_name, phylum_name, species_name), counts in species.items():
ws_species.append(wc_utils.workbook.core.Row([
species_name or '<Annotated rank above species>',
phylum_name,
kingdom_name,
superkingdom_name,
counts[True] or None,
counts[False] or None]))
ws_species.sort(key=lambda row: (row[-2] or 0) + (row[-1] or 0), reverse=True)
    ws_species.insert(0, wc_utils.workbook.core.Row(['Not annotated', None, None, None, count_unannotated[True], count_unannotated[False]]))
ws_species.insert(0, wc_utils.workbook.core.Row(['Species', 'Phylum', 'Kingdom', 'Superkingdom', 'Curated', 'Non-curated']))
for (superkingdom_name, kingdom_name, phylum_name), counts in phyla.items():
ws_phyla.append(wc_utils.workbook.core.Row([
superkingdom_name,
kingdom_name,
phylum_name or '<Annotated rank above phylum>',
counts[True] or None,
counts[False] or None]))
ws_phyla.sort(key=lambda row: (row[-2] or 0) + (row[-1] or 0), reverse=True)
    ws_phyla.insert(0, wc_utils.workbook.core.Row(['Not annotated', None, None, count_unannotated[True], count_unannotated[False]]))
ws_phyla.insert(0, wc_utils.workbook.core.Row(['Superkingdom', 'Kingdom', 'Phylum', 'Curated', 'Non-curated']))
def summarize_models_by_mathematical_type(ws):
"""
Args:
wc_utils.workbook.core.Worksheet
"""
session = get_database_session()
q = session.query(Model.type, Model.curated, sqlalchemy.func.count(Model._id)) \
.group_by(Model.type, Model.curated) \
.order_by(Model.type)
data = {}
for type, curated, count in q.all():
if type not in data:
data[type] = {}
data[type][curated] = count
ws.append(wc_utils.workbook.core.Row(['Type', 'Curated', 'Non-curated']))
for type in data.keys():
ws.append(wc_utils.workbook.core.Row([
type[0].upper() + type[1:] if type else 'Unknown',
data[type][True] if True in data[type] else None,
data[type][False] if False in data[type] else None,
]))
Base = sqlalchemy.ext.declarative.declarative_base()
# :obj:`Base`: base model for local sqlite database
model_annotation = sqlalchemy.Table(
'model_annotation', Base.metadata,
sqlalchemy.Column('model__id', sqlalchemy.Integer, sqlalchemy.ForeignKey('model._id'), index=True),
sqlalchemy.Column('annotation__id', sqlalchemy.Integer, sqlalchemy.ForeignKey('annotation._id'), index=True),
)
# :obj:`sqlalchemy.Table`: Model:Annotation many-to-many association table
class Model(Base):
_id = sqlalchemy.Column(sqlalchemy.Integer(), primary_key=True)
id = sqlalchemy.Column(sqlalchemy.String())
label = sqlalchemy.Column(sqlalchemy.String())
name = sqlalchemy.Column(sqlalchemy.String())
type = sqlalchemy.Column(sqlalchemy.String())
curated = sqlalchemy.Column(sqlalchemy.Boolean())
compartments = sqlalchemy.Column(sqlalchemy.Integer())
species = sqlalchemy.Column(sqlalchemy.Integer())
rules = sqlalchemy.Column(sqlalchemy.Integer())
reactions = sqlalchemy.Column(sqlalchemy.Integer())
global_parameters = sqlalchemy.Column(sqlalchemy.Integer())
reaction_parameters = sqlalchemy.Column(sqlalchemy.Integer())
annotations = sqlalchemy.orm.relationship('Annotation', secondary=model_annotation, backref=sqlalchemy.orm.backref('models'))
__tablename__ = 'model'
class Annotation(Base):
_id = sqlalchemy.Column(sqlalchemy.Integer(), primary_key=True)
namespace = sqlalchemy.Column(sqlalchemy.String(), index=True)
id = sqlalchemy.Column(sqlalchemy.String(), index=True)
relationship = sqlalchemy.Column(sqlalchemy.String(), index=True)
__tablename__ = 'annotation'
if __name__ == "__main__":
create_data_directory()
download_biomodels()
setup_database()
load_database()
export_annotations_to_excel()
summarize_models()
``` |
{
"source": "JonRob812/CAMOnion",
"score": 2
} |
#### File: CAMOnion/CAMOnion/cadgraphicsview.py
```python
from typing import Optional

from PyQt5 import QtWidgets as qw, QtCore as qc, QtGui as qg
from ezdxf.addons.drawing.pyqt import _get_x_scale
class CADGraphicsView(qw.QGraphicsView):
def __init__(self, view_buffer: float = 0.2):
super().__init__()
self._zoom = 1
self._default_zoom = 1
self._zoom_limits = (0.5, 100)
self._view_buffer = view_buffer
self.setTransformationAnchor(qw.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(qw.QGraphicsView.AnchorUnderMouse)
self.setVerticalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOff)
self.setDragMode(qw.QGraphicsView.ScrollHandDrag)
self.setFrameShape(qw.QFrame.NoFrame)
self.setRenderHints(qg.QPainter.Antialiasing | qg.QPainter.TextAntialiasing | qg.QPainter.SmoothPixmapTransform)
def clear(self):
pass
def fit_to_scene(self):
r = self.sceneRect()
bx, by = r.width() * self._view_buffer / 2, r.height() * self._view_buffer / 2
self.fitInView(self.sceneRect().adjusted(-bx, -by, bx, by), qc.Qt.KeepAspectRatio)
self._default_zoom = _get_x_scale(self.transform())
self._zoom = 1
def _get_zoom_amount(self) -> float:
return _get_x_scale(self.transform()) / self._default_zoom
def wheelEvent(self, event: qg.QWheelEvent) -> None:
# dividing by 120 gets number of notches on a typical scroll wheel. See QWheelEvent documentation
delta_notches = event.angleDelta().y() / 120
zoom_per_scroll_notch = 0.2
factor = 1 + zoom_per_scroll_notch * delta_notches
resulting_zoom = self._zoom * factor
if resulting_zoom < self._zoom_limits[0]:
factor = self._zoom_limits[0] / self._zoom
elif resulting_zoom > self._zoom_limits[1]:
factor = self._zoom_limits[1] / self._zoom
self.scale(factor, factor)
self._zoom *= factor
class CADGraphicsViewWithOverlay(CADGraphicsView):
element_selected = qc.pyqtSignal(object, qc.QPointF)
graphics_view_clicked = qc.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__()
self._current_item: Optional[qw.QGraphicsItem] = None
self.setParent(parent)
def clear(self):
super().clear()
self._current_item = None
def drawForeground(self, painter: qg.QPainter, rect: qc.QRectF) -> None:
        if self._current_item is not None and self._current_item.isEnabled():
            # Highlight the hovered item with a translucent green overlay
            r = self._current_item.boundingRect()
            r.setHeight(r.height() - .1)
            r = self._current_item.sceneTransform().mapRect(r)
            painter.fillRect(r, qg.QColor(0, 255, 0, 100))
def mouseMoveEvent(self, event: qg.QMouseEvent) -> None:
pos = self.mapToScene(event.pos())
self._current_item = self.scene().itemAt(pos, qg.QTransform())
self.element_selected.emit(self._current_item, pos)
# print(self.element_selected, pos)
self.scene().invalidate(self.sceneRect(), qw.QGraphicsScene.ForegroundLayer)
super().mouseMoveEvent(event)
def mousePressEvent(self, event: qg.QMouseEvent) -> None:
if self._current_item:
self.graphics_view_clicked.emit(self._current_item)
print(event.pos())
super().mousePressEvent(event)
```
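A short sketch of hosting the overlay view in a bare Qt application (assumptions: PyQt5 is available and the module is importable as `CAMOnion.cadgraphicsview`; in the real program the scene is populated from a DXF document rather than left empty):

```python
import sys

from PyQt5 import QtWidgets as qw

from CAMOnion.cadgraphicsview import CADGraphicsViewWithOverlay

app = qw.QApplication(sys.argv)
view = CADGraphicsViewWithOverlay()
view.setScene(qw.QGraphicsScene())  # placeholder scene; normally filled with DXF items
view.element_selected.connect(lambda item, pos: print('hover:', item, pos))
view.graphics_view_clicked.connect(lambda item: print('clicked:', item))
view.show()
view.fit_to_scene()
sys.exit(app.exec_())
```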
#### File: CAMOnion/database/tables.py
```python
from abc import ABC
from sqlalchemy import create_engine, select, Table, Column, Sequence, Integer, String, MetaData, ForeignKey, func, \
exists, DECIMAL, and_, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from decimal import Decimal as D
import sqlalchemy.types as types
Base = declarative_base()
class SqliteNumeric(types.TypeDecorator):
impl = types.String
def load_dialect_impl(self, dialect):
return dialect.type_descriptor(types.VARCHAR(100))
def process_bind_param(self, value, dialect):
return str(value)
def process_result_value(self, value, dialect):
return round(D(value), 5)
# can overwrite the imported type name
# @note: the TypeDecorator does not guarantee the scale and precision;
# you can enforce those with separate checks.
Numeric = SqliteNumeric
class Tool(Base):
__tablename__ = 'tools'
id = Column(Integer, primary_key=True)
qb_id = Column(Integer, unique=True)
tool_type_id = Column(Integer, ForeignKey('tool_types.id'))
tool_number = Column(Integer)
name = Column(String(50))
diameter = Column(Numeric)
number_of_flutes = Column(Integer)
pitch = Column(Numeric)
operations = relationship('Operation', back_populates='tool')
tool_type = relationship('Tool_Type', back_populates='tools')
def __str__(self):
return str(self.name)
class Tool_Type(Base):
__tablename__ = 'tool_types'
id = Column(Integer, primary_key=True)
tool_type = Column(String(50))
tools = relationship("Tool", back_populates='tool_type')
class Feature(Base):
__tablename__ = 'features'
name = Column(String(50), unique=True)
id = Column(Integer, primary_key=True)
description = Column(String(50))
operations = relationship('Operation', back_populates='feature')
feature_type_id = Column(Integer, ForeignKey('feature_types.id'))
feature_type = relationship('Feature_Type', back_populates='features')
class Feature_Type(Base):
__tablename__ = 'feature_types'
id = Column(Integer, primary_key=True)
feature_type = Column(String(50), unique=True)
features = relationship('Feature', back_populates='feature_type')
camo_ops = relationship('CamoOp', back_populates='feature_type')
class Operation(Base):
__tablename__ = 'operations'
id = Column(Integer, primary_key=True)
feature_id = Column(Integer, ForeignKey('features.id'))
feature = relationship('Feature', back_populates='operations')
tool_id = Column(Integer, ForeignKey('tools.id'))
tool = relationship('Tool', back_populates='operations')
camo_op_id = Column(Integer, ForeignKey('camo_ops.id'))
camo_op = relationship('CamoOp', back_populates='operations')
peck = Column(Numeric)
feed = Column(Numeric)
speed = Column(Numeric)
def fixed_speed(self, max_rpm):
if self.speed > max_rpm:
return max_rpm
else:
return self.speed
def fixed_feed(self, max_rpm):
if self.speed > max_rpm:
return self.feed * (max_rpm / self.speed)
else:
return self.feed
class CamoOp(Base):
__tablename__ = 'camo_ops'
id = Column(Integer, primary_key=True)
op_type = Column(String(50))
function = Column(String(50))
priority = Column(Numeric)
feature_type_id = Column(Integer, ForeignKey('feature_types.id'))
feature_type = relationship('Feature_Type', back_populates='camo_ops')
operations = relationship('Operation', back_populates='camo_op')
def __str__(self):
        return str(self.op_type)
class Machine(Base):
__tablename__ = 'machines'
id = Column(Integer, primary_key=True)
name = Column(String(50))
max_rpm = Column(Integer)
spot = Column(String(16))
drill = Column(String(16))
tap = Column(String(16))
peck = Column(String(16))
ream = Column(String(16))
countersink = Column(String(16))
drill_format = Column(String(1000))
tap_format = Column(String(1000))
program_start = Column(String(1000))
program_end = Column(String(1000))
tool_start = Column(String(1000))
tool_end = Column(String(1000))
op_start = Column(String(1000))
```
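A minimal sketch of wiring these models to a database (assumptions: a throwaway SQLite file is acceptable and the module is importable as `CAMOnion.database.tables`):

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from CAMOnion.database.tables import Base, Tool, Tool_Type

engine = create_engine('sqlite:///camonion_test.sqlite')  # assumed throwaway database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

end_mill = Tool(name='1/2 inch end mill', tool_number=1, diameter=0.5, number_of_flutes=4,
                tool_type=Tool_Type(tool_type='End Mill'))
session.add(end_mill)
session.commit()
print(session.query(Tool).first())  # prints the tool name via Tool.__str__
```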
#### File: CAMOnion/data_models/file_tree.py
```python
from PyQt5.QtCore import QAbstractItemModel, QModelIndex, Qt
import typing
from CAMOnion.core import CamoItemTypes as ct
class FileTreeNode(object):
def __init__(self, data):
self._data = data
if type(data) == tuple:
self._data = list(data)
if type(data) is str or not hasattr(data, "__getitem__"):
self._data = [data]
self._columncount = len(self._data)
self._children = []
self._parent = None
self._row = 0
self.type = None
def data(self, column):
if 0 <= column < len(self._data):
return self._data[column]
def columnCount(self):
return self._columncount
def childCount(self):
return len(self._children)
def child(self, row):
if 0 <= row < self.childCount():
return self._children[row]
def parent(self):
return self._parent
def row(self):
return self._row
def addChild(self, child):
child._parent = self
child._row = len(self._children)
self._children.append(child)
self._columncount = max(child.columnCount(), self._columncount)
def removeChild(self, position):
if position < 0 or position > len(self._children):
return False
child = self._children.pop(position)
child._parent = None
return True
class FileTreeModel(QAbstractItemModel):
def __init__(self, camo_file, session):
super().__init__()
self.session = session
self.origin_index = None
self.setup_index = None
self.geo_index = None
self.indexes = {}
self._root = FileTreeNode(None)
origins_node = FileTreeNode('Origins')
setups_node = FileTreeNode('Setups')
geo_node = FileTreeNode('Geometry')
self._root.addChild(setups_node)
self._root.addChild(origins_node)
self._root.addChild(geo_node)
for origin in camo_file.origins:
origin_node = FileTreeNode((origin.name, origin))
origin_node.type = ct.OOrigin
origins_node.addChild(origin_node)
for setup in camo_file.setups:
features = [feature for feature in camo_file.features if feature.setup == setup]
setup_node = FileTreeNode((setup.name, setup))
setup_node.type = ct.OSetup
setups_node.addChild(setup_node)
for feature in features:
feature_node = FileTreeNode((feature.db_feature(self.session).name, feature))
feature_node.type = ct.OFeature
setup_node.addChild(feature_node)
geo_entities = camo_file.dxf_doc.modelspace().entity_space.entities
for entity in geo_entities:
entity_node = FileTreeNode((str(entity), entity))
entity_node.type = ct.ODXF
geo_node.addChild(entity_node)
def index(self, row, column, parent_index=None):
if not parent_index:
parent_index = QModelIndex()
if not parent_index.isValid():
parent = self._root
else:
parent = parent_index.internalPointer()
if not QAbstractItemModel.hasIndex(self, row, column, parent_index):
return QModelIndex()
if child := parent.child(row):
index = QAbstractItemModel.createIndex(self, row, column, child)
if 'Geometry' in child._data:
self.geo_index = index
if 'Setups' in child._data:
self.setup_index = index
if hasattr(child, 'entity'):
self.indexes[str(child.entity)] = index
return index
else:
return QModelIndex()
def parent(self, index):
if index.isValid():
if p := index.internalPointer().parent():
return QAbstractItemModel.createIndex(self, p.row(), 0, p)
return QModelIndex()
def rowCount(self, parent_index=None):
if parent_index and parent_index.isValid():
return parent_index.internalPointer().childCount()
return self._root.childCount()
def columnCount(self, index=None):
if index.isValid():
return index.internalPointer().columnCount()
return self._root.columnCount()
def data(self, index=None, role=None):
if not index.isValid():
return None
node = index.internalPointer()
if role == Qt.DisplayRole:
return node.data(index.column())
return None
def setData(self, index: QModelIndex, value: typing.Any, role: int = ...) -> bool:
if index.isValid() and role == Qt.EditRole:
node = index.internalPointer()
node._data[index.column()] = value
row = index.row()
column = index.column()
return True
return False
def flags(self, index: QModelIndex) -> Qt.ItemFlags:
if index.isValid():
node = index.internalPointer()
flags = Qt.ItemIsEnabled
if node.childCount() == 0:
flags |= Qt.ItemIsSelectable
return flags
return super(FileTreeModel, self).flags(index)
def removeRow(self, row: int, parent: QModelIndex = QModelIndex()) -> bool:
if not parent.isValid():
# parent is not valid when it is the root node, since the "parent"
# method returns an empty QModelIndex
            parentNode = self._root
else:
parentNode = parent.internalPointer() # the node
parentNode.removeChild(row)
return True
```
#### File: CAMOnion/dialogs/featuredialog.py
```python
from CAMOnion.ui.camo_feature_dialog_ui import Ui_Dialog
from PyQt5.QtWidgets import QDialog, QGridLayout
class FeatureDialog(QDialog, Ui_Dialog):
def __init__(self, controller):
super().__init__()
self.controller = controller
self.setupUi(self)
self.frame_layout = QGridLayout(self.frame)
self.buttonBox.accepted.connect(self.accept)
```
#### File: CAMOnion/engine/drill.py
```python
def drill(op):
g = op.machine.__dict__[op.base_operation.camo_op.op_type.lower()]
if g == '83':
g = f'{g} Q{round(op.base_operation.peck, 4)}'
remaining_points = op.points[1:]
canned_points = '\n'.join([f'X{point[0]:.4f} Y{point[1]:.4f}' for point in remaining_points])
points = canned_points
if op.base_operation.camo_op.op_type == 'Tap':
code_format = 'tap_format'
p = float(op.base_operation.tool.pitch)
if float(p) < 4:
p = float(p) / 25.4
else:
p = round(1 / float(op.base_operation.tool.pitch), 4)
d = op.part_feature.depths[op.base_operation.camo_op.op_type]
s = str(int(op.base_operation.fixed_speed(op.part_feature.setup.machine.max_rpm)))
f = round(float(p) * float(s),4)
elif op.base_operation.camo_op.op_type == 'Ream':
code_format = 'drill_format'
d = op.part_feature.depths['Ream']
f = op.base_operation.fixed_feed(op.part_feature.setup.machine.max_rpm)
p = None
s = None
elif op.base_operation.camo_op.op_type == 'Countersink':
code_format = 'drill_format'
d = op.part_feature.depths[op.base_operation.camo_op.op_type]
f = op.base_operation.fixed_feed(op.part_feature.setup.machine.max_rpm)
p = None
s = None
else: # must be a Drill
code_format = 'drill_format'
d = op.part_feature.depths[op.base_operation.camo_op.op_type]
f = op.base_operation.fixed_feed(op.part_feature.setup.machine.max_rpm)
p = None
s = None
if f is not None:
f = str(round(float(f), 2))
if p is not None:
p = str(round(float(p), 4))
return op.machine.__dict__[code_format].format(code=g, depth=d, r_plane=.1, speed=s, feed=f, pitch=p,
points=points)
```
#### File: CAMOnion/file/__init__.py
```python
from datetime import datetime
import pickle
import ezdxf
import copy
from CAMOnion.core import Origin
class CamoFile:
def __init__(self):
self.filename = None
self.can_save = False
self.date_created = datetime.now()
self.date_saved = None
self.active_origin = Origin('Home - G54')
self.setups = []
self.origins = []
self.features = []
self.operations = []
self.geometry = []
self.origins.append(self.active_origin)
self.dxf_doc = ezdxf.new(ezdxf.DXF2018)
def set_filename(self, filename):
self.filename = filename
self.can_save = True
def save_camo_file(camo_file):
if camo_file.can_save:
camo_file.date_saved = datetime.now()
with open(camo_file.filename, 'wb') as file:
save_camo = copy.copy(camo_file)
save_camo.dxf_doc_encoded = save_camo.dxf_doc.encode_base64()
del save_camo.dxf_doc
pickle.dump(save_camo, file)
del save_camo
def open_camo_file(camo_file):
with open(camo_file, 'rb') as file:
camo_file = pickle.load(file)
camo_file.dxf_doc = ezdxf.decode_base64(camo_file.dxf_doc_encoded)
del camo_file.dxf_doc_encoded
return camo_file
```
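A hypothetical round-trip sketch for the pickle-based file format above (assumptions: the module is importable as `CAMOnion.file`, ezdxf is installed, and `bracket.camo` is a throwaway path):

```python
from CAMOnion.file import CamoFile, open_camo_file, save_camo_file

part = CamoFile()
part.set_filename('bracket.camo')                          # enables saving
part.dxf_doc.modelspace().add_circle((0, 0), radius=1.0)   # add some geometry via ezdxf
save_camo_file(part)                                       # the DXF doc is base64-encoded before pickling

reloaded = open_camo_file('bracket.camo')
print(reloaded.date_saved, len(reloaded.origins))
```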
#### File: CAMOnion/CAMOnion/operationwindow.py
```python
from PyQt5.QtWidgets import QMainWindow
from CAMOnion.ui.camo_operation_window_ui import Ui_MainWindow
class OperationWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
```
#### File: CAMOnion/widgets/facewidget.py
```python
from CAMOnion.ui.camo_face_widget_ui import Ui_Form
from PyQt5.QtWidgets import QWidget
class FaceWidget(QWidget, Ui_Form):
def __init__(self):
super().__init__()
self.setupUi(self)
```
#### File: CAMOnion/widgets/positionwidget.py
```python
from CAMOnion.ui.camo_position_widget_ui import Ui_Form
from CAMOnion.ui.camo_position_frame_ui import Ui_Frame
from CAMOnion.core.widget_tools import get_combo_data
from PyQt5.QtWidgets import QFrame, QWidget
from PyQt5 import QtCore as qc
class PositionWidget(QFrame, Ui_Frame, QWidget):
change_current_origin = qc.pyqtSignal(object)
change_active_setup = qc.pyqtSignal(object)
def __init__(self):
super().__init__()
self.setupUi(self)
self.origin_combo.currentIndexChanged.connect(self.emit_change_origin)
self.active_setup_combo.currentIndexChanged.connect(self.emit_change_active_setup)
def emit_change_origin(self):
self.change_current_origin.emit(get_combo_data(self.origin_combo))
def emit_change_active_setup(self):
self.change_active_setup.emit(get_combo_data(self.active_setup_combo))
``` |
{
"source": "JonRob812/SuperDuper",
"score": 2
} |
#### File: PyInstaller/building/utils.py
```python
import glob
import hashlib
import os
import os.path
import pkgutil
import platform
import shutil
import sys
import struct
from PyInstaller.config import CONF
from .. import compat
from ..compat import is_darwin, is_win, EXTENSION_SUFFIXES, \
open_file, is_py3, is_py37, is_cygwin
from ..depend import dylib
from ..depend.bindepend import match_binding_redirect
from ..utils import misc
from ..utils.misc import load_py_data_struct, save_py_data_struct
from .. import log as logging
if is_win:
from ..utils.win32 import winmanifest, winresource
logger = logging.getLogger(__name__)
#-- Helpers for checking guts.
#
# NOTE: By _GUTS we mean the intermediate files and data structures that
# PyInstaller creates for bundling files and creating the final executable.
def _check_guts_eq(attr, old, new, last_build):
"""
rebuild is required if values differ
"""
if old != new:
logger.info("Building because %s changed", attr)
return True
return False
def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):
"""
rebuild is required if mtimes of files listed in old toc are newer
than last_build
if pyc=1, check for .py files, too
Use this for calculated/analysed values read from cache.
"""
for (nm, fnm, typ) in old:
if misc.mtime(fnm) > last_build:
logger.info("Building because %s changed", fnm)
return True
elif pyc and misc.mtime(fnm[:-1]) > last_build:
logger.info("Building because %s changed", fnm[:-1])
return True
return False
def _check_guts_toc(attr, old, toc, last_build, pyc=0):
"""
rebuild is required if either toc content changed or mtimes of
files listed in old toc are newer than last_build
if pyc=1, check for .py files, too
Use this for input parameters.
"""
return (_check_guts_eq(attr, old, toc, last_build)
or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))
#---
def add_suffix_to_extensions(toc):
"""
Returns a new TOC with proper library suffix for EXTENSION items.
"""
# TODO: Fix this recursive import
from .datastruct import TOC
new_toc = TOC()
for inm, fnm, typ in toc:
if typ == 'EXTENSION':
if is_py3:
# Change the dotted name into a relative path. This places C
# extensions in the Python-standard location. This only works
# in Python 3; see comments above
# ``sys.meta_path.append(CExtensionImporter())`` in
# ``pyimod03_importers``.
inm = inm.replace('.', os.sep)
# In some rare cases extension might already contain a suffix.
# Skip it in this case.
if os.path.splitext(inm)[1] not in EXTENSION_SUFFIXES:
# Determine the base name of the file.
if is_py3:
base_name = os.path.basename(inm)
else:
base_name = inm.rsplit('.')[-1]
assert '.' not in base_name
# Use this file's existing extension. For extensions such as
# ``libzmq.cp36-win_amd64.pyd``, we can't use
# ``os.path.splitext``, which would give only the ```.pyd`` part
# of the extension.
inm = inm + os.path.basename(fnm)[len(base_name):]
elif typ == 'DEPENDENCY':
# Use the suffix from the filename.
# TODO Verify what extensions are by DEPENDENCIES.
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc
def applyRedirects(manifest, redirects):
"""
Apply the binding redirects specified by 'redirects' to the dependent assemblies
of 'manifest'.
:param manifest:
:type manifest:
:param redirects:
:type redirects:
:return:
:rtype:
"""
redirecting = False
for binding in redirects:
for dep in manifest.dependentAssemblies:
if match_binding_redirect(dep, binding):
logger.info("Redirecting %s version %s -> %s",
binding.name, dep.version, binding.newVersion)
dep.version = binding.newVersion
redirecting = True
return redirecting
def checkCache(fnm, strip=False, upx=False, upx_exclude=None, dist_nm=None):
"""
Cache prevents preprocessing binary files again and again.
'dist_nm' Filename relative to dist directory. We need it on Mac
to determine level of paths for @loader_path like
'@loader_path/../../' for qt4 plugins.
"""
from ..config import CONF
    # On darwin a cache is required anyway to keep the libraries
    # with relative install names. Caching on darwin does not work
    # since we need to modify binary headers to use relative paths
    # to dll dependencies starting with '@loader_path'.
if not strip and not upx and not is_darwin and not is_win:
return fnm
if dist_nm is not None and ":" in dist_nm:
# A file embedded in another pyinstaller build via multipackage
# No actual file exists to process
return fnm
    strip = bool(strip)
upx_exclude = upx_exclude or []
upx = (upx and (is_win or is_cygwin) and
os.path.normcase(os.path.basename(fnm)) not in upx_exclude)
# Load cache index
# Make cachedir per Python major/minor version.
# This allows parallel building of executables with different
# Python versions as one user.
pyver = ('py%d%s') % (sys.version_info[0], sys.version_info[1])
arch = platform.architecture()[0]
cachedir = os.path.join(CONF['cachedir'], 'bincache%d%d_%s_%s' % (strip, upx, pyver, arch))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
cacheindexfn = os.path.join(cachedir, "index.dat")
if os.path.exists(cacheindexfn):
try:
cache_index = load_py_data_struct(cacheindexfn)
except Exception as e:
# tell the user they may want to fix their cache
# .. however, don't delete it for them; if it keeps getting
# corrupted, we'll never find out
logger.warn("pyinstaller bincache may be corrupted; "
"use pyinstaller --clean to fix")
raise
else:
cache_index = {}
# Verify if the file we're looking for is present in the cache.
    # Use dist_nm if given to avoid different extension modules
    # sharing the same basename getting corrupted.
if dist_nm:
basenm = os.path.normcase(dist_nm)
else:
basenm = os.path.normcase(os.path.basename(fnm))
# Binding redirects should be taken into account to see if the file
# needs to be reprocessed. The redirects may change if the versions of dependent
# manifests change due to system updates.
redirects = CONF.get('binding_redirects', [])
digest = cacheDigest(fnm, redirects)
cachedfile = os.path.join(cachedir, basenm)
cmd = None
if basenm in cache_index:
if digest != cache_index[basenm]:
os.remove(cachedfile)
else:
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
# Optionally change manifest and its deps to private assemblies
if fnm.lower().endswith(".manifest"):
manifest = winmanifest.Manifest()
manifest.filename = fnm
with open(fnm, "rb") as f:
manifest.parse_string(f.read())
if CONF.get('win_private_assemblies', False):
if manifest.publicKeyToken:
logger.info("Changing %s into private assembly", os.path.basename(fnm))
manifest.publicKeyToken = None
for dep in manifest.dependentAssemblies:
# Exclude common-controls which is not bundled
if dep.name != "Microsoft.Windows.Common-Controls":
dep.publicKeyToken = None
applyRedirects(manifest, redirects)
manifest.writeprettyxml(cachedfile)
return cachedfile
if upx:
if strip:
fnm = checkCache(fnm, strip=True, upx=False)
bestopt = "--best"
# FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)
# A better configure-time check is due.
if CONF["hasUPX"] >= (3,) and os.name == "nt":
bestopt = "--lzma"
upx_executable = "upx"
if CONF.get('upx_dir'):
upx_executable = os.path.join(CONF['upx_dir'], upx_executable)
cmd = [upx_executable, bestopt, "-q", cachedfile]
else:
if strip:
strip_options = []
if is_darwin:
# The default strip behaviour breaks some shared libraries
# under Mac OSX.
# -S = strip only debug symbols.
strip_options = ["-S"]
cmd = ["strip"] + strip_options + [cachedfile]
if not os.path.exists(os.path.dirname(cachedfile)):
os.makedirs(os.path.dirname(cachedfile))
    # There are some known issues with 'shutil.copy2' on Mac OS X 10.11
    # when copying st_flags. Issue #1650.
    # 'shutil.copy' also copies permission bits and should be sufficient for
    # PyInstaller purposes.
shutil.copy(fnm, cachedfile)
# TODO find out if this is still necessary when no longer using shutil.copy2()
if hasattr(os, 'chflags'):
        # Some libraries on FreeBSD have the immutable flag set (libthr.so.3, for example).
        # If the flag remains set, os.chmod will fail with:
        # OSError: [Errno 1] Operation not permitted.
try:
os.chflags(cachedfile, 0)
except OSError:
pass
os.chmod(cachedfile, 0o755)
if os.path.splitext(fnm.lower())[1] in (".pyd", ".dll"):
# When shared assemblies are bundled into the app, they may optionally be
# changed into private assemblies.
try:
res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))
except winresource.pywintypes.error as e:
if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
# Not a win32 PE file
pass
else:
logger.error(os.path.abspath(cachedfile))
raise
else:
if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):
for name in res[winmanifest.RT_MANIFEST]:
for language in res[winmanifest.RT_MANIFEST][name]:
try:
manifest = winmanifest.Manifest()
manifest.filename = ":".join([cachedfile,
str(winmanifest.RT_MANIFEST),
str(name),
str(language)])
manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language],
False)
except Exception as exc:
logger.error("Cannot parse manifest resource %s, "
"%s", name, language)
logger.error("From file %s", cachedfile, exc_info=1)
else:
# optionally change manifest to private assembly
private = CONF.get('win_private_assemblies', False)
if private:
if manifest.publicKeyToken:
logger.info("Changing %s into a private assembly",
os.path.basename(fnm))
manifest.publicKeyToken = None
# Change dep to private assembly
for dep in manifest.dependentAssemblies:
# Exclude common-controls which is not bundled
if dep.name != "Microsoft.Windows.Common-Controls":
dep.publicKeyToken = None
redirecting = applyRedirects(manifest, redirects)
if redirecting or private:
try:
manifest.update_resources(os.path.abspath(cachedfile),
[name],
[language])
except Exception as e:
logger.error(os.path.abspath(cachedfile))
raise
if cmd:
logger.info("Executing - " + ' '.join(cmd))
# terminates if execution fails
compat.exec_command(*cmd)
# update cache index
cache_index[basenm] = digest
save_py_data_struct(cacheindexfn, cache_index)
# On Mac OS X we need relative paths to dll dependencies
# starting with @executable_path
if is_darwin:
dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)
return cachedfile
def cacheDigest(fnm, redirects):
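    # Hash the file contents in 16 KiB chunks together with any binding
    # redirects, so a cache entry is invalidated when either one changes.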
hasher = hashlib.md5()
with open(fnm, "rb") as f:
for chunk in iter(lambda: f.read(16 * 1024), b""):
hasher.update(chunk)
if redirects:
redirects = str(redirects)
if is_py3:
redirects = redirects.encode('utf-8')
hasher.update(redirects)
digest = bytearray(hasher.digest())
return digest
def _check_path_overlap(path):
"""
Check that path does not overlap with WORKPATH or SPECPATH (i.e.
WORKPATH and SPECPATH may not start with path, which could be
caused by a faulty hand-edited specfile)
Raise SystemExit if there is overlap, return True otherwise
"""
from ..config import CONF
specerr = 0
if CONF['workpath'].startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'WORKPATH (%s)', path, CONF['workpath'])
specerr += 1
if CONF['specpath'].startswith(path):
logger.error('Specfile error: The output path "%s" contains '
'SPECPATH (%s)', path, CONF['specpath'])
specerr += 1
if specerr:
raise SystemExit('Error: Please edit/recreate the specfile (%s) '
'and set a different output name (e.g. "dist").'
% CONF['spec'])
return True
def _make_clean_directory(path):
"""
Create a clean directory from the given directory name
"""
if _check_path_overlap(path):
if os.path.isdir(path) or os.path.isfile(path):
try:
os.remove(path)
except OSError:
_rmtree(path)
os.makedirs(path)
def _rmtree(path):
"""
Remove directory and all its contents, but only after user confirmation,
or if the -y option is set
"""
from ..config import CONF
if CONF['noconfirm']:
choice = 'y'
elif sys.stdout.isatty():
choice = compat.stdin_input('WARNING: The output directory "%s" and ALL ITS '
'CONTENTS will be REMOVED! Continue? (y/N)' % path)
else:
raise SystemExit('Error: The output directory "%s" is not empty. '
'Please remove all its contents or use the '
'-y option (remove output directory without '
'confirmation).' % path)
if choice.strip().lower() == 'y':
print("On your own risk, you can use the option `--noconfirm` "
"to get rid of this question.")
logger.info('Removing dir %s', path)
shutil.rmtree(path)
else:
raise SystemExit('User aborted')
# TODO Refactor to prohibit empty target directories. As the docstring
# below documents, this function currently permits the second item of each
# 2-tuple in "hook.datas" to be the empty string, in which case the target
# directory defaults to the source directory's basename. However, this
# functionality is very fragile and hence bad. Instead:
#
# * An exception should be raised if such item is empty.
# * All hooks currently passing the empty string for such item (e.g.,
#   "hooks/hook-babel.py", "hooks/hook-matplotlib.py") should be refactored
#   to instead pass such basename.
def format_binaries_and_datas(binaries_or_datas, workingdir=None):
"""
Convert the passed list of hook-style 2-tuples into a returned set of
`TOC`-style 2-tuples.
Elements of the passed list are 2-tuples `(source_dir_or_glob, target_dir)`.
Elements of the returned set are 2-tuples `(target_file, source_file)`.
For backwards compatibility, the order of elements in the former tuples are
the reverse of the order of elements in the latter tuples!
Parameters
----------
binaries_or_datas : list
List of hook-style 2-tuples (e.g., the top-level `binaries` and `datas`
attributes defined by hooks) whose:
* The first element is either:
* A glob matching only the absolute or relative paths of source
non-Python data files.
* The absolute or relative path of a source directory containing only
source non-Python data files.
        * The second element is the relative path of the target directory
into which these source files will be recursively copied.
If the optional `workingdir` parameter is passed, source paths may be
either absolute or relative; else, source paths _must_ be absolute.
workingdir : str
Optional absolute path of the directory to which all relative source
paths in the `binaries_or_datas` parameter will be prepended by (and
hence converted into absolute paths) _or_ `None` if these paths are to
be preserved as relative. Defaults to `None`.
Returns
----------
set
Set of `TOC`-style 2-tuples whose:
* First element is the absolute or relative path of a target file.
* Second element is the absolute or relative path of the corresponding
source file to be copied to this target file.
"""
toc_datas = set()
for src_root_path_or_glob, trg_root_dir in binaries_or_datas:
if not trg_root_dir:
raise SystemExit("Empty DEST not allowed when adding binary "
"and data files. "
"Maybe you want to used %r.\nCaused by %r." %
(os.curdir, src_root_path_or_glob))
# Convert relative to absolute paths if required.
if workingdir and not os.path.isabs(src_root_path_or_glob):
src_root_path_or_glob = os.path.join(
workingdir, src_root_path_or_glob)
# Normalize paths.
src_root_path_or_glob = os.path.normpath(src_root_path_or_glob)
if os.path.isfile(src_root_path_or_glob):
src_root_paths = [src_root_path_or_glob]
else:
# List of the absolute paths of all source paths matching the
# current glob.
src_root_paths = glob.glob(src_root_path_or_glob)
if not src_root_paths:
msg = 'Unable to find "%s" when adding binary and data files.' % (
src_root_path_or_glob)
# on Debian/Ubuntu, missing pyconfig.h files can be fixed with
# installing python-dev
if src_root_path_or_glob.endswith("pyconfig.h"):
msg += """This would mean your Python installation doesn't
come with proper library files. This usually happens due to a missing
development package, or unsuitable build parameters of the Python installation.
* On Debian/Ubuntu, you would need to install Python development packages
* apt-get install python3-dev
* apt-get install python-dev
* If you're building Python by yourself, please rebuild your Python with
`--enable-shared` (or, `--enable-framework` on Darwin)
"""
raise SystemExit(msg)
for src_root_path in src_root_paths:
if os.path.isfile(src_root_path):
# Normalizing the result to remove redundant relative
# paths (e.g., removing "./" from "trg/./file").
toc_datas.add((
os.path.normpath(os.path.join(
trg_root_dir, os.path.basename(src_root_path))),
os.path.normpath(src_root_path)))
elif os.path.isdir(src_root_path):
for src_dir, src_subdir_basenames, src_file_basenames in \
os.walk(src_root_path):
# Ensure the current source directory is a subdirectory
# of the passed top-level source directory. Since
# os.walk() does *NOT* follow symlinks by default, this
# should be the case. (But let's make sure.)
assert src_dir.startswith(src_root_path)
# Relative path of the current target directory,
# obtained by:
#
# * Stripping the top-level source directory from the
# current source directory (e.g., removing "/top" from
# "/top/dir").
# * Normalizing the result to remove redundant relative
# paths (e.g., removing "./" from "trg/./file").
trg_dir = os.path.normpath(os.path.join(
trg_root_dir,
os.path.relpath(src_dir, src_root_path)))
for src_file_basename in src_file_basenames:
src_file = os.path.join(src_dir, src_file_basename)
if os.path.isfile(src_file):
# Normalize the result to remove redundant relative
# paths (e.g., removing "./" from "trg/./file").
toc_datas.add((
os.path.normpath(
os.path.join(trg_dir, src_file_basename)),
os.path.normpath(src_file)))
return toc_datas
def _load_code(modname, filename):
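    # Resolve the package path and ask pkgutil for an importer/loader for the
    # module; fall back to compiling the raw source below if no loader can
    # provide a code object (e.g. for scripts without a ``.py`` extension).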
path_item = os.path.dirname(filename)
if os.path.basename(filename).startswith('__init__.py'):
# this is a package
path_item = os.path.dirname(path_item)
if os.path.basename(path_item) == '__pycache__':
path_item = os.path.dirname(path_item)
importer = pkgutil.get_importer(path_item)
package, _, modname = modname.rpartition('.')
if hasattr(importer, 'find_loader'):
loader, portions = importer.find_loader(modname)
else:
loader = importer.find_module(modname)
logger.debug('Compiling %s', filename)
if loader and hasattr(loader, 'get_code'):
return loader.get_code(modname)
else:
# Just as ``python foo.bar`` will read and execute statements in
# ``foo.bar``, even though it lacks the ``.py`` extension, so
# ``pyinstaller foo.bar`` should also work. However, Python's import
# machinery doesn't load files without a ``.py`` extension. So, use
# ``compile`` instead.
#
# On a side note, neither the Python 2 nor Python 3 calls to
# ``pkgutil`` and ``find_module`` above handle modules ending in
# ``.pyw``, even though ``imp.find_module`` and ``import <name>`` both
# work. This code supports ``.pyw`` files.
# Open the source file in binary mode and allow the `compile()` call to
# detect the source encoding.
with open_file(filename, 'rb') as f:
source = f.read()
return compile(source, filename, 'exec')
def get_code_object(modname, filename):
"""
Get the code-object for a module.
    This is an extra-simple version for compiling a module. It's
    not worth spending more effort here, as it is only used in the
    rare case where outXX-Analysis.toc exists but outXX-PYZ.toc does
    not.
"""
try:
if filename in ('-', None):
# This is a NamespacePackage, modulegraph marks them
# by using the filename '-'. (But wants to use None,
# so check for None, too, to be forward-compatible.)
logger.debug('Compiling namespace package %s', modname)
txt = '#\n'
return compile(txt, filename, 'exec')
else:
logger.debug('Compiling %s', filename)
co = _load_code(modname, filename)
if not co:
raise ValueError("Module file %s is missing" % filename)
return co
except SyntaxError as e:
print("Syntax error in ", filename)
print(e.args)
raise
def strip_paths_in_code(co, new_filename=None):
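    # Rewrite co_filename of this code object (and, recursively, of any nested
    # code objects in co_consts) so absolute build paths from sys.path and
    # pathex are stripped, leaving relative filenames embedded in the bytecode.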
# Paths to remove from filenames embedded in code objects
replace_paths = sys.path + CONF['pathex']
# Make sure paths end with os.sep
replace_paths = [os.path.join(f, '') for f in replace_paths]
if new_filename is None:
original_filename = os.path.normpath(co.co_filename)
for f in replace_paths:
if original_filename.startswith(f):
new_filename = original_filename[len(f):]
break
else:
return co
code_func = type(co)
consts = tuple(
strip_paths_in_code(const_co, new_filename)
if isinstance(const_co, code_func) else const_co
for const_co in co.co_consts
)
if hasattr(co, 'replace'): # is_py38
return co.replace(co_consts=consts, co_filename=new_filename)
elif hasattr(co, 'co_kwonlyargcount'):
        # co_kwonlyargcount was added in Python 3.
return code_func(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, consts, co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
else:
return code_func(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, consts, co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
def fake_pyc_timestamp(buf):
"""
Reset the timestamp from a .pyc-file header to a fixed value.
This enables deterministic builds without having to set pyinstaller
source metadata (mtime) since that changes the pyc-file contents.
_buf_ must at least contain the full pyc-file header.
"""
assert buf[:4] == compat.BYTECODE_MAGIC, \
"Expected pyc magic {}, got {}".format(compat.BYTECODE_MAGIC, buf[:4])
start, end = 4, 8
if is_py37:
# see https://www.python.org/dev/peps/pep-0552/
(flags,) = struct.unpack_from(">I", buf, 4)
if flags & 1:
# We are in the future and hash-based pyc-files are used, so
# clear "check_source" flag, since there is no source
buf[4:8] = struct.pack(">I", flags ^ 2)
return buf
else:
# no hash-based pyc-file, timestamp is the next field
start, end = 8, 12
ts = b'pyi0' # So people know where this comes from
return buf[:start] + ts + buf[end:]
```
#### File: PyInstaller/utils/tests.py
```python
from __future__ import print_function
import os
import sys
import traceback
import distutils.ccompiler
import inspect
import textwrap
import shutil
import pytest
from _pytest.runner import Skipped
from PyInstaller.compat import is_darwin, is_win, is_linux, is_py2, is_py3
# Wrap some pytest decorators to be consistent in tests.
parametrize = pytest.mark.parametrize
skipif = pytest.mark.skipif
skipif_notwin = skipif(not is_win, reason='requires Windows')
skipif_notosx = skipif(not is_darwin, reason='requires Mac OS X')
skipif_notlinux = skipif(not is_linux, reason='requires GNU/Linux')
skipif_win = skipif(is_win, reason='does not run on Windows')
skipif_linux = skipif(is_linux, reason='does not run on GNU/Linux')
skipif_winorosx = skipif(is_win or is_darwin, reason='does not run on Windows or Mac OS X')
xfail = pytest.mark.xfail
xfail_py2 = xfail(is_py2, reason='fails with Python 2.7')
xfail_py3 = xfail(is_py3, reason='fails with Python 3')
def _check_for_compiler():
import tempfile, sys
# change to some tempdir since cc.has_function() would compile into the
# current directory, leaving garbage
old_wd = os.getcwd()
tmp = tempfile.mkdtemp()
os.chdir(tmp)
cc = distutils.ccompiler.new_compiler()
if is_win:
try:
cc.initialize()
has_compiler = True
# This error is raised on Windows if a compiler can't be found.
except distutils.errors.DistutilsPlatformError:
has_compiler = False
else:
# The C standard library contains the ``clock`` function. Use that to
# determine if a compiler is installed. This doesn't work on Windows::
#
# Users\bjones\AppData\Local\Temp\a.out.exe.manifest : general error
# c1010070: Failed to load and parse the manifest. The system cannot
# find the file specified.
has_compiler = cc.has_function('clock', includes=['time.h'])
os.chdir(old_wd)
    # TODO: Find a way to remove the generated clockXXXX.c file, too
shutil.rmtree(tmp)
return has_compiler
# A decorator to skip tests if a C compiler isn't detected.
has_compiler = _check_for_compiler()
skipif_no_compiler = skipif(not has_compiler, reason="Requires a C compiler")
def skip(reason):
"""
Unconditionally skip the currently decorated test with the passed reason.
This decorator is intended to be called either directly as a function _or_
indirectly as a decorator. This differs from both:
* `pytest.skip()`, intended to be called only directly as a function.
Attempting to call this function indirectly as a decorator produces
extraneous ignorable messages on standard output resembling
`SKIP [1] PyInstaller/utils/tests.py:65: could not import 'win32com'`.
* `pytest.mark.skip()`, intended to be called only indirectly as a
decorator. Attempting to call this decorator directly as a function
reduces to a noop.
Parameters
----------
reason : str
Human-readable message justifying the skipping of this test.
"""
return skipif(True, reason=reason)
def importorskip(modname, minversion=None):
"""
This decorator skips the currently decorated test if the module with the
passed name is unimportable _or_ importable but of a version less than the
passed minimum version if any.
    This decorator is intentionally named `importorskip` to coincide with the
    `pytest.importorskip()` function internally called by this decorator.
Parameters
----------
modname : str
Fully-qualified name of the module required by this test.
minversion : str
Optional minimum version of this module as a string (e.g., `3.14.15`)
required by this test _or_ `None` if any module version is acceptable.
Defaults to `None`.
Returns
----------
pytest.skipif
Decorator describing these requirements if unmet _or_ the identity
decorator otherwise (i.e., if these requirements are met).
"""
# Defer to the eponymous function of the same name.
try:
pytest.importorskip(modname, minversion)
# Silently convert expected import and syntax errors into @skip decoration.
except Skipped as exc:
return skip(str(exc))
# Convert all other unexpected errors into the same decoration.
except Exception as exc:
# For debuggability, print a verbose stacktrace.
print('importorskip: Exception in module "{}":'.format(modname))
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
return skip(str(exc))
# Else, this module is importable and optionally satisfies this minimum
# version. Reduce this decoration to a noop.
else:
return pytest.mark.skipif(False, reason='')
def gen_sourcefile(tmpdir, source, test_id=None):
"""
Generate a source file for testing.
The source will be written into a file named like the
test-function. This file will then be passed to `test_script`.
    If you need another related file, e.g. a `.toc` file for
    testing the content, put it at the normal place. Just mind
    to take the basename from the test-function's name.
    :param source: Source code to create the executable from. This
will be saved into a temporary file which is
then passed on to `test_script`.
:param test_id: Test-id for parametrized tests. If given, it
will be appended to the script filename,
separated by two underscores.
Ensure that the caller of `test_source` is in a UTF-8
encoded file with the correct '# -*- coding: utf-8 -*-' marker.
"""
if is_py2:
if isinstance(source, str):
source = source.decode('UTF-8')
testname = inspect.stack()[1][3]
if test_id:
# For parametrized test append the test-id.
testname = testname + '__' + test_id
# Periods are not allowed in Python module names.
testname = testname.replace('.', '_')
scriptfile = tmpdir / testname + '.py'
source = textwrap.dedent(source)
with scriptfile.open('w', encoding='utf-8') as ofh:
print(u'# -*- coding: utf-8 -*-', file=ofh)
print(source, file=ofh)
return scriptfile
``` |
{
"source": "jonrossclaytor/black-friday",
"score": 3
} |
#### File: black-friday/trainer/hp_tuning.py
```python
from google.cloud import storage
import json
import logging
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
def complete_hp_tuning(x_train_part, y_train_part, project_id, bucket_name, num_iterations):
# perform hyperparameter tuning
best_accuracy = -1
for i in range(0, num_iterations):
        # random split for train and validation
x_train, x_test, y_train, y_test = train_test_split(x_train_part, y_train_part, test_size=0.2)
# randomly assign hyperparameters
n_estimators = np.random.randint(10, 1000)
max_depth = np.random.randint(10, 1000)
min_samples_split = np.random.randint(2, 10)
min_samples_leaf = np.random.randint(1, 10)
        max_features = ['auto','sqrt','log2',None][np.random.randint(0, 4)]
# fit the model on the training set with the parameters
rf_model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, max_features=max_features)
rf_model.fit(x_train, y_train)
# make predictions on the test set
y_pred = rf_model.predict(x_test)
# assess the accuracy
total_preds = 0
total_correct = 0
for j in range(0, y_pred.shape[0]):
total_preds += 1
if np.array_equal(y_pred[j], y_test.values[j]):
total_correct += 1
accuracy = (total_correct / total_preds)
# determine whether to update parameters
if accuracy > best_accuracy:
best_accuracy = accuracy
best_n_estimators = n_estimators
best_max_depth = max_depth
best_min_samples_split = min_samples_split
best_min_samples_leaf = min_samples_leaf
best_max_features = max_features
# create a dictionary with the results
best_params = {'n_estimators':best_n_estimators,
'max_depth':best_max_depth,
'min_samples_split':best_min_samples_split,
'min_samples_leaf':best_min_samples_leaf,
'max_features':best_max_features}
        logging.info('Completed hp tuning iteration {}, best accuracy {} with params {}'.format(str(i+1), str(best_accuracy), best_params))
# write parameters to disk
output = json.dumps(best_params)
f = open('best_params.json','w')
f.write(output)
f.close()
# upload to cloud storage
storage_client = storage.Client(project=project_id)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob('best_params.json')
blob.upload_from_filename('best_params.json')
return best_params
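# A minimal usage sketch (the project, bucket and iteration count below are
# hypothetical placeholders):
#   best = complete_hp_tuning(x_train_part, y_train_part,
#                             project_id='my-project', bucket_name='my-bucket',
#                             num_iterations=20)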
``` |
{
"source": "jonrsmart/web-scraping-challenge",
"score": 3
} |
#### File: jonrsmart/web-scraping-challenge/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import pymongo
import pandas as pd
import requests
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {"executable_path": "chromedriver.exe"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
#Featured Article Scrape
url1 = 'https://mars.nasa.gov/news/'
browser.visit(url1)
html1 = browser.html
soup1 = BeautifulSoup(html1, "html.parser")
results = soup1.find_all('div', class_='slide')
featured = []
for result in results:
title = result.find('div', class_='content_title').find('a').text
paragraph = result.find('div', class_='rollover_description_inner').text
featured.append({'title': title, 'paragraph': paragraph})
#Mars Facts Table
url2 = 'https://space-facts.com/mars/'
tables = pd.read_html(url2)
mars_data = tables[0]
mars_data.columns = ['Data', 'Value']
mars_data.set_index('Data', inplace=True)
mars_html = mars_data.to_html()
mars_html2 = mars_html.replace('\n', '')
#Mars Images Scrape
url3 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url3)
image_info = []
titles = []
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
images = soup.find_all('div', class_='item')
for image in images:
title = image.find('h3').text
titles.append(title)
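    # Visit each hemisphere's detail page to grab the full-resolution image
    # URL, then return to the index page before the next click.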
for title in titles:
browser.links.find_by_partial_text(title).click()
html2 = browser.html
soup = BeautifulSoup(html2, 'html.parser')
bigpic = soup.find_all('div', class_='downloads')[0].li.a['href']
hemi = soup.find('h2', class_='title').text
image_info.append({'title':hemi, 'img_url':bigpic})
browser.visit(url3)
mars_dictionary = {'featured': featured,'mars_facts':mars_html2, 'mars_images':image_info}
return mars_dictionary
``` |
{
"source": "jonrtaylor/zipline-intro",
"score": 2
} |
#### File: zipline-intro/intro_zipline/winners.py
```python
import zipline.api as algo
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import AverageDollarVolume, Returns
from zipline.finance.execution import MarketOrder
def initialize(context):
"""
Called once at the start of a backtest, and once per day at
the start of live trading.
"""
# Attach the pipeline to the algo
algo.attach_pipeline(make_pipeline(), 'pipeline')
# Rebalance every day, 30 minutes before market close.
algo.schedule_function(
rebalance,
algo.date_rules.every_day(),
algo.time_rules.market_close(minutes=30),
)
def make_pipeline():
"""
Create a pipeline that filters by dollar volume and
calculates 1-year return.
"""
pipeline = Pipeline(
columns={
"1y_returns": Returns(window_length=252),
},
screen=AverageDollarVolume(window_length=30) > 10e6
)
return pipeline
def before_trading_start(context, data):
"""
Called every day before market open.
"""
factors = algo.pipeline_output('pipeline')
# Get the top and bottom 3 stocks by 1-year return
returns = factors["1y_returns"].sort_values(ascending=False)
context.winners = returns.index[:3]
def rebalance(context, data):
"""
Execute orders according to our schedule_function() timing.
"""
# calculate intraday returns for our winners
current_prices = data.current(context.winners, "price")
prior_closes = data.history(context.winners, "close", 2, "1d").iloc[0]
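    # The 2-bar daily history ends with today, so .iloc[0] above is the prior
    # session's close.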
intraday_returns = (current_prices - prior_closes) / prior_closes
positions = context.portfolio.positions
# Exit positions we no longer want to hold
for asset, position in positions.items():
if asset not in context.winners:
algo.order_target_value(asset, 0, style=MarketOrder())
# Enter long positions
for asset in context.winners:
# if already long, nothing to do
if asset in positions:
continue
# if the stock is up for the day, don't enter
if intraday_returns[asset] > 0:
continue
# otherwise, buy a fixed $100K position per asset
algo.order_target_value(asset, 100e3, style=MarketOrder())
``` |
{
"source": "jonrzhang/MegEngine",
"score": 2
} |
#### File: megengine/functional/math.py
```python
import collections
import functools
import math
import numbers
from typing import Optional, Sequence, Tuple, Union
from ..core._imperative_rt.core2 import apply
from ..core.ops import builtin
from ..core.ops.special import Const
from ..core.tensor import utils
from ..tensor import Tensor
from .elemwise import clip, exp, log, log1p
from .tensor import reshape, squeeze
__all__ = [
"argmax",
"argmin",
"argsort",
"isinf",
"isnan",
"max",
"mean",
"min",
"norm",
"normalize",
"prod",
"sign",
"sort",
"std",
"sum",
"topk",
"var",
]
def isnan(inp: Tensor) -> Tensor:
r"""
Returns a new tensor representing if each element is ``NaN`` or not.
:param inp: input tensor.
:return: result tensor.
Examples:
.. testcode::
from megengine import tensor
import megengine.functional as F
x = tensor([1, float("nan"), 0])
print(F.isnan(x).numpy())
Outputs:
.. testoutput::
[False True False]
"""
return inp != inp
def isinf(inp: Tensor) -> Tensor:
r"""
Returns a new tensor representing if each element is ``Inf`` or not.
:param inp: input tensor.
:return: result tensor.
Examples:
.. testcode::
from megengine import tensor
import megengine.functional as F
x = tensor([1, float("inf"), 0])
print(F.isinf(x).numpy())
Outputs:
.. testoutput::
[False True False]
"""
return abs(inp).astype("float32") == float("inf")
def sign(inp: Tensor):
r"""
Returns a new tensor representing the sign of each element in input tensor.
:param: input tensor.
:return: the sign of input tensor.
Examples:
.. testcode::
from megengine import tensor
import megengine.functional as F
x = tensor([1, -1, 0])
print(F.sign(x).numpy())
Outputs:
.. testoutput::
[ 1 -1 0]
"""
return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)
def sum(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
r"""
Returns the sum of input tensor along given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced.
Default: None
:param keepdims: whether the output tensor has axis retained or not.
Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.sum(x)
print(out.numpy())
Outputs:
.. testoutput::
21
"""
return inp.sum(axis=axis, keepdims=keepdims)
def prod(
inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None, keepdims=False
) -> Tensor:
r"""
Returns the product of input tensor along given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.prod(x)
print(out.numpy())
Outputs:
.. testoutput::
720
"""
return inp.prod(axis=axis, keepdims=keepdims)
def mean(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
"""
Returns the mean value of input tensor along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.mean(x)
print(out.numpy())
Outputs:
.. testoutput::
3.5
"""
return inp.mean(axis=axis, keepdims=keepdims)
def var(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
"""
Returns the variance value of input tensor along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
out = F.var(data)
print(out.numpy().round(decimals=4))
Outputs:
.. testoutput::
2.9167
"""
if axis is None:
m = mean(inp, axis=axis, keepdims=False)
else:
m = mean(inp, axis=axis, keepdims=True)
v = inp - m
return mean(v ** 2, axis=axis, keepdims=keepdims)
def std(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
"""
Returns the standard deviation of input tensor along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
out = F.std(data, axis=1)
print(out.numpy().round(decimals=4))
Outputs:
.. testoutput::
[0.8165 0.8165]
"""
return var(inp, axis=axis, keepdims=keepdims) ** 0.5
def min(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
r"""
Returns the min value of input tensor along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.min(x)
print(out.numpy())
Outputs:
.. testoutput::
1
"""
return inp.min(axis=axis, keepdims=keepdims)
def max(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
r"""
Returns the max value of the input tensor along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.max(x)
print(out.numpy())
Outputs:
.. testoutput::
6
"""
return inp.max(axis=axis, keepdims=keepdims)
def norm(
inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
):
"""
Calculates ``p``-norm of input tensor along
given axis.
:param inp: input tensor.
:param ord: power of value applied to inp. Default: 2
:param axis: dimension to reduce. If None, input must be a vector. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(-3, 3, dtype=np.float32))
out = F.norm(x)
print(out.numpy().round(decimals=4))
Outputs:
.. testoutput::
4.3589
"""
if axis is None:
if inp.ndim != 1:
raise TypeError("axis is required unless input is a vector")
if ord is None:
ord = 2
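    # Special cases: the 0-"norm" counts non-zero elements, and the +/-inf
    # norms reduce to the max/min absolute value.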
if ord == 0:
return sum(inp != 0, axis=axis, keepdims=keepdims)
if ord == math.inf:
return max(abs(inp))
if ord == -math.inf:
return min(abs(inp))
return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
def argmin(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
r"""
Returns the indices of the minimum values along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.argmin(x)
print(out.numpy())
Outputs:
.. testoutput::
0
"""
if isinstance(axis, collections.abc.Iterable):
axis = list(axis)
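        # Reduce the highest axes first so the remaining axis indices stay
        # valid while reduced dimensions are squeezed out.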
axis.sort(reverse=True)
for ai in axis:
op = builtin.Argmin(axis=ai)
(inp,) = apply(op, inp)
if not keepdims:
inp = squeeze(inp, ai)
return inp
if axis is None:
assert not keepdims, "can not set axis=None and keepdims=True"
inp = inp.flatten()
axis = 0
op = builtin.Argmin(axis=axis)
(result,) = apply(op, inp)
if not keepdims:
result = squeeze(result, axis)
return result
def argmax(
inp: Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
) -> Tensor:
r"""
Returns the indices of the maximum values along
given axis. If axis is a list of dimensions,
reduce over all of them.
:param inp: input tensor.
:param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
:param keepdims: whether the output tensor has axis retained or not. Default: False
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.argmax(x)
print(out.numpy())
Outputs:
.. testoutput::
5
"""
if isinstance(axis, collections.abc.Iterable):
axis = list(axis)
axis.sort(reverse=True)
for ai in axis:
op = builtin.Argmax(axis=ai)
(inp,) = apply(op, inp)
if not keepdims:
inp = squeeze(inp, ai)
return inp
if axis is None:
assert not keepdims, "can not set axis=None and keepdims=True"
inp = inp.flatten()
axis = 0
op = builtin.Argmax(axis=axis)
(result,) = apply(op, inp)
if not keepdims:
result = squeeze(result, axis)
return result
def normalize(
inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
) -> Tensor:
r"""
Performs :math:`L_p` normalization of input tensor along
given axis.
For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
:math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
.. math::
v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
:param inp: input tensor.
:param ord: power of value applied to input tensor. Default: 2
:param axis: dimension to reduce.If None, input must be a vector. Default: None
:param eps: a small value to avoid division by zero. Default: 1e-12
:return: normalized output tensor.
"""
if axis is None:
return inp / clip(norm(inp, ord, axis), lower=eps)
else:
return inp / clip(norm(inp, ord, axis, keepdims=True), lower=eps)
def argsort(inp: Tensor, descending: bool = False) -> Tensor:
r"""
Returns the indices that would sort the input tensor.
:param inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
:param descending: sort in descending order, where the largest comes first. Default: False
:return: indices of int32 indicates how to sort the input.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([1,2], dtype=np.float32))
indices = F.argsort(x)
print(indices.numpy())
Outputs:
.. testoutput::
[0 1]
"""
assert len(inp.shape) <= 2, "Input should be 1d or 2d"
if descending:
order = "DESCENDING"
else:
order = "ASCENDING"
op = builtin.Argsort(order=order)
if len(inp.shape) == 1:
inp = inp.reshape(1, -1)
_, result = apply(op, inp)
return result[0]
_, result = apply(op, inp)
return result
def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
r"""
Returns sorted tensor and the indices would sort the input tensor.
:param inp: input tensor. If it's 2d, the result would be sorted by row.
:param descending: sort in descending order, where the largest comes first. Default: False
:return: tuple of two tensors `(sorted_tensor, indices_of_int32)`.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([1,2], dtype=np.float32))
out, indices = F.sort(x)
print(out.numpy())
Outputs:
.. testoutput::
[1. 2.]
"""
assert len(inp.shape) <= 2, "Input should be 1d or 2d"
if descending:
order = "DESCENDING"
else:
order = "ASCENDING"
op = builtin.Argsort(order=order)
if len(inp.shape) == 1:
inp = inp.reshape(1, -1)
tns, ind = apply(op, inp)
return tns[0], ind[0]
tns, ind = apply(op, inp)
return tns, ind
def topk(
inp: Tensor,
k: int,
descending: bool = False,
kth_only: bool = False,
no_sort: bool = False,
) -> Tuple[Tensor, Tensor]:
r"""
Selects the ``Top-K``(by default) smallest elements of 2d matrix by row.
:param inp: input tensor. If input tensor is 2d, each row will be sorted.
:param k: number of elements needed.
:param descending: if True, return the largest elements instead. Default: False
:param kth_only: if True, only the k-th element will be returned. Default: False
:param no_sort: if True, the returned elements can be unordered. Default: False
:return: tuple of two tensors `(topk_tensor, indices_of_int32)`.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
top, indices = F.topk(x, 5)
print(top.numpy(), indices.numpy())
Outputs:
.. testoutput::
[1. 2. 3. 4. 5.] [7 0 6 1 5]
"""
if descending:
inp = -inp
if kth_only:
mode = "KTH_ONLY"
elif no_sort:
mode = "VALUE_IDX_NOSORT"
else:
mode = "VALUE_IDX_SORTED"
op = builtin.TopK(mode=mode)
if not isinstance(k, Tensor):
(k,) = Const(k, dtype="int32", device=inp.device)()
if len(inp.shape) == 1:
inp = inp.reshape(1, -1)
res = apply(op, inp, k)
if kth_only:
tns = res[0]
else:
tns, ind = res[0][0], res[1][0]
else:
res = apply(op, inp, k)
if kth_only:
tns = res
else:
tns, ind = res[0], res[1]
if descending:
tns = -tns
return tns, ind
```
#### File: megengine/random/distribution.py
```python
from typing import Iterable, Optional
from .. import Tensor
from ..core._imperative_rt import invoke_op
from ..core._imperative_rt.core2 import apply
from ..core.ops.builtin import GaussianRNG, UniformRNG
from ..core.tensor import utils
from .rng import _random_seed_generator
__all__ = ["normal", "uniform"]
def normal(
mean: float = 0, std: float = 1, size: Optional[Iterable[int]] = None
) -> Tensor:
r"""
Random variable with Gaussian distribution :math:`N(\mu, \sigma)`.
:param size: output tensor size.
:param mean: the mean or expectation of the distribution.
:param std: the standard deviation of the distribution (variance = :math:`\sigma ^ 2`).
:return: the output tensor.
Examples:
.. testcode::
import megengine as mge
import megengine.random as rand
x = rand.normal(mean=0, std=1, size=(2, 2))
print(x.numpy())
Outputs:
.. testoutput::
:options: +SKIP
[[-0.20235455 -0.6959438 ]
[-1.4939808 -1.5824696 ]]
"""
if size is None:
size = (1,)
seed = _random_seed_generator().__next__()
op = GaussianRNG(seed=seed, mean=mean, std=std)
_ref = Tensor([], dtype="int32")
size = utils.astensor1d(size, _ref, dtype="int32")
(output,) = apply(op, size)
return output
def uniform(
low: float = 0, high: float = 1, size: Optional[Iterable[int]] = None
) -> Tensor:
r"""
    Random variable with uniform distribution :math:`U(low, high)`.
:param size: output tensor size.
:param low: lower range.
:param high: upper range.
:return: the output tensor.
Examples:
.. testcode::
import megengine as mge
import megengine.random as rand
x = rand.uniform(size=(2, 2))
print(x.numpy())
Outputs:
.. testoutput::
:options: +SKIP
[[0.76901674 0.70496535]
[0.09365904 0.62957656]]
"""
assert low < high, "Uniform is not defined when low >= high"
if size is None:
size = (1,)
seed = _random_seed_generator().__next__()
op = UniformRNG(seed=seed)
_ref = Tensor([], dtype="int32")
size = utils.astensor1d(size, _ref, dtype="int32")
(output,) = apply(op, size)
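    # The RNG op samples from U(0, 1); rescale to the requested [low, high) range.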
return low + (high - low) * output
```
#### File: test/integration/test_sgd_momentum.py
```python
import itertools
import os
import numpy as np
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.jit import trace
from megengine.module import Module
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype=np.float32)
def forward(self, x):
x = x * self.a
return x
def test_sgd_momentum():
net = Simple()
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.clear_grad()
gm = ad.GradManager().attach(net.parameters())
data = tensor([2.34])
# do a step of train
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
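    # With lr=1.0 and an initially empty buffer, the momentum buffer after one
    # step equals the gradient d(loss)/d(a) = x = 2.34.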
np.testing.assert_almost_equal(optim._state[net.a]["momentum_buffer"].numpy(), 2.34)
# do a step of infer
loss = net(data)
np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
np.testing.assert_almost_equal(optim._state[net.a]["momentum_buffer"].numpy(), 2.34)
# do a step of train
optim.clear_grad()
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
np.testing.assert_almost_equal(
optim._state[net.a]["momentum_buffer"].numpy(), 0.9 * 2.34 + 2.34, 5
)
def test_sgd_momentum_trace():
origin_inplace = os.getenv("MEGENGINE_INPLACE_UPDATE")
symbolic = (True, False)
inplace = (0, 1)
for symbolic, inplace in itertools.product(symbolic, inplace):
os.environ["MEGENGINE_INPLACE_UPDATE"] = str(inplace)
@trace(symbolic=symbolic)
def train_func(data, *, model=None, optim=None, gm=None):
optim.clear_grad()
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
return loss
@trace(symbolic=symbolic)
def eval_func(data, *, model=None, optim=None, gm=None):
loss = net(data)
return loss
net = Simple()
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
gm = ad.GradManager().attach(net.parameters())
data = tensor([2.34])
train_func(data, model=net, optim=optim, gm=gm)
np.testing.assert_almost_equal(
optim._state[net.a]["momentum_buffer"].numpy(), 2.34
)
# do 3 steps of infer
for _ in range(3):
loss = eval_func(data)
np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
np.testing.assert_almost_equal(
optim._state[net.a]["momentum_buffer"].numpy(), 2.34
)
# do a step of train
train_func(data, model=net, optim=optim, gm=gm)
np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
np.testing.assert_almost_equal(
optim._state[net.a]["momentum_buffer"].numpy(), 0.9 * 2.34 + 2.34, 5
)
if origin_inplace:
os.environ["MEGENGINE_INPLACE_UPDATE"] = origin_inplace
else:
del os.environ["MEGENGINE_INPLACE_UPDATE"]
```
#### File: unit/quantization/test_qconfig.py
```python
from functools import partial
from megengine.quantization import QConfig, tqt_qconfig
from megengine.quantization.fake_quant import TQT
def test_equal():
qconfig = QConfig(
weight_observer=None,
act_observer=None,
weight_fake_quant=partial(TQT, dtype="qint8", narrow_range=True),
act_fake_quant=partial(TQT, dtype="qint8", narrow_range=False),
)
assert qconfig == tqt_qconfig
``` |
{
"source": "Jon-Salmon/pontoon",
"score": 2
} |
#### File: sync/formats/arb.py
```python
import codecs
import copy
import json
import logging
from collections import OrderedDict
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from pontoon.sync import SyncError
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.utils import create_parent_directory
from pontoon.sync.vcs.models import VCSTranslation
log = logging.getLogger(__name__)
SCHEMA = { "type": "object" }
class ARBEntity(VCSTranslation):
"""
Represents an entity in a ARB file.
"""
def __init__(self, order, key, data):
self.key = key
self.data = data
self.order = order
self.strings = {None: self.source_string} if self.source_string else {}
@property
def source_string(self):
return self.data
@property
def source_string_plural(self):
return ''
@property
def comments(self):
return []
@property
def fuzzy(self):
return False
@fuzzy.setter
def fuzzy(self, fuzzy):
pass # We don't use fuzzy in JSON
@property
def source(self):
return []
class ARBResource(ParsedResource):
def __init__(self, path, source_resource=None):
self.path = path
self.entities = {}
self.source_resource = source_resource
# Copy entities from the source_resource if it's available.
if source_resource:
for key, entity in source_resource.entities.items():
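                # Start each copied entity with no translation data; strings
                # for this locale are filled in from the locale file if it exists.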
                data = None
self.entities[key] = ARBEntity(
entity.order,
entity.key,
data,
)
try:
with codecs.open(path, 'r', 'utf-8') as resource:
self.json_file = json.load(
resource,
object_pairs_hook=OrderedDict
)
validate(self.json_file, SCHEMA)
except (IOError, ValueError, ValidationError) as err:
# If the file doesn't exist or cannot be decoded,
# but we have a source resource,
# we can keep going, we'll just not have any translations.
if source_resource:
return
else:
raise ParseError(err)
for order, (key, data) in enumerate(self.json_file.items()):
self.entities[key] = ARBEntity(
order,
key,
data,
)
@property
def translations(self):
return sorted(self.entities.values(), key=lambda e: e.order)
def save(self, locale):
"""
Load the source resource, modify it with changes made to this
Resource instance, and save it over the locale-specific
resource.
"""
if not self.source_resource:
raise SyncError(
'Cannot save ARB resource {0}: No source resource given.'
.format(self.path)
)
with codecs.open(self.source_resource.path, 'r', 'utf-8') as resource:
json_file = json.load(resource, object_pairs_hook=OrderedDict)
try:
validate(json_file, SCHEMA)
except ValidationError as e:
raise ParseError(e)
# Iterate over a copy, leaving original free to modify
for key, value in json_file.copy().items():
entity = self.entities[key]
if entity.strings:
json_file[key] = entity.strings[None]
else:
del json_file[key]
create_parent_directory(self.path)
with codecs.open(self.path, 'w+', 'utf-8') as f:
log.debug('Saving file: %s', self.path)
f.write(json.dumps(json_file, ensure_ascii=False, indent=2))
f.write('\n') # Add newline
def parse(path, source_path=None, locale=None):
if source_path is not None:
source_resource = ARBResource(source_path)
else:
source_resource = None
return ARBResource(path, source_resource)
``` |
{
"source": "jonsaunders-git/consensus_engine",
"score": 3
} |
#### File: consensus_engine/models/voting_models.py
```python
from django.db import models
from django.contrib.auth.models import User
from consensus_engine.utils import ProposalState
class ChoiceTicketManager(models.Manager):
""" Manager for Choice Ticket data """
def my_votes(self, user):
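        # One row per current, still-active vote by this user, annotated with
        # the proposal and group names for display.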
return (ChoiceTicket.objects.filter(current=True,
user=user,
proposal_choice__deactivated_date__isnull=True,
)
.annotate(choice_text=models.F('proposal_choice__text'))
.annotate(proposal_id=models.F('proposal_choice__proposal__id'))
.annotate(proposal_name=models.F('proposal_choice__proposal__proposal_name'))
.annotate(proposal_group=models.F('proposal_choice__proposal__proposal_group__group_name'))
.values('proposal_id', 'proposal_name',
'choice_text', 'proposal_group')
.order_by('proposal_group', 'proposal_name'))
def get_current_choice(self, user, proposal):
# if the proposal state trial show the trial data otherwise always show published.
reporting_state = ProposalState.reporting_as_state(proposal.state)
try:
current_choice = (ChoiceTicket.objects
.get(user=user,
proposal_choice__proposal=proposal,
current=True, state=reporting_state))
except (KeyError, ChoiceTicket.DoesNotExist):
current_choice = None
return current_choice
class ChoiceTicket(models.Model):
""" Defines a specific choice at a specific time """
user = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
date_chosen = models.DateTimeField('date chosen')
proposal_choice = models.ForeignKey('ProposalChoice',
on_delete=models.CASCADE)
current = models.BooleanField(default=True, null=True)
state = models.IntegerField(choices=ProposalState.choices(), default=ProposalState.PUBLISHED)
objects = ChoiceTicketManager()
```
#### File: consensus_engine/tests/test_view_create_proposal.py
```python
from django.test import TestCase, RequestFactory
from .mixins import TwoUserMixin, ProposalGroupMixin, ViewMixin
from django.utils import timezone
from consensus_engine.views import CreateProposalView
from consensus_engine.forms import ProposalForm
from consensus_engine.models import Proposal
from django.core.exceptions import PermissionDenied
class CreateProposalViewTest(TwoUserMixin, TestCase,
ProposalGroupMixin, ViewMixin):
path = '/proposals/new/'
form = ProposalForm
view = CreateProposalView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def test_create_proposal(self):
dt = timezone.now()
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
self.getValidView({'proposal_name': 'test proposal',
'proposal_description': 'test description'}, postargs={'options': '0'})
q = Proposal.objects.filter(proposal_name='test proposal')
self.assertTrue(q.count() == 1)
p = q.first()
self.assertTrue(p.proposal_description == 'test description')
self.assertTrue(p.date_proposed <= timezone.now()
and p.date_proposed >= dt)
self.assertTrue(p.owned_by == self.user)
self.assertTrue(p.proposal_group is None)
def test_create_proposal_within_group(self):
pg = self.create_proposal_group()
dt = timezone.now()
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
self.getValidView(data={'proposal_name': 'test proposal',
'proposal_description': 'test description'},
viewkwargs={'proposal_group_id': pg.id}, postargs={'options': '0'})
q = Proposal.objects.filter(proposal_name='test proposal')
self.assertTrue(q.count() == 1)
p = q.first()
self.assertTrue(p.proposal_description == 'test description')
self.assertTrue(p.date_proposed <= timezone.now()
and p.date_proposed >= dt)
self.assertTrue(p.owned_by == self.user)
self.assertTrue(p.proposal_group == pg)
def test_create_proposal_within_group_not_member(self):
pg = self.create_proposal_group(owned_by=self.user2)
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
with self.assertRaises(PermissionDenied,
msg="Adding a Proposal to a group you are not a member of is not allowed"):
self.getValidView(data={'proposal_name': 'test proposal',
'proposal_description': 'test description'},
viewkwargs={'proposal_group_id': pg.id}, postargs={'options': '0'})
```
#### File: consensus_engine/tests/test_view_delete_choice.py
```python
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.sessions.middleware import SessionMiddleware
from .mixins import TwoUserMixin, ProposalGroupMixin, ViewMixin, ProposalMixin
from django.utils import timezone
from django.core.exceptions import PermissionDenied
from consensus_engine.views import DeleteProposalChoiceView
from consensus_engine.models import Proposal, ProposalChoice
class DeleteProposalChoiceViewTest(TwoUserMixin, TestCase,
ProposalMixin, ViewMixin):
path = '/proposals/1/choice/1/delete'
view = DeleteProposalChoiceView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def getSessionRequest(self, path=None):
if path is None:
path = self.path
request = self.factory.get(path)
# Recall that middleware are not supported. You can simulate a
# logged-in user by setting request.user manually.
request.user = self.user
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def executeDeleteView(self, data, viewkwargs={}):
request = self.getSessionRequest()
v = self.get_view(kwargs=viewkwargs)
c = v.get_context_data(**viewkwargs)
self.assertTrue(c['proposal'] is not None)
mutable = request.POST._mutable
request.POST._mutable = True
if 'okay_btn' in data:
request.POST['okay_btn'] = 'okay_btn'
else:
request.POST['cancel_btn'] = 'cancel_btn'
request.POST._mutable = mutable
v.request = request
v.object = viewkwargs['instance']
self.assertTrue(v.delete(request))
return request
def test_edit_choice(self):
p = self.create_proposal_with_two_proposal_choices()
pc1 = p.proposalchoice_set.first()
self.assertTrue(pc1.deactivated_date is None)
request = self.executeDeleteView(
data={'okay_btn'},
viewkwargs={'pk' : pc1.id, 'instance' : pc1, 'proposal_id' : p.id})
pc1 = ProposalChoice.objects.get(pk=pc1.id)
self.assertTrue(pc1.deactivated_date is not None)
def test_edit_choice_cancel(self):
dt = timezone.now()
p = self.create_proposal_with_two_proposal_choices()
pc1 = p.proposalchoice_set.first()
self.assertTrue(pc1.deactivated_date is None)
request = self.executeDeleteView(
data={},
viewkwargs={'pk' : pc1.id, 'instance' : pc1, 'proposal_id' : p.id})
pc1 = ProposalChoice.objects.get(pk=pc1.id)
self.assertTrue(pc1.deactivated_date is None)
def test_edit_choice_permission_denied(self):
p = self.create_proposal_with_two_proposal_choices(owned_by=self.user2)
pc1 = p.proposalchoice_set.first()
self.assertTrue(pc1.deactivated_date is None)
with self.assertRaises(PermissionDenied) as e:
request = self.executeDeleteView(
data={'okay_btn'},
viewkwargs={'pk' : pc1.id, 'instance' : pc1, 'proposal_id' : p.id})
```
#### File: consensus_engine/tests/test_view_edit_proposal_group.py
```python
from django.test import TestCase, RequestFactory
from .mixins import TwoUserMixin, ProposalGroupMixin, ViewMixin
from consensus_engine.views import EditProposalGroupView
from consensus_engine.forms import ProposalGroupForm
from consensus_engine.models import ProposalGroup
from django.core.exceptions import PermissionDenied
class EditProposalGroupViewTest(TwoUserMixin, TestCase,
ProposalGroupMixin, ViewMixin):
path = '/proposalgroups/1/edit/'
form = ProposalGroupForm
view = EditProposalGroupView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def test_edit_proposal_group(self):
self.assertTrue(ProposalGroup.objects.count() == 0)
pg = self.create_proposal_group()
self.assertTrue(ProposalGroup.objects.filter(group_name='test group').count() == 1)
original_description = pg.group_description
_ = self.getValidView(data={'group_name': 'updated test group',
'group_description': original_description},
viewkwargs={'instance': pg})
q = ProposalGroup.objects.filter(group_name='test group')
self.assertTrue(q.count() == 0)
q2 = ProposalGroup.objects.filter(group_name='updated test group')
self.assertTrue(q2.count() == 1)
pg2 = q2.first()
self.assertTrue(pg == pg2)
q3 = ProposalGroup.objects.filter(group_description=original_description)
self.assertTrue(q3.count() == 1)
_ = self.getValidView(data={'group_name': pg.group_name,
'group_description': 'updated test description'},
viewkwargs={'instance': pg})
q4 = ProposalGroup.objects.filter(group_description=original_description)
self.assertTrue(q4.count() == 0)
q5 = ProposalGroup.objects.filter(group_description='updated test description')
self.assertTrue(q5.count() == 1)
def test_edit_proposal_group_no_permission(self):
self.assertTrue(ProposalGroup.objects.count() == 0)
pg = self.create_proposal_group(owned_by=self.user2)
self.assertTrue(ProposalGroup.objects.filter(group_name='test group').count() == 1)
original_description = pg.group_description
with self.assertRaises(PermissionDenied, msg="Only the owner is allowed to edit group details."):
_ = self.getValidView(data={'group_name': 'updated test group',
'group_description': original_description},
viewkwargs={'instance': pg})
```
#### File: consensus_engine/tests/test_view_proposal.py
```python
from django.test import TestCase, RequestFactory
from .mixins import OneUserMixin, ProposalMixin, TemplateViewMixin
from django.utils import timezone
from consensus_engine.views import ProposalView
from consensus_engine.models import ChoiceTicket, ConsensusHistory
from consensus_engine.converters import DateConverter
class ProposalViewTest(OneUserMixin, TestCase,
ProposalMixin, TemplateViewMixin):
path = 'proposals/<int:proposal_id>'
view = ProposalView
def setUp(self):
self.factory = RequestFactory()
OneUserMixin.setUp(self)
def test_view_new_proposal(self):
p = self.create_new_proposal()
context, _ = self.executeView(viewkwargs={'proposal_id': p.id})
self.assertTrue(context['proposal'] is not None)
p2 = context['proposal']
self.assertTrue(p2.id == p.id)
self.assertTrue(context['current_choice'] is None)
self.assertTrue(context['active_choices'].count() == 0)
def test_view_new_proposal_with_date(self):
p = self.create_new_proposal()
p.determine_consensus()
# save consensus history
snapshot = ConsensusHistory.build_snapshot(p)
snapshot.save()
dc = DateConverter()
qd = dc.to_python(dc.to_url(timezone.now()))
context, _ = self.executeView(viewkwargs={'proposal_id': p.id, 'query_date': qd})
self.assertTrue('query_date' in context)
self.assertTrue('vote_spread' in context)
self.assertTrue(context['vote_spread'] == {})
def test_view_new_proposal_with_choices(self):
p = self.create_proposal_with_two_proposal_choices()
context, _ = self.executeView(viewkwargs={'proposal_id': p.id})
self.assertTrue(context['proposal'] is not None)
p2 = context['proposal']
self.assertTrue(p2.id == p.id)
self.assertTrue(context['current_choice'] is None)
self.assertTrue(context['active_choices'].count() == 2)
self.assertTrue(len(context['vote_spread']) == 2)
def test_view_new_proposal_with_choices_and_date(self):
p = self.create_proposal_with_two_proposal_choices()
p.determine_consensus()
# save consensus history
snapshot = ConsensusHistory.build_snapshot(p)
snapshot.save()
dc = DateConverter()
qd = dc.to_python(dc.to_url(timezone.now()))
context, _ = self.executeView(viewkwargs={'proposal_id': p.id, 'query_date': qd})
self.assertTrue('query_date' in context)
self.assertTrue('vote_spread' in context)
no_votes_data = {1: {'text': 'Yes', 'count': 0, 'percentage': 0},
2: {'text': 'No', 'count': 0, 'percentage': 0}}
self.assertTrue(context['vote_spread'] == no_votes_data)
def test_view_new_proposal_with_choices_and_votes(self):
p = self.create_proposal_with_two_proposal_choices()
p.publish()
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
_ = p.proposalchoice_set.last()
v = ChoiceTicket.objects.create(user=self.user,
date_chosen=timezone.now(), proposal_choice=pc1, current=True)
context, _ = self.executeView(viewkwargs={'proposal_id': p.id})
self.assertTrue(context['proposal'] is not None)
p2 = context['proposal']
self.assertTrue(p2.id == p.id)
self.assertTrue(context['current_choice'].id == v.id)
self.assertTrue(context['active_choices'].count() == 2)
self.assertTrue(len(context['vote_spread']) == 2)
self.assertTrue(context['vote_spread'][1]['text'] == "Yes")
self.assertTrue(context['vote_spread'][1]['count'] == 1)
self.assertTrue(context['vote_spread'][1]['percentage'] == 100.0)
self.assertTrue(context['vote_spread'][2]['text'] == "No")
self.assertTrue(context['vote_spread'][2]['count'] == 0)
self.assertTrue(context['vote_spread'][2]['percentage'] == 0)
def test_view_new_proposal_with_choices_and_votes_and_date(self):
p = self.create_proposal_with_two_proposal_choices()
p.publish()
p.determine_consensus()
# save consensus history
ss = ConsensusHistory.build_snapshot(p)
ss.save()
dc = DateConverter()
_ = dc.to_python(dc.to_url(timezone.now()))
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
_ = p.proposalchoice_set.last()
_ = ChoiceTicket.objects.create(user=self.user,
date_chosen=timezone.now(), proposal_choice=pc1, current=True)
p.determine_consensus()
# save consensus history
ss2 = ConsensusHistory.build_snapshot(p)
ss2.save()
qd2 = dc.to_python(dc.to_url(timezone.now()))
context, _ = self.executeView(viewkwargs={'proposal_id': p.id, 'query_date': qd2})
self.assertTrue('vote_spread' in context)
votes_data = {1: {'text': 'Yes', 'count': 1, 'percentage': 100.0},
2: {'text': 'No', 'count': 0, 'percentage': 0}}
self.assertTrue(context['vote_spread'] == votes_data)
# fudge the date to test the history date list
ss3 = ConsensusHistory.build_snapshot(p)
dt = ss3.snapshot_date.replace(year=ss3.snapshot_date.year-1)
ss3.snapshot_date = dt
ss3.save()
context, _ = self.executeView(viewkwargs={'proposal_id': p.id})
self.assertTrue(context["history_date_list"])
def test_view_new_proposal_with_choices_and_votes_and_old_date(self):
p = self.create_proposal_with_two_proposal_choices()
p.determine_consensus()
# save consensus history
ss = ConsensusHistory.build_snapshot(p)
ss.save()
dc = DateConverter()
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
_ = p.proposalchoice_set.last()
v = ChoiceTicket.objects.create(user=self.user,
date_chosen=timezone.now(), proposal_choice=pc1, current=True)
self.assertTrue(v is not None)
p.determine_consensus()
# save consensus history
ss2 = ConsensusHistory.build_snapshot(p)
ss2.save()
qd2 = dc.to_python("30-06-1970")
context, _ = self.executeView(viewkwargs={'proposal_id': p.id, 'query_date': qd2})
self.assertFalse('vote_spread' in context)
self.assertTrue('error_message' in context)
```
#### File: consensus_engine/tests/test_view_votes.py
```python
from django.test import TestCase, RequestFactory
from .mixins import TwoUserMixin, ProposalGroupMixin, ProposalMixin, TemplateViewMixin
from django.utils import timezone
from consensus_engine.views import MyVotesView, VoteView
from consensus_engine.models import ChoiceTicket
class MyVotesViewTest(TwoUserMixin, TestCase,
ProposalGroupMixin, ProposalMixin, TemplateViewMixin):
view = MyVotesView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def test_list_votes_no_votes(self):
context, _ = self.executeView()
self.assertTrue(context['votes_list'].count() == 0)
def test_list_votes_some_votes(self):
p = self.create_proposal_with_two_proposal_choices()
p.publish()
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
_ = ChoiceTicket.objects.create(user=self.user,
date_chosen=timezone.now(), proposal_choice=pc1, current=True)
context, _ = self.executeView()
self.assertTrue(context['votes_list'].count() == 1)
p2 = self.create_proposal_with_two_proposal_choices()
p2.publish()
pc3 = p2.proposalchoice_set.first()
_ = ChoiceTicket.objects.create(user=self.user,
date_chosen=timezone.now(), proposal_choice=pc3, current=True)
context, _ = self.executeView()
self.assertTrue(context['votes_list'].count() == 2)
_ = ChoiceTicket.objects.create(user=self.user2,
date_chosen=timezone.now(), proposal_choice=pc3, current=True)
context, _ = self.executeView()
self.assertTrue(context['votes_list'].count() == 2)
# switch user
self.current_user = self.user2
context, _ = self.executeView()
self.assertTrue(context['votes_list'].count() == 1)
class VoteViewTest(TwoUserMixin, TestCase,
ProposalGroupMixin, ProposalMixin, TemplateViewMixin):
view = VoteView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def test_vote(self):
p = self.create_proposal_with_two_proposal_choices()
p.publish()
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
pc2 = p.proposalchoice_set.last()
self.assertTrue(p.total_votes == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc1).count() == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc2).count() == 0)
context, _ = self.executeView(viewkwargs={'proposal_id': p.id}, postargs={'choice': pc1.id})
self.assertTrue(p.total_votes == 1)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc1).count() == 1)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc2).count() == 0)
def test_vote_incorrect_choice(self):
p = self.create_proposal_with_two_proposal_choices()
p.publish()
# check that total votes = 0 if there are no votes
pc1 = p.proposalchoice_set.first()
pc2 = p.proposalchoice_set.last()
self.assertTrue(p.total_votes == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc1).count() == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc2).count() == 0)
context, _ = self.executeView(viewkwargs={'proposal_id': p.id}, postargs={'choice': 99})
self.assertTrue(p.total_votes == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc1).count() == 0)
self.assertTrue(ChoiceTicket.objects.filter(proposal_choice=pc2).count() == 0)
```
#### File: consensus_engine/consensus_engine/utils.py
```python
from enum import IntEnum
class ProposalState(IntEnum):
DRAFT = 0
TRIAL = 1
PUBLISHED = 2
ON_HOLD = 3
ARCHIVED = 4
@classmethod
def choices(cls):
return [(key.value, key.name) for key in cls]
@classmethod
def all_states(cls):
return [key.value for key in cls]
@classmethod
def reporting_as_state(cls, state):
# on-hold and archived proposals always report results from published state
if state in {ProposalState.ON_HOLD, ProposalState.ARCHIVED}:
return ProposalState.PUBLISHED
else:
return state
def get_next_states(self):
""" Returns a list of all the possible next states given a current state """
        # implemented as an explicit per-state listing, rather than something more optimised, for readability
if self.value == ProposalState.DRAFT:
next_states = [ProposalState.TRIAL,
ProposalState.PUBLISHED,
ProposalState.ON_HOLD,
ProposalState.ARCHIVED]
elif self.value == ProposalState.TRIAL:
next_states = [ProposalState.PUBLISHED,
ProposalState.ON_HOLD,
ProposalState.ARCHIVED]
elif self.value == ProposalState.PUBLISHED:
next_states = [ProposalState.ON_HOLD,
ProposalState.ARCHIVED]
elif self.value == ProposalState.ON_HOLD:
next_states = [ProposalState.PUBLISHED,
ProposalState.ARCHIVED]
elif self.value == ProposalState.ARCHIVED:
next_states = []
return next_states
```
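The state machine above is compact enough to exercise directly. The snippet below is an editorial sketch, not part of the repository, showing how `get_next_states` and `reporting_as_state` behave; the import path is assumed from the file layout (`consensus_engine/consensus_engine/utils.py`).
```python
from consensus_engine.utils import ProposalState  # assumed import path
state = ProposalState.ON_HOLD
# An on-hold proposal can only move back to PUBLISHED or on to ARCHIVED
assert state.get_next_states() == [ProposalState.PUBLISHED, ProposalState.ARCHIVED]
# On-hold and archived proposals report results against the PUBLISHED state,
# which is what the ChoiceTicket lookup above relies on
assert ProposalState.reporting_as_state(state) == ProposalState.PUBLISHED
# Every other state reports against itself
assert ProposalState.reporting_as_state(ProposalState.DRAFT) == ProposalState.DRAFT
```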
#### File: consensus_engine/views/state_views.py
```python
from django.views.generic.base import TemplateView
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from consensus_engine.models import Proposal
from consensus_engine.utils import ProposalState
@method_decorator(login_required, name='dispatch')
class StateView(TemplateView):
""" Class based view for changing state """
template_name = 'consensus_engine/change_state.html'
def get_context_data(self, **kwargs):
# view the proposal choices
proposal = get_object_or_404(Proposal, pk=kwargs['proposal_id'])
current_state = proposal.current_state
possible_states = current_state.get_next_states()
context = {'proposal': proposal, 'current_state': current_state,
'possible_states': possible_states}
return context
def post(self, request, **kwargs):
proposal = get_object_or_404(Proposal, pk=kwargs['proposal_id'])
try:
selected_state = int(request.POST['state'])
new_state = ProposalState(selected_state)
except (KeyError):
return render(request, 'consensus_engine/change_state.html', {
'proposal': proposal,
'error_message': "You didn't select a state.",
})
success_url = reverse('confirm_state_change', args=[proposal.id, int(new_state)])
return HttpResponseRedirect(success_url)
@method_decorator(login_required, name='dispatch')
class StateChangeConfirmationView(TemplateView):
""" Class based view for confirming that the change of state is what the user wants """
template_name = 'consensus_engine/confirm_state_change.html'
def get_context_data(self, **kwargs):
# view the proposal choices
proposal = get_object_or_404(Proposal, pk=kwargs['proposal_id'])
next_state = kwargs['next_state']
current_state = proposal.current_state
context = {'proposal': proposal, 'current_state': current_state,
'next_state': next_state}
return context
def post(self, request, **kwargs):
proposal = get_object_or_404(Proposal, pk=kwargs['proposal_id'])
selected_state = int(kwargs['next_state'])
new_state = ProposalState(selected_state)
if new_state == ProposalState.TRIAL:
proposal.trial()
elif new_state == ProposalState.PUBLISHED:
default_choices = 'default_choices' in request.POST
proposal.publish(default_group_to_these_choices=default_choices)
elif new_state == ProposalState.ON_HOLD:
proposal.hold()
elif new_state == ProposalState.ARCHIVED:
proposal.archive()
success_url = reverse('view_proposal', args=[proposal.id])
return HttpResponseRedirect(success_url)
```
#### File: consensus_engine/simplepython/simple.py
```python
def addTwoNumbers(a, b):
sum = a + b
return sum
def addList(l1):
sum = 0
for i in l1:
sum += i
return sum
add = lambda x, y : x + y
num1 = 1
num2 = 2
print("The sum is ", addTwoNumbers(num1, num2))
numlist = (1, 2, 3)
print(add(10, 20))
print("The sum is ", addList(numlist))
for i in range(16, 11, -1):
print(i)
print([x for x in range(16) if x > 2])
x = ('apple', 'banana', 'cherry')
y = enumerate(x)
print(list(y))
dict1 = {i: i**2 for i in range(1, 11)}
print(dict1)
txt = "The rain in Spain stays mainly in the plain"
x = "ain" in txt
print(x)
def finite_sequence():
num = 0
while num < 10000:
yield num
num += 1
def tupletest():
return 1, 2
x,y = tupletest()
print(tupletest(), x, y)
``` |
{
"source": "jonsbartlett/SummerSchoolRobot",
"score": 2
} |
#### File: libkoki/tools/markergen.py
```python
import numpy as np
import sys, math, os
import CrcMoose
import cairo
from code_table import *
import getopt
MARKER_VERSION = "v0.5"
G = np.matrix([[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
H = np.matrix([[1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1]])
R = np.matrix([[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]])
def hamming_encode(l):
p = np.matrix([l]).T
tmp = G * p
output = tmp.A % 2
return output.T[0]
def hamming_syndrome(l):
r = np.matrix([l]).T
z = H * r
output = z.A % 2
return output.T[0]
def hamming_correct(l, z):
syndrome_val = z[0] + z[1]*2 + z[2]*4
# no errors, return original
if (syndrome_val == 0):
return l
# flip the error bit
l[syndrome_val-1] = (l[syndrome_val-1] + 1) % 2
return l
def hamming_decode(l):
syndrome = hamming_syndrome(l)
corrected = hamming_correct(l, syndrome)
pr = R * np.matrix([corrected]).T
return pr.T.A[0]
def get_code(marker_num):
CRC12 = CrcMoose.CrcAlgorithm(
name = "CRC-12",
width = 12,
polynomial = (12, 11, 3, 2, 1, 0),
seed = 0,
lsbFirst = True,
xorMask = 0)
marker_chr = chr(int((marker_num+1) % 256))
crc = CRC12.calcString(marker_chr)
code = (crc << 8) | marker_num
# print "Marker No:", marker_num, "\t\t(", hex(marker_num), ")"
# print " CRC:", crc, "\t(", hex(crc), ")"
# print " Code:", code, "\t(", hex(code), ")"
return code
def code_to_lists(code):
output = []
for i in range(5):
l = []
for j in range(4):
mask = 0x1 << (i*4+j)
tmp = code & mask
bit = 1
if (tmp == 0):
bit = 0
l.append(bit)
output.append(l)
return output
def encoded_lists(l):
return map(hamming_encode, l)
def code_grid(code):
blocks = encoded_lists(code_to_lists(code))
cell = 0
grid = [[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1]]
for i in range(7):
for j in range(5):
grid[cell / 6][cell % 6] = blocks[j][i]
cell = cell + 1
return grid
def print_grid(grid):
sys.stdout.write("# # # # # # # # # #\n")
sys.stdout.write("# # # # # # # # # #\n")
for i in range(6):
sys.stdout.write("# # ")
for j in range(6):
if grid[i][j] == 1:
sys.stdout.write("# ")
else:
sys.stdout.write(" ")
sys.stdout.write("# #\n")
sys.stdout.write("# # # # # # # # # #\n")
sys.stdout.write("# # # # # # # # # #\n")
def mm_to_in(x):
return x * 0.0393700787
def mm_to_pt(x):
return 72 * mm_to_in(x)
def get_pdf_surface(page_width, page_height, filename):
surface = cairo.PDFSurface(filename, page_width, page_height)
return surface
def finish_surface(surface):
surface.finish()
def short_description(desc):
if desc == "":
return ""
return "'%s'" % (desc)
def render_marker(surface, marker_num, overall_width, offset_x, offset_y,
desc="", show_text=1):
fwd = gen_forwards_table()
rev = gen_reverse_table(fwd)
grid = code_grid(get_code(rev[marker_num]))
marker_width = overall_width * (10.0/12.0)
cell_width = marker_width / 10
cell_grid_offset_x = cell_width * 2
cell_grid_offset_y = cell_width * 2
cr = cairo.Context(surface)
# draw outline
cr.set_line_width(1)
grey = 0.7
cr.set_source_rgb(grey, grey, grey)
cr.rectangle(offset_x, offset_y, overall_width, overall_width)
cr.stroke()
# draw black border
cr.set_source_rgb(0, 0, 0)
cr.rectangle(offset_x + cell_width,
offset_y + cell_width,
marker_width, marker_width)
cr.fill()
# draw white grid background (i.e. zero grid)
cr.set_source_rgb(1, 1, 1)
cr.rectangle(offset_x + cell_width + cell_width * 2,
offset_y + cell_width + cell_width * 2,
marker_width * 0.6, marker_width * 0.6)
cr.fill()
#draw cells
cr.set_source_rgb(0, 0, 0)
for row in range(6):
for col in range(6):
if grid[row][col] == 1:
#draw the 1 bit
cr.rectangle(offset_x + cell_width + cell_width * 2 + col * cell_width,
offset_y + cell_width + cell_width * 2 + row * cell_width,
marker_width * 0.1, marker_width * 0.1)
cr.fill()
# write on marker
if show_text:
font_size = 6
grey = 0.5
cr.select_font_face('Sans')
cr.set_font_size(font_size)
cr.set_source_rgb(grey, grey, grey)
cr.move_to(offset_x + cell_width + font_size, offset_y + cell_width + marker_width - font_size)
cr.show_text('libkoki marker #%d (%s) %s' % (marker_num, MARKER_VERSION, short_description(desc)))
# put dot in top left
cr.new_sub_path()
grey = 0.2
cr.set_source_rgb(grey, grey, grey)
cr.arc(offset_x + cell_width + cell_width,
offset_y + cell_width + cell_width,
cell_width/8, 0, 2 * math.pi)
cr.fill()
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Usage: ./markergen.py [--4up] [--desc val] <code> <output_prefix>"
sys.exit(1)
optlist, args = getopt.getopt(sys.argv[1:], '', ['4up', 'desc='])
if len(args) != 2:
print "Usage: ./markergen.py [--4up] [--desc val] <code> <output_prefix>"
sys.exit(1)
CODE = int(args[0])
OUTFNAME = "%s-%i.pdf" % (args[1], CODE)
FOURUP = False
DESC = ""
# check for options
for opt in optlist:
if opt[0] == "--4up":
FOURUP = True
elif opt[0] == "--desc":
DESC = opt[1]
surface = get_pdf_surface(mm_to_pt(210), mm_to_pt(297), OUTFNAME)
if not FOURUP:
render_marker(surface, CODE, mm_to_pt(100),
mm_to_pt((210 - 100) / 2),
mm_to_pt((297 - 100) / 2), DESC)
else:
render_marker(surface, CODE, mm_to_pt(100),
mm_to_pt(5), mm_to_pt(10), DESC)
render_marker(surface, CODE, mm_to_pt(100),
mm_to_pt(105), mm_to_pt(10), DESC)
render_marker(surface, CODE, mm_to_pt(100),
mm_to_pt(5), mm_to_pt(110), DESC)
render_marker(surface, CODE, mm_to_pt(100),
mm_to_pt(105), mm_to_pt(110), DESC)
finish_surface(surface)
```
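As far as `get_code` and `code_grid` above show, each marker packs a 12-bit CRC (derived from the marker number) above the 8-bit marker number, splits the 20-bit result into five 4-bit nibbles, Hamming(7,4)-encodes each nibble, and writes the 35 coded bits into the 6x6 grid (the last cell keeps its initial value). The block below is a self-contained Python 3 illustration of that Hamming(7,4) round trip using the same G, H and R matrices; it is an editorial example, not part of markergen.py.
```python
import numpy as np
G = np.matrix([[1, 1, 0, 1], [1, 0, 1, 1], [1, 0, 0, 0], [0, 1, 1, 1],
               [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
H = np.matrix([[1, 0, 1, 0, 1, 0, 1], [0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 1, 1, 1, 1]])
R = np.matrix([[0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0],
               [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])
nibble = [1, 0, 1, 1]
codeword = (G * np.matrix([nibble]).T).A % 2          # encode: 4 data bits -> 7-bit codeword
received = codeword.copy()
received[2] = (received[2] + 1) % 2                   # corrupt a single bit in transit
z = ((H * np.matrix(received)).A % 2).flatten()       # syndrome identifies the flipped bit
err_pos = int(z[0] + z[1] * 2 + z[2] * 4)             # 1-based error position (0 = no error)
if err_pos:
    received[err_pos - 1] = (received[err_pos - 1] + 1) % 2
decoded = (R * np.matrix(received)).A % 2             # drop parity bits, keep the 4 data bits
assert decoded.T.tolist()[0] == nibble                # the single-bit error was corrected
```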
#### File: SummerSchoolRobot/robot/transmit.py
```python
class Transmitter:
def __init__(self):
pass #start serial here
def transmit(self,boardName,thingnumber,command):
print boardName, thingnumber, command
``` |
{
"source": "jonschipp/nsm-tools",
"score": 2
} |
#### File: jonschipp/nsm-tools/virus_total.py
```python
import json
import urllib
import urllib2
import sys
apikey = '843fa2012b619be746ead785b933d59820a2e357c7c186e581e8fcadbe2e550e'
def usage():
    print '''Submit hash to VirusTotal
(Place your VirusTotal apikey in this script)
Usage: %s <hash>''' % sys.argv[0]
exit(1)
def collect(data):
retrieve = data[0]
sha1 = retrieve['sha1']
filenames = retrieve['filenames']
first_seen = retrieve['first-seen']
last_seen = retrieve['last-seen']
last_scan_permalink = retrieve['last-scan-permalink']
last_scan_report = retrieve['last-scan-report']
return sha1, filenames, first_seen, last_seen, last_scan_permalink, last_scan_report
def msg(sha1, filenames, first_seen, last_seen, last_scan_permalink):
print '''===Suspected Malware Item===
SHA1: %s
Filenames: %s
First Seen: %s
Last Seen: %s
Link: %s''' % (sha1, filenames, first_seen, last_seen, last_scan_permalink)
def is_malware(last_scan_report):
for av, scan in last_scan_report.iteritems():
if scan[0] is not None:
return True
return False
def in_database(data, mhash):
result = data[0]['result']
if result == 0:
return False
return True
def arguments():
if len(sys.argv) < 2:
usage()
if '-h' in sys.argv[1]:
usage()
if not apikey:
print "Set apikey in %s to value of your Virus Total key" % sys.argv[0]
exit(1)
mhash = sys.argv[1]
return mhash
def query_api(mhash, apikey):
url = "http://api.vtapi.net/vtapi/get_file_infos.json"
parameters = {"resources": mhash, "apikey": apikey}
encoded = urllib.urlencode(parameters)
req = urllib2.Request(url, encoded)
response = urllib2.urlopen(req)
response_string = response.read()
data = json.loads(response_string)
return data
mhash = arguments()
data = query_api(mhash, apikey)
if not in_database(data, mhash):
print 'No entry for %s in database' % mhash
exit(1)
# Positive match found
sha1, filenames, first_seen, last_seen, last_scan_permalink, last_scan_report = collect(data)
if is_malware(last_scan_report):
msg(sha1, filenames, first_seen, last_seen, last_scan_permalink)
exit(0)
else:
print 'Entry %s is not malicious' % mhash
exit(1)
``` |
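The helpers above only assume a particular shape for the decoded JSON: a one-element list whose entry carries a `result` flag plus the hash metadata, with `last-scan-report` mapping each engine to a list whose first element is either a detection name or None. The Python 3 sketch below uses entirely made-up data (no network call, not a real VirusTotal response) to show how `in_database` and `is_malware` would interpret such a reply.
```python
# Hypothetical stand-in for json.loads(response_string); field names mirror collect() above
data = [{
    "result": 1,
    "sha1": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "filenames": ["sample.exe"],
    "first-seen": "2012-01-01 00:00:00",
    "last-seen": "2012-06-01 00:00:00",
    "last-scan-permalink": "https://example.invalid/report",
    "last-scan-report": {"EngineA": ["Trojan.Generic", "20120601"],
                         "EngineB": [None, "20120601"]},
}]
in_database = data[0]["result"] != 0                         # mirrors in_database()
is_malware = any(scan[0] is not None                         # mirrors is_malware()
                 for scan in data[0]["last-scan-report"].values())
print(in_database, is_malware)                               # True True
```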
{
"source": "jonschull/Lyte",
"score": 2
} |
#### File: Lyte/combined/glowtest.py
```python
import lyte
from vpython import box
box()
lyte.say('this is lyte saying in glowtest.py')
def f():
print('this is a function')
f()
print("14:51:20 03/20/18 EDT")
D={1:1, 2:2}
print(f'{D}= {D} {type(D)}' )
```
#### File: Lyte/combined/vpytohtml.py
```python
from chromedriverService import BrowserFromService, Keys, msg
from copypaste import write_to_clipboard, read_from_clipboard
from time import sleep
from selenium.webdriver import ActionChains
import requests
#B = BrowserFromService(headless = True)
#B.get('http://localhost:8080/_ah/login')
def login():
B.get('http://localhost:8080/_ah/login')
sleep(1)
input=B.find_element_by_id('email')
input.click()
input.send_keys(Keys.RIGHT * 20)
input.send_keys(Keys.BACKSPACE * 20)
input.send_keys('<EMAIL>')
input.send_keys(Keys.TAB*2)
input.send_keys(Keys.RETURN)
#login()
def srcFromFileName(filename='test.py'):
"""get source, apply transformation"""
thesource=open(filename).read()
lines= thesource.split('\n')
lines.insert(1, 'def GlowMe( me=""): pass\n')
lines.insert(2, "get_library('http://localhost:8080/lib/jonlib.js')\n")
fixedLines=[]
for line in lines:
#lines beginning with any of these phrases need to commented out
toxicStarters = 'GlowMe from import DICT=dict'.split()
for poison in toxicStarters:
if line.strip().startswith(poison):
line='##GlowMe '+line
#print(line)
fixedLines.append(line)
return '\n'.join(fixedLines)
def goToWorkspace():
global textarea
B.get('http://localhost:8080/#/user/jschull/folder/Public/')
print('page loaded?')
sleep(3)
B.find_element_by_link_text('Create New Program').click()
actions=ActionChains(B)
    actions.send_keys('workspace' + Keys.RETURN).perform() #THIS MAKES PASTE WORK.
textarea=B.find_element_by_tag_name('textarea')
#goToWorkspace()
#typeToWorkspace()
#targetName = 'test.py'
def pasteToBrowser( src ):
#select all and delete
textarea=B.find_element_by_tag_name('textarea')
#copy into clipboard
write_to_clipboard('\n' + src + '\n')
sleep(1)
actions=ActionChains(B)
actions.context_click().send_keys(Keys.ARROW_DOWN).perform() #THIS MAKES PASTE WORK.
#paste
actions=ActionChains(B)
actions.send_keys(Keys.SHIFT+Keys.INSERT).perform()
#pasteToBrowser( srcFromFileName( targetName ) )
def getEmbeddableSrc( ):
B.get('http://localhost:8080/#/user/jschull/folder/Public/program/workspace/share')
sleep(1) #allow time for textarea to fill
textarea=B.find_element_by_tag_name('textarea')
embeddableSrc = B.find_element_by_css_selector('.embedSource').text
return embeddableSrc
def createHTML( targetName ): #works but uses glowscript template
src = getEmbeddableSrc( )
src = src.split('<![CDATA[//><!--')[1]
htmlName = targetName.replace('.py', '.html')
f=open( htmlName, 'w' )
f.write(f"""<div id="glowscript" class="glowscript">
<link type="text/css" href="http://localhost:8080/css/redmond/jquery-ui.custom.css" rel="stylesheet" />
<link type="text/css" href="http://localhost:8080/css/ide.css" rel="stylesheet" />
<script type="text/javascript" language="javascript" src="http://localhost:8080/lib/jquery/IDE/jquery.min.js"></script>
<script type="text/javascript" language="javascript" src="http://localhost:8080/lib/jquery/IDE/jquery-ui.custom.min.js"></script>
<script type="text/javascript" src="http://localhost:8080/package/glow.2.7.min.js"></script>
<script type="text/javascript" src="http://localhost:8080/package/RSrun.2.7.min.js"></script>
<script type="text/javascript"><!--//--><![CDATA[//><!--
{src} """)
f.close()
print(f'{htmlName} created')
from plumbum import local, NOHUP, BG
def startGlowScript():
python=local['python']
dev_appserver = local['/Users/jonschull-MBPR/Downloads/google-cloud-sdk/bin/dev_appserver.py']
GSappYAML = local['/Users/jonschull-MBPR/glowscript/glowscript/app.yaml']
python[dev_appserver, GSappYAML] & NOHUP(stdout='/dev/null')
sleep(2) #give the server time to start up
def GSserverIsRunning():
    try:
        requests.get('http://localhost:8080')
        msg('okGS)')
    except Exception as e:
        msg('newGS')
        startGlowScript()
    # either the server was already up or we just started it; report success to the caller
    return True
def vpy_to_html(targetName = 'test.py', headless=True):
global B
#headless= True
msg('(GS:localhost:8080?')
if GSserverIsRunning():
msg('OK')
msg(f'Chrome')
B = BrowserFromService(headless = headless)
if headless: print('headless', end='...')
msg(f'logging in')
login()
msg('IN')
#targetName='test.py'
msg(f'{targetName}-->')
goToWorkspace()
pasteToBrowser( srcFromFileName( targetName ) )
createHTML(targetName )
B.get('http://localhost:8080/#/user/jschull/folder/Public/program/workspace')
ActionChains(B).send_keys(Keys.ESCAPE).perform() #get rid of the magic context menu?
sleep(2)
try:
errorTB = B.find_elements_by_class_name('error-traceback')[1].text
errorMsg = B.find_elements_by_class_name('error-details')[1].text
print(f"""GLOWSCRIPT ERROR {errorTB}
{errorMsg}""")
except IndexError:
pass
def createTestPy(timestamp=''):
msg('creating test.py')
with open('test.py', 'w') as f:
f.write(f"""
import DICT
box()
print('this is test.py')
def f():
print('this is a function')
f()
print("{timestamp}")
""")
if __name__=='__main__':
import sys
if len(sys.argv)>1:
pyFile = sys.argv[1]
else:
pyFile='test.py'
import time
createTestPy(time.strftime('%X %x %Z'))
vpy_to_html( 'glowtest.py', headless=False)
```
#### File: jonschull/Lyte/copypaste.py
```python
import subprocess
def write_to_clipboard(output):
process = subprocess.Popen(
'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
process.communicate(output.encode('utf-8'))
def read_from_clipboard():
return subprocess.check_output(
'pbpaste', env={'LANG': 'en_US.UTF-8'}).decode('utf-8')
if __name__=='__main__':
s="""one
two
three"""
write_to_clipboard(s)
print(read_from_clipboard())
print('now you try pasting')
```
#### File: jonschull/Lyte/greet.py
```python
def greet(i):
console.log(str(i) + " Hello World!")
for i in range(8):
greet(i)
```
#### File: jonschull/Lyte/makemyVPHTML.py
```python
from makemyPYJ import makeDummyPYJ#, makemyPYJ
makeDummyPYJ('makemyVPHTML')
makeDummyPYJ('sys')
makeDummyPYJ('writeout', ['writeout'])
makeDummyPYJ('plumbum',['local'])
import sys
import writeout
from lyte import say, whereami
from plumbum import local
from beginswith import beginsWith
RS = local['/Users/jonschull-MBPR/rapydscript-ng/rapydscript-ng/bin//rapydscript']
plopen = local['open']
myName = sys.argv[0].replace('.py','')
see = local['see']
def edit():
see(f'{myName}.html')
lines = open(sys.argv[0]).readlines()
hasVPimport = False
for i,line in enumerate(lines):
if beginsWith(line, 'from vpython import'):
lines[i] = '##commented out by makemyHTML##' + line
hasVPimport = True
print('\nhasVPimport',hasVPimport)
def includeInHTML():
global lines
includeLine=0
for i, line in enumerate(lines):
importFlag1 = line.strip().startswith('import makemyVPHTML')
importFlag2 = line.strip().startswith('##LYTEML--INCLUDE') #to avoid recursive import in makemyHTML
flaggedLine = importFlag1 or importFlag2
if flaggedLine:
if importFlag1: firstChar = 'i'
if importFlag2: firstChar = '#'
includeLine = i + 1
indented = line.find(firstChar)
if includeLine:
lines = [line[indented:] for line in lines]
return ''.join(lines[includeLine:])
my__main__ = includeInHTML()
simpleTemplate= f"""<html>
<head>
<meta charset="UTF-8" />
<script type="text/javascript" src="rapydscript.js"></script>
<script type="text/javascript" src="heredoc.js"> </script>
<script type="text/javascript" src="{myName}.js"> </script>
</head>
<body>
</body>
<script type="text/javascript">var compiler = RapydScript.create_embedded_compiler();
eval(compiler.compile(hereDoc(`##### Python zone begins
{my__main__}
## Python zone ends`))); </script> </html> """
VPtemplate = f"""<html>
<head>
<meta charset="UTF-8" />
<script type="text/javascript" src="rapydscript.js"></script>
<script type="text/javascript" src="heredoc.js"> </script>
<script type="text/javascript" src="{myName}.js"> </script>
<link type="text/css" href="supportScripts/jquery-ui.custom.css" rel="stylesheet" />
<link type="text/css" href="supportScripts/ide.css" rel="stylesheet" />
<script type="text/javascript" src="supportScripts/jquery.min.js"></script>
<script type="text/javascript" src="supportScripts/jquery-ui.custom.min.js"></script>
<script type="text/javascript" src="supportScripts/glow.2.7.min.js"></script>
<script type="text/javascript" src="supportScripts/RSrun.2.7.min.js"></script>
<script type="text/javascript" src="supportScripts/heredoc.js"></script>
<script charset="UTF-8" src="supportScripts/rapydscript.js"></script>
<script type="text/javascript" src="supportScripts/GlowScript.js"></script>
</head>
<body>
<div id="lyte"> </div>
<div id="glowscript" class="glowscript"> </div>
</body>
<script type="text/javascript">var compiler = RapydScript.create_embedded_compiler();
var MYVP = compiler.compile(hereDoc(`##### Python zone begins
{my__main__}
## Python zone ends`)); </script> </html> """
if hasVPimport:
template = VPtemplate
else:
template = simpleTemplate
#print(template)
writeout.writeout(f'{myName}.html', template )
print(f'RS', '-b', f'{myName}.py', '-o', f'{myName}.js \n')
import makemyPYJ
#makemyPYJ(myName = myName + '.py') #make PYJ
print(f'myName {myName}')
RS('-b', f'{myName}.pyj', '-o', f'{myName}.js') #compile the PYJ
plopen(f'{myName}.html')
print('whereami', whereami(__name__))
if whereami(__name__).language == 'Python' and whereami(__name__).name != 'makemyVPHTML':
from vpython import box
if whereami(__name__).visible:
##LYTEML--INCLUDE THIS:
box()
print('hello')
```
#### File: jonschull/Lyte/makePYJs.py
```python
import sys
from plumbum import local
def makeMyPYJ():
"""
When RapydScript is asked to import x,
it looks for x.pyj. Therefore,
to facilitate RapydScript compiles, we
* remove pyj and py-cached
* copy the calling x.py to x.pyj
"""
cp = local['cp']
rm = local['rm']
ls = local['ls']
myName = sys.argv[0] #the calling py
PYJname = myName.replace('.py', '.pyj')
#remove all the ".pyj-cached'files
fileNames=ls().split()
for fileName in fileNames:
if fileName.endswith('.pyj-cached'):
rm(fileName)
cp(myName, PYJname)
print(f'makemyPYJ copied {myName} to {PYJname} and removed *.pyj-cached')
def makeDummyPYJ(baseName, funcNames=[]):
    if not isinstance(funcNames, list):
        funcNames = [funcNames]
f=open(baseName+'.pyj','w')
for funcName in funcNames:
f.write(f"""
def {funcName}(*args, **kwargs):
pass
#this dummy PYJ file ({baseName+'.pyj'}) was generated by makePYJs.py at request of {sys.argv[0]}
""")
    print(f"makePYJs.py executed makeDummyPYJ('{baseName}', '{funcName}')")
f.close()
if __name__== '__main__':
myName = sys.argv[0]
baseName=myName.replace('.py', '')
makeDummyPYJ(baseName, funcNames = ['makeMyPYJ', 'makeDummyPYJ'])
```
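For reference, a call such as `makeDummyPYJ('writeout', ['writeout'])` issued from a hypothetical caller named lyte_build.py would leave a writeout.pyj stub along these lines; it exists only so that RapydScript's `import writeout` resolves during compilation.
```python
def writeout(*args, **kwargs):
    pass
#this dummy PYJ file (writeout.pyj) was generated by makePYJs.py at request of lyte_build.py
```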
#### File: jonschull/Lyte/pyonly2.py
```python
from IS import * #testing framework
import SSstache
import sys
import plumbum
from plumbum import local
from plumbum.cmd import touch, ls, pwd
from plumbum.cmd import open as pbOpen #so as not to conflict with python's open
from plumbum.path.utils import copy #watch out for conflict with python's copy
from plumbum.path.utils import delete as pbDelete
rapydscript = local['/Users/jonschull-MBPR/rapydscript-ng/rapydscript-ng/bin//rapydscript']
def RS(*args, **kwargs):
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
question = inspect.getouterframes(curframe, 2)[1][-2][-1].strip()
lineNumber = inspect.getouterframes(curframe, 2)[1][-4]
if SHOWRS:
from trace import trace
if type(args)==type(()):
print(f'\nline {lineNumber}: rapydscript', args)
trace()
print(f'line {lineNumber}: \t\trapydscript', args)
print()
else:
            print(f'\nline {lineNumber}: rapydscript', ' '.join(args))
Error, output, errorMsg = rapydscript[args].run(retcode=None)
IS('', EQUALTO, errorMsg)
return output
#print(ret.split('plumbum.commands.processes.ProcessExecutionError')[1])
#RS('-x', 'dummy.py')
def lsl(*args):
"""items from ls() returned as list"""
try:
return ls(*args).split()
except plumbum.commands.processes.ProcessExecutionError:
return []
def makeFile(fName, contents="print('makeFile')"):
with open(fName,'w') as f:
f.write(contents)
def contents(pyName=sys.argv[0]):
return open(pyName).read()
if TESTING: #makeFile, contents
makeFile('testMakeFile', 'makefile')
IS(contents('testMakeFile'), EQUALTO, 'makefile' )
pbDelete('testMakeFile')
def baseName(pyName=sys.argv[0]):
return pyName.replace('.py','')
if TESTING:
IS(baseName('somename.py'), EQUALTO, 'somename')
def myHTML(pyName=sys.argv[0]):
jsName = pyName.replace('.py','.js')
return f"""<html>
<head> <meta charset="UTF-8">
<title>{pyName}</title>
</head>
<body>
<script type="text/javascript" language="javascript"
src="{jsName}">
</script>
</body>
</html>
<!--------------------------------------------------------
This file (index.html) and the other files in this folder
were generated by lyte2.py so that the python script named {pyName}
can run in the browser, using the javascript version named {jsName}
---------------------------------------------------------------->
"""
if TESTING:
IS('lyte2.py', IN, myHTML() )
IS(f'{baseName(sys.argv[0])}.js', IN, myHTML(sys.argv[0]) )
makeFile('dummy.py', "print('I am dummy.py')")
IS('dummy.js', IN, myHTML('dummy.py') )
IS('<title>dummy.py</title>', IN, myHTML('dummy.py'))
pbDelete('dummy.py')
pbDelete('dummy')
def makeMyDir(pyName=sys.argv[0]):
ret= AttrDict() #for testing
if pyName:
ret.fileName=pyName
else:
ret.fileName=sys.argv[0]
SSstache.makeHTMLdir(ret.fileName)
return ret
if TESTING:
makeMyDir()
myName= sys.argv[0]
IS(myName, IN, lsl(baseName(myName) ))
IS('supportScripts', IN, lsl(baseName(myName)) )
makeFile('dummy.py', "print('I am dummy.py')" )
makeMyDir('dummy.py')
IS('dummy.py', IN, lsl('dummy') )
IS('supportScripts', IN, lsl('dummy') )
pbDelete('dummy.py')
pbDelete('dummy')
def makeMyIndexHTML(pyName=sys.argv[0]):
makeMyDir(pyName)
myDir=baseName(pyName)
contents(pyName)
makeFile(f'{myDir}/index.html', myHTML(pyName) )
if TESTING:
makeFile('makeMyIndex.py', 'this is to test makeFile and makeMyIndex')
makeMyIndexHTML('makeMyIndex.py')
IS('<html>', IN, contents('makeMyIndex/index.html') )
pbDelete('makeMyIndex')
pbDelete('makeMyIndex.py')
def makeMyJS(pyName=sys.argv[0]):
"""assumes myDir exists"""
myDirName = baseName(pyName)
ret = RS('-x', pyName) #for error checking
RS(pyName, '-o', f"{myDirName}/{myDirName+'.js'}")
return ret
if TESTING:#TESTING: #test makeMyIndexHTML and makeMyJS
makeFile( 'dummy.py', "print('hello dummy')" )
makeMyDir('dummy.py')
makeMyIndexHTML('dummy.py')
myDirName = baseName('dummy.py')
IS('index.html', IN, lsl(myDirName) )
IS('src="dummy.js"', IN, contents(f'{myDirName}/index.html' ) )
IS('<html>', IN, contents(f'{myDirName}/index.html' ) )
programOutput = makeMyJS('dummy.py')
IS('dummy.js', IN, lsl('dummy') )
IS('hello dummy\n', EQUALTO, programOutput)
pbDelete('dummy.py')
pbDelete('dummy')
def lyten(pyName=sys.argv[0]):
"""presumes pyName exists"""
makeMyDir(pyName)
makeMyJS(pyName)
makeMyIndexHTML(pyName)
dirName = baseName(pyName)
indexPath = baseName(pyName)+'/index.html'
print(f'\n|| Created folder with {indexPath}\n')
return indexPath
if 0:#TESTING:
makeFile('dummy.py', "print('this is from lyten')" )
ret = lyten('dummy.py')
IS(ret, EQUALTO, 'dummy/index.html')
pbDelete('dummy')
pbDelete('dummy.py')
#pbOpen(f'dummy/index.html')
#NEED BETTER TEST
def enLytenMe():
lyten() #with no parameters
def runMe(openBrowser=False):
print('_'*20, 'running under rapyscript', '_'*20)
if sys.argv[0] == __file__:
print(f"SORRY: runMe() won't work on {__file__}")
else:
ret = RS('-x', sys.argv[0])
if ret:
print(ret)
if openBrowser:
indexPath = f'{baseName(sys.argv[0])}/index.html'
pbOpen(indexPath)
def testLyte():
python=local['python3']
ret = python('lyte2.py')
print(ret)
return ret
#lyten(f'{pwd().strip()}/import_lyte2/import_lyte2.py')
if TESTING:
print(f"""
___________________________
|Test Summary
|YES: {RESULTS.yes:4}
|NO: {RESULTS.no:4}
""")
#create dummy pyonly.pyj with dummy functions for rapydscript
pyjFile=open('pyonly2.pyj','w')
pyjFile.write('#### pyj is here to satisfy Rapydsript import ####\n\n')
for dirObj in dir():
if not dirObj.startswith('__'):
#print( dirObj)
pyjFile.write(f'def {dirObj}(*args, **kwargs): pass\n')
pyjFile.close()
if __name__=='__main__':
makeFile('hello.py', "print('hello from hello.py')")
if len( sys.argv ) > 1:
lyten('hello.py')
else:
print(f'{sys.argv[0]}: try python3 {sys.argv[0]} hello.py')
```
#### File: jonschull/Lyte/testFLDJS.py
```python
from FDLJS import run
#IRS scam? 2023350440
from vpython import sphere
from vpython import vector as V
from attrthing import AttrThing, gimme
#import makemyVPHTML
edges=[(1,2),(2,3), (3,1), (3,4), (4,5), (4,6), (5,6)]
def IDsFromEdges(edges):
IDs=[]
for t in edges:
IDs.append(str(t[0]))
IDs.append(str(t[1]))
return set(IDs)
spheres=AttrThing()
IDs = IDsFromEdges(edges)
for ID in IDs:
spheres[ID] = sphere()
def updateSpheres(nodes):
global spheres,k,v
for k,v in nodes.items():
x,y,z = v['velocity']
#print('--', k, spheres[k])
spheres[str(k)].pos = V(x,y,z)
#Convert to internal representation
edges = [AttrThing(source = str(s), target = str(t)) for s, t in edges]
# Generate nodes
nodes = run(edges, iterations= 3000, updateNodes= updateSpheres, is_3d=False)
```
#### File: jonschull/Lyte/test_SSstache.py
```python
from SSstache import *
from plumbum.path.utils import delete
from plumbum.cmd import ls, touch, mkdir
def test_makeSupportScriptStache():
delete('xyz')
assert makeSupportScriptStache(stacheDir='xyz').endswith('xyz')
assert ls('xyz').split()==['RSrun.2.7.min.js', 'glow.2.7.min.js', 'ide.css', 'jquery-ui.custom.css', 'jquery-ui.custom.min.js', 'jquery.min.js']
delete('xyz')
def test_prepareHTMLdir():
delete('xyz')
prepareHTMLdir('xyz')
assert('xyz' in ls().strip())
delete('xyz')
def test_makeHTMLdir():
HTMLdirName = '123'
delete( HTMLdirName )
fakeSSname = 'fakeSupportScripts'
delete(fakeSSname)
mkdir(fakeSSname)
scriptNames=['xyz.test', 'xyz2.test']
for scriptName in scriptNames:
touch(f'{fakeSSname}/{scriptName}')
makeHTMLdir( HTMLdirName ,
stacheDir = fakeSSname,
GLOWPATH='.',
scriptNames= scriptNames)
assert('supportScripts' in ls( HTMLdirName ).split() )
assert( ls('123/supportScripts').split() == scriptNames )
delete( HTMLdirName )
delete(fakeSSname)
def test_putInHTMLdir():
open('box2.py','w').write('box(color=color.green)')
putInHTMLdir('box2.py')
assert( 'box2.py' in ls('box2').split() )
delete('box2.py')
delete('box2')
#prepareHTMLdir(dirName='xyz')
#test_makeHTMLdir()
```
#### File: jonschull/Lyte/trace.py
```python
import traceback
def trace(*args, **kwargs):
global lines, stack, ret
ret = ''
stack= traceback.format_stack()
#stack.reverse()
print('Your call sequence:')
for i,line in enumerate(stack[1:-2]):
lines=line.split('\n')
lines[0] = lines[0].split('/')[-1].replace(',','\t\t\t')
if not lines[0].startswith('backend'):
ret = f'step{i:2} {lines[0]}\t\t\t\t' + lines[1].replace('args',f'{args}')
#print('\n', ret.split(),'\n')
word, step, file, word, line, word, caller, called = ret.split()[:8]
other = ''.join(ret.split()[7:])
file=file.replace('"','')
print(f'line {line:3}\t\t{other} \t\t\t\t\t\t\t {file}')
def trace_caller(*args,**kwargs):
trace()
def trace_caller_caller(*args,**kwargs):
trace_caller()
if __name__=='__main__':
trace_caller_caller(10)
``` |
{
"source": "jonschwadron/tictactoe-django",
"score": 2
} |
#### File: tictactoe-django/app/views.py
```python
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse, JsonResponse
import json
from .models import Player
from .models import Game
def index(request):
return render(request, "app/index.html")
def exit_x(request, game_id):
game = Game.objects.get(id=game_id)
game.playerX = None
if game.playerO == None:
game.delete()
else:
game.status = 'waiting_for_players'
game.save()
return redirect ('app:index', permanent=True)
def exit_o(request, game_id):
game = Game.objects.get(id=game_id)
game.playerO = None
if game.playerX == None:
game.delete()
else:
game.status = 'waiting_for_players'
game.save()
return redirect ('app:index', permanent=True)
def join_x(request):
# get alias from POST
alias = request.POST['alias']
# get if alias exists else create alias in Player model
player, created = Player.objects.get_or_create(alias=alias)
# check if empty playerX and waiting_for_players status exists
# if found, join the first game_id in the list and change status to ready_to_play
if Game.objects.filter(playerX=None, status="waiting_for_players").exists():
game = Game.objects.filter(playerX=None, status="waiting_for_players").first()
game.playerX = player
game.status = 'ready_to_play'
game.save()
# if none found, create game with playerX assigned and set status to waiting_for_players
else:
game = Game.objects.create(status="waiting_for_players", playerX=player)
game.save()
return render(request, 'app/join.html', {
'game': game,
'xState': json.dumps(game.xState),
'oState': json.dumps(game.oState),
})
def join_o(request):
# get alias from POST
if 'alias' not in request.POST:
alias = None
else:
alias = request.POST['alias']
# get if alias exists else create alias in Player model
player, created = Player.objects.get_or_create(alias=alias)
    # check if a game with an empty playerO slot and waiting_for_players status exists
    # if found, join the first game_id in the list and change status to ready_to_play
if Game.objects.filter(playerO=None, status="waiting_for_players").exists():
game = Game.objects.filter(playerO=None, status="waiting_for_players").first()
game.playerO = player
game.status = 'ready_to_play'
game.save()
    # if none found, create game with playerO assigned and set status to waiting_for_players
else:
game = Game.objects.create(status="waiting_for_players", playerO=player)
game.save()
return render(request, 'app/join.html', {
'game': game,
'xState': json.dumps(game.xState),
'oState': json.dumps(game.oState),
})
def play_x(request, game_id):
game = Game.objects.get(id=game_id)
game.status = "game_in_progress"
game.save()
return render(request, 'app/game.html', {
'game': game,
'game_id': json.dumps(game.id),
'game_nextPlayer': json.dumps(game.nextPlayer),
})
def play_o(request, game_id):
game = Game.objects.get(id=game_id)
return render(request, 'app/game.html', {
'game': game,
'game_id': json.dumps(game.id),
'game_nextPlayer': json.dumps(game.nextPlayer),
})
def updateGameData(request, game_id):
game = Game.objects.get(id=game_id)
currentPlayer = game.nextPlayer
position = request.POST['tilePosition']
position = int(position)
game.tiles[position] = currentPlayer
winningPlays = [[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[0, 3, 6],
[1, 4, 7],
[2, 5, 8],
[0, 4, 8],
[2, 4, 6]]
if (any(j is None for j in game.tiles)):
game.nextPlayer = request.POST['nextPlayer']
else:
game.winner = 'T'
game.status = 'finished'
for i in winningPlays:
#check if winning play exist
if (game.tiles[i[0]] == currentPlayer and
game.tiles[i[1]] == currentPlayer and
game.tiles[i[2]] == currentPlayer):
game.winner = currentPlayer
game.status = 'finished'
game.save()
return HttpResponse(status=200)
## TODO: consolidate these functions
def getNextPlayer(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(json.dumps(game.nextPlayer))
def getTiles(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(json.dumps(game.tiles))
def getWinner(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(json.dumps(game.winner))
def updateStatusX(request, game_id):
game = Game.objects.get(id=game_id)
game.xState = request.POST['xState']
game.save()
return HttpResponse(status=200)
def updateStatusO(request, game_id):
game = Game.objects.get(id=game_id)
game.oState = request.POST['oState']
game.save()
return HttpResponse(status=200)
def getStatusX(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(json.dumps(game.xState))
def getStatusO(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(json.dumps(game.oState))
def getPlayerX(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(game.playerX)
def getPlayerO(request, game_id):
game = Game.objects.get(id=game_id)
return HttpResponse(game.playerO)
``` |
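`updateGameData` above first marks a full board as a tie and then lets the winning-play scan override that if the final move completes a line. The standalone Python sketch below (an editorial illustration, not part of the app and requiring no Django) replays the same check against a plain nine-element board to make that ordering concrete.
```python
WINNING_PLAYS = [[0, 1, 2], [3, 4, 5], [6, 7, 8],
                 [0, 3, 6], [1, 4, 7], [2, 5, 8],
                 [0, 4, 8], [2, 4, 6]]
def resolve(tiles, current_player):
    """Return 'X'/'O' for a win, 'T' for a tie, or None while the game continues."""
    winner = None if any(t is None for t in tiles) else 'T'  # a full board defaults to a tie
    for a, b, c in WINNING_PLAYS:                            # a completed line overrides the tie
        if tiles[a] == tiles[b] == tiles[c] == current_player:
            winner = current_player
    return winner
# X completes the top row on the last free square: a win, not a tie
print(resolve(['X', 'X', 'X', 'O', 'O', 'X', 'X', 'O', 'O'], 'X'))   # X
print(resolve(['X', None, None, None, None, None, None, None, None], 'X'))  # None
```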
{
"source": "jonschwenk/rabpro",
"score": 2
} |
#### File: rabpro/rabpro/merit_utils.py
```python
import numpy as np
import pandas as pd
from shapely import ops
from shapely.geometry import Polygon, LineString
from scipy.ndimage.morphology import distance_transform_edt
from rabpro import utils as ru
def trace_flowpath(
fdr_obj,
da_obj,
cr_stpt,
cr_enpt=None,
n_steps=None,
fmap=[32, 64, 128, 16, 1, 8, 4, 2],
):
"""Walks along a flow direction grid from stpt to enpt. Returns a list of
pixels from stpt to enpt. Walks from downstream to upstream.
Parameters
----------
    fdr_obj : gdal.Dataset
        Flow direction raster opened with gdal.Open(). Assumes flow direction
        symbology matches MERIT-Hydro:
            32  64  128
            16       1
             8   4   2
    da_obj : gdal.Dataset
        Drainage area raster opened with gdal.Open(); used to choose among
        multiple upstream contributors by walking toward the neighbor with
        the largest drainage area.
    cr_stpt : tuple
        column, row of point to start walk
    cr_enpt : tuple, optional
        column, row of point to end walk. By default None
    n_steps : int, optional
        number of steps (pixels) to walk before halting. By default None
fmap : list, optional
[NW, N, NE, W, E, SW, S, SE], by default [32, 64, 128, 16, 1, 8, 4, 2]
Returns
-------
    tuple of numpy.ndarray
        (rows, cols) arrays of the pixels along the traced flowpath, ordered
        from the start point upstream.
"""
imshape = (fdr_obj.RasterXSize, fdr_obj.RasterYSize)
# Make array specifying the fdir values that flow into the center cell
intodirs = np.array([fv for fv in fmap][::-1], dtype=np.uint8)
intodirs = np.insert(
intodirs, 4, 3
) # 3 is a dummy value that should not appear in the fdir_obj values
# Make dictionaries for rows and columns to add for a given fdr value
rowdict = {}
coldict = {}
for ifd, fd in enumerate(fmap):
if ifd < 3:
rowdict[fd] = 1
elif ifd < 5:
rowdict[fd] = 0
else:
rowdict[fd] = -1
if ifd in [0, 3, 5]:
coldict[fd] = 1
elif ifd in [1, 6]:
coldict[fd] = 0
else:
coldict[fd] = -1
stpti = np.ravel_multi_index(cr_stpt, imshape)
da = [
da_obj.ReadAsArray(
xoff=int(cr_stpt[0]), yoff=int(cr_stpt[1]), xsize=1, ysize=1
)[0][0]
]
do_pt = [stpti]
ct = 0
while 1:
cr = np.unravel_index(do_pt[-1], imshape)
# First find all the candidate pixels that drain to this one
nb_fdr = (
neighborhood_vals_from_raster(cr, (3, 3), fdr_obj, nodataval=np.nan)
.reshape(1, 9)
.flatten()
)
# nb_fdr = fdr_obj.ReadAsArray(xoff=int(cr[0])-1, yoff=int(cr[1])-1, xsize=3, ysize=3).reshape(1, 9).flatten()
candidates = np.where(nb_fdr == intodirs)[0]
if len(candidates) == 0:
break
elif len(candidates) == 1:
fdr = nb_fdr[candidates[0]]
else:
nb_das = (
neighborhood_vals_from_raster(cr, (3, 3), da_obj, nodataval=np.nan)
.reshape(1, 9)
.flatten()
)
# nb_das = da_obj.ReadAsArray(xoff=int(cr[0])-1, yoff=int(cr[1])-1, xsize=3, ysize=3).reshape(1,9).flatten()
fdr = nb_fdr[candidates[np.argmax(nb_das[candidates])]]
# Take the step
row = cr[1] + rowdict[fdr]
col = cr[0] + coldict[fdr]
# Handle meridian wrapping
if col < 0:
col = fdr_obj.RasterXSize + col
elif col > fdr_obj.RasterXSize - 1:
col = col - fdr_obj.RasterXSize
do_pt.append(np.ravel_multi_index((col, row), imshape))
da.append(
da_obj.ReadAsArray(xoff=int(col), yoff=int(row), xsize=1, ysize=1)[0][0]
)
# Halt if we've reached the endpoint
if cr == cr_enpt:
break
# Halt if we've reached the requested length
ct = ct + 1
if ct == n_steps:
break
colrow = np.unravel_index(do_pt, imshape)
return (colrow[1], colrow[0])
def neighborhood_vals_from_raster(cr, shape, vrt_obj, nodataval=np.nan, wrap=None):
"""
Queries a (virtual) raster object to return an array of neighbors
surrounding a given point specified by cr (column, row). A shape can be
provided to return as large of a neighborhood as desired; both dimensions
must be odd. This function is almost always unnecessary and could be
replaced with a single call to gdal's ReadAsArray(), except that throws
errors when requesting a neighborhood that is beyond the boundaries of the
raster. Also note that requests for negative offsets do not throw errors,
which is dangerous. This function checks for those cases and handles them.
An option is provided to 'wrap' in cases where neighborhoods are requested
beyond the bounds of the raster. In these cases, the raster is effectively
"grown" by appending copies of itself to ensure no nodata are returned.
(This isn't actually how the code works, just the simplest explanation.)
Parameters
----------
cr : tuple
(column, row) indices within the virtual raster specifying the point
around which a neighborhood is requested.
shape : tuple
Two-element tuple (nrows, ncols) specifying the shape of the
neighborhood around cr to query.
vrt_obj : gdal.Dataset
Dataset object pointing to the raster from which to read; created by
gdal.Open(path_to_raster).
nodataval : object
Value to assign neighbors that are beyond the bounds of the raster. By
default np.nan.
wrap : str or None
String of 'h', 'v', or 'hv' denoting if horizontal and/or vertical
wrapping is desired. If None, no wrapping is performed. By default None.
Returns
-------
Ivals : np.array
Array of same dimensions as shape containing the neighborhood values.
"""
    # denotes nan in an integer array since np.nan can't be stored as an integer
    nan_int = -9999
if wrap is None:
wrap = ""
    # Ensure valid sizes provided (both neighborhood dimensions must be odd)
    for s in shape:
        if s % 2 == 0:
            raise RuntimeError("Requested sizes must be odd.")
# Compute offsets
halfcol = int((shape[1] - 1) / 2)
halfrow = int((shape[0] - 1) / 2)
# Ensure not requesting data beyond bounds of virtual raster
imshape_idcs = (vrt_obj.RasterXSize - 1, vrt_obj.RasterYSize - 1)
max_requ_c = cr[0] + halfcol
min_requ_c = cr[0] - halfcol
max_requ_r = cr[1] + halfrow
min_requ_r = cr[1] - halfrow
c_idcs = np.arange(min_requ_c, max_requ_c + 1)
r_idcs = np.arange(min_requ_r, max_requ_r + 1)
# Handle beyond-boundary cases
individually_flag = False
if max_requ_c > imshape_idcs[0]:
individually_flag = True
replace = c_idcs > imshape_idcs[0]
if "h" in wrap:
c_idcs[replace] = np.arange(0, np.sum(replace))
else:
c_idcs[replace] = nan_int
if max_requ_r > imshape_idcs[1]:
individually_flag = True
replace = r_idcs > imshape_idcs[1]
if "v" in wrap:
r_idcs[replace] = np.arange(0, np.sum(replace))
else:
r_idcs[replace] = nan_int
if min_requ_c < 0:
individually_flag = True
replace = c_idcs < 0
if "h" in wrap:
c_idcs[replace] = np.arange(
imshape_idcs[0], imshape_idcs[0] - np.sum(replace), -1
)
else:
c_idcs[replace] = nan_int
if min_requ_r < 0:
individually_flag = True
replace = r_idcs < 0
if "v" in wrap:
r_idcs[replace] = np.arange(
imshape_idcs[1], imshape_idcs[1] - np.sum(replace), -1
)
else:
r_idcs[replace] = nan_int
if individually_flag is True:
Ivals = np.ones(shape).T * nodataval
for ic, c in enumerate(c_idcs):
for ir, r in enumerate(r_idcs):
if c == nan_int or r == nan_int:
continue
else:
Ivals[ir, ic] = vrt_obj.ReadAsArray(
xoff=int(c), yoff=int(r), xsize=int(1), ysize=int(1)
)[0][0]
else:
Ivals = vrt_obj.ReadAsArray(
xoff=int(cr[0] - halfcol),
yoff=int(cr[1] - halfrow),
xsize=int(shape[1]),
ysize=int(shape[0]),
)
return Ivals
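# Illustrative usage sketch (not part of the original module; the raster path
# below is hypothetical):
#
#   from osgeo import gdal
#   fdr_obj = gdal.Open('flow_directions.tif')
#   nbhd = neighborhood_vals_from_raster((100, 50), (3, 3), fdr_obj,
#                                        nodataval=-999, wrap='h')
#   # nbhd is a 3x3 array centered on column 100, row 50; cells beyond the
#   # raster edge hold nodataval unless wrapping is requested.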
def get_basin_pixels(start_cr, da_obj, fdr_obj, fdir_map=[32, 64, 128, 16, 1, 8, 4, 2]):
"""Returns the indices of all pixels draining to the pixel defined by
start_cr.
Parameters
----------
start_cr : [type]
[description]
da_obj : [type]
[description]
fdr_obj : [type]
[description]
fdir_map : list, optional
[NW, N, NE, W, E, SW, S, SE], by default [32, 64, 128, 16, 1, 8, 4, 2]
Returns
-------
[type]
[description]
"""
# Make arrays for finding neighboring indices
imshape = (fdr_obj.RasterXSize, fdr_obj.RasterYSize)
intodirs = np.flipud(np.array(fdir_map, dtype=np.uint8))
intodirs = np.insert(
intodirs, 4, -99998
) # Need the center element to be a value not possible in the fdr_obj grid
coladds = np.array((-1, 0, 1, -1, 0, 1, -1, 0, 1)) * imshape[1]
rowadds = np.array((-1, -1, -1, 0, 0, 0, 1, 1, 1))
start_idx = np.ravel_multi_index(start_cr, imshape)
done = set()
todo = set([start_idx])
while todo:
doidx = todo.pop()
done.add(doidx)
do_cr = np.unravel_index(doidx, imshape)
nb_fdr = (
neighborhood_vals_from_raster(
do_cr, (3, 3), fdr_obj, nodataval=-999, wrap="h"
)
.reshape(1, 9)
.flatten()
)
where_into = intodirs == nb_fdr
if where_into.sum() == 0:
continue
# Adjust for near-boundary cases, only perform column wrapping though
if do_cr[0] == 0 or do_cr[0] == imshape[0] - 1:
neighs_into = []
wii = np.where(where_into)[0]
for wi in wii:
temp_col = int(do_cr[0] + coladds[wi] / imshape[1])
if temp_col < 0:
temp_col = imshape[0] + temp_col
elif temp_col > imshape[0] - 1:
temp_col = temp_col - imshape[0]
neighs_into.append(
np.ravel_multi_index((temp_col, do_cr[1] + rowadds[wi]), imshape)
)
else:
neighs_into = doidx + rowadds[where_into] + coladds[where_into]
for ni in neighs_into:
if ni not in done:
todo.add(ni)
return list(done)
def blob_to_polygon_shapely(I, ret_type="coords", buf_amt=0.001):
"""
    Returns a list of polygons or coords tracing the perimeter of each blob in I.
    Parameters
    ----------
    I : numpy.ndarray
        Binary image containing the blob(s) to polygonize.
    ret_type : str, optional
        Type of data to return. Either "coords" or "pgon". The default is
        "coords".
    buf_amt : numeric, optional
        Amount by which to buffer each pixel polygon before unioning; helps
        close tiny gaps. By default 0.001.
    Raises
    ------
    ValueError
        If `ret_type` is not "coords" or "pgon".
    Returns
    -------
    ret : list of numpy.ndarray or shapely.geometry.Polygon
        Perimeter coordinates (2xN arrays) or polygons, one entry per blob.
"""
# Storage
ret = []
# Get perimeter pixels of the blob
rp, _ = ru.regionprops(I, props=["perimeter"])
for p in rp["perimeter"]:
# Make slightly-buffered shapely polygons of each pixel's outline
pix_pgons = []
for x, y in zip(p[:, 1], p[:, 0]):
pix_pgons.append(
Polygon(
[(x, y), (x + 1, y), (x + 1, y + 1), (x, y + 1), (x, y)]
).buffer(buf_amt)
)
# Union the polygons and extract the boundary
unioned = ops.unary_union(pix_pgons).buffer(-buf_amt)
(
perimx,
perimy,
) = (
unioned.exterior.xy
) # I think unioned should always be a polygon and thus not throw errors, but not sure--could make MultiPolygons
if ret_type == "coords":
ret.append(np.vstack((perimx, perimy)))
elif ret_type == "pgon":
ret.append(Polygon(zip(perimx, perimy)))
else:
raise ValueError('Choose either "coords" or "pgon" as return types.')
return ret
def idcs_to_geopolygons(idcs, gdobj, buf_amt=0.001):
"""
    Given a list of pixel indices within a raster specified by gdobj, creates
georeferenced polygons of the blobs formed by the union of the pixels.
"Wrapping" is also checked - this is to handle cases where the dateline
meridian is crossed and return is therefore a set of polygons rather than a
continuous one.
Parameters
----------
idcs : list of integers
Pixel indices within the raster specified by gdobj that should be
included in the polygon.
gdobj : osgeo.gdal.Dataset
Object created by gdal.Open() on a raster or virtual raster.
buf_amt : numeric, optional
        Amount by which to buffer pixels before unioning; helps close tiny gaps.
By default 0.001.
Returns
-------
pgons : list of shapely.geometry.Polygon
List of georeferenced polygons; one per blob of indices.
crossing : bool
If True, the polygons represent those that cross the dateline meridian
(i.e. 180 degrees -> -180 degrees) and have been split.
"""
def Icr_to_geopolygon(cr, mins, maxs, gt):
# mins : [xmin, ymin]
        # maxs : [xmax, ymax]
# Ishape : nrows, ncols in I
# gt : geotransform
pgons = []
ncols, nrows = maxs[0] - mins[0] + 1, maxs[1] - mins[1] + 1
I = np.zeros((nrows, ncols), dtype=bool)
I[cr[1] - mins[1], cr[0] - mins[0]] = True
coords = blob_to_polygon_shapely(I, ret_type="coords", buf_amt=0.001)
for c in coords:
coords_trans = ru.xy_to_coords(
c[0] + mins[0] - 0.5, c[1] + mins[1] - 0.5, gdobj.GetGeoTransform()
)
pgons.append(Polygon(zip(coords_trans[0], coords_trans[1])))
return pgons
# Storage
pgons = []
# Transform the coordinates
imshape = (gdobj.RasterXSize, gdobj.RasterYSize)
cr = np.unravel_index(idcs, imshape)
# Max/min of coordinates
xmax, xmin = np.max(cr[0]), np.min(cr[0])
ymax, ymin = np.max(cr[1]), np.min(cr[1])
# Check for wrapping
crossing = False
if xmax - xmin >= imshape[0] - 1: # We have wrapping
crossing = True
# Split into west and east groups
west = cr[0] < (imshape[0] - 1) / 2
east = ~west
for ew in [east, west]:
cr_ew = np.vstack((cr[0][ew], cr[1][ew]))
xmax, xmin = np.max(cr_ew[0]), np.min(cr_ew[0])
ymax, ymin = np.max(cr_ew[1]), np.min(cr_ew[1])
pgons.extend(
Icr_to_geopolygon(
cr_ew, (xmin, ymin), (xmax, ymax), gdobj.GetGeoTransform()
)
)
else:
pgons.extend(
Icr_to_geopolygon(cr, (xmin, ymin), (xmax, ymax), gdobj.GetGeoTransform())
)
return pgons, crossing
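# Illustrative usage sketch (hypothetical rasters and pixel location, not part
# of the original module): delineate the basin draining to a pixel and
# polygonize it.
#
#   from osgeo import gdal
#   fdr_obj = gdal.Open('flow_directions.tif')
#   da_obj = gdal.Open('flow_accumulation.tif')
#   idcs = get_basin_pixels((1205, 3410), da_obj, fdr_obj)
#   pgons, crossing = idcs_to_geopolygons(idcs, fdr_obj)
#   # pgons is a list of georeferenced shapely Polygons; crossing is True if
#   # the basin was split at the dateline.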
def nrows_and_cols_from_search_radius(lon, lat, search_radius, gt):
"""[summary]
Parameters
----------
lon : [type]
[description]
lat : [type]
[description]
search_radius : numeric
search radius in meters
gt : [type]
[description]
Returns
-------
nrows : numeric
[description]
ncols : numeric
[description]
"""
# Determine the number of rows and columns to search
los, las = [], []
for b in [0, 180, 90, 270]:
lo, la = ru.lonlat_plus_distance(lon, lat, search_radius / 1000, bearing=b)
los.append(lo)
las.append(la)
boundsxy = ru.lonlat_to_xy(
np.array([min(los), max(los)]), np.array([min(las), max(las)]), gt
)
nrows = abs(boundsxy[0, 1] - boundsxy[1, 1])
ncols = abs(boundsxy[0, 0] - boundsxy[1, 0])
return nrows, ncols
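# Illustrative example (hypothetical point and raster, not part of the
# original module): convert a 500 m search radius into a pixel window for the
# geotransform of an open drainage-area raster.
#
#   nrows, ncols = nrows_and_cols_from_search_radius(
#       -91.2, 35.6, 500, da_obj.GetGeoTransform())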
def map_cl_pt_to_flowline(
lonlat, da_obj, nrows, ncols, da=None, basin_pgon=None, fdr_obj=None, fdr_map=None
):
"""
Maps a point of known drainage area to a flowline of a flow accumulation
grid. Returns the row, col of the mapped-to pixel. User may provide a basin
polygon (in EPSG:4326) if already known. This polygon will be used to ensure
the mapped-to-flowline is the correct one. If the basin polygon is provided,
    a flow directions object and its mapping must also be provided as well as the
drainage area.
Parameters
----------
lonlat : list or tuple
Two-element list/tuple containing (longitude, latitude) coordinates of
the point to map to a flowline.
da_obj : osgeo.gdal.Dataset
Flow accumulation object. Created by gdal.Open() on raster containing
flow accumulations.
nrows : int
Number of rows in the neighborhood of the point to search.
ncols : int
        Number of columns in the neighborhood of the point to search.
da : float, optional
Drainage area of the point/gage if known. Units should correspond to
those in da_obj, typically km^2. By default None.
basin_pgon : shapely.geometry.polygon.Polygon, optional
Polygon of the watershed of the point, if known. By default None.
fdr_obj : osgeo.gdal.Dataset, optional
Flow direction object. Created by gdal.Open() on raster containing flow
directions. Must be specified in order to use the basin_pgon. By default
None.
fdr_map : list, optional
8-entry list corresponding to the numeric value for flow directions. The
list should take the form [NW, N, NE, W, E, SW, S, SE]. By default None.
Returns
-------
    (c_mapped, r_mapped) : tuple
        Column and row of the mapped point. If no mapping is possible,
        (np.nan, np.nan) is returned. The column and row are with respect to
        the da_obj grid.
solve_method : int
Indicates the reason why mapping succeeded/failed:
1 - (success) DA provided; a nearby flowline pixel was found within 15% of the provided DA
2 - (success) DA provided; match was found on a nearby flowline that is within our DA certainty bounds
3 - (success) basin polygon provided; a mappable flowline was found
4 - (success) DA not provided; mapped to the nearest flowline (>1km^2)
5 - (fail) DA not provided; no nearby flowlines exist
6 - (fail) DA provided; but no nearby DAs were close enough to map to
7 - (fail) basin polygon provided; but no nearby DAs were within the allowable range
8 - (fail) basin polygon provided; no flowlines were 25% within the provided basin
"""
# Check if we have all the required inputs for a basin polygon comparison
if basin_pgon is not None:
if fdr_map is None or fdr_obj is None or da is None:
print(
"You provided a basin polygon but not the drainage area, flow directions, or flow directions map. Cannot use polygon."
)
basin_compare = False
else:
basin_compare = True
else:
basin_compare = False
# Need odd window values for the value-puller
if nrows % 2 == 0:
nrows = nrows + 1
if ncols % 2 == 0:
ncols = ncols + 1
# Get an image of the drainage areas in the neighborhood
cr = ru.lonlat_to_xy(lonlat[0], lonlat[1], da_obj.GetGeoTransform())
pull_shape = (nrows, ncols)
Idas = neighborhood_vals_from_raster(cr[0], pull_shape, da_obj, nodataval=np.nan)
# check to make sure Idas is not all nan?
# np.isnan(Idas).all()
# Make an error image based on provided drainage area, if provided
# This is difficult to do correctly because there are uncertainties with
# the provided drainage area, as well as uncertainties in the MERIT
# drainage area. Furthermore, larger uncertainties are expected for
# larger drainage basins, and smaller for smaller.
def get_DA_error_bounds(da):
"""
Returns the upper and lower drainage area values for a given target
drainage area; the bounds correspond to the range of MERIT drainage
areas to consider as candidates for mapping.
The idea is that smaller basins (order 1-10 km^2) are allowed a greater
% difference when searching for the point to map, while larger ones are
        permitted smaller % differences. The reverse is true if considering
absolute differences (i.e. 10 km^2 error is a much higher % for a 1 km^2
basin than a 1000 km^2 basin).
"""
# Make a set of points defining the allowable % error vs. da curve.
# This curve will be linearly interpolated for the provided da to
# return the upper and lower bounds.
das = [0.01, 0.1, 1, 100, 1000, 10000, 100000]
pcts = [100, 75, 50, 25, 20, 17, 15]
pct = np.interp(da, das, pcts)
interval = np.abs(da * pct / 100)
upper = da + interval
lower = max(da - interval, 1)
lower = min(lower, da) # In case provided DA is less than 1
return lower, upper
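    # Worked example of the bounds (illustrative, not from the original
    # source): for a provided da of 1 km^2 the interpolated tolerance is 50%,
    # so interval = 0.5 and get_DA_error_bounds(1) returns (1, 1.5); the lower
    # bound is clipped at 1 km^2 unless the provided da is itself below 1.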
# Use the known watershed geometry to map the coordinate
if basin_compare is True:
nrows_half = int(nrows / 2 + 0.5) - 1
ncols_half = int(ncols / 2 + 0.5) - 1
# Set some parameters
gt = da_obj.GetGeoTransform()
thresh_DA_min, thresh_DA_max = get_DA_error_bounds(da)
max_trace_dist = 100 # maximum distance to trace a centerline, in kilometers
max_trace_pixels = max(
25, int(max_trace_dist / (111 * gt[1]))
) # rough approximation of # pixels, minimum of 25
# Possible pixels to map to
ppr, ppc = np.where(np.logical_and(Idas > thresh_DA_min, Idas < thresh_DA_max))
# If there are no pixels within our threshold DA, the point is
# unmappable
if len(ppr) == 0:
return (np.nan, np.nan), 7
ppda = Idas[ppr, ppc]
ppi = np.ravel_multi_index((ppr, ppc), Idas.shape)
# Keep track of pixels with DataFrame
df = pd.DataFrame(data={"idx": ppi, "da": ppda, "row": ppr, "col": ppc})
df = df.sort_values(by="da", ascending=False)
# To globalize the rows, cols
c_topleft = cr[0][0] - ncols_half
r_topleft = cr[0][1] - nrows_half
# Resolve the flowlines
cl_trace_ls = []
cl_trace_local_idx = []
while len(df) > 0:
cl, rl = df["col"].values[0], df["row"].values[0]
cr_stpt = (c_topleft + cl, r_topleft + rl)
rc = trace_flowpath(
fdr_obj,
da_obj,
cr_stpt,
cr_enpt=None,
n_steps=max_trace_pixels,
fmap=fdr_map,
)
# Remove the possible pixels from the DataFrame that our flowline
# trace already traverses
r_local = rc[0] - cr[0][1] + nrows_half
c_local = rc[1] - cr[0][0] + ncols_half
            # Crude boundary handling, but cases requiring it are rare
out_of_bounds = np.logical_or(r_local < 0, r_local >= Idas.shape[0])
out_of_bounds = out_of_bounds + np.logical_or(
c_local < 0, c_local >= Idas.shape[1]
)
r_local = r_local[~out_of_bounds]
c_local = c_local[~out_of_bounds]
idx_local = np.ravel_multi_index((r_local, c_local), Idas.shape)
df = df[df["idx"].isin(idx_local) == False]
# Store the flowline information.
# Skip cases where flowpath is a single pixel.
# These *should* never be the true flowpath due to ensuring that
# mapping is only attempted above some threshold DA (which is
# much bigger than any single-pixel's area). We therefore skip them.
if len(rc[0]) > 1:
lo, la = ru.xy_to_coords(rc[1], rc[0], gt)
cl_trace_ls.append(LineString(zip(lo, la)))
# Store the flowline
cl_trace_local_idx.append(idx_local)
# Use the known watershed polygon to determine what fraction of each
# extracted flowline is within the boundaries
fraction_in = [
ls.intersection(basin_pgon).length / ls.length for ls in cl_trace_ls
]
# import geopandas as gpd
# gdf = gpd.GeoDataFrame(geometry=cl_trace_ls, crs=CRS.from_epsg(4326))
# gdf.to_file(r'C:\Users\Jon\Desktop\temp\lstest.shp')
# The highest fraction is the correct flowline
if max(fraction_in) > 0.25:
fl_idx = fraction_in.index(max(fraction_in))
else:
return (np.nan, np.nan), 8
# With the flowline known, we now choose the pixel along it within
# our domain that most closely matches the provided DA.
rl, cl = np.unravel_index(cl_trace_local_idx[fl_idx], Idas.shape)
fl_das = Idas[rl, cl]
        min_da_idx = np.argmin(np.abs(fl_das - da))
row_mapped = rl[min_da_idx] + r_topleft
col_mapped = cl[min_da_idx] + c_topleft
return (col_mapped, row_mapped), 3
# We first check if the point is positioned very-near perfectly to avoid
# moving it around unnecessarily
if da is not None:
if (
np.abs(Idas[int((nrows - 1) / 2), int((ncols - 1) / 2)] - da) / da * 100
<= 15
): # If the coordinate's DA is within 15% of MERIT's, we assume it's correct
col_mapped = cr[0][0]
row_mapped = cr[0][1]
solve_method = 1 # a nearby flowline had a close DA
return (col_mapped, row_mapped), solve_method
# Compute a river network mask. Thresholds for candidate DAs are given by
# the get_DA_error_bounds function.
if da is not None:
lower, upper = get_DA_error_bounds(da)
Irn = np.logical_and(
Idas >= lower, Idas <= upper
) # Threshold to get the flowlines
if (
Irn.sum() == 0
): # If no valid flowlines are found, no mapping can be performed
solve_method = (
6 # A DA was provided but no nearby DAs were close enough to map to
)
return (np.nan, np.nan), solve_method
else: # If no DA was provided, use all local flowlines (assumes DA > 1km^2)
Irn = Idas > 1
if Irn.sum() == 0:
solve_method = 5 # No DA was provided, and no nearby flowlines exist
return (np.nan, np.nan), solve_method
# Compute errors based on distance away from provided coordinates
Idist = np.ones(Idas.shape, dtype=bool)
Idist[int((nrows - 1) / 2), int((ncols - 1) / 2)] = False
# Idist = np.log(distance_transform_edt(Idist) + 1) / np.log(100)
Idist = np.sqrt(distance_transform_edt(Idist))
Idist = Idist / np.max(Idist)
# If DA is provided, create error image that combines distance and DA differences
if da is not None:
Iabs_err = np.abs(Idas - da)
Iabs_err[np.logical_or(Idas > upper, Idas < lower)] = np.nan
# If we don't have any valid pixels, can't use the provided drainage area
if np.nansum(Iabs_err) == 0:
da = None
else: # Else we make a weighted error image using both distance and DA differences
# Since we have useful DA information, we provide a small area around the
# center point that is unpenalized for distance. This accounts for very
# small errors in location to avoid moving the point off a valid
# streamline.
# Idist[nrows-8:nrows+9, ncols-8:ncols+9] = 0
wt_da = 2
wt_dist = 1
Ierr = ((Iabs_err / da) * wt_da) + (Idist * wt_dist)
Ierr[~Irn] = np.nan
solve_method = 2 # A DA was provided and a match was found on a nearby flowline that is within our certainty bounds
# If no useful DA is provided, the nearest point along the stream network is
# chosen to map to.
if da is None:
Ierr = Idist
Ierr[~Irn] = np.nan
solve_method = (
4 # A DA was not provided; we map to the nearest flowline (>1km^2)
)
# Select the pixel in the drainage network that has the lowest error
min_err = np.nanmin(Ierr)
me_idx = np.where(Ierr == min_err)
if (
len(me_idx[0]) > 1
): # In the case of ties, choose the one that has the lower Idist error
use_me_idx = np.argmin(Idist[me_idx[0], me_idx[1]])
me_idx = np.array([[me_idx[0][use_me_idx]], [me_idx[1][use_me_idx]]])
col_mapped = cr[0][0] + me_idx[1][0] - (ncols - 1) / 2
row_mapped = cr[0][1] + me_idx[0][0] - (nrows - 1) / 2
# col_mapped = cr[0][0]
# row_mapped = cr[0][1]
    return (int(col_mapped), int(row_mapped)), solve_method  # (column, row)
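# Illustrative usage sketch (hypothetical file name, coordinates and drainage
# area; not part of the original module):
#
#   from osgeo import gdal
#   da_obj = gdal.Open('flow_accumulation.tif')  # drainage areas in km^2
#   gt = da_obj.GetGeoTransform()
#   nrows, ncols = nrows_and_cols_from_search_radius(-91.2, 35.6, 5000, gt)
#   (col, row), solve_method = map_cl_pt_to_flowline(
#       (-91.2, 35.6), da_obj, nrows, ncols, da=2100.0)
#   # solve_method codes 1-4 indicate success; 5-8 explain why mapping failed.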
``` |
{
"source": "jonseddon/cmip6-object-store",
"score": 2
} |
#### File: cmip6_object_store/cmip6_ncrg/read_ncrg.py
```python
import time
from random import randint
from memory_profiler import profile
from netCDF4 import Dataset
from numpy import ma as ma
fpath = (
"http://ceda-archive-o.s3.jc.rl.ac.uk/ag-http-range-get-test/"
"mrros_rcp85_land-gcm_global_60km_11_day_20691201-20791130.nc#mode=bytes"
)
def timer(func):
def timeit(*args, **kwargs):
start = time.time()
print(f"[INFO] Running: {func.__name__}")
result = func(*args, **kwargs)
duration = time.time() - start
print(f"Duration: {duration:.1f} seconds\n\n")
return result
return timeit
def _open(fpath=fpath):
return Dataset(fpath).variables["mrros"]
@timer
@profile
def test_1_read_metadata():
v = _open()
print(v.units)
def get_min_max(data):
fixed_data = ma.masked_equal(data, 9.96921e36)
return fixed_data.min(), fixed_data.max()
@timer
@profile
def test_2_small_slice():
v = _open()
print(f"[INFO] Shape pre-subset: {v.shape}")
start = randint(0, 99)
end = start + 20
data = v[0, start:end, start:end, start:end]
print(f"[INFO] Size: {data.size}")
print(f"[INFO] Shape post-subset: {data.shape}")
mn, mx = get_min_max(data)
print(f"[INFO] min and max: {mn} --> {mx}")
@timer
@profile
def test_3_medium_slice():
v = _open()
print(f"[INFO] Shape pre-subset: {v.shape}")
start = randint(0, 20)
end = start + 200
data = v[0, start:end, start:end, start:end]
print(f"[INFO] Size: {data.size}")
print(f"[INFO] Shape post-subset: {data.shape}")
mn, mx = get_min_max(data)
print(f"[INFO] min and max: {mn} --> {mx}")
@timer
@profile
def test_4_large_slice():
v = _open()
print(f"[INFO] Shape pre-subset: {v.shape}")
start = randint(0, 20)
end = start + 200
data = v[0, 0:2000, start:end, start:end]
print(f"[INFO] Size: {data.size}")
print(f"[INFO] Shape post-subset: {data.shape}")
mn, mx = get_min_max(data)
print(f"[INFO] min and max: {mn} --> {mx}")
def main():
for test in sorted([_ for _ in globals() if _.startswith("test_")]):
func = globals()[test]
func()
if __name__ == "__main__":
main()
```
#### File: cmip6_object_store/cmip6_zarr/compare.py
```python
import glob
import random
import traceback
import warnings
import xarray as xr
from cmip6_object_store.cmip6_zarr.utils import (
get_archive_path,
get_pickle_store,
get_var_id,
read_zarr,
verification_status,
)
def compare_zarrs_with_ncs(project, n_to_test=5):
"""
Randomly selects some datasets and checks that the contents
    of a NetCDF file in the archive match those in a Zarr file
in the Caringo object store.
This logs its outputs in a Pickle file for use elsewhere.
"""
print(f"\nVerifying up to {n_to_test} datasets for: {project}...")
VERIFIED, FAILED = verification_status
verified_pickle = get_pickle_store("verify", project=project)
tested = []
successes, failures = 0, 0
zarr_pickle = get_pickle_store("zarr", project="cmip6").read()
dataset_ids = list(zarr_pickle.keys())
while len(tested) < n_to_test:
dataset_id = random.choice(dataset_ids)
if dataset_id in tested or verified_pickle.read().get(dataset_id) == VERIFIED:
continue
print(f"==========================\nVerifying: {dataset_id}")
try:
_compare_dataset(dataset_id)
verified_pickle.add(dataset_id, VERIFIED)
successes += 1
print(f"Comparison succeeded for: {dataset_id}")
except Exception:
verified_pickle.add(dataset_id, FAILED)
failures += 1
tb = traceback.format_exc()
print(f"FAILED comparison for {dataset_id}: traceback was\n\n: {tb}")
tested.append(dataset_id)
total = successes + failures
return (successes, total)
def _get_nc_file(dataset_id):
archive_dir = get_archive_path(dataset_id)
nc_files = glob.glob(f"{archive_dir}/*.nc")
if not nc_files:
return None
return nc_files[0]
def _compare_dataset(dataset_id):
nc_file = _get_nc_file(dataset_id)
if not nc_file:
return False
print(f"\nWorking on: {dataset_id}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nc_subset = xr.open_dataset(nc_file)
zarr_ds = read_zarr(dataset_id)
zarr_subset = zarr_ds.sel(time=slice(nc_subset.time[0], nc_subset.time[-1]))
result = nc_subset.identical(zarr_subset)
print(f"Testing: {dataset_id}")
print(f"\tResult: {result}")
for prop in ("data_vars", "coords"):
a, b = [
sorted(list(_.keys()))
for _ in (getattr(nc_subset, prop), getattr(zarr_subset, prop))
]
print(f'\nComparing "{prop}": {a} \n------------\n {b}')
assert a == b
a, b = nc_subset.time.values, zarr_subset.time.values
assert list(a) == list(b)
print("Times are identical")
var_id = get_var_id(dataset_id, project="cmip6")
a_var, b_var = nc_subset[var_id], zarr_subset[var_id]
a_min, a_max = float(a_var.min()), float(a_var.max())
b_min, b_max = float(b_var.min()), float(b_var.max())
assert a_min == b_min
print("Minima are identical")
assert a_max == b_max
print("Maxima are identical")
for attr in ("units", "long_name"):
a, b = getattr(a_var, attr), getattr(b_var, attr)
print(f"{attr}: {a} VS {b}")
assert a == b
return result
```
#### File: cmip6_object_store/cmip6_zarr/generate-datasets-list-from-crepp.py
```python
import os
import sys
from datetime import datetime as dt
from functools import reduce
from operator import or_
import django
from crepp_app.models import *
from crepp_site import settings_local
from crepplib.vocabs import *
from crepplib.vocabs import (
ACTION_TYPES,
CHECKSUM_TYPES,
PROCESSING_STATUS_VALUES,
STATUS_VALUES,
)
from django.db.models import Q, Sum
print("using database {}".format(settings_local.DATABASES["default"]["NAME"]))
django.setup()
try:
odir = sys.argv[1]
except IndexError:
odir = os.getcwd()
TODAYS_DATE = dt.today().isoformat().split("T")[0]
ofile = os.path.join(odir, "cmip6-datasets_{}.csv".format(TODAYS_DATE))
print("Output file: ", ofile)
def get_dataset_size(ds):
files = ds.file_set.all()
return files.count(), round(
(files.aggregate(Sum("size"))["size__sum"]) / (1024.0 * 1024), 2
)
req_vars = [
"Amon.clt",
"Amon.evspsbl",
"Amon.hfls",
"Amon.hfss",
"Amon.hurs",
"Amon.huss",
"Amon.pr",
"Amon.prsn",
"Amon.ps",
"Amon.psl",
"Amon.rlds",
"Amon.rlus",
"Amon.rlut",
"Amon.rsds",
"Amon.rsdt",
"Amon.rsus",
"Amon.rsut",
"Amon.sfcWind",
"Amon.tas",
"Amon.tasmax",
"Amon.tasmin",
"Amon.tauu",
"Amon.tauv",
"Amon.ts",
"Amon.uas",
"Amon.vas",
"Amon.zg",
"LImon.snw",
"Lmon.mrro",
"Lmon.mrsos",
"OImon.siconc",
"OImon.sim",
"OImon.sithick",
"OImon.snd",
"OImon.tsice",
"Omon.sos",
"Omon.tos",
"Omon.zos",
"Amon.ta",
"Amon.ua",
"Amon.va",
"Amon.hur",
"Amon.hus",
"Amon.zg",
"Oday.tos",
"day.hurs",
"day.huss",
"day.mrro",
"day.pr",
"day.psl",
"day.sfcWindmax",
"day.snw",
"day.tas",
"day.tasmax",
"day.tasmin",
"day.uas",
"day.vas",
"day.zg",
"CFday.ps",
]
# 'day.ua', 'day.va', ]
# '3hr.huss', '3hr.pr', '3hr.tas', '3hr.vas', '3hr.uas', '6hrPlev.zg1000']
valid_dss = {
"is_withdrawn": False,
"is_paused": False,
"processing_status": PROCESSING_STATUS_VALUES.COMPLETED,
}
c3s34g_valid_ds = Dataset.objects.filter(**valid_dss)
valid_vars_filter = reduce(or_, [Q(name__icontains=val) for val in req_vars])
valid_datasets = c3s34g_valid_ds.filter(valid_vars_filter)
with open(ofile, "a+") as w:
w.writelines("Dataset_id, num_files, size (MB)\n")
for ds in valid_datasets:
nfiles, total_size = get_dataset_size(ds)
w.writelines("{}, {}, {}\n".format(ds.name, nfiles, total_size))
```
#### File: cmip6_object_store/cmip6_zarr/utils.py
```python
import json
import os
import uuid
import s3fs
import xarray as xr
from ..config import CONFIG
from .pickle_store import PickleStore
known_pickles = ["zarr", "error", "verify"]
verification_status = ["VERIFIED", "FAILED"]
def get_credentials(creds_file=None):
if not creds_file:
creds_file = CONFIG["store"]["credentials_file"]
with open(creds_file) as f:
creds = json.load(f)
return creds
def get_uuid():
_uuid = uuid.uuid4()
return _uuid
def get_var_id(dataset_id, project):
var_index = CONFIG[f"project:{project}"]["var_index"]
return dataset_id.split(".")[var_index]
def create_dir(dr):
if not os.path.isdir(dr):
os.makedirs(dr)
def get_pickle_store(store_type, project):
"""
Return a pickle store of type: `store_type`.
Pickle store types can be any listed in: `known_pickles`
Args:
store_type ([string]): pickle type
project ([string]): project
"""
if store_type not in known_pickles:
raise KeyError(f"Pickle store type not known: {store_type}")
_config = CONFIG[f"project:{project}"]
return PickleStore(_config[f"{store_type}_pickle"])
def split_string_at(s, sep, indx):
items = s.split(sep)
first, last = sep.join(items[:indx]), sep.join(items[indx:])
return first, last
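# Illustrative example (not part of the original module):
#   split_string_at("a.b.c.d.e", ".", 2) returns ("a.b", "c.d.e")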
def to_dataset_id(path, project="cmip6"):
items = path.replace("/", ".").split(".")
if items[-1].endswith(".nc") or items[-1] == "zarr":
items = items[:-1]
n_facets = CONFIG[f"project:{project}"]["n_facets"]
return ".".join(items[-n_facets:])
def get_zarr_url(path):
dataset_id = to_dataset_id(path)
zarr_path = "/".join(split_string_at(dataset_id, ".", 4)) + ".zarr"
prefix = CONFIG["store"]["endpoint_url"]
return f"{prefix}{zarr_path}"
def read_zarr(path, **kwargs):
dataset_id = to_dataset_id(path)
zarr_path = "/".join(split_string_at(dataset_id, ".", 4)) + ".zarr"
endpoint_url = CONFIG["store"]["endpoint_url"]
jasmin_s3 = s3fs.S3FileSystem(
anon=True, client_kwargs={"endpoint_url": endpoint_url}
)
s3_store = s3fs.S3Map(root=zarr_path, s3=jasmin_s3)
ds = xr.open_zarr(store=s3_store, consolidated=True, **kwargs)
return ds
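# Illustrative usage (the dataset identifier below is hypothetical; the
# object-store endpoint and facet count come from the package CONFIG):
#
#   ds = read_zarr('CMIP6.HighResMIP.MOHC.HadGEM3-GC31-LL.highresSST-present'
#                  '.r1i1p1f1.Amon.tas.gn')
#   print(ds)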
def get_archive_path(path, project="cmip6"):
dataset_id = to_dataset_id(path)
archive_dir = CONFIG[f"project:{project}"]["archive_dir"]
return os.path.join(archive_dir, dataset_id.replace(".", "/"))
```
#### File: cmip6_object_store/compare/cmip_read.py
```python
"CMIP6 data read comparison code"
import sys
import json
from pprint import pprint
import logging
import time
import os
import pandas as pd
import argparse
from random import randint
parser = argparse.ArgumentParser(description='Gather variables from command line', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'config',
help = "Config file location",
)
parser.add_argument(
'-l', '--log-level',
action = 'store',
dest = 'log_level',
help = 'Set the log level of console output (CRITICAL, ERROR, WARNING, INFO, DEBUG). Default: INFO',
required = False,
default = 'INFO',
)
class ConfigError(Exception):
"Raised on config error"
pass
class ReadError(Exception):
"Raised on problem with the test's read"
class RunError(Exception):
"Raised on problem with running test"
class CMIPRead:
def __init__(self, config):
self.config = config
self.results = {'test': '{}-{}-{}'.format(config['method'],
config['source'], config['read_pattern']),
'config_name': config['config_name'],
'run_at': time.ctime(),
'config_file': config['config_file'],
'config_modified_at': time.ctime(os.stat(config['config_file']).st_mtime) if config['config_file'] else None,
'repeats': config['repeats'],
'read_pattern': config['read_pattern']
}
def get_zarr_store(self):
caringo_s3 = s3fs.S3FileSystem(anon=True,
client_kwargs={'endpoint_url': self.config['endpoint']})
zarr_path = os.path.join(self.config['path'], self.config['file'])
store = s3fs.S3Map(root=zarr_path, s3=caringo_s3)
return store
def save_results(self):
# check if there's a pickled dataframe already on disk
logging.info('Saving data...')
try:
df = pd.read_json('results_df.json')
except ValueError:
            df = pd.DataFrame()
df = df.append(self.results, ignore_index=True)
# save to disk
df.to_json('results_df.json')
def _check_s3_nc_path(self, fp):
if fp.startswith('http'):
fp += '#mode=bytes'
return fp
def read_nc_serial(self):
fp = os.path.join(self.config['path'], self.config['file'])
# if S3 url add #mode=bytes to fp
fp = self._check_s3_nc_path(fp)
total_bytes = 0
for ir in range(self.config['repeats']):
nc = Dataset(fp,'r')
var = nc.variables[config['var']]
for i in range(var.shape[0]):
logging.debug('Index: {}'.format(i))
data = var[i,:,:,:]
total_bytes += data.nbytes
nc.close()
return total_bytes
def read_nc_map(self):
fp = os.path.join(self.config['path'], self.config['file'])
# if S3 url add #mode=bytes to fp
fp = self._check_s3_nc_path(fp)
total_bytes = 0
for ir in range(self.config['repeats']):
nc = Dataset(fp,'r')
var = nc.variables[config['var']]
rx = randint(0, var.shape[0]-1)
ry = randint(0, var.shape[1]-1)
logging.debug('Index: [{},{},:,:]'.format(rx,ry))
data = var[rx,ry,:,:]
total_bytes += data.nbytes
nc.close()
return total_bytes
def read_nc_timeseries(self):
fp = os.path.join(self.config['path'], self.config['file'])
# if S3 url add #mode=bytes to fp
fp = self._check_s3_nc_path(fp)
total_bytes = 0
for ir in range(self.config['repeats']):
nc = Dataset(fp,'r')
var = nc.variables[config['var']]
rx = randint(0, var.shape[1]-1)
ry = randint(0, var.shape[2]-1)
logging.debug('Index: [:,{},{},:]'.format(rx,ry))
data = var[:,rx,ry,0]
total_bytes += data.nbytes
nc.close()
return total_bytes
def read_s3nc_serial(self):
# S3netcdf serial read method
fp = os.path.join(self.config['path'], self.config['file'])
total_bytes = 0
for ir in range(self.config['repeats']):
nc = s3Dataset(fp,'r')
var = nc.variables[config['var']]
for i in range(var.shape[0]):
logging.debug('Index: {}'.format(i))
data = var[i,:,:,:]
total_bytes += data.nbytes
nc.close()
return total_bytes
def read_s3nc_map(self):
# s3netcdf map read
fp = os.path.join(self.config['path'], self.config['file'])
total_bytes = 0
for ir in range(self.config['repeats']):
nc = s3Dataset(fp,'r')
var = nc.variables[config['var']]
rx = randint(0, var.shape[0]-1)
ry = randint(0, var.shape[1]-1)
logging.debug('Index: [{},{},:,:]'.format(rx,ry))
data = var[rx,ry,:,:]
total_bytes += data.nbytes
nc.close()
return total_bytes
def read_s3nc_timeseries(self):
#s3netcdf time series read
fp = os.path.join(self.config['path'], self.config['file'])
total_bytes = 0
for ir in range(self.config['repeats']):
nc = s3Dataset(fp,'r')
var = nc.variables[config['var']]
rx = randint(0, var.shape[1]-1)
ry = randint(0, var.shape[2]-1)
logging.debug('Index: [:,{},{},:]'.format(rx,ry))
data = var[:,rx,ry,0]
total_bytes += data.nbytes
logging.debug('shape of results = {}'.format(data.shape))
nc.close()
return total_bytes
def read_zarr_map(self):
store = self.get_zarr_store()
total_bytes = 0
for ir in range(self.config['repeats']):
# open dataset
ds = xr.open_zarr(store=store, consolidated=True)
var = ds[self.config['var']]
rx = randint(0, var.shape[0]-1)
ry = randint(0, var.shape[1]-1)
logging.debug('Index: [{},{},:,:]'.format(rx,ry))
data = var[rx,ry,:,:].load()
total_bytes += data.nbytes
ds.close()
return total_bytes
def read_zarr_timeseries(self):
store = self.get_zarr_store()
total_bytes = 0
for ir in range(self.config['repeats']):
# open dataset
ds = xr.open_zarr(store=store, consolidated=True)
var = ds[self.config['var']]
rx = randint(0, var.shape[1]-1)
ry = randint(0, var.shape[2]-1)
logging.debug('Index: [:,{},{},:]'.format(rx,ry))
data = var[:,rx,ry,0].load()
total_bytes += data.nbytes
ds.close()
return total_bytes
def read_zarr_serial(self):
store = self.get_zarr_store()
total_bytes = 0
for ir in range(self.config['repeats']):
# open dataset
ds = xr.open_zarr(store=store, consolidated=True)
var = ds[self.config['var']]
for i in range(var.shape[0]):
logging.debug('Index: {}'.format(i))
data = var[i,:,:,:].load()
total_bytes += data.nbytes
ds.close()
return total_bytes
def run(self):
logging.info('Starting test...')
logging.debug('config being used: {}'.format(self.config))
# start timer
if self.config['repeats'] > 1:
raise RunError('Repeats ({}) greater than 1 not implemented because of caching file.'.format(self.config['repeats']))
start_time = time.time()
logging.info('Reading using {} from {}...'.format(config['method'],config['source']))
# work out which version of the test to run
if self.config['method'] == 'netCDF4-python':
if self.config['read_pattern'] == 'serial':
bytes_read = self.read_nc_serial()
elif self.config['read_pattern'] == 'map':
bytes_read = self.read_nc_map()
elif self.config['read_pattern'] == 'timeseries':
bytes_read = self.read_nc_timeseries()
else:
bytes_read = None
elif self.config['method'] == 'S3netCDF4-python':
if self.config['read_pattern'] == 'serial':
bytes_read = self.read_s3nc_serial()
elif self.config['read_pattern'] == 'map':
bytes_read = self.read_s3nc_map()
elif self.config['read_pattern'] == 'timeseries':
bytes_read = self.read_s3nc_timeseries()
else:
bytes_read = None
elif self.config['method'] == 'zarr':
if self.config['read_pattern'] == 'serial':
bytes_read = self.read_zarr_serial()
elif self.config['read_pattern'] == 'map':
bytes_read = self.read_zarr_map()
elif self.config['read_pattern'] == 'timeseries':
bytes_read = self.read_zarr_timeseries()
else:
bytes_read = None
else:
raise ConfigError('Test config invalid, check "source" and "method": \n{}'.format(self.config))
total_time = time.time() - start_time
# rate of read in MB/s
rateMB = bytes_read/total_time/10**6
self.results['total_time'] = total_time
self.results['rateMB'] = rateMB
self.results['bytes_read'] = bytes_read
logging.debug('Bytes Read: {}\nTime taken: {}\nRate: {}'.format(bytes_read,total_time, rateMB))
self.save_results()
if __name__=="__main__":
args = parser.parse_args()
if args.config:
config = json.load(open(args.config))
config['config_file'] = args.config
else:
config = {'config_name':'default',
'source': 'disk',
'method': 'netCDF4-python',
'log_level': 'debug',
'repeats': 1,
'config_file': None,
'file': 'test0-1.nc',
'var': 'var',
'read_pattern': 'serial',
'path': '/gws/nopw/j04/perf_testing/cmip6'
}
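    # Illustrative example of a JSON config file for this script (keys follow
    # the defaults above; the endpoint and paths are placeholders):
    # {
    #   "config_name": "zarr-map-test",
    #   "source": "object-store",
    #   "method": "zarr",
    #   "endpoint": "https://object-store.example.org",
    #   "path": "bucket/prefix",
    #   "file": "example.zarr",
    #   "var": "tas",
    #   "read_pattern": "map",
    #   "repeats": 1
    # }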
# activate the right venv and import libraries
if config['method'] == 'netCDF4-python':
activate_this_file = "/home/users/mjones07/cmip-venv-nc/bin/activate_this.py"
exec(open(activate_this_file).read(), {'__file__': activate_this_file})
from netCDF4 import Dataset
elif config['method'] == 'S3netCDF4-python':
activate_this_file = "/home/users/mjones07/s3nc_venv/bin/activate_this.py"
exec(open(activate_this_file).read(), {'__file__': activate_this_file})
from S3netCDF4._s3netCDF4 import s3Dataset
elif config['method'] == 'zarr':
activate_this_file = "/home/users/mjones07/cmip-zarr-new/bin/activate_this.py"
exec(open(activate_this_file).read(), {'__file__': activate_this_file})
import xarray as xr
import s3fs
else:
raise ImportError('libs not imported')
logger_format = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=logger_format, level=args.log_level.upper())
cmip_read = CMIPRead(config)
cmip_read.run()
``` |
{
"source": "jonseddon/primavera-dmt",
"score": 2
} |
#### File: primavera-dmt/pdata_app/models.py
```python
from __future__ import unicode_literals, division, absolute_import
import re
import cf_units
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from solo.models import SingletonModel
from django.db.models import PROTECT, SET_NULL, CASCADE
from django.core.exceptions import ValidationError
from pdata_app.utils.common import standardise_time_unit, safe_strftime
from vocabs import (STATUS_VALUES, ESGF_STATUSES, FREQUENCY_VALUES,
ONLINE_STATUS, CHECKSUM_TYPES, VARIABLE_TYPES, CALENDARS)
model_names = ['Project', 'Institute', 'ClimateModel', 'Experiment',
'ActivityId', 'DataSubmission', 'DataFile', 'ESGFDataset',
'CEDADataset', 'DataRequest', 'DataIssue', 'Checksum',
'Settings', 'VariableRequest', 'RetrievalRequest', 'EmailQueue',
'ReplacedFile', 'ObservationDataset', 'ObservationFile']
__all__ = model_names
class Settings(SingletonModel):
"""
    Global settings for the app (that can be changed within the app).
"""
is_paused = models.BooleanField(default=False, null=False)
standard_time_units = models.CharField(max_length=50,
verbose_name='Standard Time Units', default='days since 1950-01-01')
contact_user_id = models.CharField(max_length=20,
verbose_name='Contact User ID',
default='jseddon')
base_output_dir = models.CharField(max_length=300,
verbose_name='Base directory for '
'retrieved files',
default='/gws/nopw/j04/primavera5/'
'stream1')
current_stream1_dir = models.CharField(
max_length=300,
verbose_name='The directory that retrievals are currently being '
'retrieved to.',
default='/gws/nopw/j04/primavera4/stream1'
)
class Meta:
verbose_name = "Settings"
def __str__(self):
return "App Settings"
class Project(models.Model):
"""
A project
"""
# RFK Relationships
# DataFile
short_name = models.CharField(max_length=100, null=False,
blank=False, unique=True)
full_name = models.CharField(max_length=300, null=False, blank=False)
def __str__(self):
return self.short_name
class Institute(models.Model):
"""
An institute
"""
# RFK Relationships
# DataRequest
short_name = models.CharField(max_length=100, null=False,
blank=False, unique=True)
full_name = models.CharField(max_length=1000, null=False, blank=False)
def __str__(self):
return self.short_name
class ClimateModel(models.Model):
"""
A climate model
"""
# RFK Relationships
# DataFile
# DataRequest
short_name = models.CharField(max_length=100, null=False,
blank=False, unique=True)
full_name = models.CharField(max_length=300, null=False, blank=False)
def __str__(self):
return self.short_name
class Meta:
verbose_name = "Climate Model"
class Experiment(models.Model):
"""
An experiment
"""
# RFK Relationships
# DataFile
# DataRequest
short_name = models.CharField(max_length=100, null=False,
blank=False, unique=True)
full_name = models.CharField(max_length=300, null=False, blank=False)
def __str__(self):
return self.short_name
class ActivityId(models.Model):
"""
An activity id
"""
# RFK Relationships
# DataFile
short_name = models.CharField(max_length=100, null=False,
blank=False, unique=True)
full_name = models.CharField(max_length=300, null=False, blank=False)
def __str__(self):
return self.short_name
class Meta:
verbose_name = 'Activity ID'
class VariableRequest(models.Model):
"""
A variable requested in the CMIP6 data request
"""
table_name = models.CharField(max_length=30, null=False, blank=False,
verbose_name='Table Name')
long_name = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Long Name')
units = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Units')
var_name = models.CharField(max_length=30, null=False, blank=False,
verbose_name='Var Name')
standard_name = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Standard Name')
cell_methods = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Cell Methods')
positive = models.CharField(max_length=20, null=True, blank=True,
verbose_name='Positive')
variable_type = models.CharField(max_length=20,
choices=list(VARIABLE_TYPES.items()),
null=False, blank=False,
verbose_name='Variable Type')
dimensions = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Dimensions')
cmor_name = models.CharField(max_length=20, null=False, blank=False,
verbose_name='CMOR Name')
modeling_realm = models.CharField(max_length=20, null=False, blank=False,
verbose_name='Modeling Realm')
frequency = models.CharField(max_length=200,
choices=list(FREQUENCY_VALUES.items()),
null=False, blank=False,
verbose_name='Frequency')
cell_measures = models.CharField(max_length=200, null=False, blank=False,
verbose_name='Cell Measures')
uid = models.CharField(max_length=200, null=False, blank=False,
verbose_name='UID')
out_name = models.CharField(max_length=20, null=True, blank=False,
verbose_name='Output Name')
def __str__(self):
return 'VariableRequest: {} ({})'.format(self.cmor_name, self.table_name)
class Meta:
verbose_name = "Variable Request"
class DataFileAggregationBase(models.Model):
"""
An abstract base class for datasets containing many files.
Includes a number of convenience methods to calculate
aggregations of properties.
"""
class Meta:
abstract = True
def _file_aggregation(self, field_name):
records = [getattr(datafile, field_name) for datafile in self.get_data_files()]
# Return unique sorted set of records
unique_records = list(set(records))
if unique_records == [None]:
return None
else:
return unique_records
def get_data_files(self):
return self.datafile_set.all()
def project(self):
return self._file_aggregation("project")
def climate_model(self):
return self._file_aggregation("climate_model")
def frequency(self):
return self._file_aggregation("frequency")
def variables(self):
return self._file_aggregation("variable_request")
def get_tape_urls(self):
return self._file_aggregation("tape_url")
def directories(self):
return self._file_aggregation("directory")
def get_file_versions(self):
return self._file_aggregation("version")
def get_data_issues(self):
records = []
for datafile in self.get_data_files():
records.extend(datafile.dataissue_set.all())
records = list(set(records))
records.sort(key=lambda di: di.date_time, reverse=True)
return records
def assign_data_issue(self, issue_text, reporter, date_time=None):
"""
Creates a DataIssue and attaches it to all related DataFile records.
"""
date_time = date_time or timezone.now()
data_issue, _tf = DataIssue.objects.get_or_create(issue=issue_text,
reporter=reporter, date_time=date_time)
data_issue.save()
data_files = self.get_data_files()
data_issue.data_file.add(*data_files)
def start_time(self):
std_units = Settings.get_solo().standard_time_units
start_times = self.datafile_set.values_list('start_time', 'time_units',
'calendar')
if not start_times:
return None
std_times = [
(standardise_time_unit(time, unit, std_units, cal), cal)
for time, unit, cal in start_times
]
none_values_removed = [(std_time, cal)
for std_time, cal in std_times
if std_time is not None]
if not none_values_removed:
return None
earliest_time, calendar = min(none_values_removed, key=lambda x: x[0])
earliest_obj = cf_units.num2date(earliest_time, std_units, calendar)
return earliest_obj.strftime('%Y-%m-%d')
def end_time(self):
std_units = Settings.get_solo().standard_time_units
end_times = self.datafile_set.values_list('end_time', 'time_units',
'calendar')
if not end_times:
return None
std_times = [
(standardise_time_unit(time, unit, std_units, cal), cal)
for time, unit, cal in end_times
]
none_values_removed = [(std_time, cal)
for std_time, cal in std_times
if std_time is not None]
if not none_values_removed:
return None
latest_time, calendar = max(none_values_removed, key=lambda x: x[0])
latest_obj = cf_units.num2date(latest_time, std_units, calendar)
return latest_obj.strftime('%Y-%m-%d')
def online_status(self):
"""
Checks aggregation of online status of all DataFiles.
Returns one of:
ONLINE_STATUS.online
ONLINE_STATUS.offline
ONLINE_STATUS.partial
"""
files_online = self.datafile_set.filter(online=True).count()
files_offline = self.datafile_set.filter(online=False).count()
if files_offline:
if files_online:
return ONLINE_STATUS.partial
else:
return ONLINE_STATUS.offline
else:
return ONLINE_STATUS.online
class DataSubmission(DataFileAggregationBase):
"""
A directory containing a directory tree of data files copied to the
platform.
"""
# RFK relationships:
# ESGFDatasets: ESGFDataset
# DataFiles: DataFile
# Dynamically aggregated from DataFile information:
# project
# climate_model
# frequency
# variables
# data_issues
# start_time
# end_time
status = models.CharField(max_length=20, choices=list(STATUS_VALUES.items()),
verbose_name='Status',
default=STATUS_VALUES.EXPECTED,
blank=False, null=False)
incoming_directory = models.CharField(max_length=500,
verbose_name='Incoming Directory',
blank=False, null=False)
# Current directory
directory = models.CharField(max_length=500,
verbose_name='Current Directory',
blank=True, null=True)
user = models.ForeignKey(User, blank=False, null=False,
verbose_name='User', on_delete=models.CASCADE)
date_submitted = models.DateTimeField(auto_now_add=True,
verbose_name='Date Submitted',
null=False, blank=False)
def __str__(self):
return "Data Submission: %s" % self.incoming_directory
class Meta:
unique_together = ('incoming_directory',)
verbose_name = "Data Submission"
class CEDADataset(DataFileAggregationBase):
"""
A CEDA Dataset - a collection of ESGF Datasets that are held in the
CEDA Data catalogue (http://catalogue.ceda.ac.uk) with their own metadata
records.
"""
# RFK relationships:
# ESGFDataset
# Dynamically aggregated from DataFile information:
# project
# climate_model
# frequency
# variables
# data_issues
# start_time
# end_time
catalogue_url = models.URLField(verbose_name="CEDA Catalogue URL", blank=False, null=False)
directory = models.CharField(max_length=500, verbose_name="Directory", blank=False, null=False)
# The CEDA Dataset might have a DOI
doi = models.CharField(verbose_name="DOI", blank=True, null=True,
max_length=500)
def __str__(self):
return "CEDA Dataset: %s" % self.catalogue_url
class Meta:
verbose_name = "CEDA Dataset"
class ESGFDataset(DataFileAggregationBase):
"""
An ESGF Dataset - a collection of files with an identifier.
    This model uses the Data Reference Syntax (DRS) dataset which consists of:
* drs_id - string representing facets of the DRS, e.g. "a.b.c.d"
* version - string representing the version, e.g. "v20160312"
* directory - string representing the directory containing the actual data files.
And a method:
* get_full_id: Returns full DRS Id made up of drsId version as: `self.drs_id`.`self.version`.
"""
class Meta:
unique_together = ('data_request', 'version')
verbose_name = "ESGF Dataset"
# RFK relationships:
# files: DataFile
# Dynamically aggregated from DataFile information:
# project
# climate_model
# frequency
# variables
# data_issues
# start_time
# end_time
status = models.CharField(max_length=20, choices=ESGF_STATUSES.items(),
verbose_name='Status',
default=ESGF_STATUSES.CREATED,
blank=False, null=False)
version = models.CharField(max_length=20, verbose_name='Version', blank=False, null=False)
directory = models.CharField(max_length=500, verbose_name='Directory', blank=True, null=True)
thredds_url = models.URLField(verbose_name="THREDDS Download URL", blank=True, null=True)
# Each ESGF Dataset will be part of one CEDADataset
ceda_dataset = models.ForeignKey(CEDADataset, blank=True, null=True,
on_delete=SET_NULL, verbose_name='CEDA Dataset')
# Each ESGF Dataset will match exactly one DataRequest
data_request = models.ForeignKey('DataRequest', blank=False, null=False,
on_delete=CASCADE,
verbose_name='Data Request')
@property
def drs_id(self):
"""
Generate the DRS id from the data request.
:returns: the DRS id
:rtype: str
"""
return self.get_drs_id()
def get_drs_id(self, use_out_name=False):
"""
Generate the DRS id from the data request.
:param bool use_out_name: Use out_name if it exists, otherwise use
cmor_name.
:returns: the DRS id
:rtype: str
"""
if self.data_request.datafile_set.count() == 0:
            raise ValueError('ESGFDataset from {} has no DataFiles.'.format(
self.data_request
))
if use_out_name:
out_name = self.data_request.variable_request.out_name
cmor_name = (out_name if out_name else
self.data_request.variable_request.cmor_name)
else:
cmor_name = self.data_request.variable_request.cmor_name
components = [
self.data_request.project.short_name,
self.data_request.datafile_set.
values('activity_id__short_name').
first()['activity_id__short_name'],
self.data_request.institute.short_name,
self.data_request.climate_model.short_name,
self.data_request.experiment.short_name,
self.data_request.rip_code,
self.data_request.variable_request.table_name,
cmor_name,
self.data_request.datafile_set.values('grid').first()['grid']
]
return '.'.join(components)
def get_full_id(self, use_out_name=False):
"""
        Return the full DRS Id made up of the drs_id and version as: drs_id.version
:param bool use_out_name: Use out_name if it exists, otherwise use
cmor_name.
"""
return "%s.%s" % (self.get_drs_id(use_out_name=use_out_name),
self.version)
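    # Illustrative example (values are hypothetical): for a data request on
    # variable tas in table Amon, get_full_id() yields a string of the form
    # 'CMIP6.HighResMIP.MOHC.HadGEM3-GC31-LL.highresSST-present.r1i1p1f1.Amon.tas.gn.v20190710'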
def clean(self, *args, **kwargs):
if not re.match(r"^v\d+$", self.version):
raise ValidationError('Version must begin with letter "v" '
'followed by a number (date).')
if self.directory:
if not self.directory.startswith("/"):
raise ValidationError('Directory must begin with "/" because '
'it is a full directory path.')
if self.directory.endswith("/"):
self.directory = self.directory.rstrip("/")
super(ESGFDataset, self).save(*args, **kwargs)
def __str__(self):
"""
Returns full DRS Id.
"""
return self.get_full_id()
class DataRequest(DataFileAggregationBase):
"""
A Data Request for a given set of inputs
"""
class Meta:
verbose_name = 'Data Request'
project = models.ForeignKey(Project, null=False, on_delete=PROTECT,
verbose_name='Project')
institute = models.ForeignKey(Institute, null=False, on_delete=PROTECT,
verbose_name='Institute')
climate_model = models.ForeignKey(ClimateModel, null=False,
on_delete=PROTECT,
verbose_name='Climate Model')
experiment = models.ForeignKey(Experiment, null=False, on_delete=PROTECT,
verbose_name='Experiment')
variable_request = models.ForeignKey(VariableRequest, null=False,
on_delete=PROTECT,
verbose_name='Variable')
rip_code = models.CharField(max_length=20, verbose_name="Variant Label",
null=True, blank=True)
request_start_time = models.FloatField(verbose_name="Start time",
null=False, blank=False)
request_end_time = models.FloatField(verbose_name="End time", null=False,
blank=False)
time_units = models.CharField(verbose_name='Time units', max_length=50,
null=False, blank=False)
calendar = models.CharField(verbose_name='Calendar', max_length=20,
null=False, blank=False,
choices=list(CALENDARS.items()))
def start_date_string(self):
"""Return a string containing the start date"""
dto = cf_units.num2date(self.request_start_time, self.time_units,
self.calendar)
return dto.strftime('%Y-%m-%d')
def end_date_string(self):
"""Return a string containing the end date"""
dto = cf_units.num2date(self.request_end_time, self.time_units,
self.calendar)
return dto.strftime('%Y-%m-%d')
def __str__(self):
return '{}.{}.{}.{}.{}.{}'.format(self.institute, self.climate_model,
self.experiment, self.rip_code,
self.variable_request.table_name,
self.variable_request.cmor_name)
class DataFile(models.Model):
"""
A data file
"""
# RFK relationships:
# checksums: Checksum - multiple is OK
# ManyToMany relationships:
# DataIssues: DataIssue - multiple is OK
name = models.CharField(max_length=200, verbose_name="File name",
null=False, blank=False)
incoming_name = models.CharField(max_length=200,
verbose_name="Original file name",
null=False, blank=False)
incoming_directory = models.CharField(max_length=500,
verbose_name="Incoming directory",
null=False, blank=False)
# This is where the datafile is now
directory = models.CharField(max_length=500,
verbose_name="Current directory",
null=True, blank=True)
size = models.BigIntegerField(null=False, verbose_name="File size")
tape_size = models.BigIntegerField(null=True, verbose_name='Size on Tape')
# This is the file's version
version = models.CharField(max_length=10, verbose_name='File Version',
null=True, blank=True)
# Scientific metadata
project = models.ForeignKey(Project, null=False, on_delete=PROTECT)
institute = models.ForeignKey(Institute, null=False, on_delete=PROTECT,
verbose_name='Institute')
climate_model = models.ForeignKey(ClimateModel, null=False,
on_delete=PROTECT,
verbose_name='Climate Model')
activity_id = models.ForeignKey(ActivityId, null=False,
on_delete=PROTECT,
verbose_name='Activity ID')
experiment = models.ForeignKey(Experiment, null=False, on_delete=PROTECT,
verbose_name='Experiment')
variable_request = models.ForeignKey(VariableRequest, null=False, on_delete=PROTECT)
data_request = models.ForeignKey(DataRequest, null=False, on_delete=PROTECT)
frequency = models.CharField(max_length=20, choices=list(FREQUENCY_VALUES.items()),
verbose_name="Time frequency", null=False, blank=False)
rip_code = models.CharField(max_length=20, verbose_name="Variant Label",
null=False, blank=False)
grid = models.CharField(max_length=20, verbose_name='Grid Label',
null=True, blank=True)
# DateTimes are allowed to be null/blank because some fields (such as orography)
# are time-independent
start_time = models.FloatField(verbose_name="Start time", null=True, blank=True)
end_time = models.FloatField(verbose_name="End time", null=True, blank=True)
time_units = models.CharField(verbose_name='Time units', max_length=50, null=True, blank=True)
calendar = models.CharField(verbose_name='Calendar', max_length=20,
null=True, blank=True, choices=list(CALENDARS.items()))
data_submission = models.ForeignKey(DataSubmission, null=False, blank=False,
on_delete=CASCADE)
# These Dataset fields might only be known after processing so they can be null/blank
esgf_dataset = models.ForeignKey(ESGFDataset, null=True, blank=True,
on_delete=SET_NULL)
ceda_dataset = models.ForeignKey(CEDADataset, null=True, blank=True,
on_delete=SET_NULL)
# URLs will not be known at start so can be blank
ceda_download_url = models.URLField(verbose_name="CEDA Download URL", null=True, blank=True)
ceda_opendap_url = models.URLField(verbose_name="CEDA OpenDAP URL", null=True, blank=True)
esgf_download_url = models.URLField(verbose_name="ESGF Download URL", null=True, blank=True)
esgf_opendap_url = models.URLField(verbose_name="ESGF OpenDAP URL", null=True, blank=True)
# Tape status
online = models.BooleanField(default=True, verbose_name="Is the file online?", null=False, blank=False)
tape_url = models.CharField(verbose_name="Tape URL", max_length=200, null=True, blank=True)
def start_date_string(self):
"""Return a string containing the start date"""
dto = cf_units.num2date(self.start_time, self.time_units, self.calendar)
return dto.strftime('%Y-%m-%d')
def end_date_string(self):
"""Return a string containing the end date"""
dto = cf_units.num2date(self.end_time, self.time_units, self.calendar)
return dto.strftime('%Y-%m-%d')
def __str__(self):
return "%s (Directory: %s)" % (self.name, self.directory)
class Meta:
unique_together = ('name', 'directory')
verbose_name = "Data File"
class ReplacedFile(models.Model):
"""
An old DataFile that has been replaced by another DataFile
"""
name = models.CharField(max_length=200, verbose_name="File name",
null=False, blank=False)
incoming_directory = models.CharField(max_length=500,
verbose_name="Incoming directory",
null=False, blank=False)
size = models.BigIntegerField(null=False, verbose_name="File size")
# This is the file's version
version = models.CharField(max_length=10, verbose_name='File Version',
null=True, blank=True)
# Scientific metadata
project = models.ForeignKey(Project, null=False, on_delete=PROTECT)
institute = models.ForeignKey(Institute, null=False, on_delete=PROTECT,
verbose_name='Institute')
climate_model = models.ForeignKey(ClimateModel, null=False,
on_delete=PROTECT,
verbose_name='Climate Model')
activity_id = models.ForeignKey(ActivityId, null=False,
on_delete=PROTECT,
verbose_name='Activity ID')
experiment = models.ForeignKey(Experiment, null=False, on_delete=PROTECT,
verbose_name='Experiment')
variable_request = models.ForeignKey(VariableRequest, null=False,
on_delete=PROTECT)
data_request = models.ForeignKey(DataRequest, null=False,
on_delete=PROTECT)
frequency = models.CharField(max_length=20,
choices=list(FREQUENCY_VALUES.items()),
verbose_name="Time frequency",
null=False, blank=False)
rip_code = models.CharField(max_length=20, verbose_name="Variant Label",
null=False, blank=False)
grid = models.CharField(max_length=20, verbose_name='Grid Label',
null=True, blank=True)
# DateTimes are allowed to be null/blank because some fields (such as
# orography) are time-independent
start_time = models.FloatField(verbose_name="Start time",
null=True, blank=True)
end_time = models.FloatField(verbose_name="End time",
null=True, blank=True)
time_units = models.CharField(verbose_name='Time units', max_length=50,
null=True, blank=True)
calendar = models.CharField(verbose_name='Calendar', max_length=20,
null=True, blank=True,
choices=list(CALENDARS.items()))
data_submission = models.ForeignKey(DataSubmission,
null=False, blank=False,
on_delete=CASCADE)
# Tape status
tape_url = models.CharField(verbose_name="Tape URL", max_length=200,
null=True, blank=True)
# Checksum
checksum_value = models.CharField(max_length=200, null=True, blank=True)
checksum_type = models.CharField(max_length=20,
choices=list(CHECKSUM_TYPES.items()),
null=True, blank=True)
def start_date_string(self):
"""Return a string containing the start date"""
dto = cf_units.num2date(self.start_time, self.time_units,
self.calendar)
return dto.strftime('%Y-%m-%d')
def end_date_string(self):
"""Return a string containing the end date"""
dto = cf_units.num2date(self.end_time, self.time_units, self.calendar)
return dto.strftime('%Y-%m-%d')
def __str__(self):
return "%s (Directory: %s)" % (self.name, self.directory)
class Meta:
unique_together = ('name', 'incoming_directory')
verbose_name = "Replaced File"
class DataIssue(models.Model):
"""
A recorded issue with a DataFile
NOTE: You can have multiple data issues related to a single DataFile
NOTE: Aggregation is used to associate a DataIssue with an ESGFDataset,
CEDADataset or DataSubmission
"""
issue = models.CharField(max_length=4000, verbose_name="Issue Reported",
null=False, blank=False)
reporter = models.ForeignKey(User, verbose_name="Reporter",
null=False, blank=False,
on_delete=models.CASCADE)
date_time = models.DateTimeField(auto_now_add=True,
verbose_name="Date and Time of Report",
null=False, blank=False)
# DataFile that the Data Issue corresponds to
data_file = models.ManyToManyField(DataFile)
def __str__(self):
return "Data Issue (%s): %s (%s)" % (
self.date_time.strftime('%Y-%m-%d %H:%M:%S'),
self.issue, self.reporter.username
)
class Meta:
verbose_name = "Data Issue"
class Checksum(models.Model):
"""
A checksum
"""
data_file = models.ForeignKey(DataFile, null=False, blank=False,
on_delete=CASCADE)
checksum_value = models.CharField(max_length=200, null=False, blank=False)
checksum_type = models.CharField(max_length=20, choices=list(CHECKSUM_TYPES.items()), null=False,
blank=False)
def __str__(self):
return "%s: %s (%s)" % (self.checksum_type, self.checksum_value,
self.data_file.name)
class TapeChecksum(models.Model):
"""
A checksum for the version of a file on tape
"""
data_file = models.ForeignKey(DataFile, null=False, blank=False,
on_delete=CASCADE)
checksum_value = models.CharField(max_length=200, null=False, blank=False)
checksum_type = models.CharField(max_length=20, choices=list(CHECKSUM_TYPES.items()), null=False,
blank=False)
def __str__(self):
return "%s: %s (%s)" % (self.checksum_type, self.checksum_value,
self.data_file.name)
class RetrievalRequest(models.Model):
"""
A collection of DataRequests to retrieve from Elastic Tape or MASS
"""
class Meta:
verbose_name = "Retrieval Request"
data_request = models.ManyToManyField(DataRequest)
date_created = models.DateTimeField(auto_now_add=True,
verbose_name='Request Created At',
null=False, blank=False)
date_complete = models.DateTimeField(verbose_name='Data Restored At',
null=True, blank=True)
date_deleted = models.DateTimeField(verbose_name='Data Deleted At',
null=True, blank=True)
requester = models.ForeignKey(User, verbose_name='Request Creator',
null=False, blank=False,
on_delete=models.CASCADE)
data_finished = models.BooleanField(default=False,
verbose_name="Data Finished?",
null=False, blank=False)
start_year = models.IntegerField(verbose_name="Start Year", null=True, blank=False)
end_year = models.IntegerField(verbose_name="End Year", null=True, blank=False)
def __str__(self):
return '{}'.format(self.id)
class EmailQueue(models.Model):
"""
A collection of emails that have been queued to send
"""
recipient = models.ForeignKey(User, on_delete=models.CASCADE)
subject = models.CharField(max_length=400, blank=False)
message = models.TextField(blank=False)
sent = models.BooleanField(default=False, null=False)
def __str__(self):
return '{} {}'.format(self.recipient.email, self.subject)
class ObservationDataset(models.Model):
"""
A collection of observation files.
"""
class Meta:
unique_together = ('name', 'version')
verbose_name = 'Observations Dataset'
name = models.CharField(max_length=200,
verbose_name='Name',
null=True, blank=True)
version = models.CharField(max_length=200, verbose_name='Version',
null=True, blank=True)
url = models.URLField(verbose_name='URL', null=True, blank=True)
summary = models.CharField(max_length=4000, verbose_name='Summary',
null=True, blank=True)
date_downloaded = models.DateTimeField(verbose_name='Date downloaded',
null=True, blank=True)
doi = models.CharField(max_length=200, verbose_name='DOI',
null=True, blank=True)
reference = models.CharField(max_length=4000, verbose_name='Reference',
null=True, blank=True)
license = models.URLField(verbose_name='License', null=True, blank=True)
# The following cached fields can be calculated from the foreign key
# relationships. However, due to the number of observation files this
# can be slow. Instead, a script is run by a cron job to periodically
# save the information here to speed up page loading.
cached_variables = models.CharField(max_length=1000,
verbose_name='Variables',
null=True, blank=True)
cached_start_time = models.DateTimeField(verbose_name='Start Time',
null=True, blank=True)
cached_end_time = models.DateTimeField(verbose_name='End Time',
null=True, blank=True)
cached_num_files = models.IntegerField(verbose_name='# Data Files',
null=True, blank=True)
cached_directories = models.CharField(max_length=200,
verbose_name='Directory',
null=True, blank=True)
def _file_aggregation(self, field_name):
records = [getattr(obs_file, field_name)
for obs_file in self.obs_files]
# Return unique sorted set of records
unique_records = sorted(set(records))
if None in unique_records:
unique_records.remove(None)
if unique_records == []:
return None
else:
return unique_records
@property
def obs_files(self):
return self.observationfile_set.all()
@property
def variables(self):
var_strings = self._file_aggregation('variable')
if var_strings:
all_vars = [indi_var.strip() for var_string in var_strings
for indi_var in var_string.split(',')]
return sorted(list(set(all_vars)))
else:
return None
@property
def incoming_directories(self):
return self._file_aggregation('incoming_directory')
@property
def directories(self):
return self._file_aggregation('directory')
@property
def tape_urls(self):
return self._file_aggregation('tape_url')
@property
def frequencies(self):
return self._file_aggregation('frequency')
@property
def units(self):
return self._file_aggregation('units')
@property
def start_time(self):
std_units = Settings.get_solo().standard_time_units
start_times = self.obs_files.values_list('start_time', 'time_units',
'calendar')
if not start_times:
return None
std_times = [
(standardise_time_unit(time, unit, std_units, cal), cal)
for time, unit, cal in start_times
]
none_values_removed = [(std_time, cal)
for std_time, cal in std_times
if std_time is not None]
if not none_values_removed:
return None
earliest_time, calendar = min(none_values_removed, key=lambda x: x[0])
earliest_obj = cf_units.num2date(earliest_time, std_units, calendar)
return earliest_obj
@property
def end_time(self):
std_units = Settings.get_solo().standard_time_units
end_times = self.obs_files.values_list('end_time', 'time_units',
'calendar')
if not end_times:
return None
std_times = [
(standardise_time_unit(time, unit, std_units, cal), cal)
for time, unit, cal in end_times
]
none_values_removed = [(std_time, cal)
for std_time, cal in std_times
if std_time is not None]
if not none_values_removed:
return None
latest_time, calendar = max(none_values_removed, key=lambda x: x[0])
latest_obj = cf_units.num2date(latest_time, std_units, calendar)
return latest_obj
@property
def online_status(self):
"""
Checks the aggregated online status of all ObservationFiles in this dataset.
Returns one of:
ONLINE_STATUS.online
ONLINE_STATUS.offline
ONLINE_STATUS.partial
"""
files_online = self.obs_files.filter(online=True).count()
files_offline = self.obs_files.filter(online=False).count()
if files_offline:
if files_online:
return ONLINE_STATUS.partial
else:
return ONLINE_STATUS.offline
else:
return ONLINE_STATUS.online
def __str__(self):
if self.version:
return '{} ver {}'.format(self.name, self.version)
else:
return '{}'.format(self.name)
class ObservationFile(models.Model):
"""
A single file containing observations or a reanalysis.
"""
class Meta:
unique_together = ('name', 'incoming_directory')
verbose_name = 'Observations File'
name = models.CharField(max_length=200, verbose_name='File name',
null=False, blank=False)
incoming_directory = models.CharField(max_length=500,
verbose_name='Incoming directory',
null=False, blank=False)
directory = models.CharField(max_length=200, verbose_name='Directory',
null=True, blank=True)
tape_url = models.CharField(verbose_name="Tape URL", max_length=200,
null=True, blank=True)
online = models.BooleanField(default=True, null=False, blank=False,
verbose_name='Is the file online?')
size = models.BigIntegerField(null=False, verbose_name='File size')
checksum_value = models.CharField(max_length=200, null=True, blank=True)
checksum_type = models.CharField(max_length=20,
choices=list(CHECKSUM_TYPES.items()),
null=True, blank=True)
# DateTimes are allowed to be null/blank because some fields (such as
# orography) are time-independent
start_time = models.FloatField(verbose_name="Start time",
null=True, blank=True)
end_time = models.FloatField(verbose_name="End time",
null=True, blank=True)
time_units = models.CharField(verbose_name='Time units', max_length=50,
null=True, blank=True)
calendar = models.CharField(verbose_name='Calendar', max_length=20,
null=True, blank=True,
choices=list(CALENDARS.items()))
frequency = models.CharField(max_length=200, null=True, blank=True,
verbose_name='Frequency')
# Details of the variables in the file
standard_name = models.CharField(max_length=500, null=True, blank=True,
verbose_name='Standard name')
long_name = models.CharField(max_length=500, null=True, blank=True,
verbose_name='Long name')
var_name = models.CharField(max_length=200, null=True, blank=True,
verbose_name='Var name')
units = models.CharField(max_length=200, null=True, blank=True,
verbose_name='Units')
@property
def variable(self):
if self.standard_name:
return self.standard_name
elif self.long_name:
return self.long_name
elif self.var_name:
return self.var_name
else:
return None
@property
def start_string(self):
if self.start_time is not None and self.time_units and self.calendar:
return safe_strftime(
cf_units.num2date(self.start_time, self.time_units,
self.calendar), '%Y-%m-%d')
else:
return None
@property
def end_string(self):
if self.end_time is not None and self.time_units and self.calendar:
return safe_strftime(
cf_units.num2date(self.end_time, self.time_units,
self.calendar), '%Y-%m-%d')
else:
return None
# Foreign Key Relationships
obs_set = models.ForeignKey(ObservationDataset, null=False, blank=False,
on_delete=CASCADE, verbose_name='Obs Set')
def __str__(self):
return '{} (Directory: {})'.format(self.name, self.incoming_directory)
```
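The `start_date_string` and `end_date_string` methods above turn a numeric time value, a units string and a calendar into a date via `cf_units.num2date`. A minimal standalone sketch of that conversion, assuming the `cf_units` package is installed; the values are illustrative and not taken from the database:
```python
import cf_units

# Hypothetical values of the kind stored on a DataFile record.
start_time = 360.0                      # numeric time in the units below
time_units = 'days since 1950-01-01'
calendar = '360_day'

# The same call that start_date_string makes, followed by formatting.
dto = cf_units.num2date(start_time, time_units, calendar)
print(dto.strftime('%Y-%m-%d'))         # '1951-01-01' in a 360-day calendar
```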
#### File: pdata_app/templatetags/cf_unit_date.py
```python
from django import template
register = template.Library()
@register.filter
def strftime(value, arg):
"""
Calls an object's strftime function.
"""
if value:
return value.strftime(arg)
else:
return None
```
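The filter above is meant to be used in a Django template after `{% load cf_unit_date %}`, for example `{{ some_date|strftime:"%Y-%m-%d" }}`. A small sketch calling the function directly, assuming Django is installed and `pdata_app` is importable; the values are illustrative:
```python
import datetime

from pdata_app.templatetags.cf_unit_date import strftime

print(strftime(datetime.datetime(2000, 1, 1), '%Y-%m-%d'))  # 2000-01-01
print(strftime(None, '%Y-%m-%d'))                           # None for empty values
```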
#### File: pdata_app/tests/test_esgf_utils.py
```python
from __future__ import unicode_literals, division, absolute_import
try:
from unittest import mock
except ImportError:
import mock
from django.test import TestCase
from pdata_app.utils.esgf_utils import add_data_request, parse_rose_stream_name
from .common import make_example_files
class TestParseRoseStreamName(TestCase):
def test_string_parsed(self):
rose_task_name = 'HadGEM3-GC31-LM_highresSST-present_r1i1p1f1_Amon_psl'
output_dict = parse_rose_stream_name(rose_task_name)
expected = {
'source_id': 'HadGEM3-GC31-LM',
'experiment_id': 'highresSST-present',
'variant_label': 'r1i1p1f1',
'table_id': 'Amon',
'cmor_name': 'psl'
}
self.assertDictEqual(output_dict, expected)
class TestAddDataRequest(TestCase):
def setUp(self):
make_example_files(self)
self.input_dict = {
'source_id': 't',
'experiment_id': 't',
'variant_label': 'r1i1p1f1',
'table_id': 'Amon',
'cmor_name': 'var1'
}
def test_dreq_added(self):
expected = self.input_dict.copy()
expected['data_req'] = self.dreq1
add_data_request(self.input_dict)
self.assertEqual(self.input_dict, expected)
@mock.patch('pdata_app.utils.esgf_utils.logger')
def test_debug_true(self, mock_logger):
add_data_request(self.input_dict)
mock_logger.debug.assert_called_with('Found data request {}'.
format(self.dreq1))
@mock.patch('pdata_app.utils.esgf_utils.logger')
def test_debug_false(self, mock_logger):
add_data_request(self.input_dict, debug_req_found=False)
mock_logger.debug.assert_not_called()
```
#### File: pdata_app/tests/test_replace_file.py
```python
from __future__ import unicode_literals, division, absolute_import
from django.contrib.auth.models import User
from django.test import TestCase
from pdata_app.models import (ActivityId, Checksum, ClimateModel, DataFile,
DataRequest, DataSubmission, Experiment,
Institute, Project, ReplacedFile, Settings,
VariableRequest)
from pdata_app.utils.dbapi import get_or_create
from pdata_app.utils.replace_file import replace_files, restore_files
from vocabs.vocabs import (CALENDARS, FREQUENCY_VALUES, STATUS_VALUES,
VARIABLE_TYPES)
class TestReplaceFile(TestCase):
""" Test pdata_app.utils.replace_file.replace_files """
def setUp(self):
# create the necessary DB objects
_make_test_data_objects(self)
def test_one_file(self):
self.assertEqual(3, DataFile.objects.count())
one_file = DataFile.objects.filter(name='file_one.nc')
replace_files(one_file)
self.assertEqual(2, DataFile.objects.count())
self.assertEqual(3, ReplacedFile.objects.count())
def test_all_files(self):
self.assertEqual(3, DataFile.objects.count())
one_file = DataFile.objects.all()
replace_files(one_file)
self.assertEqual(0, DataFile.objects.count())
self.assertEqual(5, ReplacedFile.objects.count())
def test_metadata_item_copied(self):
one_file = DataFile.objects.filter(name='file_one.nc')
replace_files(one_file)
old_file = ReplacedFile.objects.get(name='file_one.nc')
self.assertEqual('et:1234', old_file.tape_url)
def test_incoming_directory_copied(self):
one_file = DataFile.objects.filter(name='file_one.nc')
replace_files(one_file)
old_file = ReplacedFile.objects.get(name='file_one.nc')
self.assertEqual('/gws/MOHC/MY-MODEL/incoming/v12345678',
old_file.incoming_directory)
def test_metadata_foreign_key_copied(self):
one_file = DataFile.objects.filter(name='file_one.nc')
replace_files(one_file)
old_file = ReplacedFile.objects.get(name='file_one.nc')
self.assertEqual('MY-MODEL', old_file.climate_model.short_name)
def test_other_models_not_moved(self):
climate_model = ClimateModel.objects.first()
self.assertRaises(TypeError, replace_files, climate_model)
def test_checksum_copied(self):
first_file = DataFile.objects.get(name='file_one.nc')
checksum = Checksum.objects.create(checksum_value='1234',
checksum_type='ADLER32',
data_file=first_file)
one_file = DataFile.objects.filter(name='file_one.nc')
replace_files(one_file)
old_file = ReplacedFile.objects.get(name='file_one.nc')
self.assertEqual('1234', old_file.checksum_value)
def test_duplicate_files(self):
copy_file = DataFile.objects.get(name='file_one.nc')
orig_id = copy_file.id
copy_file.id = None
copy_file.save()
orig_file = DataFile.objects.filter(id=orig_id)
replace_files(orig_file)
copy_file = DataFile.objects.filter(name='file_one.nc')
replace_files(copy_file)
num_files = ReplacedFile.objects.filter(name='file_one.nc').count()
self.assertEqual(num_files, 2)
num_files = ReplacedFile.objects.filter(
name='file_one.nc',
incoming_directory='/gws/MOHC/MY-MODEL/incoming/v12345678'
).count()
self.assertEqual(num_files, 1)
num_files = ReplacedFile.objects.filter(
name='file_one.nc',
incoming_directory='/gws/MOHC/MY-MODEL/incoming/v12345678_1'
).count()
self.assertEqual(num_files, 1)
def test_limit_on_inc_dir(self):
copy_file = DataFile.objects.get(name='file_one.nc')
orig_id = copy_file.id
copy_file.id = None
copy_file.save()
orig_file = DataFile.objects.filter(id=orig_id)
replace_files(orig_file)
rep_file = ReplacedFile.objects.get(name='file_one.nc')
inc_dir = rep_file.incoming_directory
for n in range(1, 5):
rep_file.id = None
rep_file.incoming_directory = f'{inc_dir}_{n}'
rep_file.save()
copy_file = DataFile.objects.filter(name='file_one.nc')
self.assertRaises(ValueError, replace_files, copy_file)
class TestRestoreFiles(TestCase):
""" Test pdata_app.utils.replace_file.replace_files """
def setUp(self):
# create the necessary DB objects
_make_test_data_objects(self)
def test_files_restored(self):
restore_files(ReplacedFile.objects.filter(name__in=['file_four.nc',
'file_five.nc']))
self.assertEqual(0, ReplacedFile.objects.count())
self.assertEqual(5, DataFile.objects.count())
def test_metadata_item_copied(self):
restore_files(ReplacedFile.objects.filter(name__in=['file_four.nc',
'file_five.nc']))
data_files = DataFile.objects.order_by('name')
self.assertEqual('v12345678', data_files.last().version)
self.assertEqual('v87654321', data_files.first().version)
def test_metadata_foreign_key_copied(self):
restore_files(ReplacedFile.objects.filter(name__in=['file_four.nc',
'file_five.nc']))
data_file = DataFile.objects.get(name='file_four.nc')
self.assertEqual('experiment',
data_file.experiment.short_name)
def test_checksum(self):
restore_files(ReplacedFile.objects.filter(name__in=['file_four.nc',
'file_five.nc']))
data_file = DataFile.objects.get(name='file_five.nc')
self.assertEqual(1, data_file.checksum_set.count())
self.assertEqual('76543210',
data_file.checksum_set.first().checksum_value)
self.assertEqual('ADLER32',
data_file.checksum_set.first().checksum_type)
def _make_test_data_objects(self):
"""
Create the test DB objects
"""
proj = get_or_create(Project, short_name="CMIP6", full_name="6th "
"Coupled Model Intercomparison Project")
climate_model = get_or_create(ClimateModel, short_name="MY-MODEL",
full_name="Really good model")
institute = get_or_create(Institute, short_name='MOHC',
full_name='Met Office Hadley Centre')
act_id = get_or_create(ActivityId, short_name='HighResMIP',
full_name='High Resolution Model Intercomparison Project')
experiment = get_or_create(Experiment, short_name="experiment",
full_name="Really good experiment")
incoming_directory = '/gws/MOHC/MY-MODEL/incoming/v12345678'
var1 = get_or_create(VariableRequest, table_name='my-table',
long_name='very descriptive', units='1', var_name='my-var',
standard_name='var-name', cell_methods='time: mean',
positive='optimistic', variable_type=VARIABLE_TYPES['real'],
dimensions='massive', cmor_name='my-var', modeling_realm='atmos',
frequency=FREQUENCY_VALUES['ann'], cell_measures='', uid='123abc')
var2 = get_or_create(VariableRequest, table_name='your-table',
long_name='very descriptive', units='1', var_name='your-var',
standard_name='var-name', cell_methods='time: mean',
positive='optimistic', variable_type=VARIABLE_TYPES['real'],
dimensions='massive', cmor_name='your-var', modeling_realm='atmos',
frequency=FREQUENCY_VALUES['ann'], cell_measures='', uid='123abc')
self.dreq1 = get_or_create(DataRequest, project=proj,
institute=institute, climate_model=climate_model,
experiment=experiment, variable_request=var1, rip_code='r1i1p1f1',
request_start_time=0.0, request_end_time=23400.0,
time_units='days since 1950-01-01', calendar='360_day')
self.dreq2 = get_or_create(DataRequest, project=proj,
institute=institute, climate_model=climate_model,
experiment=experiment, variable_request=var2, rip_code='r1i1p1f1',
request_start_time=0.0, request_end_time=23400.0,
time_units='days since 1950-01-01', calendar='360_day')
self.user = get_or_create(User,
username=Settings.get_solo().contact_user_id)
dsub = get_or_create(DataSubmission, status=STATUS_VALUES['VALIDATED'],
incoming_directory=incoming_directory,
directory=incoming_directory, user=self.user)
df1 = get_or_create(DataFile, name='file_one.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var1, data_request=self.dreq1,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=0., end_time=359.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:1234',
data_submission=dsub)
df2 = get_or_create(DataFile, name='file_two.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var2, data_request=self.dreq2,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=0., end_time=359.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:5678',
data_submission=dsub)
df3 = get_or_create(DataFile, name='file_three.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var2, data_request=self.dreq2,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=360., end_time=719.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:8765',
data_submission=dsub)
rf1 = get_or_create(ReplacedFile, name='file_four.nc',
incoming_directory=incoming_directory, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var1, data_request=self.dreq1,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', start_time=0., end_time=359.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:1234',
data_submission=dsub, checksum_value='01234567',
checksum_type='ADLER32')
rf2 = get_or_create(ReplacedFile, name='file_five.nc',
incoming_directory=incoming_directory, size=2,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var1, data_request=self.dreq1,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', start_time=360., end_time=719.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v87654321', tape_url='et:4321',
data_submission=dsub, checksum_value='76543210',
checksum_type='ADLER32')
```
#### File: pdata_app/utils/attribute_update.py
```python
from abc import ABCMeta, abstractmethod
import logging
import os
import re
import shutil
import six
import tempfile
from pdata_app.models import (Checksum, ClimateModel, DataRequest, Institute,
Project, Settings, TapeChecksum)
from pdata_app.utils.common import (adler32, construct_drs_path,
construct_filename, get_gws,
delete_drs_dir, is_same_gws,
run_ncatted, run_ncrename)
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
class AttributeUpdateError(Exception):
"""
Base class for all custom exceptions
"""
pass
class FileOfflineError(AttributeUpdateError):
"""
Raised when a file is marked as offline in the DMT
"""
def __init__(self, directory, filename):
message = '{} is offline'.format(os.path.join(directory, filename))
Exception.__init__(self, message)
class FileNotOnDiskError(AttributeUpdateError):
"""
Raised when a file is not found on disk
"""
def __init__(self, directory, filename):
message = '{} was not found on disk'.format(os.path.join(directory,
filename))
Exception.__init__(self, message)
class SymLinkIsFileError(AttributeUpdateError):
"""
Raised when a file is found when a symbolic link was expected.
"""
def __init__(self, filepath):
message = ("{} was expected to be a symbolic link but isn't.".
format(filepath))
Exception.__init__(self, message)
@six.add_metaclass(ABCMeta)
class DmtUpdate(object):
"""
Abstract base class for any updates to files in the DMT.
"""
def __init__(self, datafile, new_value, update_file_only=False):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param str new_value: the new value to apply
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
self.datafile = datafile
self.new_value = new_value
self.old_filename = self.datafile.name
self.old_directory = self.datafile.directory
self.old_sym_link_dir = os.path.join(BASE_OUTPUT_DIR,
construct_drs_path(self.datafile))
self.new_filename = None
self.new_directory = None
self.update_file_only = update_file_only
@abstractmethod
def update(self):
"""
Update everything.
"""
pass
def _check_available(self):
"""
Check that the file is online in the DMT and can be found in its
specified location on disk.
:raises FileOfflineError: if file does not have a status of online in
the DMT.
:raises FileNotOnDiskError: if the file is not found on disk.
"""
if not self.datafile.online:
raise FileOfflineError(self.old_directory, self.old_filename)
if not os.path.exists(os.path.join(self.old_directory,
self.old_filename)):
raise FileNotOnDiskError(self.old_directory, self.old_filename)
@abstractmethod
def _update_file_attribute(self):
"""
Update the metadata attribute in the file. Assume the file has its
original path and name.
"""
pass
def _construct_filename(self):
"""
Construct the new filename.
"""
self.new_filename = construct_filename(self.datafile)
def _update_filename_in_db(self):
"""
Update the file's name in the database.
"""
self.datafile.name = self.new_filename
self.datafile.save()
def _update_checksum(self):
"""
Update the checksum and size of the file in the database, preserving
the original values. Assume the file has its new path and name.
"""
# Archive the checksum and calculate its new value
cs = self.datafile.checksum_set.first()
if not cs:
logger.warning('No checksum for {}'.format(self.datafile.name))
else:
if self.datafile.tapechecksum_set.count() == 0:
TapeChecksum.objects.create(
data_file=self.datafile,
checksum_value=cs.checksum_value,
checksum_type=cs.checksum_type
)
# Remove the original checksum now that the tape checksum's
# been created
cs.delete()
new_path = os.path.join(self.new_directory, self.new_filename)
Checksum.objects.create(
data_file=self.datafile,
checksum_type='ADLER32',
checksum_value=adler32(new_path)
)
# Update the file's size
if self.datafile.tape_size is None:
self.datafile.tape_size = self.datafile.size
self.datafile.size = os.path.getsize(new_path)
self.datafile.save()
def _construct_directory(self):
"""
Construct the new directory path.
"""
self.new_directory = os.path.join(get_gws(self.datafile.directory),
construct_drs_path(self.datafile))
def _update_directory_in_db(self):
"""
Update the file's directory.
"""
self.datafile.directory = self.new_directory
self.datafile.save()
def _rename_file(self):
"""
Rename the file on disk and move to its new directory. Update the link
from the primary directory.
"""
if not os.path.exists(self.new_directory):
os.makedirs(self.new_directory)
os.rename(os.path.join(self.old_directory, self.old_filename),
os.path.join(self.new_directory, self.new_filename))
# check for empty directory
if not os.listdir(self.old_directory):
delete_drs_dir(self.old_directory)
# Update the symbolic link if required
if not is_same_gws(self.old_directory, BASE_OUTPUT_DIR):
old_link_path = os.path.join(self.old_sym_link_dir,
self.old_filename)
if os.path.lexists(old_link_path):
if not os.path.islink(old_link_path):
logger.error("{} exists and isn't a symbolic link.".
format(old_link_path))
raise SymLinkIsFileError(old_link_path)
else:
# it is a link so remove it
os.remove(old_link_path)
# check for empty directory
if not os.listdir(self.old_sym_link_dir):
delete_drs_dir(self.old_sym_link_dir)
new_link_dir = os.path.join(BASE_OUTPUT_DIR,
construct_drs_path(self.datafile))
if not os.path.exists(new_link_dir):
os.makedirs(new_link_dir)
os.symlink(os.path.join(self.new_directory, self.new_filename),
os.path.join(new_link_dir, self.new_filename))
@six.add_metaclass(ABCMeta)
class DataRequestUpdate(DmtUpdate):
"""
Abstract base class for updates that require a move of the files to a
different DataRequest object.
"""
def __init__(self, datafile, new_value, update_file_only=False):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param str new_value: the new value to apply
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
super(DataRequestUpdate, self).__init__(datafile, new_value,
update_file_only)
# The name and value of the data_request attribute being modified
self.data_req_attribute_name = None
self.data_req_attribute_value = None
# The destination data_request
self.new_dreq = None
def update(self):
"""
Update everything.
"""
if not self.update_file_only:
# Default mode of operation. Update the data request and
# everything.
self._find_new_dreq()
self._check_available()
self._update_database_attribute()
self._update_file_attribute()
self._construct_filename()
self._update_filename_in_db()
self._construct_directory()
self._update_directory_in_db()
self._rename_file()
self._update_checksum()
self._move_dreq()
else:
# For when this has been run before and we just need to update
# files that have been pulled from disk again.
self.old_filename = self.datafile.incoming_name
self._check_available()
self._update_file_attribute()
self._construct_filename()
self._construct_directory()
self._rename_file()
self._update_checksum()
def _find_new_dreq(self):
"""
Find the new data request. If the data request can't be found (or
there are multiple matches) then Django will raise an exception so that
no changes are made to the files or DB.
"""
if self.data_req_attribute_name is None:
raise NotImplementedError("data_req_attribute_name hasn't been "
"set.")
if self.data_req_attribute_value is None:
raise NotImplementedError("data_req_attribute_value hasn't been "
"set.")
# the default values from the existing data request
dreq_dict = {
'project': self.datafile.data_request.project,
'institute': self.datafile.data_request.institute,
'climate_model': self.datafile.data_request.climate_model,
'experiment': self.datafile.data_request.experiment,
'variable_request': self.datafile.data_request.variable_request,
'rip_code': self.datafile.data_request.rip_code
}
# overwrite with the new value
dreq_dict[self.data_req_attribute_name] = self.data_req_attribute_value
# find the data request
self.new_dreq = DataRequest.objects.get(**dreq_dict)
@abstractmethod
def _update_database_attribute(self):
"""
Update the attribute in the database.
"""
pass
def _move_dreq(self):
"""
Move the data file to the new data request
"""
self.datafile.data_request = self.new_dreq
self.datafile.save()
class SourceIdUpdate(DataRequestUpdate):
"""
Update a DataFile's source_id (climate model).
"""
def __init__(self, datafile, new_value, update_file_only=False):
"""
Initialise the class
"""
super(SourceIdUpdate, self).__init__(datafile, new_value,
update_file_only)
self.data_req_attribute_name = 'climate_model'
self.data_req_attribute_value = ClimateModel.objects.get(
short_name=self.new_value
)
def _update_database_attribute(self):
"""
Update the source_id
"""
new_source_id = ClimateModel.objects.get(short_name=self.new_value)
self.datafile.climate_model = new_source_id
self.datafile.save()
def _update_file_attribute(self):
"""
Update the source_id and make the same change in the further_info_url.
Assume the file has its original path and name.
"""
# source_id
run_ncatted(self.old_directory, self.old_filename,
'source_id', 'global', 'c', self.new_value, False)
# further_info_url
further_info_url = ('https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.'
'{}'.format(self.datafile.project.short_name,
self.datafile.institute.short_name,
self.new_value,
self.datafile.experiment.short_name,
self.datafile.rip_code))
run_ncatted(self.old_directory, self.old_filename,
'further_info_url', 'global', 'c', further_info_url, False)
class MipEraUpdate(DataRequestUpdate):
"""
Update a DataFile's mip_era (project in the DMT).
"""
def __init__(self, datafile, new_value, update_file_only=False,
temp_dir=None):
"""
Initialise the class
"""
super(MipEraUpdate, self).__init__(datafile, new_value,
update_file_only)
self.data_req_attribute_name = 'project'
self.data_req_attribute_value = Project.objects.get(
short_name=self.new_value
)
self.temp_dir = temp_dir
def _update_database_attribute(self):
"""
Update the mip_era (project)
"""
new_mip_era = Project.objects.get(short_name=self.new_value)
self.datafile.project = new_mip_era
self.datafile.save()
def _update_file_attribute(self):
"""
Update the mip_era and make the same change in the further_info_url.
Assume the file has its original path and name.
"""
if self.temp_dir:
orig_path = os.path.join(self.old_directory, self.old_filename)
temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
temp_path = os.path.join(temp_dir, self.old_filename)
shutil.copyfile(orig_path, temp_path)
working_dir = temp_dir
else:
working_dir = self.old_directory
# mip_era
run_ncatted(working_dir, self.old_filename,
'mip_era', 'global', 'c', self.new_value, False)
# further_info_url
further_info_url = ('https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.'
'{}'.format(self.new_value,
self.datafile.institute.short_name,
self.datafile.climate_model.short_name,
self.datafile.experiment.short_name,
self.datafile.rip_code))
run_ncatted(working_dir, self.old_filename,
'further_info_url', 'global', 'c', further_info_url, False)
if self.temp_dir:
os.rename(orig_path, orig_path + '.old')
shutil.copyfile(temp_path, orig_path)
os.remove(orig_path + '.old')
os.remove(temp_path)
os.rmdir(temp_dir)
class InstitutionIdUpdate(DataRequestUpdate):
"""
Update a DataFile's institution_id.
"""
def __init__(self, datafile, new_value, update_file_only=False,
temp_dir=None):
"""
Initialise the class
"""
super(InstitutionIdUpdate, self).__init__(datafile, new_value,
update_file_only)
self.data_req_attribute_name = 'institute'
self.data_req_attribute_value = Institute.objects.get(
short_name=self.new_value
)
self.temp_dir = temp_dir
def _update_database_attribute(self):
"""
Update the institution_id.
"""
new_institute = Institute.objects.get(short_name=self.new_value)
self.datafile.institute = new_institute
self.datafile.save()
def _update_file_attribute(self):
"""
Update the institution_id and make the same change in further_info_url,
institution and license.
Assume the file has its original path and name.
"""
if self.temp_dir:
orig_path = os.path.join(self.old_directory, self.old_filename)
temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
temp_path = os.path.join(temp_dir, self.old_filename)
shutil.copyfile(orig_path, temp_path)
working_dir = temp_dir
else:
working_dir = self.old_directory
# institution_id
run_ncatted(working_dir, self.old_filename,
'institution_id', 'global', 'c', self.new_value, False)
# institution
new_insts = {
'MOHC': 'Met Office Hadley Centre, Fitzroy Road, Exeter, Devon, '
'EX1 3PB, UK',
'NERC': 'Natural Environment Research Council, STFC-RAL, Harwell, '
'Oxford, OX11 0QX, UK'
}
inst = new_insts[self.new_value]
run_ncatted(working_dir, self.old_filename,
'institution', 'global', 'c', inst)
# further_info_url
further_info_url = (
'https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.{}'.
format(self.datafile.project.short_name,
self.new_value,
self.datafile.climate_model.short_name,
self.datafile.experiment.short_name,
self.datafile.rip_code))
run_ncatted(working_dir, self.old_filename,
'further_info_url', 'global', 'c', further_info_url)
# license
license_txt = (
f'CMIP6 model data produced by {self.new_value} is licensed under '
f'a Creative Commons Attribution-ShareAlike 4.0 International '
f'License (https://creativecommons.org/licenses). Consult '
f'https://pcmdi.llnl.gov/CMIP6/TermsOfUse for terms of use '
f'governing CMIP6 output, including citation requirements and '
f'proper acknowledgment. Further information about this data, '
f'including some limitations, can be found via the '
f'further_info_url (recorded as a global attribute in this file). '
f'The data producers and data providers make no warranty, either '
f'express or implied, including, but not limited to, warranties '
f'of merchantability and fitness for a particular purpose. All '
f'liabilities arising from the supply of the information '
f'(including any liability arising in negligence) are excluded to '
f'the fullest extent permitted by law.'
)
run_ncatted(working_dir, self.old_filename,
'license', 'global', 'c', license_txt)
if self.temp_dir:
os.rename(orig_path, orig_path + '.old')
shutil.copyfile(temp_path, orig_path)
os.remove(orig_path + '.old')
os.remove(temp_path)
os.rmdir(temp_dir)
class VariantLabelUpdate(DataRequestUpdate):
"""
Update a DataFile's variant_label (rip_code).
"""
def __init__(self, datafile, new_value, update_file_only=False):
"""
Initialise the class
"""
super(VariantLabelUpdate, self).__init__(datafile, new_value,
update_file_only)
self.data_req_attribute_name = 'rip_code'
self.data_req_attribute_value = self.new_value
def _update_database_attribute(self):
"""
Update the variant label
"""
self.datafile.rip_code = self.new_value
self.datafile.save()
def _update_file_attribute(self):
"""
Update the variant_label and make the same change in its constituent
parts and the further_info_url. Assume the file has its original path
and name.
"""
# variant_label
run_ncatted(self.old_directory, self.old_filename,
'variant_label', 'global', 'c', self.new_value, False)
# indexes
ripf = re.match(r'^r(\d+)i(\d+)p(\d+)f(\d+)$', self.new_value)
run_ncatted(self.old_directory, self.old_filename,
'realization_index', 'global', 's', int(ripf.group(1)),
False)
run_ncatted(self.old_directory, self.old_filename,
'initialization_index', 'global', 's', int(ripf.group(2)),
False)
run_ncatted(self.old_directory, self.old_filename,
'physics_index', 'global', 's', int(ripf.group(3)),
False)
run_ncatted(self.old_directory, self.old_filename,
'forcing_index', 'global', 's', int(ripf.group(4)),
False)
# further_info_url
further_info_url = ('https://furtherinfo.es-doc.org/{}.{}.{}.{}.none.'
'{}'.format(self.datafile.project.short_name,
self.datafile.institute.short_name,
self.datafile.climate_model.short_name,
self.datafile.experiment.short_name,
self.new_value))
run_ncatted(self.old_directory, self.old_filename,
'further_info_url', 'global', 'c', further_info_url, False)
class CorrectDirectoryUpdate(DmtUpdate):
"""
Move the files into the correct directory. This is useful as some older
data uses the cmor name rather than out name in its current directory
structure.
"""
def __init__(self, datafile, update_file_only=False):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
super(CorrectDirectoryUpdate, self).__init__(datafile, '',
update_file_only)
def update(self):
"""
Update everything.
"""
self._check_available()
self.new_filename = self.old_filename
self._construct_directory()
self._update_directory_in_db()
self._rename_file()
def _update_file_attribute(self):
"""
This method is required due to the abstract parent class' design but
no changes need to be made to the file other than renaming it and so
this method is empty.
"""
pass
class CorrectFileNameUpdate(DmtUpdate):
"""
Generate the correct filename for a file. This is useful when the date
string in the filename is incorrect.
"""
def __init__(self, datafile, update_file_only=False):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
super(CorrectFileNameUpdate, self).__init__(datafile, '',
update_file_only)
def update(self):
"""
Update everything.
"""
self._check_available()
self._construct_filename()
self.new_directory = self.old_directory
self._update_filename_in_db()
self._rename_file()
def _update_file_attribute(self):
"""
This method is required due to the abstract parent class' design but
no changes need to be made to the file other than renaming it and so
this method is empty.
"""
pass
class AddClimatologyFileNameUpdate(DmtUpdate):
"""
Append a filename with -clim to indicate a climatology.
"""
def __init__(self, datafile, update_file_only=False):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
super(AddClimatologyFileNameUpdate, self).__init__(datafile, '',
update_file_only)
def update(self):
"""
Update everything.
"""
self._check_available()
self.new_filename = self.datafile.name.replace('.nc', '-clim.nc')
self.new_directory = self.old_directory
self._update_filename_in_db()
self._rename_file()
def _update_file_attribute(self):
"""
This method is required due to the abstract parent class' design but
no changes need to be made to the file other than renaming it and so
this method is empty.
"""
pass
class VarNameToOutNameUpdate(DmtUpdate):
"""
Update a file's name and contents from cmor_name to out_name.
"""
def __init__(self, datafile, update_file_only=False,
temp_dir=None):
"""
Initialise the class
:param pdata_apps.models.DataFile datafile: the file to update
:param bool update_file_only: if true then update just the file and
don't make any changes to the database.
"""
super(VarNameToOutNameUpdate, self).__init__(datafile, '',
update_file_only)
# Let's do some checks to make sure that this is a sensible change to
# make.
if self.datafile.variable_request.out_name is None:
raise ValueError(f'File {self.datafile.name} out_name is not '
f'defined.')
if not self.update_file_only:
expected_file_start = (self.datafile.variable_request.cmor_name +
'_')
if not self.datafile.name.startswith(expected_file_start):
raise ValueError(f'File {self.datafile.name} does not start '
f'with {expected_file_start}')
self.temp_dir = temp_dir
def update(self):
"""
Update everything.
"""
if not self.update_file_only:
# Default mode of operation. Update the data request and
# everything.
self._check_available()
self._update_file_attribute()
self._construct_filename()
self._update_filename_in_db()
self._construct_directory()
self._update_directory_in_db()
self._rename_file()
self._update_checksum()
else:
# For when this has been run before and we just need to update
# files that have been pulled from disk again.
self._check_available()
self._update_file_attribute()
self.new_filename = self.old_filename
self.new_directory = self.old_directory
self._update_checksum()
def _update_file_attribute(self):
"""
Rename the variable inside the file. Assume the file has its original
path and name.
"""
if self.temp_dir:
orig_path = os.path.join(self.old_directory, self.old_filename)
temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
temp_path = os.path.join(temp_dir, self.old_filename)
shutil.copyfile(orig_path, temp_path)
working_dir = temp_dir
else:
working_dir = self.old_directory
run_ncrename(working_dir, self.old_filename,
self.datafile.variable_request.cmor_name,
self.datafile.variable_request.out_name, False)
run_ncatted(working_dir, self.old_filename,
'variable_id', 'global', 'c',
self.datafile.variable_request.out_name, True)
if self.temp_dir:
os.rename(orig_path, orig_path + '.old')
shutil.copyfile(temp_path, orig_path)
os.remove(orig_path + '.old')
os.remove(temp_path)
os.rmdir(temp_dir)
```
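As a usage illustration only, the updater classes above are driven one `DataFile` at a time. A hedged sketch for a source_id change, assuming `DJANGO_SETTINGS_MODULE` is set, the file name is hypothetical, and a matching `ClimateModel` and destination `DataRequest` already exist:
```python
import django
django.setup()

from pdata_app.models import DataFile
from pdata_app.utils.attribute_update import SourceIdUpdate

# Hypothetical file and target source_id; both must already exist in the DB.
datafile = DataFile.objects.get(name='file_one.nc')
updater = SourceIdUpdate(datafile, 'NEW-MODEL')
updater.update()  # updates the DB, the file's global attributes, its name and DRS path
```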
#### File: pdata_app/utils/esgf_utils.py
```python
from __future__ import unicode_literals, division, absolute_import
import logging
import django
from pdata_app.models import DataRequest
logger = logging.getLogger(__name__)
def add_data_request(stream_cmpts, debug_req_found=True):
"""
Using the dictionary of components of the stream name, find the
corresponding pdata_app.models.DataRequest and add this to the
dictionary.
:param dict stream_cmpts: The components of the dataset stream.
:param bool debug_req_found: If True then generate a debug message naming
the request found.
"""
try:
stream_cmpts['data_req'] = DataRequest.objects.get(
climate_model__short_name=stream_cmpts['source_id'],
experiment__short_name=stream_cmpts['experiment_id'],
rip_code=stream_cmpts['variant_label'],
variable_request__table_name=stream_cmpts['table_id'],
variable_request__cmor_name=stream_cmpts['cmor_name']
)
except django.core.exceptions.ObjectDoesNotExist:
msg = ('Cannot find DataRequest with: climate_model__short_name={}, '
'experiment__short_name={}, variant_label={}, '
'variable_request__table_name={}, '
'variable_request__cmor_name={}'.format(
stream_cmpts['source_id'], stream_cmpts['experiment_id'],
stream_cmpts['variant_label'], stream_cmpts['table_id'],
stream_cmpts['cmor_name']
))
logger.error(msg)
raise
if debug_req_found:
logger.debug('Found data request {}'.format(stream_cmpts['data_req']))
def parse_rose_stream_name(stream_name):
"""
Convert the Rose stream name given to this ESGF dataset into more useful
components.
:param str stream_name: The Rose stream name given to this ESGF data set.
:returns: The dataset components from the stream name.
:rtype: dict
"""
cmpts = stream_name.split('_')
cmpt_names = ['source_id', 'experiment_id', 'variant_label',
'table_id', 'cmor_name']
return {cmpt_name: cmpts[index]
for index, cmpt_name in enumerate(cmpt_names)}
```
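A brief usage sketch of the two helpers above, mirroring the unit tests earlier in this listing; it assumes Django has been set up and that exactly one matching `DataRequest` exists:
```python
from pdata_app.utils.esgf_utils import add_data_request, parse_rose_stream_name

cmpts = parse_rose_stream_name(
    'HadGEM3-GC31-LM_highresSST-present_r1i1p1f1_Amon_psl')
# cmpts == {'source_id': 'HadGEM3-GC31-LM',
#           'experiment_id': 'highresSST-present',
#           'variant_label': 'r1i1p1f1',
#           'table_id': 'Amon',
#           'cmor_name': 'psl'}

add_data_request(cmpts)  # adds cmpts['data_req'], or raises if no single match
```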
#### File: primavera-dmt/scripts/auto_retrieve.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import logging.config
import os
import subprocess
import sys
from time import sleep
import django
django.setup()
from django.template.defaultfilters import filesizeformat
from pdata_app.models import RetrievalRequest, Settings
from pdata_app.utils.common import get_request_size, PAUSE_FILES
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
ONE_HOUR = 60 * 60
TWO_TEBIBYTES = 2 * 2 ** 40
# The institution_ids that should be retrieved from MASS
MASS_INSTITUTIONS = ['MOHC', 'NERC']
# The top-level directory to write output data to
STREAM1_DIR = Settings.get_solo().current_stream1_dir
def run_retrieve_request(retrieval_id):
"""
Run retrieve_request.py in a subprocess to fetch the appropriate data
from tape to disk
:param int retrieval_id:
"""
retrieval_request = RetrievalRequest.objects.get(id=retrieval_id)
if get_request_size(retrieval_request.data_request.all(),
retrieval_request.start_year,
retrieval_request.end_year) > TWO_TEBIBYTES:
logger.warning('Skipping retrieval {} as it is bigger than {}.'.format(
retrieval_id, filesizeformat(TWO_TEBIBYTES)
))
return
cmd = ('{} {} -l debug --skip_checksums -a {} {}'.format(sys.executable,
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'retrieve_request.py')),
STREAM1_DIR,
retrieval_id))
try:
subprocess.check_output(cmd, shell=True).decode('utf-8')
except OSError as exc:
logger.error('Unable to run command:\n{}\n{}'.format(cmd,
exc.strerror))
sys.exit(1)
except subprocess.CalledProcessError as exc:
logger.error('Retrieval failed: {}\n{}'.format(cmd, exc.output))
else:
logger.debug('Retrieved id {}'.format(retrieval_id))
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Automatically perform '
'PRIMAVERA retrieval '
'requests.')
tape_sys = parser.add_mutually_exclusive_group(required=True)
tape_sys.add_argument("-m", "--mass", help="Restore data from MASS",
action='store_true')
tape_sys.add_argument("-e", "--et", help="Restore data from elastic tape",
action='store_true')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
logger.debug('Starting auto_retrieve.py')
while True:
ret_reqs = (RetrievalRequest.objects.filter(date_complete__isnull=True,
date_deleted__isnull=True).
order_by('date_created'))
for ret_req in ret_reqs:
# check for retrievals that are purely elastic tape or purely MASS
if args.mass:
# however, if pausing the system jump to the wait
if os.path.exists(PAUSE_FILES['moose:']):
logger.debug('Waiting due to {}'.
format(PAUSE_FILES['moose:']))
break
if (ret_req.data_request.filter
(institute__short_name__in=MASS_INSTITUTIONS).count()
and not ret_req.data_request.exclude
(institute__short_name__in=MASS_INSTITUTIONS).count()):
run_retrieve_request(ret_req.id)
elif args.et:
# however, if pausing the system jump to the wait
if os.path.exists(PAUSE_FILES['et:']):
logger.debug('Waiting due to {}'.
format(PAUSE_FILES['et:']))
break
if (ret_req.data_request.exclude
(institute__short_name__in=MASS_INSTITUTIONS).count()
and not ret_req.data_request.filter
(institute__short_name__in=MASS_INSTITUTIONS).count()):
run_retrieve_request(ret_req.id)
else:
raise NotImplementedError('Unknown tape system specified.')
logger.debug('Waiting for one hour at {}'.format(
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')))
sleep(ONE_HOUR)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
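The filter/exclude pair in `main()` above decides whether a retrieval belongs purely to one tape system. A restatement of that test as a helper function, purely for clarity; the function name is not part of the repository and it expects a `RetrievalRequest` instance:
```python
def is_purely_mass(ret_req, mass_institutions):
    """Return True when every data request in the retrieval is from a MASS institution."""
    from_mass = ret_req.data_request.filter(
        institute__short_name__in=mass_institutions).count()
    from_elsewhere = ret_req.data_request.exclude(
        institute__short_name__in=mass_institutions).count()
    return bool(from_mass) and not from_elsewhere
```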
#### File: primavera-dmt/scripts/create_esgf_retrieval.py
```python
import argparse
import datetime
import json
import logging.config
import sys
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import RetrievalRequest, Settings
from pdata_app.utils.esgf_utils import add_data_request, parse_rose_stream_name
__version__ = '0.1.0b'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(
description='Create a retrieval request from a JSON list of the Rose '
'task names that should be submitted to CREPP.'
)
parser.add_argument('json_file', help='the path to the JSON file to read')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the '
'default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
Task names in the JSON file are in the form:
<climate-model>_<experiment>_<variant-label>_<table>_<variable>
e.g.:
HadGEM3-GC31-LM_highresSST-present_r1i1p1f1_Amon_psl
"""
with open(args.json_file) as fh:
task_names = json.load(fh)
logger.debug('{} task names loaded'.format(len(task_names)))
system_user = User.objects.get(username=Settings.get_solo().contact_user_id)
ret_req = RetrievalRequest.objects.create(requester=system_user,
start_year=1900, end_year=2100)
time_zone = datetime.timezone(datetime.timedelta())
ret_req.date_created = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=time_zone)
ret_req.save()
for task_name in task_names:
task_cmpts = parse_rose_stream_name(task_name)
add_data_request(task_cmpts, debug_req_found=False)
ret_req.data_request.add(task_cmpts['data_req'])
logger.debug('Request id {} created'.format(ret_req.id))
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
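The script above expects its `json_file` argument to be a plain JSON list of Rose task names in the `<climate-model>_<experiment>_<variant-label>_<table>_<variable>` form described in `main()`. A sketch of creating such a file; the path is hypothetical and the task name is the example used in the tests:
```python
import json

task_names = [
    'HadGEM3-GC31-LM_highresSST-present_r1i1p1f1_Amon_psl',
]

with open('crepp_task_names.json', 'w') as fh:  # hypothetical path
    json.dump(task_names, fh, indent=2)

# The script would then be invoked along the lines of:
#   python create_esgf_retrieval.py crepp_task_names.json -l debug
```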
#### File: primavera-dmt/scripts/create_root_chown_list.py
```python
from __future__ import unicode_literals, division, absolute_import
from multiprocessing.pool import ThreadPool
try:
import dask
except ImportError:
pass
import iris
import django
django.setup()
from pdata_app.models import DataSubmission
from vocabs.vocabs import STATUS_VALUES
STATUS_TO_PROCESS = STATUS_VALUES['PENDING_PROCESSING']
OUTPUT_FILE = ('/home/users/jseddon/primavera/root_cron/'
'primavera_chown_list.txt')
def main():
# Limit the number of Dask threads
if not iris.__version__.startswith('1.'):
dask.config.set(pool=ThreadPool(2))
submissions = DataSubmission.objects.filter(status=STATUS_TO_PROCESS)
with open(OUTPUT_FILE, 'w') as fh:
for submission in submissions:
fh.write(submission.directory + '\n')
if __name__ == "__main__":
main()
```
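The iris/dask guard in `main()` caps Dask at two threads before any cubes are loaded. The same configuration shown on its own, assuming dask is installed; included only to isolate the pattern:
```python
from multiprocessing.pool import ThreadPool

import dask

# Restrict Dask's threaded scheduler to two worker threads, as main() does
# before any iris computation is triggered.
dask.config.set(pool=ThreadPool(2))
```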
#### File: scripts/et_indexer/parallel_datafile_update.py
```python
import argparse
import itertools
from multiprocessing import Process, Manager
import os
from ncdf_indexer import NetCDFIndexedFile
import django
django.setup()
def update_file(params):
"""
Update individual files, taking paths from a queue until a None
sentinel is received.
:param params: queue of the complete paths of the files to process
"""
while True:
# close existing connections so that a fresh connection is made
django.db.connections.close_all()
filename = params.get()
if filename is None:
return
quiet = cmd_args.quiet
f_index = NetCDFIndexedFile(filename)
if not quiet:
print "checksum: {}".format(filename)
try:
f_index.retrieve()
f_index.checksum(checksummer=cmd_args.checksum,
overwrite=cmd_args.overwrite,
quiet=quiet)
except Exception as err:
print "Failed:", err
if not quiet:
print "variable summary:", filename
try:
f_index.calculate_variable_summary()
except Exception as err:
print "Error during calculate_variable_summary:", err
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(
description="Update data file inventory: checksums and value ranges")
parser.add_argument("filelist", type=str,
help="File containing list of files to add to inventory database")
parser.add_argument('-p', '--processes', help='the number of parallel '
'processes to use (default: %(default)s)', default=8, type=int)
parser.add_argument("--checksum", type=str,
default="/gws/nopw/j04/primavera1/tools/adler32/adler32",
help="checksum routine to use")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing checksums (USE WITH CARE)")
parser.add_argument("--quiet", "-q", action="store_true",
help="minimal output")
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
quiet = args.quiet
raw_filenames = []
with open(args.filelist, "r") as fh:
for f in fh.readlines():
raw_filenames.append(f.strip())
if not quiet:
print "Resolving links:"
files = map(os.path.realpath, raw_filenames)
for f, rf in zip(raw_filenames, files):
if f != rf:
if not quiet:
print "\t%s -> %s" % (f, rf)
jobs = []
manager = Manager()
params = manager.Queue()
for i in range(args.processes):
p = Process(target=update_file, args=(params,))
jobs.append(p)
p.start()
iters = itertools.chain(files, (None,) * args.processes)
for item in iters:
params.put(item)
for j in jobs:
j.join()
if __name__ == '__main__':
cmd_args = parse_args()
# run the code
main(cmd_args)
```
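The worker pool above relies on a queue-and-sentinel pattern: one `None` is queued per process and each worker returns when it receives `None`. A self-contained sketch of just that pattern, independent of the indexing code:
```python
import itertools
from multiprocessing import Manager, Process


def worker(queue):
    while True:
        item = queue.get()
        if item is None:      # sentinel: no more work for this process
            return
        print('processing', item)


if __name__ == '__main__':
    n_procs = 2
    manager = Manager()
    queue = manager.Queue()
    procs = [Process(target=worker, args=(queue,)) for _ in range(n_procs)]
    for p in procs:
        p.start()
    # one sentinel per worker, exactly as in main() above
    for item in itertools.chain(['a.nc', 'b.nc', 'c.nc'], (None,) * n_procs):
        queue.put(item)
    for p in procs:
        p.join()
```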
#### File: primavera-dmt/scripts/incoming_to_drs.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import logging.config
import os
import shutil
import sys
import django
django.setup()
from pdata_app.models import Settings, DataSubmission
from pdata_app.utils.common import is_same_gws, construct_drs_path
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
def _get_submission_object(submission_dir):
"""
:param str submission_dir: The path of the submission's top level
directory.
:returns: The object corresponding to the submission.
:rtype: pdata_app.models.DataSubmission
"""
try:
data_sub = DataSubmission.objects.get(incoming_directory=submission_dir)
except django.core.exceptions.MultipleObjectsReturned:
msg = 'Multiple DataSubmissions found for directory: {}'.format(
submission_dir)
logger.error(msg)
sys.exit(1)
except django.core.exceptions.ObjectDoesNotExist:
msg = ('No DataSubmissions have been found in the database for '
'directory: {}.'.format(submission_dir))
logger.error(msg)
sys.exit(1)
return data_sub
def parse_args():
"""
Parse command-line arguments
"""
    parser = argparse.ArgumentParser(description='Copy files from their '
'incoming directory into the central DRS structure.')
parser.add_argument('directory', help="the submission's top-level "
"directory")
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('-a', '--alternative', help='store data in alternative '
'directory and create a symbolic link to each file from the main '
'retrieval directory')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
logger.debug('Starting incoming_to_drs.py')
data_sub = _get_submission_object(os.path.normpath(args.directory))
if not args.alternative:
drs_base_dir = BASE_OUTPUT_DIR
else:
drs_base_dir = args.alternative
errors_encountered = False
for data_file in data_sub.datafile_set.order_by('name'):
# make full path of existing file
existing_path = os.path.join(data_file.directory, data_file.name)
# make full path of where it will live
drs_sub_path = construct_drs_path(data_file)
drs_dir = os.path.join(drs_base_dir, drs_sub_path)
drs_path = os.path.join(drs_dir, data_file.name)
# check the destination directory exists
if not os.path.exists(drs_dir):
os.makedirs(drs_dir)
        # move the file into the DRS directory
this_file_error = False
try:
os.rename(existing_path, drs_path)
except OSError as exc:
logger.error('Unable to link from {} to {}. {}'.
format(existing_path, drs_path, str(exc)))
errors_encountered = True
this_file_error = True
# update the file's location in the database
if not this_file_error:
data_file.directory = drs_dir
if not data_file.online:
data_file.online = True
data_file.save()
# if storing the files in an alternative location, create a sym link
# from the primary DRS structure to the file
if not is_same_gws(BASE_OUTPUT_DIR, drs_base_dir):
primary_path = os.path.join(BASE_OUTPUT_DIR, drs_sub_path)
try:
if not os.path.exists(primary_path):
os.makedirs(primary_path)
os.symlink(drs_path, os.path.join(primary_path, data_file.name))
except OSError as exc:
logger.error('Unable to link from {} to {}. {}'.
format(drs_path,
os.path.join(primary_path, data_file.name),
str(exc)))
errors_encountered = True
# summarise what happened and keep the DB updated
if not errors_encountered:
logger.debug('All files copied with no errors. Data submission '
'incoming directory can be deleted.')
else:
logger.error('Errors were encountered. Please fix these before '
'deleting the incoming directory.')
logger.debug('Completed incoming_to_drs.py')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: primavera-dmt/scripts/populate_data_request.py
```python
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from datetime import datetime
import httplib2
import os
import re
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from cf_units import (CALENDAR_360_DAY, CALENDAR_GREGORIAN,
CALENDAR_PROLEPTIC_GREGORIAN, CALENDAR_STANDARD,
date2num)
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
import django
django.setup()
from pdata_app.models import (DataRequest, Institute, Project, Settings,
ClimateModel, Experiment, VariableRequest)
from pdata_app.utils.dbapi import match_one, get_or_create
# The ID of the Google Speadsheet (taken from the sheet's URL)
SPREADSHEET_ID = '1ewKkyuaUq99HUefWIdb3JzqUwwnPLNUGJxbQyqa-10U'
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'client_secret_920707869718-bjp97l2ikhi0qdqi4ibb5ivcpmnml7n8.apps.googleusercontent.com.json'
APPLICATION_NAME = 'PRIMAVERA-DMT'
def get_credentials():
"""
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'sheets.googleapis.'
'com-populate_variable_'
'request.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
curr_dir = os.path.dirname(__file__)
secret_file_path = os.path.abspath(os.path.join(curr_dir, '..', 'etc',
CLIENT_SECRET_FILE))
flow = client.flow_from_clientsecrets(secret_file_path, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def is_ecmwf(sheet_cell):
"""Is this variable produced by ECMWF?"""
if not sheet_cell:
return False
int_string = re.findall(r'-?\d+', sheet_cell)
if int_string:
if (int_string[0] == '1' or
int_string[0] == '2'):
return True
else:
return False
else:
return False
def is_awi(sheet_cell):
"""Is this variable produced by AWI?"""
if not sheet_cell:
return False
status_ignored = sheet_cell.split(':')[-1].strip().upper()
if status_ignored == 'X' or status_ignored == 'LIMITED':
return True
elif status_ignored == 'FALSE':
return False
else:
print('Unknown AWI status: {}. Ignoring.'.format(sheet_cell))
return False
def is_cnrm(sheet_cell):
"""Is this variable produced by CNRM?"""
if not sheet_cell:
return False
int_string = re.findall(r'-?\d+', sheet_cell)
if int_string:
if (int_string[0] == '1' or
int_string[0] == '2' or
int_string[0] == '3'):
return True
elif (int_string[0] == '-1' or
int_string[0] == '-2' or
int_string[0] == '-3' or
int_string[0] == '-999'):
return False
else:
print('Unknown CNRM status: {}. Ignoring.'.format(sheet_cell))
return False
def is_cmcc(sheet_cell):
"""Is this variable produced by CMCC?"""
if not sheet_cell:
return False
if sheet_cell.upper() == 'FALSE':
return False
else:
return True
def is_ec_earth(sheet_cell):
"""Is this variable produced by `institute` using ECEarth?"""
if not sheet_cell:
return False
if sheet_cell.upper() == 'X' or sheet_cell.upper() == 'LIMITED':
return True
elif sheet_cell.upper() == 'FALSE' or sheet_cell.upper() == 'NO':
return False
else:
print('Unknown EC-Earth status: {}. Ignoring.'.format(sheet_cell))
return False
def is_mpi(sheet_cell):
"""Is this variable produced by MPI?"""
if not sheet_cell:
return False
status_ignored = sheet_cell.split(':')[-1].strip().upper()
if status_ignored == 'X' or status_ignored == 'LIMITED':
return True
elif status_ignored == 'FALSE' or status_ignored == 'NO':
return False
else:
print('Unknown MPI status: {}. Ignoring.'.format(sheet_cell))
return False
def is_metoffice(sheet_cell):
"""Is this variable produced by the Met Office?"""
if not sheet_cell:
return False
not_producing_values = ['CHECK', 'FALSE']
for value in not_producing_values:
if value == sheet_cell.upper():
return False
return True
def main():
"""
Run the processing.
"""
# initialize the spreadsheet access
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discovery_url = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discovery_url)
# the names of each of the sheets
sheet_names = [
'Amon', 'LImon', 'Lmon', 'Omon', 'SImon', 'AERmon',
'CFmon', 'Emon', 'EmonZ', 'Primmon', 'PrimmonZ', 'PrimOmon', 'Oday',
'CFday', 'day', 'Eday', 'EdayZ', 'SIday', 'PrimdayPt', 'Primday',
'PrimOday', 'PrimSIday', '6hrPlev', '6hrPlevPt', 'PrimO6hr', 'Prim6hr',
'Prim6hrPt', '3hr', 'E3hr', 'E3hrPt', 'Prim3hr', 'Prim3hrPt', 'E1hr',
'Esubhr', 'Prim1hr', 'fx'
]
# details of each of the institutes
# key is the column number in the spreadsheet
institutes = {
24: {'id': 'ECMWF', 'model_ids': ['ECMWF-IFS-LR', 'ECMWF-IFS-HR'],
'check_func': is_ecmwf, 'calendar': CALENDAR_GREGORIAN},
25: {'id': 'AWI', 'model_ids': ['AWI-CM-1-0-LR', 'AWI-CM-1-0-HR'],
'check_func': is_awi, 'calendar': CALENDAR_STANDARD},
26: {'id': 'CNRM-CERFACS', 'model_ids': ['CNRM-CM6-1-HR', 'CNRM-CM6-1'],
'check_func': is_cnrm, 'calendar': CALENDAR_GREGORIAN},
27: {'id': 'CMCC', 'model_ids': ['CMCC-CM2-HR4', 'CMCC-CM2-VHR4'],
'check_func': is_cmcc, 'calendar': CALENDAR_STANDARD},
28: {'id': 'EC-Earth-Consortium', 'model_ids': ['EC-Earth3-LR',
'EC-Earth3-HR'], 'check_func': is_ec_earth,
'calendar': CALENDAR_GREGORIAN},
32: {'id': 'MPI-M', 'model_ids': ['MPIESM-1-2-HR', 'MPIESM-1-2-XR'],
'check_func': is_mpi, 'calendar': CALENDAR_PROLEPTIC_GREGORIAN},
33: {'id': 'MOHC', 'model_ids': ['HadGEM3-GC31-HM', 'HadGEM3-GC31-MM',
'HadGEM3-GC31-LM'], 'check_func': is_metoffice,
'calendar': CALENDAR_360_DAY}
}
# The HighResMIP experiments
experiments = {
'control-1950': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2050, 1, 1)},
'highres-future': {'start_date': datetime(2015, 1, 1),
'end_date': datetime(2051, 1, 1)},
'hist-1950': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)},
'highresSST-present': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)},
'highresSST-future': {'start_date': datetime(2015, 1, 1),
'end_date': datetime(2051, 1, 1)},
'highresSST-LAI': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)},
'highresSST-smoothed': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)},
'highresSST-p4K': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)},
'highresSST-4co2': {'start_date': datetime(1950, 1, 1),
'end_date': datetime(2015, 1, 1)}
}
# some objects from constants
# Experiment
experiment_objs = []
for expt in experiments:
expt_obj = match_one(Experiment, short_name=expt)
if expt_obj:
experiment_objs.append(expt_obj)
else:
msg = 'experiment {} not found in the database.'.format(expt)
print(msg)
raise ValueError(msg)
# Look up the Institute object for each institute_id and save the
# results to a dictionary for quick look up later
institute_objs = {}
for col_num in institutes:
result = match_one(Institute, short_name=institutes[col_num]['id'])
if result:
institute_objs[col_num] = result
else:
msg = 'institute_id {} not found in the database.'.format(
institutes[col_num]['id']
)
print(msg)
raise ValueError(msg)
# Look up the ClimateModel object for each institute_id and save the
# results to a dictionary for quick look up later
model_objs = {}
for col_num in institutes:
model_objs[col_num] = []
for clim_model in institutes[col_num]['model_ids']:
result = match_one(ClimateModel, short_name=clim_model)
if result:
model_objs[col_num].append(result)
else:
msg = ('climate_model {} not found in the database.'.
format(clim_model))
print(msg)
raise ValueError(msg)
# The standard reference time
std_units = Settings.get_solo().standard_time_units
# Loop through each sheet
for sheet in sheet_names:
range_name = '{}!A2:AI'.format(sheet)
        result = service.spreadsheets().values().get(
            spreadsheetId=SPREADSHEET_ID, range=range_name).execute()
values = result.get('values', [])
if not values:
msg = ('No data found in sheet {}.'.format(sheet))
print(msg)
raise ValueError(msg)
if sheet.startswith('Prim'):
project = match_one(Project, short_name='PRIMAVERA')
else:
project = match_one(Project, short_name='CMIP6')
for row in values:
# for each row, make an entry for each institute/model
try:
for col_num in institutes:
# check if institute is producing this variable
institute_cell = (row[col_num]
if len(row) > col_num else None)
if institutes[col_num]['check_func'](institute_cell):
# create an entry for each experiment
for expt in experiment_objs:
# find the corresponding variable request
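                            # row[11] holds the CMOR name and row[1] the
                            # long name in the spreadsheet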
cmor_name = row[11]
if cmor_name:
var_req_obj = match_one(VariableRequest,
cmor_name=row[11],
table_name=sheet)
else:
var_req_obj = match_one(VariableRequest,
long_name=row[1],
table_name=sheet)
if var_req_obj:
# create a DataRequest for each model in this
# combination
for clim_model in model_objs[col_num]:
_dr = get_or_create(
DataRequest,
project=project,
institute=institute_objs[col_num],
climate_model=clim_model,
experiment=expt,
variable_request=var_req_obj,
request_start_time=date2num(
experiments[expt.short_name]['start_date'],
std_units, institutes[col_num]['calendar']
),
request_end_time=date2num(
experiments[expt.short_name]['end_date'],
std_units, institutes[col_num]['calendar']
),
time_units=std_units,
calendar=institutes[col_num]['calendar']
)
else:
msg = ('Unable to find variable request matching '
'cmor_name {} and table_name {} in the '
'database.'.format(row[11], sheet))
print(msg)
raise ValueError(msg)
except IndexError:
msg = ('Exception at Sheet: {} Variable: {}'.
format(sheet, row[11]))
print(msg)
raise
if __name__ == '__main__':
main()
```
#### File: scripts/update_dreqs/update_dreqs_0012.py
```python
import argparse
from datetime import datetime
import logging.config
import sys
from cf_units import date2num, CALENDAR_360_DAY
import django
django.setup()
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from pdata_app.models import DataSubmission, DataFile
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
NEW_VERSION = 'v20170623'
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
data_subs = [
{'dir': '/group_workspaces/jasmin2/primavera4/upload/CNRM-CERFACS/'
'CNRM-CM6-1-HR/incoming/v20170518_1970',
'version': 'v20170622'},
{'dir': '/group_workspaces/jasmin2/primavera4/upload/CNRM-CERFACS/'
'CNRM-CM6-1-HR/incoming/v20170518_1960',
'version': 'v20170703'}
]
for data_sub_dict in data_subs:
try:
data_sub = DataSubmission.objects.get(
incoming_directory=data_sub_dict['dir'])
        except MultipleObjectsReturned:
            logger.error('Multiple submissions found for {}'.
                         format(data_sub_dict['dir']))
            continue
        except ObjectDoesNotExist:
            logger.error('No submissions found for {}'.
                         format(data_sub_dict['dir']))
            continue
        for data_file in data_sub.datafile_set.all():
            # assumption: each file takes the version given for its submission
            data_file.version = data_sub_dict['version']
            data_file.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0052.py
```python
import argparse
import logging.config
import sys
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import DataFile, DataIssue
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
jon = User.objects.get(username='jseddon')
issue_txt = (
'An error has been identified in the CNRM-CERFACS CNRM-CM6-1 and '
'CNRM-CM6-1-HR models for the AMIP highresSST-present data. A nearly '
'uniform value of volcanic AOD was applied over all of the globe. This '
'should only have consequences for users interested in the impact of '
'volcanoes. CNRM-CERFACS are currently rerunning these simulations and '
'the new data will be uploaded to JASMIN as soon as possible.\n\n'
'Ideally any results that you have should be checked against the new '
'data before being published. CNRM-CERFACS have asked that if you '
'publish any results from the existing data then you should cite the '
'model as CNRM-CM6-0. This CNRM-CM6-0 data will not be published '
'through the ESGF, nor will there be any documentation available for '
'it.\n\n'
'When the new data is available then all existing CNRM-CERFACS data '
'will be removed from disk and you will need to request that the new '
'data is restored from disk to tape. Although I will try to keep on '
'disk the variables that have already been restored from tape. The '
'version string in the directory path will be updated to reflect the '
'new version of these files. We hope to have the new data uploaded to '
'JASMIN by the end of February for the low-resolution and mid-April '
'for the high resolution.'
)
cerfacs_issue = DataIssue.objects.create(issue=issue_txt, reporter=jon)
lowres_files = DataFile.objects.filter(
climate_model__short_name='CNRM-CM6-1',
experiment__short_name='highresSST-present',
version='v20170614'
)
logger.debug('{} low res files found'.format(lowres_files.count()))
highres_files = DataFile.objects.filter(
climate_model__short_name='CNRM-CM6-1-HR',
experiment__short_name='highresSST-present',
version='v20170622'
)
logger.debug('{} high res files found'.format(highres_files.count()))
cerfacs_issue.data_file.add(*lowres_files)
cerfacs_issue.data_file.add(*highres_files)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0066.py
```python
import argparse
import logging.config
import sys
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import DataRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
awi_variant_label = 'r1i1p1f002'
awi_reqs = DataRequest.objects.filter(institute__short_name='AWI')
for data_req in awi_reqs:
if data_req.datafile_set.count() > 0:
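            # values_list() without flat=True yields 1-tuples, hence the
            # comparison against (awi_variant_label, ) below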
request_rip_codes = set(data_req.datafile_set.values_list('rip_code'))
if len(request_rip_codes) != 1:
logger.error('More than 1 variant_label for {}'.
format(data_req))
sys.exit(1)
elif list(request_rip_codes)[0] != (awi_variant_label, ):
logger.error('AWI variant_label does not equal {}: {}'.
format(awi_variant_label, data_req))
sys.exit(1)
else:
data_req.rip_code = awi_variant_label
data_req.save()
else:
data_req.rip_code = awi_variant_label
data_req.save()
other_variant_label = 'r1i1p1f1'
other_reqs = DataRequest.objects.exclude(institute__short_name='AWI')
for data_req in other_reqs:
if data_req.datafile_set.count() > 0:
request_rip_codes = set(data_req.datafile_set.values_list('rip_code'))
if len(request_rip_codes) != 1:
logger.error('More than 1 variant_label for {}'.
format(data_req))
sys.exit(1)
elif list(request_rip_codes)[0] != (other_variant_label, ):
                logger.error('variant_label does not equal {}: {}'.
format(other_variant_label, data_req))
sys.exit(1)
else:
data_req.rip_code = other_variant_label
data_req.save()
else:
data_req.rip_code = other_variant_label
data_req.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0089.py
```python
import argparse
import logging.config
import sys
import django
django.setup()
from pdata_app.models import DataRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
variables = [
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'tas'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'ts'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'uas'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'vas'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'pr'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rlds'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rlus'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsds'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsus'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsdscs'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsuscs'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rldscs'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsdt'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsut'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rlut'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rlutcs'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'rsutcs'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'cl'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'clw'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'cli'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'ta'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'ua'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'va'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'hus'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'hur'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'wap'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'zg'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'pfull'},
{'variable_request__table_name': 'Amon',
'variable_request__cmor_name': 'phalf'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rlu'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rsu'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rld'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rsd'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rlucs'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rsucs'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rldcs'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'rsdcs'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'ta'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'hur'},
{'variable_request__table_name': 'CFmon',
'variable_request__cmor_name': 'hus'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'pr'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'tas'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rlds'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rlus'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rsds'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rsus'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'uas'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'vas'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rldscs'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rsdscs'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rsuscs'},
{'variable_request__table_name': '3hr',
'variable_request__cmor_name': 'rsdsdiff'}
]
variant_labels = ['r1i1p3f2']
models = ['HadGEM3-GC31-MM']
for model in models:
for variant_label in variant_labels:
for variable in variables:
data_req = DataRequest.objects.get(
climate_model__short_name=model,
experiment__short_name='highresSST-present',
rip_code='r1i1p1f1',
**variable
)
logger.debug('{} {}'.format(variant_label, data_req))
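                # clearing the primary key and saving creates a copy of the
                # data request with the new variant label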
data_req.pk = None
data_req.rip_code = variant_label
data_req.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0108.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import logging.config
import sys
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import RetrievalRequest, DataRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
jon = User.objects.get(username='jseddon')
rr = RetrievalRequest.objects.create(requester=jon, start_year=1950,
end_year=1950)
time_zone = datetime.timezone(datetime.timedelta())
rr.date_created = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=time_zone)
rr.save()
common = {
'experiment__short_name': 'highresSST-present',
}
stream_1 = [
{'climate_model__short_name__in': ['CMCC-CM2-HR4', 'CMCC-CM2-VHR4'],
'rip_code': 'r1i1p1f1'},
{'climate_model__short_name__in': ['CNRM-CM6-1', 'CNRM-CM6-1-HR'],
'rip_code__in': ['r21i1p1f2', 'r1i1p1f2']},
{'climate_model__short_name__in': ['EC-Earth3', 'EC-Earth3-HR'],
'rip_code': 'r1i1p1f1'},
{'climate_model__short_name__in': ['ECMWF-IFS-LR', 'ECMWF-IFS-HR'],
'rip_code': 'r1i1p1f1'},
{'climate_model__short_name__in': ['HadGEM3-GC31-LM',
'HadGEM3-GC31-MM',
'HadGEM3-GC31-HM'],
'rip_code': 'r1i1p1f1'},
{'climate_model__short_name__in': ['MPIESM-1-2-HR', 'MPIESM-1-2-XR'],
'rip_code': 'r1i1p1f1'}
]
for stream in stream_1:
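        # combine the shared experiment filter with this stream's model and
        # variant-label filters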
drs = DataRequest.objects.filter(datafile__isnull=False,
variable_request__frequency='mon',
**common, **stream).distinct()
rr.data_request.add(*drs)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0183.py
```python
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import Checksum, DataRequest, TapeChecksum
from pdata_app.utils.common import adler32
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('request_id', help='to request id to update')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
model, expt, var_lab, table, var = args.request_id.split('_')
if model == 'MPIESM-1-2-HR':
new_model = 'MPI-ESM1-2-HR'
elif model == 'MPIESM-1-2-XR':
new_model = 'MPI-ESM1-2-XR'
else:
raise ValueError('Unknown source_id {}'.format(model))
dreq = DataRequest.objects.get(
climate_model__short_name=new_model,
experiment__short_name=expt,
rip_code=var_lab,
variable_request__table_name=table,
variable_request__cmor_name=var
)
logger.debug('DataRequest is {}'.format(dreq))
for data_file in dreq.datafile_set.order_by('name'):
logger.debug('Processing {}'.format(data_file.name))
file_path = os.path.join(data_file.directory, data_file.name)
cs = data_file.checksum_set.first()
if not cs:
logger.error('No checksum for {}'.format(data_file.name))
else:
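            # preserve the existing checksum as the tape checksum before
            # recalculating the checksum of the file on disk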
TapeChecksum.objects.create(
data_file=data_file,
checksum_value=cs.checksum_value,
checksum_type=cs.checksum_type
)
# Remove the original checksum now that the tape checksum's
# been created
cs.delete()
Checksum.objects.create(
data_file=data_file,
checksum_type='ADLER32',
checksum_value=adler32(file_path)
)
# Update the file's size
data_file.tape_size = data_file.size
data_file.size = os.path.getsize(file_path)
# Save all of the changes
data_file.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0223.py
```python
import argparse
import json
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import VariableRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
MIP_TABLE_DIR = ('/home/users/jseddon/primavera/original-cmor-tables/'
'primavera_1.00.23/Tables')
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
for table_name in (VariableRequest.objects.order_by('table_name').
values_list('table_name', flat=True).distinct()):
table_file = f'CMIP6_{table_name}.json'
with open(os.path.join(MIP_TABLE_DIR, table_file)) as fh:
mip_table = json.load(fh)
for var_req in (VariableRequest.objects.filter(table_name=table_name)
.order_by('cmor_name')):
cmor_name = var_req.cmor_name
try:
out_name = mip_table['variable_entry'][cmor_name]['out_name']
except KeyError:
logger.error(f'No entry found for {cmor_name} in table '
f'{table_name}')
continue
if not (out_name == cmor_name):
var_req.out_name = out_name
var_req.save()
print(f'{cmor_name}_{table_name} {out_name}')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0243.py
```python
import argparse
import json
import logging.config
import os
import pprint
import sys
import django
django.setup()
from pdata_app.models import DataRequest, VariableRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
TABLES_DIR = '/home/h04/jseddon/primavera/cmip6-cmor-tables/Tables'
def process_cell_measures(var_req, cell_measures, output_dict):
"""
Add the table and variable name to the appropriate cell measures entry.
"""
if not cell_measures:
# If blank then don't do anything.
return
# correct for typos in the data request
if cell_measures == 'area: areacello OR areacella':
cell_measures = 'area: areacella'
if cell_measures in output_dict:
if var_req.table_name in output_dict[cell_measures]:
if (var_req.cmor_name not in
output_dict[cell_measures][var_req.table_name]):
(output_dict[cell_measures][var_req.table_name].
append(var_req.cmor_name))
else:
output_dict[cell_measures][var_req.table_name] = [var_req.cmor_name,]
else:
output_dict[cell_measures] = {var_req.table_name:[var_req.cmor_name,]}
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
drs = DataRequest.objects.filter(
institute__short_name='MPI-M',
experiment__short_name__in=['control-1950', 'hist-1950',
'spinup-1950'],
datafile__isnull=False
).distinct()
tables = (drs.values_list('variable_request__table_name', flat=True).
distinct().order_by('variable_request__table_name'))
output_dict = {}
for tab_name in tables:
if tab_name.startswith('Prim'):
for dr in (drs.filter(variable_request__table_name=tab_name).
order_by('variable_request__cmor_name')):
cell_measures = (dr.variable_request.cell_measures)
process_cell_measures(dr.variable_request, cell_measures,
output_dict)
else:
json_file = os.path.join(TABLES_DIR, f'CMIP6_{tab_name}.json')
with open(json_file) as fh:
mip_table = json.load(fh)
for dr in (drs.filter(variable_request__table_name=tab_name).
order_by('variable_request__cmor_name')):
try:
cell_measures = (mip_table['variable_entry']
[dr.variable_request.cmor_name]['cell_measures'])
except KeyError:
continue
process_cell_measures(dr.variable_request, cell_measures,
output_dict)
print(pprint.pformat(output_dict))
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0254.py
```python
import argparse
import logging.config
import os
import sys
from cf_units import date2num, CALENDAR_GREGORIAN
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import DataFile, DataIssue
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main():
"""
Main entry point
"""
gijs = User.objects.get(username='gvdoord')
long_txt = (
'The longitude of the atmospheric grid has been shifted by half a '
'grid cell size in the eastward direction. The grid will be corrected. '
'Affected files have a 2-D latitude longitude grid. Fixed files will '
'have a 1-D latitude and a 1-D longitude grid.'
)
long_issue, _created = DataIssue.objects.get_or_create(issue=long_txt,
reporter=gijs)
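    # ocean and sea-ice submissions are excluded because the issue only
    # affects variables on the atmospheric grid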
affected_files = DataFile.objects.filter(
institute__short_name='EC-Earth-Consortium'
).exclude(
data_submission__incoming_directory__contains='Oday'
).exclude(
data_submission__incoming_directory__contains='Omon'
).exclude(
data_submission__incoming_directory__contains='SIday'
).exclude(
data_submission__incoming_directory__contains='SImon'
).exclude(
data_submission__incoming_directory__contains='PrimSIday'
).exclude(
data_submission__incoming_directory__contains='PrimOmon'
).exclude(
data_submission__incoming_directory__contains='PrimOday'
)
logger.debug('{} affected files found'.format(affected_files.count()))
long_issue.data_file.add(*affected_files)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main()
```
#### File: scripts/update_dreqs/update_dreqs_0263.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import logging.config
import sys
import django
django.setup()
from django.template.defaultfilters import filesizeformat
from django.contrib.auth.models import User
from pdata_app.models import RetrievalRequest, DataRequest
from pdata_app.utils.common import get_request_size
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Create retrieval requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the '
'default), or error')
parser.add_argument('-c', '--create', help='Create the retrieval request '
'rather than just displaying '
                                               'the data volumes',
action='store_true')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
start_year = 1948
end_year = 2150
# data_reqs = DataRequest.objects.filter(
# climate_model__short_name='EC-Earth3P',
# experiment__short_name='highres-future',
# rip_code='r1i1p2f1',
# datafile__isnull=False
# ).exclude(
# variable_request__table_name__startswith='Prim'
# ).exclude(
# variable_request__dimensions__contains='alevhalf'
# ).exclude(
# variable_request__dimensions__contains='alevel'
# ).distinct()
# data_reqs = DataRequest.objects.filter(
# climate_model__short_name='EC-Earth3P',
# experiment__short_name='highres-future',
# rip_code='r3i1p2f1', # 'r2i1p2f1',
# datafile__isnull=False
# ).exclude(
# variable_request__table_name__startswith='Prim'
# ).exclude(
# variable_request__dimensions__contains='alevhalf'
# ).exclude(
# variable_request__dimensions__contains='alevel'
# ).distinct()
data_reqs = DataRequest.objects.filter(
climate_model__short_name='EC-Earth3P-HR',
experiment__short_name='highres-future',
rip_code='r1i1p2f1',
variable_request__frequency__in=['6hr', '3hr'],
datafile__isnull=False
).exclude(
variable_request__table_name__startswith='Prim'
).exclude(
variable_request__dimensions__contains='alevhalf'
).exclude(
variable_request__dimensions__contains='alevel'
).distinct()
# data_reqs = DataRequest.objects.filter(
# climate_model__short_name='EC-Earth3P-HR',
# experiment__short_name='highres-future',
# rip_code='r3i1p2f1', # 'r2i1p2f1',
# variable_request__frequency__in=['mon', 'day'],
# datafile__isnull=False
# ).exclude(
# variable_request__table_name__startswith='Prim'
# ).exclude(
# variable_request__dimensions__contains='alevhalf'
# ).exclude(
# variable_request__dimensions__contains='alevel'
# ).distinct()
logger.debug('Total data volume: {} Volume to restore: {}'.format(
filesizeformat(get_request_size(data_reqs, start_year, end_year)).
replace('\xa0', ' '),
filesizeformat(get_request_size(data_reqs, start_year, end_year,
offline=True)).replace('\xa0', ' '),
))
if args.create:
jon = User.objects.get(username='jseddon')
rr = RetrievalRequest.objects.create(requester=jon, start_year=start_year,
end_year=end_year)
time_zone = datetime.timezone(datetime.timedelta())
rr.date_created = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=time_zone)
rr.save()
rr.data_request.add(*data_reqs)
logger.debug('Retrieval request {} created.'.format(rr.id))
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0313.py
```python
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import ReplacedFile
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
AFFECTED_FILES = [
'rsus_3hr_EC-Earth3P_highresSST-present_r1i1p1f1_gr_200207010000-200207312100.nc',
'rlutcs_Amon_EC-Earth3P_highresSST-present_r1i1p1f1_gr_197401-197412.nc',
'rlutcs_Amon_EC-Earth3P_highresSST-present_r1i1p1f1_gr_199801-199812.nc',
'rsus_Amon_EC-Earth3P_highresSST-present_r1i1p1f1_gr_199801-199812.nc',
'rlutcs_CFday_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19830201-19830228.nc',
'rsut_CFday_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19690301-19690331.nc',
'rsut_CFday_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19880101-19880131.nc',
'rsut_CFday_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19910501-19910531.nc',
'rsut_CFday_EC-Earth3P_highresSST-present_r1i1p1f1_gr_20150301-20150331.nc',
'rsut_E3hr_EC-Earth3P_highresSST-present_r1i1p1f1_gr_200903010000-200903312100.nc',
'rsutcs_E3hr_EC-Earth3P_highresSST-present_r1i1p1f1_gr_196607010000-196607312100.nc',
'rsutcs_E3hr_EC-Earth3P_highresSST-present_r1i1p1f1_gr_196810010000-196810312100.nc',
'rlut_day_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19510501-19510531.nc',
'rlut_day_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19730701-19730731.nc',
'rlut_day_EC-Earth3P_highresSST-present_r1i1p1f1_gr_20080401-20080430.nc',
'rsus_day_EC-Earth3P_highresSST-present_r1i1p1f1_gr_19571001-19571031.nc',
'rlutcs_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19750201-19750228.nc',
'rlutcs_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19950301-19950331.nc',
'rsut_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19750401-19750430.nc',
'rsut_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19810601-19810630.nc',
'rsut_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_20120501-20120531.nc',
'rsutcs_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19670201-19670228.nc',
'rsutcs_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_20020101-20020131.nc',
'rsutcs_CFday_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_20111101-20111130.nc',
'rlus_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19571201-19571231.nc',
'rlus_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19650801-19650831.nc',
'rlut_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19660801-19660831.nc',
'rlut_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_20110201-20110228.nc',
'rsus_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_19650201-19650228.nc',
'rsus_day_EC-Earth3P-HR_highresSST-present_r1i1p1f1_gr_20120101-20120131.nc',
]
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Create retrieval requests')
parser.add_argument('-l', '--log-level',
help='set logging level (default: %(default)s)',
choices=['debug', 'info', 'warning', 'error'],
default='warning')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
data_files = ReplacedFile.objects.filter(name__in=AFFECTED_FILES)
num_found = data_files.count()
num_expected = 30
if num_found != num_expected:
logger.error(f'{num_found} files found but expecting {num_expected}')
sys.exit(1)
tape_ids = data_files.values_list('tape_url', flat=True).distinct()
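    # write one text file per tape ID listing the paths to retrieve; the
    # archived names use 'Earth3' rather than 'Earth3P', hence the
    # replacement below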
for tape_id in tape_ids:
filename = f'{tape_id.replace(":", "_")}.txt'
with open(filename, 'w') as fh:
id_files = data_files.filter(tape_url=tape_id)
for data_file in id_files:
tape_path = os.path.join(data_file.incoming_directory,
data_file.name.replace('Earth3P', 'Earth3'))
fh.write(f'{tape_path}\n')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
log_level = getattr(logging, cmd_args.log_level.upper())
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/update_dreqs/update_dreqs_0319.py
```python
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import DataFile, DataRequest, Project, Settings # nopep8
from pdata_app.utils.attribute_update import MipEraUpdate # nopep8
from pdata_app.utils.common import adler32, delete_files # nopep8
__version__ = '0.1.0b1'
logger = logging.getLogger(__name__)
# Directory to copy the file to, to run the attribute edits
SCRATCH_DIR = "/work/scratch-nopw/jseddon/temp"
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Update institution_id')
parser.add_argument('-l', '--log-level',
help='set logging level (default: %(default)s)',
choices=['debug', 'info', 'warning', 'error'],
default='warning')
parser.add_argument('-i', '--incoming', help='Update file only, not the '
'database.',
action='store_true')
parser.add_argument('request_id', help='to request id to update')
parser.add_argument('-s', '--skip-checksum', help='skip checking checksums',
action='store_true')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
model, expt, var_lab, table, var = args.request_id.split('_')
dreq = DataRequest.objects.get(
project__short_name='CMIP6',
climate_model__short_name=model,
experiment__short_name=expt,
rip_code=var_lab,
variable_request__table_name=table,
variable_request__cmor_name=var
)
logger.debug('DataRequest is {}'.format(dreq))
if not args.skip_checksum:
logger.debug('Checking checksums')
checksum_mismatch = 0
for data_file in dreq.datafile_set.order_by('name'):
logger.debug('Processing {}'.format(data_file.name))
full_path = os.path.join(data_file.directory, data_file.name)
actual = adler32(full_path)
if data_file.tapechecksum_set.count():
expected = data_file.tapechecksum_set.first().checksum_value
else:
expected = data_file.checksum_set.first().checksum_value
if actual != expected:
logger.error(f'Checksum mismatch for {full_path}')
checksum_mismatch += 1
dfs = DataFile.objects.filter(name=data_file.name)
if dfs.count() != 1:
logger.error(f'Unable to select file for deletion {full_path}')
else:
delete_files(dfs.all(), BASE_OUTPUT_DIR)
if checksum_mismatch:
logger.error(f'Exiting due to {checksum_mismatch} checksum failures.')
logger.error(f'Data request is in {dreq.directories()}')
sys.exit(1)
logger.debug('Processing files')
for data_file in dreq.datafile_set.order_by('name'):
logger.debug('Processing {}'.format(data_file.name))
new_mip_era = 'PRIMAVERA'
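        # ensure that an equivalent data request exists under the PRIMAVERA
        # project for the updated file to be associated with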
new_dreq, created = DataRequest.objects.get_or_create(
project=Project.objects.get(short_name=new_mip_era),
institute=dreq.institute,
climate_model=dreq.climate_model,
experiment=dreq.experiment,
variable_request=dreq.variable_request,
rip_code=dreq.rip_code,
request_start_time=dreq.request_start_time,
request_end_time=dreq.request_end_time,
time_units=dreq.time_units,
calendar=dreq.calendar
)
if created:
logger.debug('Created {}'.format(new_dreq))
updater = MipEraUpdate(data_file, new_mip_era,
update_file_only=args.incoming,
temp_dir=SCRATCH_DIR)
updater.update()
if dreq.datafile_set.count() == 0:
logger.debug(f'DataRequest has no files so deleting CMIP6 {dreq}')
try:
dreq.delete()
except django.db.models.deletion.ProtectedError:
dreq.rip_code = 'r9i9p9f9'
dreq.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
log_level = getattr(logging, cmd_args.log_level.upper())
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(levelname)s: %(message)s',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: scripts/utils/check_debug.py
```python
from __future__ import absolute_import, print_function
import sys
from pdata_site import settings
def main():
""" Main function """
if settings.DEBUG is False:
sys.exit(0)
else:
print('ERROR: DEBUG in settings.py is not False!')
sys.exit(1)
if __name__ == '__main__':
main()
```
#### File: scripts/utils/crepp_prim_volumes.py
```python
import django
django.setup()
from django.db.models import Sum # NOPEP8
from pdata_app.models import DataRequest, DataFile # NOPEP8
from pdata_app.utils.common import filter_hadgem_stream2 # NOPEP8
FILENAME = 'crepp_prim_volumes_ensemble_table.csv'
def main():
"""
Run the processing.
"""
amip_expts = ['highresSST-present', 'highresSST-future']
coupled_expts = ['spinup-1950', 'hist-1950', 'control-1950',
'highres-future']
stream1_2_expts = amip_expts + coupled_expts
    # MOHC stream 2 is members r1i2p1f1 to r1i15p1f1
hadgem_stream2_members = [f'r1i{init_index}p1f1'
for init_index in range(2, 16)]
other_models = DataRequest.objects.filter(
project__short_name='PRIMAVERA',
experiment__short_name__in=stream1_2_expts,
variable_request__table_name__startswith='Prim',
datafile__isnull=False
).exclude(
# Exclude HadGEM2 stream 2 for the moment
climate_model__short_name__startswith='HadeGEM',
rip_code__in=hadgem_stream2_members
).exclude(
# Exclude EC-Earth coupled r1i1p1f1
institute__short_name='EC-Earth-Consortium',
experiment__short_name__in=coupled_expts,
rip_code='r1i1p1f1'
).distinct()
hadgem_s2 = filter_hadgem_stream2(DataRequest.objects.filter(
project__short_name='PRIMAVERA',
experiment__short_name__in=stream1_2_expts,
variable_request__table_name__startswith='Prim',
climate_model__short_name__startswith='HadGEM',
rip_code__in=hadgem_stream2_members,
datafile__isnull=False
)).distinct()
ec_earth_s1 = DataRequest.objects.filter(
institute__short_name='EC-Earth-Consortium',
experiment__short_name__in=coupled_expts,
rip_code='r1i1p1f1',
datafile__isnull=False
).distinct()
wp5 = DataRequest.objects.filter(
experiment__short_name__in=['primWP5-amv-neg', 'primWP5-amv-pos',
'dcppc-amv-neg', 'dcppc-amv-pos'],
datafile__isnull=False
).distinct()
prim_reqs = other_models | hadgem_s2 | ec_earth_s1 | wp5
unique_expts = (prim_reqs.values_list('institute__short_name',
'climate_model__short_name',
'experiment__short_name',
'rip_code',
'variable_request__table_name').
distinct().order_by('institute__short_name',
'climate_model__short_name',
'experiment__short_name',
'rip_code',
'variable_request__table_name'))
with open(FILENAME, 'w') as fh:
fh.write('drs_id, Volume (TB)\n')
for inst_name, model_name, expt_name, rip_code, table_name in unique_expts:
dreqs = prim_reqs.filter(
institute__short_name=inst_name,
climate_model__short_name=model_name,
experiment__short_name=expt_name,
rip_code=rip_code,
variable_request__table_name=table_name
)
if dreqs:
dreq_size = (
DataFile.objects.filter(data_request__in=dreqs).
distinct().aggregate(Sum('size'))['size__sum']
)
df = dreqs.first().datafile_set.first()
drs_id = (
f'PRIMAVERA.'
f'{df.activity_id.short_name}.'
f'{df.institute.short_name}.'
f'{df.climate_model.short_name}.'
f'{df.experiment.short_name}.'
f'{df.rip_code}.'
f'{df.variable_request.table_name}'
)
if 'MPI' in drs_id and 'DCPP' in drs_id:
drs_id = (drs_id.replace('DCPP', 'primWP5').
replace('dcppc', 'primWP5'))
if 'NCAS' in drs_id:
drs_id = drs_id.replace('NCAS', 'NERC')
fh.write(
f'{drs_id}, {dreq_size / 1024**4}\n'
)
if __name__ == '__main__':
main()
```
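The CSV produced above divides each aggregated byte count by `1024**4`, so the "Volume (TB)" column is strictly in tebibytes (TiB). A minimal, self-contained sketch of that conversion is shown below; the byte total is invented purely for illustration and is not taken from the database.
```python
# Editor's sketch of the unit conversion used for the "Volume (TB)" column.
# The byte total below is made up; in the script it comes from
# DataFile.objects.filter(...).aggregate(Sum('size'))['size__sum'].
size_sum = 5_497_558_138_880  # bytes

volume_tib = size_sum / 1024**4  # 1 TiB = 1024**4 bytes
print(f'{volume_tib:.2f}')       # 5.00
```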
#### File: scripts/utils/esgf_left_to_publish.py
```python
import argparse
import logging.config
import django
django.setup()
from pdata_app.models import DataRequest # nopep8
__version__ = '0.1.0b1'
logger = logging.getLogger(__name__)
def get_stream_1_2():
"""
Find the Stream 1 and 2 data requests that should be published to the ESGF.
:return: the data requests that are expected to be published.
:rtype: django.db.models.query.QuerySet
"""
amip_expts = ['highresSST-present', 'highresSST-future']
coupled_expts = ['spinup-1950', 'hist-1950', 'control-1950',
'highres-future']
stream1_2_expts = amip_expts + coupled_expts
# MOHC stream 2 is members r1i2p1f1 to r1i15p1f1
mohc_stream2_members = [f'r1i{init_index}p1f1'
for init_index in range(2, 16)]
stream1_2 = DataRequest.objects.filter(
experiment__short_name__in=stream1_2_expts,
datafile__isnull=False
).exclude(
# Exclude MOHC Stream 2
institute__short_name__in=['MOHC', 'NERC'],
rip_code__in=mohc_stream2_members,
).exclude(
# Exclude EC-Earth atmosphere levels
climate_model__short_name__startswith='EC-Earth',
variable_request__dimensions__contains='alevhalf'
).exclude(
# Exclude EC-Earth atmosphere levels
climate_model__short_name__startswith='EC-Earth',
variable_request__dimensions__contains='alevel'
).distinct()
mohc_stream2_members = DataRequest.objects.filter(
institute__short_name__in=['MOHC', 'NERC'],
experiment__short_name__in=stream1_2_expts,
rip_code__in=mohc_stream2_members,
datafile__isnull=False
).distinct()
mohc_stream2_low_freq = mohc_stream2_members.filter(
variable_request__frequency__in=['mon', 'day']
).exclude(
variable_request__table_name='CFday'
).distinct()
mohc_stream2_cfday = mohc_stream2_members.filter(
variable_request__table_name='CFday',
variable_request__cmor_name='ps'
).distinct()
mohc_stream2_6hr = mohc_stream2_members.filter(
variable_request__table_name='Prim6hr',
variable_request__cmor_name='wsgmax'
).distinct()
mohc_stream2_3hr = mohc_stream2_members.filter(
variable_request__table_name__in=['3hr', 'E3hr', 'E3hrPt', 'Prim3hr',
'Prim3hrPt'],
variable_request__cmor_name__in=['rsdsdiff', 'rsds', 'tas', 'uas',
'vas', 'ua50m', 'va50m', 'ua100m',
'va100m', 'ua7h', 'va7h', 'sfcWind',
'sfcWindmax', 'pr', 'psl', 'zg7h']
).distinct()
publishable_files = (stream1_2 | mohc_stream2_low_freq |
mohc_stream2_cfday | mohc_stream2_6hr |
mohc_stream2_3hr)
return publishable_files
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Find datasets that have not '
'been published to the ESGF.')
parser.add_argument('-l', '--log-level',
help='set logging level (default: %(default)s)',
choices=['debug', 'info', 'warning', 'error'],
default='warning')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main():
"""Run the script"""
stream_1_2 = get_stream_1_2()
no_esgf = stream_1_2.filter(esgfdataset__isnull=True)
logger.info('Writing no_esgf_dataset.txt')
with open('no_esgf_dataset.txt', 'w') as fh:
for dreq in no_esgf.order_by('project', 'institute', 'climate_model',
'experiment', 'rip_code',
'variable_request__table_name',
'variable_request__cmor_name'):
fh.write(str(dreq) + '\n')
remaining = stream_1_2.exclude(esgfdataset__isnull=True)
not_published = remaining.exclude(esgfdataset__status='PUBLISHED')
logger.info('Writing status_not_published.txt')
with open('status_not_published.txt', 'w') as fh:
for dreq in not_published.order_by('project', 'institute',
'climate_model', 'experiment',
'rip_code',
'variable_request__table_name',
'variable_request__cmor_name'):
fh.write(str(dreq) + '\n')
# TODO what about WP5?
if __name__ == "__main__":
cmd_args = parse_args()
# configure the logger
log_level = getattr(logging, cmd_args.log_level.upper())
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(levelname)s: %(message)s',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main()
```
#### File: scripts/utils/esgf_percentage.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import logging.config
import sys
import django
from django.db.models import Sum
from django.template.defaultfilters import filesizeformat
django.setup()
from pdata_app.models import DataFile, ESGFDataset
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Calculate the volume of data '
'submitted to the ESGF.')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the '
'default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""Run the script"""
esgf_volume = 0
for esgf in ESGFDataset.objects.all():
dataset_volume = (esgf.data_request.datafile_set.distinct().
aggregate(Sum('size'))['size__sum'])
if dataset_volume:
esgf_volume += dataset_volume
total_volume = (DataFile.objects.all().distinct().aggregate(Sum('size'))
['size__sum'])
pretty_esgf = filesizeformat(esgf_volume).replace('\xa0', ' ')
pretty_total = filesizeformat(total_volume).replace('\xa0', ' ')
print(f'Volume published to ESGF {pretty_esgf}')
print(f'Total Volume {pretty_total}')
print(f'Percentage published to ESGF {esgf_volume / total_volume:.0%}')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
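Two small formatting details drive the script's output above: Django's `filesizeformat` separates the number and unit with a non-breaking space (hence the `.replace('\xa0', ' ')`), and the `:.0%` format spec turns a ratio into a whole-number percentage. The dependency-free sketch below reproduces just those two details with made-up volumes.
```python
# Editor's sketch; the values are invented and filesizeformat itself is not called.
pretty_esgf = '117.7\xa0TB'.replace('\xa0', ' ')  # stand-in for filesizeformat(esgf_volume)
esgf_volume, total_volume = 117.7, 250.0          # made-up volumes in TB

print(f'Volume published to ESGF {pretty_esgf}')                         # 117.7 TB
print(f'Percentage published to ESGF {esgf_volume / total_volume:.0%}')  # 47%
```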
#### File: scripts/utils/fix_failed_et.py
```python
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import DataFile, Settings
from pdata_app.utils.common import (adler32, construct_drs_path,
get_gws_any_dir, ilist_files, is_same_gws)
__version__ = '0.1.0b'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Check extracted data and '
'move to DRS.')
parser.add_argument('top_dir', help='The top-level directory to search '
'for files')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the '
'default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""Main entry point"""
base_dir = Settings.get_solo().base_output_dir
for extracted_file in ilist_files(args.top_dir):
found_name = os.path.basename(extracted_file)
try:
data_file = DataFile.objects.get(name=found_name)
except django.core.exceptions.ObjectDoesNotExist:
logger.warning('Cannot find DMT entry. Skipping {}'.
format(extracted_file))
continue
found_checksum = adler32(extracted_file)
if not found_checksum == data_file.checksum_set.first().checksum_value:
logger.warning("Checksum doesn't match. Skipping {}".
format(found_name))
continue
dest_dir = os.path.join(get_gws_any_dir(extracted_file), 'stream1',
construct_drs_path(data_file))
dest_path = os.path.join(dest_dir, found_name)
if os.path.exists(dest_path):
logger.warning('Skipping {} as it already exists at {}'.
format(found_name, dest_path))
continue
# create the directory if it doesn't exist
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
os.rename(extracted_file, dest_path)
# create a link from the base dir
if not is_same_gws(dest_path, base_dir):
link_dir = os.path.join(base_dir, construct_drs_path(data_file))
link_path = os.path.join(link_dir, data_file.name)
if not os.path.exists(link_dir):
os.makedirs(link_dir)
os.symlink(dest_path, link_path)
data_file.online = True
data_file.directory = dest_dir
data_file.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
#### File: primavera-dmt/scripts/validate_data_submission.py
```python
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import itertools
import json
import logging.config
from multiprocessing import Process, Manager
from multiprocessing.pool import ThreadPool
from netCDF4 import Dataset
import os
import re
import shutil
import subprocess
import sys
import time
import warnings
try:
import dask
except ImportError:
pass
import iris
from primavera_val import (identify_filename_metadata, validate_file_contents,
identify_contents_metadata,
validate_cell_measures_contents,
identify_cell_measures_metadata, load_cube,
FileValidationError)
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import (Project, ClimateModel, Experiment, DataSubmission,
DataFile, VariableRequest, DataRequest, Checksum, Settings, Institute,
ActivityId, EmailQueue)
from pdata_app.utils.dbapi import get_or_create, match_one
from pdata_app.utils.common import adler32, list_files, pdt2num
from vocabs.vocabs import STATUS_VALUES, CHECKSUM_TYPES
# Ignore warnings displayed when loading data
warnings.filterwarnings("ignore")
__version__ = '0.1.0b'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
CONTACT_PERSON_USER_ID = 'jseddon'
# The maximum size (in bytes) of file to read into memory for an HDF
# data integrity check
# 1073741824 = 1 GiB
MAX_DATA_INTEGRITY_SIZE = 1073741824
# Don't run PrePARE on the following var/table combinations as they've
# been removed from the CMIP6 data request, but are still needed for
# PRIMAVERA
# Additionally, don't run PrePARE on any of the PRIMAVERA only tables
SKIP_PREPARE_VARS = ['psl_E3hrPt', 'ua850_E3hrPt', 'va850_E3hrPt',
'mrlsl_Emon', 'mrlsl_Lmon', 'sialb_SImon',
'tso_3hr',
'Prim1hr', 'Prim3hr', 'Prim3hrPt', 'Prim6hr',
'Prim6hrPt', 'PrimO6hr', 'PrimOday', 'PrimOmon',
'PrimSIday', 'Primday', 'PrimdayPt', 'Primmon',
'PrimmonZ',
]
class SubmissionError(Exception):
"""
An exception to indicate that there has been an error that means that
the data submission cannot continue.
"""
pass
def identify_and_validate(filenames, project, num_processes, file_format):
"""
Loop through a list of file names, identify each file's metadata and then
validate it. The looping is done in parallel using the multiprocessing
library module.
An example filename is clt_Amon_HadGEM2-ES_historical_r1i1p1_185912-188411.nc
:param list filenames: The files to process
:param str project: The name of the project
:param int num_processes: The number of parallel processes to use
:param str file_format: The CMOR version of the netCDF files, one of
CMIP5 or CMIP6
:returns: A list containing the metadata dictionary generated for each file
:rtype: multiprocessing.Manager.list
"""
jobs = []
manager = Manager()
params = manager.Queue()
result_list = manager.list()
error_event = manager.Event()
if num_processes != 1:
for i in range(num_processes):
p = Process(target=identify_and_validate_file, args=(params,
result_list, error_event))
jobs.append(p)
p.start()
func_input_pair = list(zip(filenames,
(project,) * len(filenames),
(file_format,) * len(filenames)))
blank_pair = (None, None, None)
iters = itertools.chain(func_input_pair, (blank_pair,) * num_processes)
for item in iters:
params.put(item)
if num_processes == 1:
identify_and_validate_file(params, result_list, error_event)
else:
for j in jobs:
j.join()
if error_event.is_set():
raise SubmissionError()
return result_list
def identify_and_validate_file(params, output, error_event):
"""
Identify `filename`'s metadata and then validate the file. The function
continues getting items to process from the parameter queue until a None
is received.
:param multiprocessing.Manager.Queue params: A queue, with each item being a
tuple of the filename to load, the name of the project and the netCDF
file CMOR version
:param multiprocessing.Manager.list output: A list containing the output
metadata dictionaries for each file
:param multiprocessing.Manager.Event error_event: If set then a catastrophic
error has occurred in another process and processing should end
"""
while True:
# close existing connections so that a fresh connection is made
django.db.connections.close_all()
if error_event.is_set():
return
filename, project, file_format = params.get()
if filename is None:
return
try:
_identify_and_validate_file(filename, project, file_format, output,
error_event)
except django.db.utils.OperationalError:
# Wait and then re-run once in case of temporary database
# high load
logger.warning('django.db.utils.OperationalError waiting for one '
'minute and then retrying.')
time.sleep(60)
try:
_identify_and_validate_file(filename, project, file_format,
output, error_event)
except django.db.utils.OperationalError:
logger.error('django.db.utils.OperationalError for a second '
'time. Exiting.')
error_event.set()
raise
def _identify_and_validate_file(filename, project, file_format, output,
error_event):
"""
Do the validation of a file.
:param str filename: The name of the file
:param str project: The name of the project
:param str file_format: The format of the file (CMIP5 or CMIP6)
:param multiprocessing.Manager.list output: A list containing the output
metadata dictionaries for each file
:param multiprocessing.Manager.Event error_event: If set then a catastrophic
error has occurred in another process and processing should end
"""
try:
basename = os.path.basename(filename)
if DataFile.objects.filter(name=basename).count() > 0:
msg = 'File {} already exists in the database.'.format(basename)
raise FileValidationError(msg)
metadata = identify_filename_metadata(filename, file_format)
if metadata['table'].startswith('Prim'):
metadata['project'] = 'PRIMAVERA'
else:
metadata['project'] = project
if 'fx' in metadata['table']:
cf = iris.fileformats.cf.CFReader(filename)
metadata.update(identify_cell_measures_metadata(cf, filename))
validate_cell_measures_contents(cf, metadata)
else:
cube = load_cube(filename)
metadata.update(identify_contents_metadata(cube, filename))
validate_file_contents(cube, metadata)
_contents_hdf_check(cube, metadata, cmd_args.data_limit)
verify_fk_relationships(metadata)
calculate_checksum(metadata)
except SubmissionError:
msg = ('A serious file error means the submission cannot continue: '
'{}'.format(filename))
logger.error(msg)
error_event.set()
except FileValidationError as fve:
msg = 'File failed validation. {}'.format(fve.__str__())
logger.warning(msg)
else:
output.append(metadata)
def calculate_checksum(metadata):
checksum_value = adler32(os.path.join(metadata['directory'],
metadata['basename']))
if checksum_value:
metadata['checksum_type'] = CHECKSUM_TYPES['ADLER32']
metadata['checksum_value'] = checksum_value
else:
msg = ('Unable to calculate checksum for file: {}'.
format(metadata['basename']))
logger.warning(msg)
metadata['checksum_type'] = None
metadata['checksum_value'] = None
def verify_fk_relationships(metadata):
"""
Identify the variable_request and data_request objects corresponding to this file.
:param dict metadata: Metadata identified for this file.
:raises SubmissionError: If there are no existing entries in the
database for `Project`, `ClimateModel` or `Experiment`.
"""
foreign_key_types = [
(Project, 'project'),
(ClimateModel, 'climate_model'),
(Experiment, 'experiment'),
(Institute, 'institute'),
(ActivityId, 'activity_id')]
# get values for each of the foreign key types
for object_type, object_str in foreign_key_types:
result = match_one(object_type, short_name=metadata[object_str])
if result:
metadata[object_str] = result
else:
msg = ("No {} '{}' found for file: {}. Please create this object "
"and resubmit.".format(object_str.replace('_', ' '),
metadata[object_str], metadata['basename']))
logger.error(msg)
raise SubmissionError(msg)
# find the data request
dreq_match = match_one(
DataRequest,
project=metadata['project'],
institute=metadata['institute'],
climate_model=metadata['climate_model'],
experiment=metadata['experiment'],
variable_request__table_name=metadata['table'],
variable_request__cmor_name=metadata['var_name'],
rip_code=metadata['rip_code']
)
if dreq_match:
metadata['data_request'] = dreq_match
metadata['variable'] = dreq_match.variable_request
else:
# if cmor_name doesn't match then it may be a variable where out_name
# is different to cmor_name so check these
dreq_matches = DataRequest.objects.filter(
project=metadata['project'],
institute=metadata['institute'],
climate_model=metadata['climate_model'],
experiment=metadata['experiment'],
variable_request__table_name=metadata['table'],
variable_request__var_name=metadata['var_name'],
rip_code=metadata['rip_code']
)
if dreq_matches.count() == 0:
msg = ('No data request found for file: {}.'.
format(metadata['basename']))
logger.error(msg)
raise FileValidationError(msg)
elif dreq_matches.count() == 1:
metadata['data_request'] = dreq_matches[0]
metadata['variable'] = dreq_matches[0].variable_request
else:
try:
plev_name = _guess_plev_name(metadata)
except Exception:
msg = ('Cannot open file to determine plev name: {}.'.
format(metadata['basename']))
logger.error(msg)
raise FileValidationError(msg)
if plev_name:
plev_matches = dreq_matches.filter(
variable_request__dimensions__icontains=plev_name
)
if plev_matches.count() == 1:
metadata['data_request'] = plev_matches[0]
metadata['variable'] = plev_matches[0].variable_request
elif plev_matches.count() == 0:
msg = ('No data requests found with plev {} for file: {}.'.
format(plev_name, metadata['basename']))
logger.error(msg)
raise FileValidationError(msg)
else:
msg = ('Multiple data requests found with plev {} for '
'file: {}.'.format(plev_name, metadata['basename']))
logger.error(msg)
raise FileValidationError(msg)
else:
msg = ('Unable to determine plev name: {}.'.
format(metadata['basename']))
logger.error(msg)
raise FileValidationError(msg)
def update_database_submission(validated_metadata, data_sub, files_online=True,
file_version=None):
"""
Create entries in the database for the files in this submission.
:param list validated_metadata: A list containing the metadata dictionary
generated for each file
:param pdata_app.models.DataSubmission data_sub: The data submission object
to update.
:param bool files_online: True if the files are online.
:returns:
"""
for data_file in validated_metadata:
create_database_file_object(data_file, data_sub, files_online,
file_version)
data_sub.status = STATUS_VALUES['VALIDATED']
data_sub.save()
def read_json_file(filename):
"""
Read a JSON file describing the files in this submission.
:param str filename: The name of the JSON file to read.
:returns: a list of dictionaries containing the validated metadata
"""
with open(filename) as fh:
metadata = json.load(fh, object_hook=_dict_to_object)
logger.debug('Metadata for {} files read from JSON file {}'.format(
len(metadata), filename))
return metadata
def write_json_file(validated_metadata, filename):
"""
Write a JSON file describing the files in this submission.
:param list validated_metadata: A list containing the metadata dictionary
generated for each file
:param str filename: The name of the JSON file to write the validated data
to.
"""
with open(filename, 'w') as fh:
json.dump(list(validated_metadata), fh, default=_object_to_default,
indent=4)
logger.debug('Metadata written to JSON file {}'.format(filename))
def create_database_file_object(metadata, data_submission, file_online=True,
file_version=None):
"""
Create a database entry for a data file
:param dict metadata: This file's metadata.
:param pdata_app.models.DataSubmission data_submission: The parent data
submission.
:param bool file_online: True if the file is online.
:param str file_version: The version string to apply to each file. The
string from the incoming directory name or the current date is used
if a string isn't supplied.
:returns:
"""
# get a fresh DB connection after exiting from parallel operation
django.db.connections.close_all()
time_units = Settings.get_solo().standard_time_units
if file_version:
version_string = file_version
else:
# find the version number from the date in the submission directory path
date_string = re.search(r'(?<=/incoming/)(\d{8})',
metadata['directory'])
if date_string:
date_string = date_string.group(0)
version_string = 'v' + date_string
else:
today = datetime.datetime.utcnow()
version_string = today.strftime('v%Y%m%d')
# if the file isn't online (e.g. loaded from JSON) then directory is blank
directory = metadata['directory'] if file_online else None
# create a data file. If the file already exists in the database with
# identical metadata then nothing happens. If the file exists but with
# slightly different metadata then django.db.utils.IntegrityError is
# raised
try:
data_file = DataFile.objects.create(
name=metadata['basename'],
incoming_name=metadata['basename'],
incoming_directory=metadata['directory'],
directory=directory, size=metadata['filesize'],
project=metadata['project'],
institute=metadata['institute'],
climate_model=metadata['climate_model'],
activity_id=metadata['activity_id'],
experiment=metadata['experiment'],
variable_request=metadata['variable'],
data_request=metadata['data_request'],
frequency=metadata['frequency'], rip_code=metadata['rip_code'],
start_time=pdt2num(metadata['start_date'], time_units,
metadata['calendar']) if metadata['start_date']
else None,
end_time=pdt2num(metadata['end_date'], time_units,
metadata['calendar'], start_of_period=False) if
metadata['start_date'] else None,
time_units=time_units, calendar=metadata['calendar'],
version=version_string,
data_submission=data_submission, online=file_online,
grid=metadata.get('grid'),
tape_url=metadata.get('tape_url')
)
except django.db.utils.IntegrityError as exc:
msg = ('Unable to submit file {}: {}'.format(metadata['basename'],
exc.__str__()))
logger.error(msg)
raise SubmissionError(msg)
if metadata['checksum_value']:
checksum = get_or_create(Checksum, data_file=data_file,
checksum_value=metadata['checksum_value'],
checksum_type=metadata['checksum_type'])
def move_rejected_files(submission_dir):
"""
Move the entire submission to a rejected directory two levels up from the
submission directory.
:param str submission_dir:
:returns: The path to the submission after the function has run.
"""
rejected_dir = os.path.normpath(os.path.join(submission_dir, '..',
'..', 'rejected'))
try:
if not os.path.exists(rejected_dir):
os.mkdir(rejected_dir)
shutil.move(submission_dir, rejected_dir)
except (IOError, OSError):
msg = ("Unable to move the directory. Leaving it in its current "
"location")
logger.error(msg)
return submission_dir
submission_rejected_dir = os.path.join(rejected_dir,
os.path.basename(os.path.abspath(submission_dir)))
msg = 'Data submission moved to {}'.format(submission_rejected_dir)
logger.error(msg)
return submission_rejected_dir
def send_user_rejection_email(data_sub):
"""
Send an email to the submission's creator warning them of validation
failure.
:param pdata_app.models.DataSubmission data_sub:
"""
val_tool_url = ('http://proj.badc.rl.ac.uk/primavera-private/wiki/JASMIN/'
'HowTo#SoftwarepackagesinstalledonthePRIMAVERAworkspace')
contact_user_id = Settings.get_solo().contact_user_id
contact_user = User.objects.get(username=contact_user_id)
contact_string = '{} {} ({})'.format(contact_user.first_name,
contact_user.last_name,
contact_user.email)
msg = (
'Dear {first_name} {surname},\n'
'\n'
'Your data submission in {incoming_dir} has failed validation and '
'has been moved to {rejected_dir}.\n'
'\n'
'Please run the validation tool ({val_tool_url}) to check why this '
'submission failed validation. Once the data is passing validation '
'then please resubmit the corrected data.\n'
'\n'
'Please contact {contact_person} if you '
'have any questions.\n'
'\n'
'Thanks,\n'
'\n'
'{friendly_name}'.format(
first_name=data_sub.user.first_name, surname=data_sub.user.last_name,
incoming_dir=data_sub.incoming_directory,
rejected_dir=data_sub.directory, val_tool_url=val_tool_url,
contact_person=contact_string,
friendly_name=contact_user.first_name
))
_email = EmailQueue.objects.create(
recipient=data_sub.user,
subject='[PRIMAVERA_DMT] Data submission failed validation',
message=msg)
def send_admin_rejection_email(data_sub):
"""
Send the admin user an email warning them that a submission failed due to
a server problem (missing data request, etc).
:param pdata_app.models.DataSubmission data_sub:
"""
admin_user_id = Settings.get_solo().contact_user_id
admin_user = User.objects.get(username=admin_user_id)
msg = (
'Data submission {} from incoming directory {} failed validation due '
'to a SubmissionError being raised. Please run the validation script '
'manually on this submission and correct the error.\n'
'\n'
'Thanks,\n'
'\n'
'{}'.format(data_sub.id, data_sub.incoming_directory,
admin_user.first_name)
)
_email = EmailQueue.objects.create(
recipient=admin_user,
subject=('[PRIMAVERA_DMT] Submission {} failed validation'.
format(data_sub.id)),
message=msg
)
def set_status_rejected(data_sub, rejected_dir):
"""
Set the data submission's status to be rejected and update the path to
point to where the data now lives.
:param pdata_app.models.DataSubmission data_sub: The data submission object.
:param str rejected_dir: The name of the directory that the rejected files
have been moved to.
"""
data_sub.status = STATUS_VALUES['REJECTED']
data_sub.directory = rejected_dir
data_sub.save()
def add_tape_url(metadata, tape_base_url, submission_dir):
"""
Add to each file's metadata its URL in the tape system. The URL is
calculated by finding the file's path relative to the submission directory
and appending this to the base URL.
:param list metadata: a list of the dictionary objects corresponding to
each file
:param str tape_base_url: the top level url of the data in the tape system
:param str submission_dir: the top-level directory of the submission
"""
for data_file in metadata:
rel_dir = os.path.relpath(data_file['directory'], submission_dir)
data_file['tape_url'] = tape_base_url + '/' + rel_dir
def run_prepare(file_paths, num_processes):
"""
Run PrePARE on each file in the submission. Any failures are reported
as an error with the logging and an exception is raised at the end of
processing if one or more files has failed.
:param list file_paths: The paths of the files in the submission's
directory.
:param int num_processes: The number of processes to use in parallel.
:raises SubmissionError: at the end of checking if one or more files has
failed PrePARE's checks.
"""
logger.debug('Starting PrePARE on {} files'.format(len(file_paths)))
jobs = []
manager = Manager()
params = manager.Queue()
file_failed = manager.Event()
if num_processes != 1:
for i in range(num_processes):
p = Process(target=_run_prepare, args=(params, file_failed))
jobs.append(p)
p.start()
for item in itertools.chain(file_paths, (None,) * num_processes):
params.put(item)
if num_processes == 1:
_run_prepare(params, file_failed)
else:
for j in jobs:
j.join()
if file_failed.is_set():
logger.error('Not all files passed PrePARE')
raise SubmissionError()
logger.debug('All files successfully checked by PrePARE')
def _contents_hdf_check(cube, metadata, max_size=MAX_DATA_INTEGRITY_SIZE):
"""
Check that the entire data of the file can be read into memory without
any errors. Corrupt files typically generate an HDF error. Files larger
than `max_size` are not read and a warning is displayed. Most files are
under this limit, but those over are excessively slow to validate.
:param iris.cube.Cube cube: The cube to check
:param dict metadata: Metadata obtained from the file
:param int max_size: Files larger than this (in bytes) are not checked
:returns: True if file read ok.
:raises FileValidationError: If there was any problem reading the data.
"""
if os.path.getsize(os.path.join(metadata['directory'],
metadata['basename'])) > max_size:
logger.warning('File {} is larger than {} bytes. File contents '
'reading check not run.'.format(metadata['basename'],
max_size))
return True
try:
_data = cube.data
except Exception:
msg = 'Unable to read data from file {}.'.format(metadata['basename'])
raise FileValidationError(msg)
else:
return True
def _run_prepare(params, file_failed):
"""
Check a single file with PrePARE. This function is called in parallel by
multiprocessing.
:param multiprocessing.Manager.Queue params: A queue, with each item being
the full path of a file in the submission to check.
:param multiprocessing.Manager.Event file_failed: If set then one or more
files has failed validation.
"""
while True:
file_path = params.get()
if file_path is None:
return
skip_this_var = False
for skip_var in SKIP_PREPARE_VARS:
if skip_var in file_path:
logger.debug('Skipping running PrePARE on {}'.
format(file_path))
skip_this_var = True
break
if skip_this_var:
continue
prepare_script = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'run_prepare.sh'
)
prep_res = subprocess.run([prepare_script, file_path],
stdout=subprocess.PIPE)
if prep_res.returncode:
logger.error('File {} failed PrePARE\n{}'.
format(file_path, prep_res.stdout.decode('utf-8')))
file_failed.set()
def _get_submission_object(submission_dir):
"""
:param str submission_dir: The path of the submission's top level
directory.
:returns: The object corresponding to the submission.
:rtype: pdata_app.models.DataSubmission
"""
try:
data_sub = DataSubmission.objects.get(incoming_directory=submission_dir)
except django.core.exceptions.MultipleObjectsReturned:
msg = 'Multiple DataSubmissions found for directory: {}'.format(
submission_dir)
logger.error(msg)
raise SubmissionError(msg)
except django.core.exceptions.ObjectDoesNotExist:
msg = ('No DataSubmissions have been found in the database for '
'directory: {}. Please create a submission through the web '
'interface.'.format(submission_dir))
logger.error(msg)
raise SubmissionError(msg)
return data_sub
def _guess_plev_name(metadata):
"""
Guess the name of the plev in the data request dimensions.
:param dict metadata: The file's metadata dictionary.
:returns: The name of the pressure levels from the data request or none
if it can't be guessed.
:rtype: str
"""
rootgrp = Dataset(os.path.join(metadata['directory'],
metadata['basename']))
level_name = None
if 'plev' in rootgrp.dimensions:
level_name = 'plev'
elif 'lev' in rootgrp.dimensions:
level_name = 'lev'
if level_name:
num_plevs = len(rootgrp.dimensions[level_name])
if num_plevs == 4:
plev_val = 'plev4'
elif num_plevs == 7:
plev_val = 'plev7h'
elif num_plevs == 27:
plev_val = 'plev27'
else:
plev_val = None
else:
plev_val = None
rootgrp.close()
return plev_val
def _object_to_default(obj):
"""
Convert known objects to a form that can be serialized by JSON
"""
if isinstance(obj, iris.time.PartialDateTime):
obj_dict = {'__class__': obj.__class__.__name__,
'__module__': obj.__module__}
kwargs = {}
for k, v in re.findall(r'(\w+)=(\d+)', repr(obj)):
kwargs[k] = int(v)
obj_dict['__kwargs__'] = kwargs
return obj_dict
elif isinstance(obj, (ActivityId, ClimateModel, Experiment, Institute,
Project)):
obj_dict = {'__class__': obj.__class__.__name__,
'__module__': obj.__module__}
obj_dict['__kwargs__'] = {'short_name': obj.short_name}
return obj_dict
elif isinstance(obj, VariableRequest):
obj_dict = {'__class__': obj.__class__.__name__,
'__module__': obj.__module__}
obj_dict['__kwargs__'] = {'table_name': obj.table_name,
'cmor_name': obj.cmor_name}
return obj_dict
elif isinstance(obj, DataRequest):
obj_dict = {'__class__': obj.__class__.__name__,
'__module__': obj.__module__}
obj_dict['__kwargs__'] = {
'variable_request__table_name': obj.variable_request.table_name,
'variable_request__cmor_name': obj.variable_request.cmor_name,
'institute__short_name': obj.institute.short_name,
'climate_model__short_name': obj.climate_model.short_name,
'experiment__short_name': obj.experiment.short_name,
'rip_code': obj.rip_code
}
return obj_dict
def _dict_to_object(dict_):
"""
Convert a dictionary to an object
"""
if '__class__' in dict_:
module = __import__(dict_['__module__'], fromlist=[dict_['__class__']])
klass = getattr(module, dict_['__class__'])
if dict_['__class__'] == 'PartialDateTime':
inst = klass(**dict_['__kwargs__'])
elif dict_['__class__'] in ('ActivityId', 'ClimateModel',
'Experiment', 'Institute', 'Project',
'VariableRequest', 'DataRequest'):
inst = match_one(klass, **dict_['__kwargs__'])
else:
msg = ('Cannot load from JSON files class {}'.
format(dict_['__class__']))
raise NotImplementedError(msg)
else:
inst = dict_
return inst
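# Editor's note (not part of the original script): _object_to_default and
# _dict_to_object are the two halves of the JSON round trip used by
# write_json_file() and read_json_file(). json.dump(..., default=_object_to_default)
# turns a Django object such as an Experiment with short_name 'hist-1950' into
# {"__class__": "Experiment", "__module__": "pdata_app.models",
#  "__kwargs__": {"short_name": "hist-1950"}}
# and json.load(..., object_hook=_dict_to_object) uses match_one() with those
# kwargs to fetch the same object back from the database. The experiment name
# here is only an illustrative example.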
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Validate and create a '
'PRIMAVERA data submission')
parser.add_argument('directory', help="the submission's top-level "
"directory")
parser.add_argument('-j', '--mip_era', help='the mip_era that data is '
'ultimately being submitted to '
'(default: %(default)s)',
default='CMIP6')
parser.add_argument('-f', '--file-format', help='the CMOR version of the '
'input netCDF files being '
'submitted (CMIP5 or CMIP6)'
' (default: %(default)s)',
default='CMIP6')
group = parser.add_mutually_exclusive_group()
group.add_argument('-o', '--output', help='write the new entries to the '
'JSON file specified rather '
'than to the database', type=str)
group.add_argument('-i', '--input', help='read the entries to add to the '
'database from the JSON file '
'specified rather than by '
'validating files', type=str)
parser.add_argument('-t', '--tape-base-url', help='add a tape url to each '
'file with the base being specified on the command line', type=str)
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('-p', '--processes', help='the number of parallel processes '
'to use (default: %(default)s)', default=8, type=int)
parser.add_argument('-s', '--version-string', help='an optional version to '
'use on all files. If not specified, the string in the incoming '
'directory name or the current date is used', type=str)
parser.add_argument('-r', '--relaxed', help='create a submission from '
'validated files, ignoring failed files (default behaviour is to only '
'create a submission when all files pass validation)', action='store_true')
parser.add_argument('-v', '--validate_only', help='only validate the input, '
'do not create a data submission', action='store_true')
parser.add_argument('-n', '--no-prepare', help="don't run PrePARE",
action='store_true')
parser.add_argument('-d', '--data-limit', help='the maximum size of file '
'(in bytes) to load into '
'memory for an HDF '
'integrity check (default: '
'%(default)s)',
type=int, default=MAX_DATA_INTEGRITY_SIZE)
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
if args.processes == 1 and not iris.__version__.startswith('1.'):
# if not multiprocessing then limit the number of Dask threads
# this can't seem to be limited when using multiprocessing
dask.config.set(pool=ThreadPool(2))
submission_dir = os.path.normpath(args.directory)
logger.debug('Submission directory: %s', submission_dir)
logger.debug('Project: %s', args.mip_era)
logger.debug('Processes requested: %s', args.processes)
try:
if args.input:
validated_metadata = read_json_file(args.input)
data_sub = _get_submission_object(submission_dir)
files_online = False
else:
files_online = True
data_files = list_files(submission_dir)
logger.debug('%s files identified', len(data_files))
if not args.validate_only and not args.output:
data_sub = _get_submission_object(submission_dir)
if data_sub.status != 'ARRIVED':
msg = "The submission's status is not ARRIVED."
logger.error(msg)
raise SubmissionError(msg)
try:
if not args.no_prepare:
run_prepare(data_files, args.processes)
validated_metadata = list(identify_and_validate(data_files,
args.mip_era, args.processes, args.file_format))
except SubmissionError:
if not args.validate_only and not args.output:
send_admin_rejection_email(data_sub)
raise
logger.debug('%s files validated successfully',
len(validated_metadata))
if args.validate_only:
logger.debug('Data submission not run (-v option specified)')
logger.debug('Processing complete')
sys.exit(0)
if not args.relaxed and len(validated_metadata) != len(data_files):
# if not args.output:
# rejected_dir = move_rejected_files(submission_dir)
# set_status_rejected(data_sub, rejected_dir)
# send_user_rejection_email(data_sub)
msg = ('Not all files passed validation. Please fix these '
'errors and then re-run this script.')
logger.error(msg)
raise SubmissionError(msg)
if args.tape_base_url:
add_tape_url(validated_metadata, args.tape_base_url, submission_dir)
if args.output:
write_json_file(validated_metadata, args.output)
else:
update_database_submission(validated_metadata, data_sub,
files_online, args.version_string)
logger.debug('%s files submitted successfully',
match_one(DataSubmission, incoming_directory=submission_dir).get_data_files().count())
except SubmissionError:
sys.exit(1)
logger.debug('Processing complete')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
```
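`identify_and_validate()` and `run_prepare()` above share the same parallel pattern: a `multiprocessing.Manager` queue of work items padded with one `None` sentinel per worker process, so every worker exits once it pulls a `None`. The self-contained sketch below reproduces only that pattern; the worker's squaring task and all names are invented for illustration.
```python
import itertools
from multiprocessing import Manager, Process


def worker(params, results):
    """Pull items until a None sentinel is received, then exit."""
    while True:
        item = params.get()
        if item is None:
            return
        results.append(item * item)  # stand-in for validating one file


if __name__ == '__main__':
    num_processes = 4
    work_items = list(range(10))

    manager = Manager()
    params = manager.Queue()
    results = manager.list()

    jobs = [Process(target=worker, args=(params, results))
            for _ in range(num_processes)]
    for job in jobs:
        job.start()

    # Queue the real work followed by one None per worker so that every
    # process eventually sees a sentinel and terminates.
    for item in itertools.chain(work_items, (None,) * num_processes):
        params.put(item)

    for job in jobs:
        job.join()

    print(sorted(results))  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
```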
#### File: primavera-dmt/test/test_retrieve_request.py
```python
from __future__ import unicode_literals, division, absolute_import
import datetime
try:
from unittest import mock
except ImportError:
import mock
import django
django.setup()
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.timezone import make_aware
from pdata_app.utils.dbapi import get_or_create, match_one
from pdata_app.models import (Project, Institute, ClimateModel, ActivityId,
Experiment, VariableRequest, DataRequest,
RetrievalRequest, DataFile, DataSubmission,
Settings)
from vocabs.vocabs import (CALENDARS, FREQUENCY_VALUES, STATUS_VALUES,
VARIABLE_TYPES)
from scripts.retrieve_request import main, get_tape_url
import scripts.retrieve_request
class TestIntegrationTests(TestCase):
"""Integration tests run through the unittest framework and mock"""
def setUp(self):
# set the base output directory to something appropriate for these
# paths
mock.patch.object(scripts.retrieve_request, 'BASE_OUTPUT_DIR',
return_value='/gws/nopw/j04/primavera5/stream1')
# mock any external calls
patch = mock.patch('scripts.retrieve_request.run_command')
self.mock_run_cmd = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.rename')
self.mock_rename = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.mkdir')
self.mock_mkdir = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.makedirs')
self.mock_makedirs = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.path.exists')
self.mock_exists = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.symlink')
self.mock_symlink = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.logger')
self.mock_logger = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request._email_user_success')
self.mock_email = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.os.remove')
self.mock_remove = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch('scripts.retrieve_request.shutil.rmtree')
self.mock_rmtree = patch.start()
self.addCleanup(patch.stop)
# create the necessary DB objects
proj = get_or_create(Project, short_name="CMIP6", full_name="6th "
"Coupled Model Intercomparison Project")
climate_model = get_or_create(ClimateModel, short_name="MY-MODEL",
full_name="Really good model")
institute = get_or_create(Institute, short_name='MOHC',
full_name='Met Office Hadley Centre')
act_id = get_or_create(ActivityId, short_name='HighResMIP',
full_name='High Resolution Model Intercomparison Project')
experiment = get_or_create(Experiment, short_name="experiment",
full_name="Really good experiment")
incoming_directory = '/gws/MOHC/MY-MODEL/incoming/v12345678'
var1 = get_or_create(VariableRequest, table_name='my-table',
long_name='very descriptive', units='1', var_name='my-var',
standard_name='var-name', cell_methods='time: mean',
positive='optimistic', variable_type=VARIABLE_TYPES['real'],
dimensions='massive', cmor_name='my-var', modeling_realm='atmos',
frequency=FREQUENCY_VALUES['ann'], cell_measures='', uid='123abc')
var2 = get_or_create(VariableRequest, table_name='your-table',
long_name='very descriptive', units='1', var_name='your-var',
standard_name='var-name', cell_methods='time: mean',
positive='optimistic', variable_type=VARIABLE_TYPES['real'],
dimensions='massive', cmor_name='your-var', modeling_realm='atmos',
frequency=FREQUENCY_VALUES['ann'], cell_measures='', uid='123abc')
self.dreq1 = get_or_create(DataRequest, project=proj,
institute=institute, climate_model=climate_model,
experiment=experiment, variable_request=var1, rip_code='r1i1p1f1',
request_start_time=0.0, request_end_time=23400.0,
time_units='days since 1950-01-01', calendar='360_day')
self.dreq2 = get_or_create(DataRequest, project=proj,
institute=institute, climate_model=climate_model,
experiment=experiment, variable_request=var2, rip_code='r1i1p1f1',
request_start_time=0.0, request_end_time=23400.0,
time_units='days since 1950-01-01', calendar='360_day')
self.user = get_or_create(User,
username=Settings.get_solo().contact_user_id)
dsub = get_or_create(DataSubmission, status=STATUS_VALUES['VALIDATED'],
incoming_directory=incoming_directory,
directory=incoming_directory, user=self.user)
df1 = get_or_create(DataFile, name='file_one.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var1, data_request=self.dreq1,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=0., end_time=359.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:1234',
data_submission=dsub)
self.df1 = df1
df2 = get_or_create(DataFile, name='file_two.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var2, data_request=self.dreq2,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=0., end_time=359.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:5678',
data_submission=dsub)
self.df2 = df2
df3 = get_or_create(DataFile, name='file_three.nc',
incoming_directory=incoming_directory, directory=None, size=1,
project=proj, climate_model=climate_model, experiment=experiment,
institute=institute, variable_request=var2, data_request=self.dreq2,
frequency=FREQUENCY_VALUES['ann'], activity_id=act_id,
rip_code='r1i1p1f1', online=False, start_time=360., end_time=719.,
time_units='days since 1950-01-01', calendar=CALENDARS['360_day'],
grid='gn', version='v12345678', tape_url='et:8765',
data_submission=dsub)
self.df3 = df3
def test_simplest(self):
ret_req = get_or_create(RetrievalRequest, requester=self.user,
start_year=1000, end_year=3000, id=999999)
ret_req.data_request.add(self.dreq1)
ret_req.save()
class ArgparseNamespace(object):
retrieval_id = ret_req.id
no_restore = False
skip_checksums = True
alternative = None
incoming = False
self.mock_exists.side_effect = [
False, # if os.path.exists(retrieval_dir):
True, # if not os.path.exists(extracted_file_path):
True, # if not os.path.exists(drs_dir):
False # if os.path.exists(dest_file_path):
]
ns = ArgparseNamespace()
get_tape_url('et:1234', [self.df1], ns)
df = match_one(DataFile, name='file_one.nc')
self.assertIsNotNone(df)
self.mock_rename.assert_called_once_with(
'/gws/nopw/j04/primavera5/.et_retrievals/ret_999999/'
'batch_01234/gws/MOHC/MY-MODEL/incoming/v12345678/file_one.nc',
'/gws/nopw/j04/primavera5/stream1/CMIP6/HighResMIP/'
'MOHC/MY-MODEL/experiment/r1i1p1f1/my-table/my-var/gn/v12345678/'
'file_one.nc'
)
self.assertTrue(df.online)
self.assertEqual(df.directory, '/gws/nopw/j04/primavera5/'
'stream1/CMIP6/HighResMIP/MOHC/'
'MY-MODEL/experiment/r1i1p1f1/'
'my-table/my-var/gn/v12345678')
def test_multiple_tapes(self):
ret_req = get_or_create(RetrievalRequest, requester=self.user,
start_year=1000, end_year=3000)
ret_req.data_request.add(self.dreq1, self.dreq2)
ret_req.save()
class ArgparseNamespace(object):
retrieval_id = ret_req.id
no_restore = False
skip_checksums = True
alternative = None
incoming = False
self.mock_exists.side_effect = [
# first tape_url
False, # if os.path.exists(retrieval_dir):
True, # if not os.path.exists(extracted_file_path):
True, # if not os.path.exists(drs_dir):
False, # if os.path.exists(dest_file_path):
# second tape_url
False, # if os.path.exists(retrieval_dir):
True, # if not os.path.exists(extracted_file_path):
True, # if not os.path.exists(drs_dir):
False, # if os.path.exists(dest_file_path):
# third tape_url
False, # if os.path.exists(retrieval_dir):
True, # if not os.path.exists(extracted_file_path):
True, # if not os.path.exists(drs_dir):
False # if os.path.exists(dest_file_path):
]
ns = ArgparseNamespace()
get_tape_url('et:1234', [self.df1], ns)
get_tape_url('et:5678', [self.df2], ns)
get_tape_url('et:8765', [self.df3], ns)
self.assertEqual(self.mock_rename.call_count, 3)
for data_file in DataFile.objects.all():
self.assertTrue(data_file.online)
self.assertIn(data_file.directory, [
'/gws/nopw/j04/primavera5/stream1/CMIP6/'
'HighResMIP/MOHC/MY-MODEL/experiment/r1i1p1f1/my-table/'
'my-var/gn/v12345678',
'/gws/nopw/j04/primavera5/stream1/CMIP6/HighResMIP/'
'MOHC/MY-MODEL/experiment/r1i1p1f1/your-table/your-var/'
'gn/v12345678'
])
def test_bad_retrieval_id(self):
# check that the retrieval request id doesn't exist
ret_req_id = 1000000
if RetrievalRequest.objects.filter(id=ret_req_id):
raise ValueError('retrieval id already exists')
class ArgparseNamespace(object):
retrieval_id = ret_req_id
no_restore = False
skip_checksums = True
alternative = None
incoming = False
ns = ArgparseNamespace()
self.assertRaises(SystemExit, main, ns)
self.mock_logger.error.assert_called_with('Unable to find retrieval id '
'{}'.format(ret_req_id))
def test_retrieval_already_complete(self):
completion_time = datetime.datetime(2017, 10, 31, 23, 59, 59)
completion_time = make_aware(completion_time)
ret_req = get_or_create(RetrievalRequest, requester=self.user,
date_complete=completion_time, start_year=1000,
end_year=3000)
class ArgparseNamespace(object):
retrieval_id = ret_req.id
no_restore = False
skip_checksums = True
alternative = None
incoming = False
ns = ArgparseNamespace()
self.assertRaises(SystemExit, main, ns)
self.mock_logger.error.assert_called_with('Retrieval {} was already '
'completed, at {}.'.format(
ret_req.id, completion_time.strftime('%Y-%m-%d %H:%M')))
def test_alternative_dir(self):
ret_req = get_or_create(RetrievalRequest, requester=self.user,
start_year=1000, end_year=3000, id=999999)
ret_req.data_request.add(self.dreq1)
ret_req.save()
class ArgparseNamespace(object):
retrieval_id = ret_req.id
no_restore = False
skip_checksums = True
alternative = '/gws/nopw/j04/primavera3/spare_dir'
incoming = False
self.mock_exists.side_effect = [
False, # if os.path.exists(retrieval_dir):
True, # if not os.path.exists(extracted_file_path):
True, # if not os.path.exists(drs_dir):
False, # if os.path.exists(dest_file_path):
True # if not os.path.exists(primary_path):
]
ns = ArgparseNamespace()
get_tape_url('et:1234', [self.df1], ns)
self.mock_rename.assert_called_once_with(
'/gws/nopw/j04/primavera3/.et_retrievals/ret_999999/'
'batch_01234/gws/MOHC/MY-MODEL/incoming/v12345678/file_one.nc',
'/gws/nopw/j04/primavera3/spare_dir/CMIP6/HighResMIP/'
'MOHC/MY-MODEL/experiment/r1i1p1f1/my-table/my-var/gn/'
'v12345678/file_one.nc'
)
self.mock_symlink.assert_called_once_with(
'/gws/nopw/j04/primavera3/spare_dir/CMIP6/HighResMIP/'
'MOHC/MY-MODEL/experiment/r1i1p1f1/my-table/my-var/gn/'
'v12345678/file_one.nc',
'/gws/nopw/j04/primavera5/stream1/CMIP6/HighResMIP/'
'MOHC/MY-MODEL/experiment/r1i1p1f1/my-table/my-var/gn/v12345678/'
'file_one.nc'
)
``` |
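The filesystem behaviour in the tests above is scripted by giving the patched `os.path.exists` a `side_effect` list, so successive calls return the queued answers in order. A minimal sketch of that `mock` feature, independent of the retrieval code, is shown below; the paths are placeholders.
```python
from unittest import mock

# Each call to a Mock with a side_effect list consumes the next value in turn.
fake_exists = mock.Mock(side_effect=[False, True, True, False])

print(fake_exists('/retrieval_dir'))        # False
print(fake_exists('/extracted_file_path'))  # True
print(fake_exists('/drs_dir'))              # True
print(fake_exists('/dest_file_path'))       # False
```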
{
"source": "JonShantz/KijijiCarScraper",
"score": 3
} |
#### File: JonShantz/KijijiCarScraper/KijijiCarScraper.py
```python
import requests
from bs4 import BeautifulSoup
from csv import writer
# Returns the element's text if it is present, and "N/A" if the element is None
def process(rawdata):
if rawdata is None:
return "N/A"
else:
return rawdata.get_text()
# Flagged word list: "As is", "Rebuild", "Rebuilt", "Salvaged", "Salvage"
# Checking for all of these might be worth implementing later, but it would also add computation time,
# so for now only "as is" is flagged.
def urlscraper(url, fname):
"""
Returns None and creates a CSV file, fname, containing the data for all car ads found at the given url.
urlscraper: Str Str -> None
:param url: A Kijiji URL that has Car Advertisements
:param fname: Name of the CSV file that the data gets stored in. Remember to include .csv at the end
:return: None
:effects: Creates and writes to a CSV file
"""
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
LocationDatePosted = soup.find(class_="location").get_text().strip()
DatePosted = soup.find(class_="location").find(class_="date-posted").get_text().strip()
Location = LocationDatePosted[:LocationDatePosted.find(DatePosted)]
def isasis(lod):
"""
Returns True if the phrase "as is" appears in the list of description words, lod, and False otherwise.
isasis: (listof Str) -> Bool
requires: lod is a lower-case list of Strs
:param lod: the words of an ad description, lower-cased and split on whitespace
:return: True if two consecutive words are "as" and "is", False otherwise
"""
for pos in range(len(lod) - 1):
if lod[pos] == "as" and lod[pos + 1] == "is":
return True
return False
# List of posts from url
# Going to each ad separately and pulling the data.
with open(fname, 'w') as csv_file:
csv_writer = writer(csv_file)
headers = ["Location", "Price", "Make", "Model", "Year", "KMs", "Trim", "Transmission", "Body", "Color",
"Drivetrain", "Doors", "Seats", "isDealer", "isFlagged", "Address", "URL", "Description"]
csv_writer.writerow(headers)
# Grab the posts from the first page
posts = soup.find_all(class_="info-container")
for post in posts:
try:
if post is None:
pass
else:
URLUnique = post.find('a')['href']
URLRoot = "https://www.kijiji.ca"
URL = URLRoot + URLUnique
CarResponse = requests.get(URL)
CarSoup = BeautifulSoup(CarResponse.text, 'html.parser')
# Pulling data on each car from the info panel
isDealer = post.find(class_="dealer-logo-image") is not None
# Want to pull description from CarSoup instead of from the homepage (Posts)
Description = process(CarSoup.find(class_="descriptionContainer-3544745383"))[11:]
if Description is not None:
isFlagged = isasis(Description.lower().split())
else:
isFlagged = False
if CarSoup.find(itemprop="address") is not None:
Address = CarSoup.find(itemprop="address").get_text()
else:
Address = "N/a"
CarData = CarSoup.find(class_="attributeListWrapper-1585172129")
Price = process(CarSoup.find(class_="priceContainer-2538502416"))
Year = process(CarData.find(itemprop="vehicleModelDate"))
Make = process(CarData.find(itemprop="brand"))
Model = process(CarData.find(itemprop="model"))
Trim = process(CarData.find(itemprop="vehicleConfiguration"))
Color = process(CarData.find(itemprop="color"))
Body = process(CarData.find(itemprop="bodyType"))
Doors = process(CarData.find(itemprop="numberOfDoors"))
Seats = process(CarData.find(itemprop="seatingCapacity"))
Drivetrain = process(CarData.find(itemprop="driveWheelConfiguration"))
Transmission = process(CarData.find(itemprop="vehicleTransmission"))
KMs = process(CarData.find(itemprop="mileageFromOdometer"))
# Writing the line to the file
csv_writer.writerow([Location, Price, Make, Model, Year, KMs, Trim, Transmission, Body, Color,
Drivetrain, Doors, Seats, isDealer, isFlagged, Address, URL, Description])
except RuntimeError:
pass
# Repeat for as long as there is a "Next" page link. This isn't elegant, and possibly not efficient,
# but it gets the job done.
# Note: pagination initially appeared to fail when written as a while loop (an if statement worked),
# but it now works as a while loop; this note is kept as legacy information for troubleshooting.
while soup.find(title="Next") is not None:
pageunique = soup.find(title="Next")['data-href']
pageroot = "https://www.kijiji.ca"
page = pageroot + pageunique
# This is the Page level
response = requests.get(page)
soup = BeautifulSoup(response.text, 'html.parser')
posts = soup.find_all(class_="info-container")
# This is the ad level ('Clicking' on each URL)
for post in posts:
try:
URLUnique = post.find('a')['href']
URLRoot = "https://www.kijiji.ca"
URL = URLRoot + URLUnique
CarResponse = requests.get(URL)
CarSoup = BeautifulSoup(CarResponse.text, 'html.parser')
# Pulling data on each car from the info panel
isDealer = post.find(class_="dealer-logo-image") is not None
# Take Char 11-End to remove "Description" which is at the beginning of each description.
Description = process(CarSoup.find(class_="descriptionContainer-3544745383"))[11:]
if Description is not None:
isFlagged = isasis(Description.lower().split())
else:
isFlagged = False
if CarSoup.find(itemprop="address") is not None:
Address = CarSoup.find(itemprop="address").get_text()
else:
Address = "N/a"
CarData = CarSoup.find(class_="attributeListWrapper-1585172129")
Price = process(CarSoup.find(class_="priceContainer-2538502416"))
Year = process(CarData.find(itemprop="vehicleModelDate"))
Make = process(CarData.find(itemprop="brand"))
Model = process(CarData.find(itemprop="model"))
Trim = process(CarData.find(itemprop="vehicleConfiguration"))
Color = process(CarData.find(itemprop="color"))
Body = process(CarData.find(itemprop="bodyType"))
Doors = process(CarData.find(itemprop="numberOfDoors"))
Seats = process(CarData.find(itemprop="seatingCapacity"))
Drivetrain = process(CarData.find(itemprop="driveWheelConfiguration"))
Transmission = process(CarData.find(itemprop="vehicleTransmission"))
KMs = process(CarData.find(itemprop="mileageFromOdometer"))
# Writing the line to the file
csv_writer.writerow([Location, Price, Make, Model, Year, KMs, Trim, Transmission, Body, Color,
Drivetrain, Doors, Seats, isDealer, isFlagged, Address, URL, Description])
except RuntimeError:
pass
csv_file.close()
# With pagination handled above, the scraper now walks every results page and grabs everything.
``` |
{
"source": "JonShedden/notifiers",
"score": 2
} |
#### File: notifiers/notifiers_cli/core.py
```python
from functools import partial
import click
from notifiers import __version__
from notifiers import get_notifier
from notifiers.core import all_providers
from notifiers.exceptions import NotifierException
from notifiers_cli.utils.callbacks import _notify
from notifiers_cli.utils.callbacks import _resource
from notifiers_cli.utils.callbacks import _resources
from notifiers_cli.utils.callbacks import func_factory
from notifiers_cli.utils.dynamic_click import CORE_COMMANDS
from notifiers_cli.utils.dynamic_click import schema_to_command
def provider_group_factory():
"""Dynamically generate provider groups for all providers, and add all basic command to it"""
for provider in all_providers():
p = get_notifier(provider)
provider_name = p.name
help = f"Options for '{provider_name}'"
group = click.Group(name=provider_name, help=help)
# Notify command
notify = partial(_notify, p=p)
group.add_command(schema_to_command(p, "notify", notify, add_message=True))
# Resources command
resources_callback = partial(_resources, p=p)
resources_cmd = click.Command(
"resources",
callback=resources_callback,
help="Show provider resources list",
)
group.add_command(resources_cmd)
pretty_opt = click.Option(
["--pretty/--not-pretty"], help="Output a pretty version of the JSON"
)
# Add any provider resources
for resource in p.resources:
rsc = getattr(p, resource)
rsrc_callback = partial(_resource, rsc)
rsrc_command = schema_to_command(
rsc, resource, rsrc_callback, add_message=False
)
rsrc_command.params.append(pretty_opt)
group.add_command(rsrc_command)
for name, description in CORE_COMMANDS.items():
callback = func_factory(p, name)
params = [pretty_opt]
command = click.Command(
name,
callback=callback,
help=description.format(provider_name),
params=params,
)
group.add_command(command)
notifiers_cli.add_command(group)
@click.group()
@click.version_option(
version=__version__, prog_name="notifiers", message=("%(prog)s %(version)s")
)
@click.option("--env-prefix", help="Set a custom prefix for env vars usage")
@click.pass_context
def notifiers_cli(ctx, env_prefix):
"""Notifiers CLI operation"""
ctx.obj["env_prefix"] = env_prefix
@notifiers_cli.command()
def providers():
"""Shows all available providers"""
click.echo(", ".join(all_providers()))
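# Illustrative CLI shape built by the code above (exact option names depend on each
# provider's schema and on how the console script is installed; `notifiers` is assumed here):
#   notifiers providers
#   notifiers <provider> notify [options]
#   notifiers <provider> resources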
def entry_point():
"""The entry that CLI is executed from"""
try:
provider_group_factory()
notifiers_cli(obj={})
except NotifierException as e:
click.secho(f"ERROR: {e.message}", bold=True, fg="red")
exit(1)
if __name__ == "__main__":
entry_point()
```
#### File: notifiers/providers/slack.py
```python
from ..core import Provider
from ..core import Response
from ..utils import requests
class Slack(Provider):
"""Send Slack webhook notifications"""
base_url = "https://hooks.slack.com/services/"
site_url = "https://api.slack.com/incoming-webhooks"
name = "slack"
__fields = {
"type": "array",
"title": "Fields are displayed in a table on the message",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"title": {"type": "string", "title": "Required Field Title"},
"value": {
"type": "string",
"title": "Text value of the field. May contain standard message markup and must"
" be escaped as normal. May be multi-line",
},
"short": {
"type": "boolean",
"title": "Optional flag indicating whether the `value` is short enough to be displayed"
" side-by-side with other values",
},
},
"required": ["title"],
"additionalProperties": False,
},
}
__attachments = {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string", "title": "Attachment title"},
"author_name": {
"type": "string",
"title": "Small text used to display the author's name",
},
"author_link": {
"type": "string",
"title": "A valid URL that will hyperlink the author_name text mentioned above. "
"Will only work if author_name is present",
},
"author_icon": {
"type": "string",
"title": "A valid URL that displays a small 16x16px image to the left of the author_name text. "
"Will only work if author_name is present",
},
"title_link": {"type": "string", "title": "Attachment title URL"},
"image_url": {"type": "string", "format": "uri", "title": "Image URL"},
"thumb_url": {
"type": "string",
"format": "uri",
"title": "Thumbnail URL",
},
"footer": {"type": "string", "title": "Footer text"},
"footer_icon": {
"type": "string",
"format": "uri",
"title": "Footer icon URL",
},
"ts": {
"type": ["integer", "string"],
"format": "timestamp",
"title": "Provided timestamp (epoch)",
},
"fallback": {
"type": "string",
"title": "A plain-text summary of the attachment. This text will be used in clients that don't"
" show formatted text (eg. IRC, mobile notifications) and should not contain any markup.",
},
"text": {
"type": "string",
"title": "Optional text that should appear within the attachment",
},
"pretext": {
"type": "string",
"title": "Optional text that should appear above the formatted data",
},
"color": {
"type": "string",
"title": "Can either be one of 'good', 'warning', 'danger', or any hex color code",
},
"fields": __fields,
},
"required": ["fallback"],
"additionalProperties": False,
},
}
_required = {"required": ["webhook_url", "message"]}
_schema = {
"type": "object",
"properties": {
"webhook_url": {
"type": "string",
"format": "uri",
"title": "the webhook URL to use. Register one at https://my.slack.com/services/new/incoming-webhook/",
},
"icon_url": {
"type": "string",
"format": "uri",
"title": "override bot icon with image URL",
},
"icon_emoji": {
"type": "string",
"title": "override bot icon with emoji name.",
},
"username": {"type": "string", "title": "override the displayed bot name"},
"channel": {
"type": "string",
"title": "override default channel or private message",
},
"unfurl_links": {
"type": "boolean",
"title": "avoid automatic attachment creation from URLs",
},
"message": {
"type": "string",
"title": "This is the text that will be posted to the channel",
},
"attachments": __attachments,
},
"additionalProperties": False,
}
def _prepare_data(self, data: dict) -> dict:
text = data.pop("message")
data["text"] = text
if data.get("icon_emoji"):
icon_emoji = data["icon_emoji"]
if not icon_emoji.startswith(":"):
icon_emoji = f":{icon_emoji}"
if not icon_emoji.endswith(":"):
icon_emoji += ":"
data["icon_emoji"] = icon_emoji
return data
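    # Illustrative example of the transformation above: {"message": "hi", "icon_emoji": "robot_face"}
    # becomes {"text": "hi", "icon_emoji": ":robot_face:"} before being posted to the webhook.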
def _send_notification(self, data: dict) -> Response:
url = data.pop("webhook_url")
response, errors = requests.post(url, json=data)
return self.create_response(data, response, errors)
```
#### File: tests/providers/test_simplepush.py
```python
import pytest
from notifiers.exceptions import BadArguments
provider = "simplepush"
class TestSimplePush:
"""SimplePush notifier tests
Note: These tests assume correct environs set for NOTIFIERS_SIMPLEPUSH_KEY
"""
def test_simplepush_metadata(self, provider):
assert provider.metadata == {
"base_url": "https://api.simplepush.io/send",
"site_url": "https://simplepush.io/",
"name": "simplepush",
}
@pytest.mark.parametrize(
"data, message", [({}, "key"), ({"key": "foo"}, "message")]
)
def test_simplepush_missing_required(self, data, message, provider):
data["env_prefix"] = "test"
with pytest.raises(BadArguments) as e:
provider.notify(**data)
assert f"'{message}' is a required property" in e.value.message
@pytest.mark.online
def test_simplepush_sanity(self, provider, test_message):
"""Successful simplepush notification"""
data = {"message": test_message}
rsp = provider.notify(**data)
rsp.raise_on_errors()
```
#### File: notifiers/tests/test_logger.py
```python
import logging
import pytest
from notifiers.exceptions import NoSuchNotifierError
log = logging.getLogger("test_logger")
class TestLogger:
def test_with_error(self, mock_provider, handler, capsys):
hdlr = handler(mock_provider.name, logging.INFO)
log.addHandler(hdlr)
log.info("test")
assert "--- Logging error ---" in capsys.readouterr().err
def test_missing_provider(self, handler):
with pytest.raises(NoSuchNotifierError):
handler("foo", logging.INFO)
def test_valid_logging(self, magic_mock_provider, handler):
hdlr = handler(magic_mock_provider.name, logging.INFO)
log.addHandler(hdlr)
assert repr(hdlr) == "<NotificationHandler magic_mock(INFO)>"
log.info("test")
magic_mock_provider.notify.assert_called()
def test_lower_level_log(self, magic_mock_provider, handler):
hdlr = handler(magic_mock_provider.name, logging.INFO)
log.addHandler(hdlr)
log.debug("test")
magic_mock_provider.notify.assert_not_called()
def test_with_data(self, magic_mock_provider, handler):
data = {"foo": "bar"}
hdlr = handler(magic_mock_provider.name, logging.INFO, data)
log.addHandler(hdlr)
log.info("test")
magic_mock_provider.notify.assert_called_with(
foo="bar", message="test", raise_on_errors=True
)
def test_with_fallback(self, magic_mock_provider, handler):
data = {"env_prefix": "foo"}
hdlr = handler(
"pushover", logging.INFO, data, fallback=magic_mock_provider.name
)
log.addHandler(hdlr)
log.info("test")
magic_mock_provider.notify.assert_called_with(
message="Could not log msg to provider 'pushover'!\nError with sent data: 'user' is a required property"
)
def test_with_fallback_with_defaults(self, magic_mock_provider, handler):
fallback_defaults = {"foo": "bar"}
data = {"env_prefix": "foo"}
hdlr = handler(
"pushover",
logging.INFO,
data,
fallback=magic_mock_provider.name,
fallback_defaults=fallback_defaults,
)
log.addHandler(hdlr)
log.info("test")
magic_mock_provider.notify.assert_called_with(
foo="bar",
message="Could not log msg to provider 'pushover'!\nError with sent data: 'user' is a required property",
)
``` |
{
"source": "jonsheller/crypto-utils",
"score": 3
} |
#### File: crypto-utils/src/sha1.py
```python
import io
import struct
INITIAL_H = (0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0)
def leftrotate(v, n):
return 0xFFFFFFFF & ((0xFFFFFFFF & (v << n)) | (0xFFFFFFFF & (v >> (32 - n))))
def padding(data_length):
pad = [0x80] + [0x0] * ((56 - (data_length + 1) % 64) % 64)
return bytes(pad) + struct.pack(">Q", data_length * 8)
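# For example, padding(3) returns 61 bytes: 0x80, 52 zero bytes, and the 8-byte
# big-endian bit length (24), so a 3-byte message is padded to a full 64-byte block.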
class SHA1:
def __init__(self, h=INITIAL_H, data_len=0):
self._h = h
self._unprocessed = bytes()
self._data_len = data_len
def update(self, data):
inp = io.BytesIO(self._unprocessed + data)
while True:
chunk = inp.read(64)
if len(chunk) < 64:
self._unprocessed = chunk
break
self._h = self._add_chunk(self._h, chunk)
self._data_len += 64
return self
def finalize(self):
padding_data = self._padding()
inp = io.BytesIO(self._unprocessed + padding_data)
h = self._h
while True:
chunk = inp.read(64)
if len(chunk) < 64:
break
h = self._add_chunk(h, chunk)
return struct.pack(">5I", *h)
def _padding(self):
total_len = self._data_len + len(self._unprocessed)
return padding(total_len)
def _add_chunk(self, h, chunk):
w = list(struct.unpack(">16I", chunk))
for i in range(16, 80):
w.append(leftrotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1))
a, b, c, d, e = h
for i in range(80):
if i <= 19:
f = (b & c) | ((~b) & d)
k = 0x5A827999
elif i <= 39:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif i <= 59:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
else:
f = b ^ c ^ d
k = 0xCA62C1D6
t = 0xFFFFFFFF & (leftrotate(a, 5) + f + e + k + w[i])
a, b, c, d, e = t, a, leftrotate(b, 30), c, d
a += h[0]
b += h[1]
c += h[2]
d += h[3]
e += h[4]
return (
a & 0xFFFFFFFF,
b & 0xFFFFFFFF,
c & 0xFFFFFFFF,
d & 0xFFFFFFFF,
e & 0xFFFFFFFF,
)
if __name__ == "__main__":
    # Self-test: the SHA-1 digest of b"abc" is a9993e364706816aba3e25717850c26c9cd0d89d.
    print(SHA1().update(b"abc").finalize().hex())
``` |
{
"source": "JonShelley/cyclecloud-pbspro",
"score": 2
} |
#### File: files/default/submit_hook.py
```python
from collections import OrderedDict
import json
import sys
import traceback
import os
import subprocess
try:
import pbs
except ImportError:
import mockpbs as pbs
def validate_groupid_placement(job):
'''
@return True if the job has a placement group of group_id
Note we will set it to group_id if it isn't specified.
'''
place = repr(job.Resource_List["place"]) if job.Resource_List["place"] else ""
status, mj_place = get_groupid_placement(place)
if mj_place != None:
job.Resource_List["place"] = pbs.place(mj_place)
return status
def get_groupid_placement(place):
debug("Get groupid placement: %s" % place)
placement_grouping = None
for expr in place.split(":"):
placement_grouping = None
if "=" in expr:
key, value = [x.lower().strip() for x in expr.split("=", 1)]
if key == "group":
placement_grouping = value
if placement_grouping is None:
debug("The user didn't specify place=group, setting group=group_id")
placement_grouping = "group_id"
prefix = ":" if place else ""
mj_place = place + prefix + "group=group_id"
return [True, mj_place]
if placement_grouping == "group_id":
return [True, None]
else:
debug("User specified a placement group that is not group_id - skipping.")
return [False, None]
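# Illustrative behaviour of get_groupid_placement (derived from the logic above):
#   get_groupid_placement("")                -> [True, "group=group_id"]
#   get_groupid_placement("scatter")         -> [True, "scatter:group=group_id"]
#   get_groupid_placement("group=group_id")  -> [True, None]
#   get_groupid_placement("group=rack")      -> [False, None]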
def parse_select(job, select_str=None):
# 3:ncpus=2:slot_type=something
select_toks = get_select_expr(job).split(":")
select_N = int(select_toks[0])
return select_N, OrderedDict([e.split("=", 1) for e in select_toks[1:]])
def get_select(job):
debug("Get select: %s" %job.Resource_List["select"])
return job.Resource_List["select"]
def get_select_expr(job):
return repr(get_select(job))
def append_select_expr(job, key, value):
select_expr = get_select_expr(job)
prefix = ":" if select_expr else ""
job.Resource_List["select"] = pbs.select(select_expr + "%s%s=%s" % (prefix, key, value))
def set_select_key(job, key, value):
select_expr = get_select_expr(job)
key_values = select_expr.split(":")
found = False
for i in range(1, len(key_values)):
possible_key, _ = key_values[i].split("=", 1)
if possible_key == key:
found = True
key_values[i] = "%s=%s" % (key, value)
if not found:
append_select_expr(job, key, value)
else:
job.Resource_List["select"] = pbs.select(":".join(key_values))
def placement_hook(hook_config, job):
if not get_select(job):
# pbs 18 seems to treat host booleans as strings, which is causing this very annoying workaround.
#job.Resource_List["ungrouped"] = "true"
if job.Resource_List["slot_type"]:
job.Resource_List["slot_type"] = job.Resource_List["slot_type"]
# Check to see if job is interactive
if job.interactive:
debug("Job is interactive")
return
debug("The job doesn't have a select statement, it doesn't have any placement requirements.")
debug("Place a hold on the job")
job.Hold_Types = pbs.hold_types("so")
return
if validate_groupid_placement(job):
_, select_dict = parse_select(job)
if "ungrouped" not in select_dict:
set_select_key(job, "ungrouped", "false")
slot_type = select_dict.get("slot_type")
if slot_type:
set_select_key(job, "slot_type", slot_type)
debug("Using the grouped slot_type as a resource (%s)." % slot_type)
def debug(msg):
pbs.logmsg(pbs.EVENT_DEBUG3, "cycle_sub_hook - %s" % msg)
def error(msg):
pbs.logmsg(pbs.EVENT_ERROR, "cycle_sub_hook - %s" % msg)
def run_cmd(cmd):
debug("Cmd: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
debug('cmd failed!\n\tstdout="%s"\n\tstderr="%s"' % (stdout, stderr))
return stdout, stderr
# another non-pythonic thing - this can't be behind a __name__ == '__main__',
# as the hook code has to be executable at the load module step.
hook_config = {}
if pbs.hook_config_filename:
with open(pbs.hook_config_filename) as fr:
hook_config.update(json.load(fr))
try:
e = pbs.event()
if e.type == pbs.QUEUEJOB:
j = e.job
placement_hook(hook_config, j)
elif e.type == pbs.PERIODIC:
# Defined paths to PBS commands
qselect_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qselect')
qstat_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qstat')
qalter_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qalter')
qrls_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qrls')
# Get the jobs in an "so" hold state
cmd = [qselect_cmd, "-h", "so"]
stdout, stderr = run_cmd(cmd)
jobs = stdout.split()
debug("Jobs: %s" % jobs)
# Get the job information
if not jobs:
debug("No jobs to evaluate")
e.accept()
# Get Queue defaults information
cmd = [qstat_cmd, "-Qf", "-F", "json"]
stdout, stderr = run_cmd(cmd)
qstat_Qf_json = json.loads(stdout)
# Get job information
cmd = [qstat_cmd, "-f", "-F", "json"] + jobs[:25]
stdout, stderr = run_cmd(cmd)
qstat_json = json.loads(stdout)
jobs = qstat_json["Jobs"]
for key, value in jobs.iteritems():
# Reevaluate each held job
debug("Key: %s\nValue: %s" % (key, value))
j_queue = jobs[key]["queue"]
j_place = jobs[key]["Resource_List"]["place"]
j_select = jobs[key]["Resource_List"]["select"]
# Check the groupid placement
mj_place = "group=group_id"
# Assign default placement from queue. If none, assign group=group_id
if j_queue in qstat_Qf_json["Queue"]:
if "resources_default" in qstat_Qf_json["Queue"][j_queue]:
if "place" in qstat_Qf_json["Queue"][j_queue]["resources_default"]:
mj_place = qstat_Qf_json["Queue"][j_queue]["resources_default"]["place"]
# Qalter the job
cmd = [qalter_cmd]
if mj_place != None:
debug("New place statement: %s" % mj_place)
cmd.append("-lselect=%s" % j_select)
cmd.append("-lplace=%s" % mj_place)
debug("qalter the job")
cmd.append(key)
stdout, stderr = run_cmd(cmd)
# Release the hold on the job
cmd = [qrls_cmd, "-h", "so", key]
debug("Release the hold on the job")
stdout, stderr = run_cmd(cmd)
except SystemExit:
debug("Exited with SystemExit")
except:
error(traceback.format_exc())
raise
``` |
{
"source": "JonShelley/pbs_hooks",
"score": 2
} |
#### File: pbs_hooks/azure/send_app_data_to_log_analytics.py
```python
import pbs
import json
import datetime
import hashlib
import hmac
import base64
import traceback
import sys
import os
import subprocess
# Add standard python module path
sys.path.append('/lib/python2.7/site-packages')
import requests
def debug(msg):
pbs.logmsg(pbs.EVENT_DEBUG3, 'LA debug: %s' % msg)
def error(msg):
pbs.logmsg(pbs.EVENT_ERROR, 'LA error: %s' % msg)
def run_cmd(cmd):
debug("Cmd: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
debug('cmd failed!\n\tstdout="%s"\n\tstderr="%s"' % (stdout, stderr))
return stdout, stderr
def parse_config_file():
"""
Read the config file in json format
"""
    debug('Parse config file')
# Identify the config file and read in the data
config_file = ''
if 'PBS_HOOK_CONFIG_FILE' in os.environ:
config_file = os.environ['PBS_HOOK_CONFIG_FILE']
if not config_file:
error('Config file not found')
msg = 'Config file is %s' % config_file
debug(msg)
try:
with open(config_file, 'r') as desc:
config = json.load(desc)
except IOError:
error('I/O error reading config file')
    debug('hook configuration: %s' % config)
return config
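# Illustrative config file contents (the path is taken from PBS_HOOK_CONFIG_FILE;
# the placeholder values below are assumptions, not real credentials):
#   {"customer_id": "<log-analytics-workspace-id>", "shared_key": "<primary-or-secondary-key>"}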
# Build the API signature
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
debug("Entering build_signature")
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash).encode('utf-8')
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest())
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
# Build and send a request to the POST API
def post_data(customer_id, shared_key, body, log_type):
debug("Entering post_data")
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
debug("Body: %s" % body)
debug("Body type: %s" % type(body))
response = requests.post(uri,data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
debug('Accepted')
else:
debug("Rejected - Response code: {}".format(response.status_code))
# Read in the config file
cfg = parse_config_file()
# Update the customer ID to your Log Analytics workspace ID
customer_id = cfg["customer_id"]
# For the shared key, use either the primary or the secondary Connected Sources client authentication key
shared_key = cfg["shared_key"]
# Read in the job env
e = pbs.event()
j = e.job
# Read the filename to upload from the environment
try:
if not j.in_ms_mom():
debug("Not on the ms node")
e.accept()
if "PBS_AZURE_LA_JSON_FILE_DIR" in j.Variable_List and "PBS_AZURE_LA_LOG_TYPE" in j.Variable_List:
debug("Proceed to add data to log analytics")
log_type = j.Variable_List["PBS_AZURE_LA_LOG_TYPE"]
debug("Log type: %s" % log_type)
data_file_dir = j.Variable_List["PBS_AZURE_LA_JSON_FILE_DIR"]
data_filename = data_file_dir + os.sep + j.id + ".json"
debug("Data filename: %s" % data_filename)
# Get VM Instance
cmd = [ "curl", "-s", "-H", "Metadata:true", "http://169.254.169.254/metadata/instance?api-version=2017-12-01"]
stdout, stderr = run_cmd(cmd)
json_vm = json.loads(stdout)
vm_size=json_vm["compute"]["vmSize"]
debug("json_vm vm_inst: %s" % vm_size)
if os.path.isfile(data_filename):
with open(data_filename) as data_fp:
json_data = json.load(data_fp)
json_data["vmSize"] = vm_size
json_str = json.dumps(json_data)
debug("data file contents: %s" % json_str)
post_data(customer_id, shared_key, json_str, log_type)
debug("Completed sending data to log anaylitics")
else:
debug("Data file: %s was not found" % data_filename)
except SystemExit:
debug("Exited with SystemExit")
except:
debug("Failed to post data to log analytics")
error(traceback.format_exc())
raise
``` |
{
"source": "jonshern/httpsecuritychecker",
"score": 4
} |
#### File: jonshern/httpsecuritychecker/headercheck.py
```python
import argparse  # used to parse command line arguments
import urllib3
class HeaderExpectedItem:
def __init__(self, header, expected, message, risk):
self.header = header
self.expected = expected
self.message = message
self.risk = risk
def printitem(self):
print(self.header)
print(self.expected)
print(self.message)
print(self.risk)
class HeaderResultItem:
def __init__(self, url, rawheader, headerexpecteditems):
self.url = url
self.rawheader = rawheader
self.headerexpecteditems = headerexpecteditems
def valid_url(url):
print('Validating Url: ' + url )
return True
def scan_url(url, expectedHeaders):
    print('Scanning Url: ' + url)
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    print('---Response---')
    print(response.status)
    print(response.headers)
    # Report which of the expected security headers are present or missing.
    for expected in expectedHeaders:
        if expected.header in response.headers:
            print('Found header: ' + expected.header + ' = ' + response.headers[expected.header])
        else:
            print('Missing header: ' + expected.header)
def initialize():
"""Used the guidance provided here https://blog.appcanary.com/2017/http-security-headers.html to create this list"""
headers = []
headers.append(HeaderExpectedItem('X-Frame-Options', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('X-XSS-Protection', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('Content-Security-Policy', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('Strict-Transport-Security', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('Public-Key-Pins', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('X-Frame-Options', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('X-Content-Type-Options', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('Referrer-Policy', 'testing2', 'testing3', 'testing4'))
headers.append(HeaderExpectedItem('Set-Cookie', 'testing2', 'testing3', 'testing4'))
return headers
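# Illustrative example of a fully populated expectation (the expected value, message and
# risk strings below are assumptions for demonstration, not project data):
#   HeaderExpectedItem('X-Content-Type-Options', 'nosniff',
#                      'Stops browsers from MIME-sniffing the response', 'Medium')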
def main():
"""Application entry point"""
parser = argparse.ArgumentParser(description='Http Header Security Scanner')
parser.add_argument('-s', '--scan', action='store_true', help='Scans the specified url')
parser.add_argument('-u', '--URL', help='The Url to be checked')
args = parser.parse_args()
if args.scan:
expectedheaders = initialize()
valid_url(args.URL)
scan_url(args.URL, expectedheaders)
# for item in expectedheaders:
# print(item.printitem())
else:
print('Not going to scan')
if __name__ == '__main__':
main()
``` |
{
"source": "jonshern/photoorganizer",
"score": 3
} |
#### File: jonshern/photoorganizer/app.py
```python
import exifread
import json
import os
import csv
import sys
import argparse
from dateutil import parser
from datetime import datetime
DATE_TAKEN_KEY = 'Image DateTime'
DATE_ORIGINAL = 'EXIF DateTimeOriginal'
DATE_DIGITIZED = 'EXIF DateTimeDigitized'
#constants
YEARS = ['2000', '2001', '2002', '2003', '2004', '2005', '2006',
         '2007', '2008', '2009', '2010', '2011',
         '2012', '2013', '2014', '2015', '2016', '2017']
MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class IndexItem:
fullpath = ''
tags = {}
date1 = ''
date2 = ''
date3 = ''
datetaken = ''
def __init__(self, fullpath, filename, path):
self.fullpath = fullpath
self.filename = filename
self.path = path
def process_file(self):
file = open(self.fullpath, 'rb')
self.tags = exifread.process_file(file, details=False)
file.close()
def process_exif(self):
#Format 2006:07:13 23:50:10
if DATE_TAKEN_KEY in self.tags:
self.date1 = self.tags[DATE_TAKEN_KEY]
if DATE_ORIGINAL in self.tags:
self.date2 = self.tags[DATE_ORIGINAL]
if DATE_DIGITIZED in self.tags:
self.date3 = self.tags[DATE_DIGITIZED]
if self.date1:
self.datetaken = parse_date(self.date1)
if self.date2:
self.datetaken = parse_date(self.date2)
#considering adding file datetime / modified time also.
def parse_date(string_datetime):
print 'Date Being Converted: ' + str(string_datetime)
# return parser.parse(str(string_datetime))
# converted = timestring.Date(str(string_datetime))
    try:
        converted = datetime.strptime(str(string_datetime), '%Y:%m:%d %H:%M:%S')
    except ValueError:
        # Fall back to a sentinel date when the EXIF timestamp cannot be parsed.
        converted = datetime(2000, 1, 1, 1, 1, 1)
print 'Converted Date: ' + str(converted)
return converted
def path_creator(destpath, should_create_folders):
#get a unique list of years and months
# yearset = set()
# for item in items:
# if item.year not in yearset:
# yearset.add(item.year)
if should_create_folders:
for year in YEARS:
for month in MONTHS:
filepath = destpath + os.sep + year + os.sep + month
                print 'creating folder ' + filepath
if not os.path.exists(filepath):
os.makedirs(filepath)
#create unknown folder
filepath = destpath + os.sep + 'Unknown'
os.makedirs(filepath)
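# Resulting layout (illustrative): <destpath>/2000/Jan ... <destpath>/2017/Dec,
# plus an <destpath>/Unknown folder.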
def process_single_item(filename):
item = IndexItem(filename, '', '')
item.process_file()
item.process_exif()
def write_results_to_file(items):
with open('fileindex.csv', 'wb') as csvfile:
fieldnames = ['filepath',
'filename', DATE_TAKEN_KEY, DATE_ORIGINAL, DATE_DIGITIZED, 'Date Taken']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for item in items:
            writer.writerow(
                {'filepath': item.fullpath, 'filename': item.filename,
                 DATE_TAKEN_KEY: item.date1, DATE_ORIGINAL: item.date2,
                 DATE_DIGITIZED: item.date3, 'Date Taken': item.datetaken})
def process_images(rootdir):
items = []
for subdir, dirs, files in os.walk(rootdir):
for file in files:
# print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".jpg") or filepath.endswith(".JPG"):
item = IndexItem(filepath, file, subdir)
item.process_file()
item.process_exif()
items.append(item)
print "Processed " + str(len(items)) + " items"
write_results_to_file(items)
def main():
parser = argparse.ArgumentParser(
description='Organize Pictures')
parser.add_argument(
'-f', '--foldercreate', help='Create the folder structure', action='store_true')
    parser.add_argument('-d', '--destpath', help='Destination root path in which to create the folder structure', default='nopath')
parser.add_argument('-p', '--processimages', help='Process some images', action='store_true')
parser.add_argument('-i', '--imagepath', help='Path of images to be process', default='nopath')
args = vars(parser.parse_args())
if args['foldercreate']:
if args['destpath'] != 'nopath':
print 'create a folder'
path_creator(args['destpath'], True)
else:
            print 'Path missing - Use the param -d to set a path'
if args['processimages']:
if args['imagepath'] != 'nopath':
print 'Process some images'
process_images(args['imagepath'])
else:
            print 'Path missing - Use the param -i to set a path'
if __name__ == '__main__':
main()
``` |
{
"source": "jonshern/raspberrypi-indoorhealthmonitor",
"score": 3
} |
#### File: jonshern/raspberrypi-indoorhealthmonitor/dustmonitor.py
```python
import time
import grovepi
import atexit
import logging
import json
import sys
import datetime
from sensorvalue import SensorValue
def main():
location = 'Jons Office'
readdustsensor(location)
def writetofile(data):
with open('dustdata.csv', 'ab') as f:
f.write(data.writecsv() + '\n')
def readdustsensor(location):
atexit.register(grovepi.dust_sensor_dis)
print("Reading from the dust sensor")
grovepi.dust_sensor_en()
while True:
try:
[new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
if new_val:
print(lowpulseoccupancy)
sensordata = SensorValue(lowpulseoccupancy, 'none', 'Dust', location)
writetofile(sensordata)
time.sleep(5)
except IOError:
print ("Error")
if __name__ == '__main__':
main()
```
#### File: raspberrypi-indoorhealthmonitor/lib/iotconfig.py
```python
import yaml
from sensorconfig import SensorConfig
class IOTConfig(object):
location = ''
logfile = ''
alerts = ''
pollinginterval = ''
displaymode = ''
configuredsensors = dict()
snsarn = ''
host = ""
rootCAPath = ""
cognitoIdentityPoolID = ""
def __init__(self):
self.location = ""
self.logfile = ""
self.alertsenabled = ""
self.pollinginterval = ""
self.displaymode = ""
self.configuredsensors = dict()
self.snsarn = ""
self.host = ""
self.rootCAPath = ""
self.cognitoIdentityPoolID = ""
# def __init__(self, location, logfile, alertsenabled, pollinginterval, displaymode, configuredsensors, snsarn):
# self.location = location
# self.logfile = logfile
# self.alerts = alertsenabled
# self.pollinginterval = pollinginterval
# self.displaymode = displaymode
# self.configuredsensors = configuredsensors
# self.snsarn = snsarn
def initializefromdictionary(self, settings):
self.location = settings["core"]["location"]
self.logfile = settings["core"]["logfile"]
self.alertsenabled = settings["core"]["alertsenabled"]
self.pollinginterval = settings["core"]["pollinginterval"]
self.displaymode = settings["core"]["displaymode"]
self.host = settings["aws"]["host"]
self.rootCAPath = settings["aws"]["rootCAPath"]
self.cognitoIdentityPoolID = settings["aws"]["cognitoIdentityPoolID"]
for item in settings["core"]["configuredsensors"]:
sensor = SensorConfig(item['name'], item['port'])
self.configuredsensors[item['name']] = sensor
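    # Illustrative YAML layout implied by the keys read above (all values are placeholders):
    #   core:
    #     location: "Office"
    #     logfile: "sensorlogger.log"
    #     alertsenabled: false
    #     pollinginterval: 60
    #     displaymode: "console"
    #     configuredsensors:
    #       - {name: "Dust", port: 2}
    #   aws:
    #     host: "<iot-endpoint>.amazonaws.com"
    #     rootCAPath: "root-ca.pem"
    #     cognitoIdentityPoolID: "<region>:<pool-id>"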
def initializefromfile(self, filename):
with open(filename, "r") as f:
            settings = yaml.safe_load(f)
self.initializefromdictionary(settings)
def isconfigvalid(self, supportedsensors):
for sensor in self.configuredsensors:
if sensor not in supportedsensors:
print "The " + sensor + "sensor is not supported"
return False
return True
```
#### File: raspberrypi-indoorhealthmonitor/testing/logtest.py
```python
import time
import logging
import json
import sys
def main():
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# create a file handler
handler = logging.FileHandler('sensorlogger.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Start logging sensor readings')
if __name__ == '__main__':
main()
``` |
{
"source": "jonsim/dirdiff",
"score": 3
} |
#### File: dirdiff/pyutils/console.py
```python
import fcntl
import termios
import struct
_CONSOLE_CACHE = None
def size(use_cache=True):
"""Derives the current console's size.
NB: Taken from http://stackoverflow.com/a/3010495
Returns:
(width, height) of the current console.
"""
global _CONSOLE_CACHE
if not use_cache or not _CONSOLE_CACHE:
try:
h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(1,
termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
except IOError:
w, h = (80, 40)
_CONSOLE_CACHE = (w, h)
return _CONSOLE_CACHE
``` |
{
"source": "jonsim/robin-project",
"score": 3
} |
#### File: graphing/tests/colour_converter.py
```python
from collections import namedtuple
from numpy import zeros
from scipy.cluster.vq import kmeans2
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.font_manager import FontProperties
import matplotlib
import matplotlib.pyplot as plt
import Image, ImageDraw
import sys
import struct
#--------------------------------------------------------------#
#--------------------- CLASS DEFINITIONS ----------------------#
#--------------------------------------------------------------#
Point = namedtuple('Point', ['x', 'y', 'z'])
#--------------------------------------------------------------#
#-------------------- FUNCTION DEFINITIONS --------------------#
#--------------------------------------------------------------#
# H' takes values between 0-1530
# H' = 0- 255 RGB= 255, 0-255, 0
# H' = 255- 510 RGB= 255-0, 255, 0
# H' = 510- 765 RGB= 0, 255, 0-255
# H' = 765-1020 RGB= 0, 255-0, 255
# H' = 1020-1275 RGB= 0-255, 0, 255
# H' = 1275-1530 RGB= 255, 0, 255-0
def convert_rgb_to_depth (rgb):
r = rgb[0]
g = rgb[1]
b = rgb[2]
v = 0
if r == 0 and g == 0 and b == 0:
return 0
if r == 255:
if b == 0:
v = g
else:
v = 1530 - b
elif g == 255:
if b == 0:
v = 510 - r
else:
v = b + 510
elif b == 255:
if r == 0:
v = 1020 - g
else:
v = r + 1020
v = (v * (4800/1530))
if v < 1:
return 0
else:
return v + 400
def convert_depth_to_rgb (v):
# check for zero
if v <= 400:
return (0, 0, 0)
v = int((v - 400) / (4800/1530))
if v >= 1530:
return (1.0, 0.0, 0.0)
"""
if v < 255:
return (255, v, 0)
elif v < 510:
return (510-v, 255, 0)
elif v < 765:
return (0, 255, v-510)
elif v < 1020:
return (0, 1020-v, 255)
elif v < 1275:
return (v-1020, 0, 255)
else:
return (255, 0, 1530-v)"""
if v < 255:
return (1.0, v/255.0, 0.0)
elif v < 510:
return ((510-v)/255.0, 1.0, 0.0)
elif v < 765:
return (0.0, 1.0, (v-510)/255.0)
elif v < 1020:
return (0.0, (1020-v)/255.0, 1.0)
elif v < 1275:
return ((v-1020)/255.0, 0.0, 1.0)
else:
return (1.0, 0.0, (1530-v)/255.0)
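# Illustrative endpoints of the mapping above: depths at or below 400 mm map to black
# (0, 0, 0), and depths of roughly 5 m and beyond saturate to pure red (1.0, 0.0, 0.0).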
def convert_depth_to_gs (v):
gs = int(v / (10000/255))
return (gs, gs, gs)
# loads a given image into a 2d array
def load_image (filename):
# make the data structure and open the image
data2d = zeros((640, 480))
input_img = Image.open(filename)
# loop through the input image, saving the values
for y in range(0, 480):
for x in range(0, 640):
data2d[x][y] = convert_rgb_to_depth(input_img.getpixel((x, y)))
# return
return data2d
# saves a given 2d array of data into an image. the image may be either 'grayscale' or 'colour'
def save_image (data2d, filename, image_type='grayscale'):
# make the image
image = Image.new("RGB", (640, 480), (0,255,0))
draw = ImageDraw.Draw(image)
# draw the image
if image_type == 'color':
for y in range (0, 480):
for x in range(0, 640):
draw.point((x, y), fill=convert_depth_to_rgb(data2d[x][y]))
else:
for y in range (0, 480):
for x in range(0, 640):
draw.point((x, y), fill=convert_depth_to_gs(data2d[x][y]))
# save the image
image.save(filename)
def make_histogram (data2d, side='both'):
# setup the ranges to accomodate the side we want to look at.
xstart = 0
xend = 640
if side == 'left':
xend = 320
elif side == 'right':
xstart = 320
# make the data structure
hist = zeros(10000)
# loop through the input and build the histogram
for y in range(0, 480):
for x in range(xstart, xend):
hist[data2d[x][y]] += 1
# produce the histogram colour spectrum
colours = []
for i in range(0, 10000):
colours.append(convert_depth_to_rgb(i))
# print the histogram's stats
hist_range1 = 0
for i in range(600, 900):
hist_range1 += hist[i]
hist_range2 = 0
for i in range(900, 1200):
hist_range2 += hist[i]
print "hist[0] =", hist[0]
print "hist[600-899] =", hist_range1
print "hist[900-1199] =", hist_range2
# tidy up the histogram
histy = hist.tolist()
histx = range(0, 10000)
histy[0] = 0
# adjust the ranges
for i in range(0, len(histx)):
histx[i] /= 1000.0
for i in range(0, len(histy)):
histy[i] /= 1000.0
# return
return (histx, histy, colours)
def plot_histogram (plot_axes, hist):
plot_axes.bar(hist[0], hist[1], width=1.0/1000.0, color=hist[2], edgecolor=hist[2])
# ax.plot(histx, histy, 'b-')
def setup_plot ():
# create the plot
fig = plt.figure()
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1)
ax = fig.add_subplot(111)
# remove the top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# setup the font
font = {'family': 'Quattrocento',
'weight': 'bold',
'size' : 16}
matplotlib.rc('font', **font)
# return
return ax
def setup_plot_labels (ax):
# setup the labels
ax.set_xlabel('Depth (m)')
ax.set_ylabel('Frequency (000\'s of pixels)')
#ax.set_title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
ax.set_xlim(0, 5)
ax.set_ylim(0, 4)
ax.set_yticks(range(0, 4+1))
"""
def plot_2d_data (data2d, colours=None, depth_scale=True):
# Initialise, calculating the resolution and creating the image (bg=green)
res_step = data2d[1].x - data2d[0].x
xres = 640 / res_step
yres = 480 / res_step
image = Image.new("RGB", (xres, yres), (0,255,0))
draw = ImageDraw.Draw(image)
# calculate max depth
max_depth = 0
for data in data2d:
if (data.z > max_depth):
max_depth = data.z
# Depth image
depth_scaling = 255.0 / max_depth
for i in range(len(data2d)):
color = int(data2d[i].z * depth_scaling)
if colours == None:
draw.point((data2d[i].x/res_step, data2d[i].y/res_step), fill=(color, color, color))
else:
draw.point((data2d[i].x/res_step, data2d[i].y/res_step), fill=colours[i])
# Scale
if (depth_scale and xres == 640):
scale_offset_x = 10
scale_offset_y = 10
scale_height = 200
scale_width = 65
gradient_scaling = 255.0 / (scale_height-30)
draw.rectangle([scale_offset_x, scale_offset_y, scale_offset_x+scale_width, scale_offset_y+scale_height], fill=(255,255,255), outline=(0,0,0))
for y in range(scale_height-30):
for x in range(20):
gradient_shade = int(y * gradient_scaling)
draw.point((scale_offset_x+5+x, scale_offset_y+20+y), fill=(gradient_shade, gradient_shade, gradient_shade))
title_string = "DEPTH (mm)"
title_string_s = draw.textsize(title_string)
title_string_offset_x = scale_width / 2 - title_string_s[0] / 2
title_string_offset_x = 4 # Comment this out for a more accurate x offset (at the risk of slight-non-centering
title_string_offset_y = 2
draw.text((scale_offset_x+title_string_offset_x, scale_offset_y+title_string_offset_y), title_string, fill=(0,0,0))
draw.text((scale_offset_x+25, scale_offset_y+15), "- 0", fill=(0,0,0))
draw.text((scale_offset_x+25, scale_offset_y+scale_height-16), "- " + str(max_depth), fill=(0,0,0))
# show
image.show()"""
#--------------------------------------------------------------#
#------------------------ MAIN FUNCTION -----------------------#
#--------------------------------------------------------------#
# get command line args
filename = ""
side = 'both'
mode = 'resave'
if len(sys.argv) < 2:
print "ERROR: the program must be called as follows:\n ./diagram_maker.py filename.png ['left' | 'right' | 'both']"
sys.exit()
elif len(sys.argv) > 2:
side = sys.argv[2]
filename = sys.argv[1]
if mode == 'histogram':
# setup
ax = setup_plot()
# do stuff
print "Loading image..."
image = load_image(filename)
print "Making histogram..."
histogram = make_histogram(image, side=side)
print "Plotting histogram..."
plot_histogram(ax, histogram)
setup_plot_labels(ax)
print "Outputting histogram..."
# show stuff
#plt.show()
# save stuff
output_filename = filename.split('.')[0] + "_hist_" + side
plt.savefig(output_filename + ".svg")
plt.savefig(output_filename + ".png")
else:
filename_split = filename.split('.')
outfilename = ".".join(filename_split[0:-1]) + "_gs.png"
print "Loading image (" + filename + ") ..."
image = load_image(filename)
print "Saving image (" + outfilename + ")..."
save_image(image, outfilename, "grayscale")
``` |
{
"source": "jonsim/tiny-backup",
"score": 2
} |
#### File: jonsim/tiny-backup/backup.py
```python
import argparse # ArgumentParser
import ConfigParser # SafeConfigParser
import os # makedirs
import os.path # exists, isfile, isdir, expanduser
import subprocess # check_output
import shutil # rmtree
import sys # stdout
import tempfile # mkdtemp
__version__ = '1.0.0'
DEST_KEY = 'dest'
SRC_KEY = 'src'
ARCHIVE_KEY = 'archive'
COMPRESS_KEY = 'compress'
ENCRYPT_KEY = 'encrypt'
DEFAULTS = {
ARCHIVE_KEY: 'no',
COMPRESS_KEY: 'no',
ENCRYPT_KEY: 'no',
}
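# Illustrative ~/.backup_config contents (section values are placeholders; 'src' defaults
# to the section name and relative paths are resolved against the config file's directory):
#   [photos]
#   src = /home/user/photos
#   dest = /mnt/backup
#   archive = yes
#   compress = yes
#   encrypt = no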
_TEMPDIR = None
def make_tempdir():
"""
Retrieves a temporary directory, creating it if necessary.
Returns:
string path to a temporary directory.
"""
global _TEMPDIR
if not _TEMPDIR:
_TEMPDIR = tempfile.mkdtemp()
return _TEMPDIR
def archive_path(dest, src, excludes=None, verbose=False):
"""
Packs a file or directory into a .tar archive.
Args:
dest: string path for the destination file for the archive. Must
end with '.tar'.
src: string path for the source file or directory for the
archive.
excludes: list of strings of paths to exclude from the archive. May be
None or an empty list to include all files from source. Defaults to
None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'tar' command fails for any reason.
"""
assert dest and dest.endswith('.tar') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.exists(src)
cmd = ['tar']
cmd.append('--create')
if verbose:
print '\narchive_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
if excludes:
for exclude in excludes:
cmd.append('--exclude=%s' % (exclude))
cmd.append('--file')
cmd.append(dest)
cmd.append('--directory')
cmd.append(os.path.dirname(src))
cmd.append(os.path.basename(src))
sys.stdout.write(subprocess.check_output(cmd))
def unarchive_path(dest, src, verbose=False):
"""
Extracts a .tar archive into a directory.
Args:
dest: string path for the destination *directory* into which the
archive contents will be extracted. NB: This is the directory to
extract into, not the final path for the contents of the archive.
src: string path for the source archive file. Must end with
'.tar'.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'tar' command fails for any reason.
"""
assert dest and os.path.isdir(dest)
assert src and src.endswith('.tar') and os.path.isfile(src)
cmd = ['tar']
cmd.append('--extract')
if verbose:
print '\nunarchive_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
cmd.append('--file')
cmd.append(src)
cmd.append('--directory')
cmd.append(dest)
sys.stdout.write(subprocess.check_output(cmd))
def compress_path(dest, src, verbose=False):
"""
Compresses a file into an xz-compressed file.
Args:
dest: string path for the destination file. Must end with '.xz'.
src: string path for the source file to compress.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'xz' command fails for any reason.
"""
assert dest and dest.endswith('.xz') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.isfile(src)
cmd = ['xz']
if verbose:
print '\ncompress_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--keep')
cmd.append('--stdout')
cmd.append('--compress')
cmd.append(src)
try:
dest_file = open(dest, 'w')
subprocess.check_call(cmd, stdout=dest_file)
finally:
dest_file.close()
def uncompress_path(dest, src, verbose=False):
"""
Uncompresses an xz-compressed file into it's original format.
Args:
dest: string path for the destination uncompressed file.
src: string path for the source compressed file. Must end with
'.xz'.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'xz' command fails for any reason.
"""
assert dest and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and src.endswith('.xz') and os.path.isfile(src)
cmd = ['xz']
if verbose:
print '\nuncompress_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--keep')
cmd.append('--stdout')
cmd.append('--decompress')
cmd.append(src)
try:
dest_file = open(dest, 'w')
subprocess.check_call(cmd, stdout=dest_file)
finally:
dest_file.close()
def encrypt_path(dest, src, homedir=None, verbose=False):
"""
Encrypts a file into a gpg-encrypted file.
Args:
dest: string path for the destination file. Must end with '.gpg'.
src: string path for the source file to encrypt.
homedir: string path for the location of the GPG home directory to
use. May be None to use the default location for the machine's GPG
implementation (typically ~/gnupg). Defaults to None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'gpg' command fails for any reason.
"""
assert dest and dest.endswith('.gpg') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.isfile(src)
cmd = ['gpg']
if verbose:
print '\nencrypt_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
if homedir:
cmd.append('--homedir')
cmd.append(homedir)
cmd.append('--default-recipient-self')
cmd.append('--output')
cmd.append(dest)
cmd.append('--encrypt')
cmd.append(src)
sys.stdout.write(subprocess.check_output(cmd))
def unencrypt_path(dest, src, homedir=None, verbose=False):
"""
Decrypts a gpg-encrypted file into its original format.
Args:
dest: string path for the destination decrypted file.
src: string path for the source file to decrypt. Must end with
'.gpg'.
homedir: string path for the location of the GPG home directory to
use. May be None to use the default location for the machine's GPG
implementation (typically ~/gnupg). Defaults to None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'gpg' command fails for any reason.
"""
assert dest and not os.path.isdir(dest)and \
os.path.isdir(os.path.dirname(dest))
assert src and src.endswith('.gpg') and os.path.isfile(src)
cmd = ['gpg']
if verbose:
print '\nunencrypt_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
if homedir:
cmd.append('--homedir')
cmd.append(homedir)
cmd.append('--default-recipient-self')
cmd.append('--output')
cmd.append(dest)
cmd.append('--decrypt')
cmd.append(src)
sys.stdout.write(subprocess.check_output(cmd))
def copy_path(dest, src, excludes=None, verbose=False):
"""
Copies a path to another location.
Args:
dest: string path for the destination copied file or directory.
src: string path for the source file or directory to copy.
excludes: list of strings of paths to exclude from the copy. May be
None or an empty list to include all files from source. Defaults to
None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'rsync' command fails for any reason.
"""
assert dest and os.path.isdir(os.path.dirname(dest))
assert src and os.path.exists(src)
cmd = ['rsync']
if verbose:
print '\ncopy_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--archive') # Preserve metadata (-a)
cmd.append('--delete') # Delete extra files
cmd.append('--compress') # Compress xfer data (-z)
cmd.append('--protect-args') # Preserve whitespace (-s)
if excludes:
for exclude in excludes:
cmd.append('--filter=exclude_%s' % (exclude))
cmd.append(src)
cmd.append(dest)
sys.stdout.write(subprocess.check_output(cmd))
def resolve_relative_path(path, config_path):
"""
Resolves relative paths into absolute paths relative to the config file.
Args:
path: string (potentially) relative path to resolve.
config_path: string path to the config file to resolve relative to.
Returns:
string absolute path (unaltered if 'path' was already absolute).
"""
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(config_path), path)
def get_out_filename(dirname, src, extension):
"""
Forms a filename from a dir-name, file-name and file-extension.
Args:
dirname: string path to directory to use.
src: string path to file whose basename to use.
extension: string file extension (without preceding '.') to use.
Returns:
string path formed from the given components.
"""
return os.path.join(dirname, '%s.%s' % (os.path.basename(src), extension))
def process_section(config, section, config_path, verbose=False, gpg_home=None):
"""
Process a config file section and perform the actions it describes.
Args:
config: ConfigParser to read the section from.
section: string section name to read from the ConfigParser.
config_path: string path to the read config file.
verbose: boolean, True to output verbose status to stdout.
Defaults to False.
gpg_home: string path for the location of the GPG home directory
to use. May be None to use the default location for the machine's
GPG implementation (typically ~/gnupg). Defaults to None.
Raises:
OSError: if the source path given in the section does not exist.
"""
# Extract fields from the section (and write-back any missing).
if not config.has_option(section, SRC_KEY):
config.set(section, SRC_KEY, section)
pipeline_src = resolve_relative_path(config.get(section, SRC_KEY), config_path)
pipeline_dest = resolve_relative_path(config.get(section, DEST_KEY), config_path)
archive = config.getboolean(section, ARCHIVE_KEY)
compress = config.getboolean(section, COMPRESS_KEY)
encrypt = config.getboolean(section, ENCRYPT_KEY)
# Validate args.
if not os.path.exists(pipeline_src):
raise OSError("Source path %s does not exist." % (pipeline_src))
if not os.path.exists(pipeline_dest):
os.makedirs(pipeline_dest)
if (compress or encrypt) and os.path.isdir(pipeline_src):
archive = True
# Perform backup pipeline.
stage_src = pipeline_src
if archive or compress or encrypt:
tempdir = make_tempdir()
if archive:
stage_dest = get_out_filename(tempdir, stage_src, 'tar')
archive_path(stage_dest, stage_src, verbose=verbose)
stage_src = stage_dest
if compress:
stage_dest = get_out_filename(tempdir, stage_src, 'xz')
compress_path(stage_dest, stage_src, verbose=verbose)
stage_src = stage_dest
if encrypt:
stage_dest = get_out_filename(tempdir, stage_src, 'gpg')
encrypt_path(stage_dest, stage_src, verbose=verbose, homedir=gpg_home)
stage_src = stage_dest
# Perform copy.
copy_path(pipeline_dest, stage_src, verbose=verbose)
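    # For example (illustrative), a directory source named 'photos' with archive, compress and
    # encrypt all enabled is copied into pipeline_dest as 'photos.tar.xz.gpg'.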
def main(argv=None):
"""Main method.
Args:
argv: list of strings to pass through to the ArgumentParser. If None
will pass through sys.argv instead. Defaults to None.
Raises:
OSError: if the config file path given does not exist.
"""
global _TEMPDIR
# Handle command line.
parser = argparse.ArgumentParser(description='A micro backup manager, '
'designed to be lightly configurable, '
'simple and unobtrusive. Useful for '
'maintaining lightweight backups.')
parser.add_argument('--config', metavar='PATH',
type=str, default='~/.backup_config',
help='The location of the backup config file to read. '
'Defaults to %(default)s')
parser.add_argument('--gpg-home', metavar='PATH',
type=str, default=None,
help='The location of the GPG home directory to use if '
'encrypting data. Defaults to that of the machine\'s '
'GPG implementation (typically ~/gnupg).')
parser.add_argument('--restore',
action='store_true', default=False,
help='Reverse the backup process to restore the local '
'file system from the backups at the given locations.')
parser.add_argument('--retention', metavar='N',
type=int, default=1,
help='The number of copies of the backup to retain. '
'When this is exceeded, the oldest will be '
'removed. Defaults to %(default)s.')
parser.add_argument('--verbose',
action='store_true', default=False,
help='Print additional output.')
parser.add_argument('--version',
action='version', version='%(prog)s ' + __version__)
args = parser.parse_args(args=argv)
# Process command line.
if args.restore:
raise NotImplementedError('Restore functionality is not implemented.')
if args.retention != 1:
raise NotImplementedError('Retention functionality is not implemented')
# Parse the config file.
args.config = os.path.expanduser(args.config)
if not os.path.isfile(args.config):
raise OSError('Config file "%s" does not exist.' % (args.config))
config = ConfigParser.SafeConfigParser(DEFAULTS)
with open(args.config) as config_file:
config.readfp(config_file)
# Perform the backup.
try:
for section in config.sections():
process_section(config, section, args.config, verbose=args.verbose,
gpg_home=args.gpg_home)
finally:
if _TEMPDIR:
shutil.rmtree(_TEMPDIR)
_TEMPDIR = None
# Entry point.
if __name__ == "__main__":
main()
``` |
{
"source": "jonslo/aws-greengrass-stream-manager-sdk-python",
"score": 2
} |
#### File: aws-greengrass-stream-manager-sdk-python/stream_manager/utilinternal.py
```python
import asyncio
import json
import re
import uuid
from typing import Sequence
from .data import ResponseStatusCode
from .exceptions import (
InvalidRequestException,
MessageStoreReadErrorException,
NotEnoughMessagesException,
RequestPayloadTooLargeException,
ResourceNotFoundException,
ResponsePayloadTooLargeException,
ServerOutOfMemoryException,
ServerTimeoutException,
StreamManagerException,
UnauthorizedException,
UnknownFailureException,
UnknownOperationException,
UpdateFailedException,
UpdateNotAllowedException,
)
class UtilInternal:
__ENDIAN = "big"
_MAX_PACKET_SIZE = 1 << 30
@staticmethod
def sync(coro, loop: asyncio.AbstractEventLoop):
if asyncio.iscoroutine(coro):
# Run async function in the loop and return the value or raise the exception
return asyncio.run_coroutine_threadsafe(coro, loop=loop).result()
return coro
"""
Delete keys with the value ``None`` in a dictionary, recursively.
This alters the input so you may wish to ``copy`` the dict first.
"""
@staticmethod
def del_empty_arrays(d):
for key, value in list(d.items()):
if isinstance(value, list) and len(value) == 0:
del d[key]
elif isinstance(value, dict):
UtilInternal.del_empty_arrays(value)
return d
@staticmethod
def serialize_to_json_with_empty_array_as_null(data):
s = json.dumps(UtilInternal.del_empty_arrays(data.as_dict()))
return s.encode()
@staticmethod
def int_to_bytes(i, length=4):
return int.to_bytes(i, length=length, byteorder=UtilInternal.__ENDIAN, signed=True)
@staticmethod
def int_from_bytes(b):
return int.from_bytes(b, byteorder=UtilInternal.__ENDIAN, signed=True)
@staticmethod
def encode_frame(frame) -> Sequence[bytes]:
if len(frame.payload) + 1 > UtilInternal._MAX_PACKET_SIZE:
raise RequestPayloadTooLargeException()
return [
bytes(
[
*UtilInternal.int_to_bytes(len(frame.payload) + 1),
*UtilInternal.int_to_bytes(frame.operation.value, length=1),
]
),
frame.payload,
]
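    # Wire format produced above: a 4-byte big-endian signed length equal to len(payload) + 1,
    # followed by a 1-byte operation code, followed by the payload bytes themselves.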
@staticmethod
def get_request_id():
return str(uuid.uuid4())
@staticmethod
def is_invalid(o):
if not hasattr(o, "_validations_map"):
return False
if not hasattr(o, "_types_map"):
return False
for prop_name, validations in o._validations_map.items():
if not hasattr(o, prop_name):
return "Object is malformed, missing property: {}".format(prop_name)
# Validate all properties on lists
if type(getattr(o, prop_name)) == list:
for i, v in enumerate(getattr(o, prop_name)):
result = UtilInternal.is_invalid(v)
if result:
return "Property {}[{}] is invalid because {}".format(prop_name, i, result)
# Recurse down to check validity of objects within objects
result = UtilInternal.is_invalid(getattr(o, prop_name))
if result:
return "Property {} is invalid because {}".format(prop_name, result)
# Validate the property
if "required" in validations and validations["required"] and getattr(o, prop_name) is None:
return "Property {} is required, but was None".format(prop_name)
if (
"minLength" in validations
and getattr(o, prop_name) is not None
and len(getattr(o, prop_name)) < validations["minLength"]
):
return "Property {} must have a minimum length of {}, but found length of {}".format(
prop_name, validations["minLength"], len(getattr(o, prop_name))
)
if (
"maxLength" in validations
and getattr(o, prop_name) is not None
and len(getattr(o, prop_name)) > validations["maxLength"]
):
return "Property {} must have a maximum length of {}, but found length of {}".format(
prop_name, validations["maxLength"], len(getattr(o, prop_name))
)
if (
"minItems" in validations
and getattr(o, prop_name) is not None
and len(getattr(o, prop_name)) < validations["minItems"]
):
return "Property {} must have at least {} items, but found {}".format(
prop_name, validations["minItems"], len(getattr(o, prop_name))
)
if (
"maxItems" in validations
and getattr(o, prop_name) is not None
and len(getattr(o, prop_name)) > validations["maxItems"]
):
return "Property {} must have at most {} items, but found {}".format(
prop_name, validations["maxItems"], len(getattr(o, prop_name))
)
if (
"maximum" in validations
and getattr(o, prop_name) is not None
and getattr(o, prop_name) > validations["maximum"]
):
return "Property {} must be at most {}".format(prop_name, validations["maximum"])
if (
"minimum" in validations
and getattr(o, prop_name) is not None
and getattr(o, prop_name) < validations["minimum"]
):
return "Property {} must be at least {}".format(prop_name, validations["minimum"])
if (
"pattern" in validations
and getattr(o, prop_name) is not None
and re.fullmatch(validations["pattern"], getattr(o, prop_name)) is None
):
return "Property {} must match regex {}".format(prop_name, validations["pattern"])
for prop_name, types in o._types_map.items():
# Validate all properties with their respective types
if "type" in types and getattr(o, prop_name) is not None:
result = isinstance(getattr(o, prop_name), types["type"])
if not result:
return "Property {} is invalid because it must be of type {}".format(
prop_name, types["type"].__name__
)
if types["type"] == list and "subtype" in types:
for i, v in enumerate(getattr(o, prop_name)):
result = isinstance(v, types["subtype"])
if not result:
return "Property {}[{}] is invalid because it must be of type {}".format(
prop_name, i, types["subtype"].__name__
)
return False
@staticmethod
def raise_on_error_response(response):
if response.status == ResponseStatusCode.Success:
return
elif response.status == ResponseStatusCode.InvalidRequest:
raise InvalidRequestException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.RequestPayloadTooLarge:
raise RequestPayloadTooLargeException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.ResourceNotFound:
raise ResourceNotFoundException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.ResponsePayloadTooLarge:
raise ResponsePayloadTooLargeException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.ServerTimeout:
raise ServerTimeoutException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.Unauthorized:
raise UnauthorizedException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.UnknownFailure:
raise UnknownFailureException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.NotEnoughMessages:
raise NotEnoughMessagesException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.MessageStoreReadError:
raise MessageStoreReadErrorException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.OutOfMemoryError:
raise ServerOutOfMemoryException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.UpdateFailed:
raise UpdateFailedException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.UpdateNotAllowed:
raise UpdateNotAllowedException(response.error_message, response.status, response.request_id)
elif response.status == ResponseStatusCode.UnknownOperation:
raise UnknownOperationException(response.error_message, response.status, response.request_id)
else:
raise StreamManagerException(
"Client is not able to understand this server response status code", "Unrecognized", response.request_id
)
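# Minimal wire-format sketch (illustrative only, not part of the SDK): a frame
# is encoded as a 4-byte big-endian length (payload length + 1 operation byte),
# followed by the 1-byte operation code and then the raw payload. The stand-in
# frame below is a hypothetical SimpleNamespace, not a real SDK frame class.
if __name__ == "__main__":
    from types import SimpleNamespace
    _frame = SimpleNamespace(payload=b"hello", operation=SimpleNamespace(value=1))
    _header, _payload = UtilInternal.encode_frame(_frame)
    assert UtilInternal.int_from_bytes(_header[:4]) == len(_payload) + 1
    assert _header[4] == 1
    print("encoded frame bytes:", _header + _payload)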
``` |
{
"source": "JonSn0w/advent-of-code",
"score": 3
} |
#### File: advent-of-code/2018/day16.py
```python
from sys import stdin
import re
oper = {
'addr': lambda reg, a, b: reg[a] + reg[b],
'addi': lambda reg, a, b: reg[a] + b,
'mulr': lambda reg, a, b: reg[a] * reg[b],
'muli': lambda reg, a, b: reg[a] * b,
'banr': lambda reg, a, b: reg[a] & reg[b],
'bani': lambda reg, a, b: reg[a] & b,
'borr': lambda reg, a, b: reg[a] | reg[b],
'bori': lambda reg, a, b: reg[a] | b,
'setr': lambda reg, a, b: reg[a],
'seti': lambda reg, a, b: a,
'gtir': lambda reg, a, b: 1 if a > reg[b] else 0,
'gtri': lambda reg, a, b: 1 if reg[a] > b else 0,
'gtrr': lambda reg, a, b: 1 if reg[a] > reg[b] else 0,
'eqir': lambda reg, a, b: 1 if a == reg[b] else 0,
'eqri': lambda reg, a, b: 1 if reg[a] == b else 0,
'eqrr': lambda reg, a, b: 1 if reg[a] == reg[b] else 0,
}
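# Illustrative sanity check (not part of the original solution): for registers
# [3, 2, 1, 1], 'addr' with a=2, b=1 computes reg[2] + reg[1] = 1 + 2 = 3.
assert oper['addr']([3, 2, 1, 1], 2, 1) == 3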
def decipherCodes(monitor, opcodes):
opts = dict()
found = set()
for i in monitor:
before, after = i[0], i[2]
op = i[1][0]
a,b,c = i[1][1], i[1][2], i[1][3]
if op not in opts.keys():
opts[op] = set(oper.keys())
possible = list(opts[op])
for j in possible:
if j in opts[op] and oper[j](before,a,b) != after[c]:
opts[op].remove(j)
if len(opts[op]) == 1:
opcodes[list(opts[op])[0]] = op
found.add(list(opts[op])[0])
# Narrow down possible operations
incomplete = True
while incomplete:
incomplete = False
for i in opts.keys():
opts[i] = opts[i].difference(found)
if len(opts[i]) == 1:
opcodes[list(opts[i])[0]] = i
found.add(list(opts[i])[0])
incomplete = True
return dict((v,k) for k,v in opcodes.items())
inp = [i.strip() for i in stdin.readlines()]
opts = list(oper.keys())
monitor, ops = list(), list()
i = 0
while 'Before:' in inp[i]:
temp = list()
    temp.append([int(j) for j in re.findall(r'\d+', inp[i])])
    temp.append([int(j) for j in inp[i+1].split()])
    temp.append([int(j) for j in re.findall(r'\d+', inp[i+2])])
monitor.append(temp)
i += 4
opcodes = decipherCodes(monitor, {i:None for i in opts})
reg = [0, 0, 0, 0]
for i in range(i+2, len(inp)):
code,a,b,c = [int(j) for j in inp[i].split()]
print(f'{opcodes[code]}, {a}, {b}, {c}')
reg[c] = oper[opcodes[code]](reg, a, b)
print(reg[0])
``` |
{
"source": "JonSn0w/Urban-Dictionary-Therapy",
"score": 3
} |
#### File: Urban-Dictionary-Therapy/tests/test_UDTherapy.py
```python
import os, pytest
from UDTherapy import helper
@pytest.fixture
def test_data():
return { 'opts': ['-s','udt'],
'url': 'https://www.urbandictionary.com/define.php?term=udt', }
def test_arguments():
args = helper.parse_options(['-s','udt','-n','3'])
assert (' '.join(args.search) == 'udt' and args.num == 3) and not (args.all and args.wotd)
def test_generate_url(test_data):
args = helper.parse_options(test_data['opts'])
test_data['args'] = args
assert helper.generate_url(args) == test_data['url']
def test_scrape_term(test_data):
url = test_data['url']
assert len(helper.scrape_term(url, 0))
``` |
{
"source": "JonSn0w/YouTube-to-MP3",
"score": 3
} |
#### File: YouTube-to-MP3/yt2mp3/song.py
```python
import os, io, pydub, youtube_dl, requests, logging
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC, TIT2, TPE1, TPE2, TALB, TCON, TRCK, TDRC, TPOS
from PIL import Image
from colorama import Fore, Style
from yt2mp3 import util
class Song():
"""
A class used to represent a song
...
Attributes
----------
data : dict
A dictionary containing the Youtube URL and song data provided by
the iTunes API
"""
def __init__(self, data):
self.track = data['track_name']
self.artist = data['artist_name']
self.album = data['collection_name']
self.genre = data['primary_genre_name']
self.artwork_url = data['artwork_url_100']
self.track_number = str(data['track_number'])
self.track_count = str(data['track_count'])
self.disc_count = str(data['disc_count'])
self.disc_number = str(data['disc_number'])
self.release_date = data['release_date']
self.filename = data['track_name']
self.video_url = data['video_url']
def download(self, verbose=False):
"""
Downloads the video at the provided url
Args:
verbose: A bool value to specify the current logging mode
Returns:
The path of the downloaded video file
"""
temp_dir = os.path.expanduser('~/Downloads/Music/temp/')
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
video_id = self.video_url.split('watch?v=')[-1]
ydl_opts = dict()
ydl_opts['outtmpl'] = temp_dir+'%(id)s.%(ext)s'
ydl_opts['format'] = 'bestaudio/best'
ydl_opts['quiet'] = True
if verbose:
ydl_opts['progress_hooks'] = [util.show_progressbar]
logging.info(Fore.YELLOW+'↓ '+Style.RESET_ALL+'Downloading...')
ydl = youtube_dl.YoutubeDL(ydl_opts)
video_info = None
with ydl:
ydl.download([self.video_url])
video_info = ydl.extract_info(self.video_url, download=False)
logging.info(Fore.GREEN+'✔ '+Style.RESET_ALL+'Download Complete')
path = os.path.join(temp_dir, video_id+'.'+video_info['ext'])
return path
def convert_to_mp3(self, video):
"""
Converts the downloaded video file to MP3
Args:
video: A path to the downloaded video file
Returns:
The path of the converted MP3 file
"""
logging.info(Fore.BLUE+'♬ '+Style.RESET_ALL+'Converting to MP3')
artist_dir = os.path.expanduser('~/Downloads/Music/')
artist_dir = os.path.join(artist_dir, self.artist.replace('/',''))
if not os.path.exists(artist_dir):
os.makedirs(artist_dir)
song_path = os.path.join(artist_dir, self.filename+'.mp3')
# TODO: Write test to cover
if os.path.exists(song_path):
self.filename = self.filename+' ('+self.album+')'
song_path = os.path.join(artist_dir, self.filename+'.mp3')
pydub.AudioSegment.from_file(video).export(song_path, format='mp3')
return song_path
def get_cover_image(self, resolution):
"""
Retrieves the cover-art image with the specified resolution
Args:
resolution: The target resolution of the cover-art
Returns:
The path of the retrieved cover-art image
"""
img_url = self.artwork_url
if 'youtube' not in img_url:
ext = '/%sx%sbb.jpg' % (resolution, resolution)
img_url = '/'.join(img_url.split('/')[:-1])+ext
img_path = os.path.expanduser('~/Downloads/Music/CoverArt')
if not os.path.exists(img_path):
os.makedirs(img_path)
img_path = os.path.join(img_path, 'cover.jpg')
response = requests.get(img_url)
Image.open(io.BytesIO(response.content)).save(img_path)
return img_path
def set_id3(self, path, resolution=480):
"""
Assigns the ID3 metadata of the MP3 file
Args:
path: The path of the converted MP3 file
resolution: The target resolution of the cover-art
"""
tags = ID3(path)
tags.delete()
tags.add(TIT2(encoding=3, text=self.track))
tags.add(TPE1(encoding=3, text=self.artist))
tags.add(TPE2(encoding=3, text=self.artist))
tags.add(TALB(encoding=3, text=self.album))
tags.add(TCON(encoding=3, text=self.genre))
tags.add(TRCK(encoding=3, text=self.track_number+'/'+self.track_count))
tags.add(TPOS(encoding=3, text=self.disc_number+'/'+self.disc_count))
tags.add(TDRC(encoding=3, text=self.release_date[0:4]))
# Embed cover-art in ID3 metadata
img_path = self.get_cover_image(resolution)
tags.add(APIC(encoding=3, mime='image/jpg', type=3,
desc=u'Cover', data=open(img_path, 'rb').read()))
tags.save()
def file_exists(self):
"""
Checks if a duplicate file already exists in the output directory
Returns:
A boolean value indicating whether the target file already exists
"""
path = os.path.expanduser('~/Downloads/Music/')
path = os.path.join(path, self.artist.replace('/',''), self.filename+'.mp3')
if os.path.exists(path):
tags = EasyID3(path)
      if self.album == '' or self.album in tags['album'][0]:
return True
return False
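# Illustrative sketch (not part of the package): the `data` dict handed to Song
# is expected to carry iTunes-style keys plus the resolved video URL. The
# values below are placeholders purely to show the expected shape.
if __name__ == '__main__':
    example_data = {
        'track_name': 'Example Track',
        'artist_name': 'Example Artist',
        'collection_name': 'Example Album',
        'primary_genre_name': 'Pop',
        'artwork_url_100': 'https://example.com/artwork/100x100bb.jpg',
        'track_number': 1,
        'track_count': 10,
        'disc_number': 1,
        'disc_count': 1,
        'release_date': '2018-01-01T00:00:00Z',
        'video_url': 'https://www.youtube.com/watch?v=VIDEO_ID',
    }
    song = Song(example_data)
    print(song.track, '-', song.artist)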
``` |
{
"source": "jonsneyers/nerdlandbot",
"score": 3
} |
#### File: nerdlandbot/bot/nerdlandbot.py
```python
from discord.ext.commands import Bot
from discord import Intents
class NerdlandBot(Bot):
def __init__(self, prefix: str, intents: Intents):
self.prefix = prefix
super().__init__(command_prefix=self.prefix, intents=intents, case_insensitive=True,)
```
#### File: nerdlandbot/commands/wombat.py
```python
import os
import discord
import random
from discord.ext import commands
from nerdlandbot.helpers.constants import WOMBATS_DIR_NAME
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.translations.Translations import get_text as translate
if not os.path.exists(WOMBATS_DIR_NAME):
os.makedirs(WOMBATS_DIR_NAME)
class Wombat(commands.Cog, name="Wombat"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="wombat_pic", brief="Post a random wombat picture", help="The bot will search for a random picture of a wombat in its database, and post it in the chat")
async def cmd_wombat_pic(self, ctx):
wombat_list = [os.path.join(WOMBATS_DIR_NAME, w) for w in os.listdir(WOMBATS_DIR_NAME)]
if not wombat_list:
msg = translate("empty_wombat_list", await culture(ctx))
return await ctx.send(msg)
await ctx.send(file=discord.File(random.choice(wombat_list)))
def setup(bot):
bot.add_cog(Wombat(bot))
```
#### File: nerdlandbot/commands/youtube.py
```python
import discord
import os
import requests
from discord.ext import commands, tasks
from nerdlandbot.commands.GuildData import get_all_guilds_data, get_guild_data, GuildData
from nerdlandbot.helpers.channel import get_channel
from nerdlandbot.helpers.log import info, fatal
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.scheduler.YoutubeScheduler import get_latest_video
from nerdlandbot.translations.Translations import get_text as translate
class Youtube(commands.Cog, name="Youtube_lists"):
@commands.command(
name="add_youtube", usage="add_youtube_usage", help="add_youtube_help",
)
async def add_youtube_channel(
self, ctx: commands.Context, youtube_channel_id: str, text_channel: str
):
"""
Add a Youtube channel to be notified
:param ctx: The current context. (discord.ext.commands.Context)
:param youtube_channel_id: The Youtube channel to be notified of (str)
:param text_channel: The text channel that will receive the notification (str)
"""
guild_data = await get_guild_data(ctx.message.guild.id)
# Error if not admin
if not guild_data.user_is_admin(ctx.author):
gif = translate("not_admin_gif", await culture(ctx))
return await ctx.send(gif)
# TODO: throw specific error with message when channel ID is wrong
latest_video = await get_latest_video(youtube_channel_id)
# Get the channel
channel = get_channel(ctx, text_channel)
# TODO: Give information to the user when the text channel does not exist
if not channel:
await ctx.channel.send(translate("membercount_channel_nonexistant", await culture(ctx)))
raise Exception("Invalid text channel provided")
if isinstance(channel, discord.VoiceChannel):
await ctx.channel.send(translate("channel_is_voice", await culture(ctx)))
return
add_response = await guild_data.add_youtube_channel(
youtube_channel_id, channel, latest_video["video_id"]
)
msg = ""
if add_response:
msg = translate("youtube_added", await culture(ctx)).format(
youtube_channel_id, channel
)
else:
msg = translate("youtube_exists", await culture(ctx)).format(
youtube_channel_id
)
info(msg)
await ctx.send(msg)
@commands.command(
name="remove_youtube", usage="remove_youtube_usage", help="remove_youtube_help",
)
async def remove_youtube_channel(
self, ctx: commands.Context, youtube_channel_id: str):
"""
Remove a Youtube channel that was being notified
:param ctx: The current context. (discord.ext.commands.Context)
:param youtube_channel_id: The Youtube channel to be notified of (str)
"""
guild_data = await get_guild_data(ctx.message.guild.id)
# Error if not admin
if not guild_data.user_is_admin(ctx.author):
gif = translate("not_admin_gif", await culture(ctx))
return await ctx.send(gif)
remove_response = await guild_data.remove_youtube_channel(youtube_channel_id)
msg = ""
if remove_response:
msg = translate("youtube_removed", await culture(ctx)).format(
youtube_channel_id
)
else:
msg = translate("youtube_no_exists", await culture(ctx)).format(
youtube_channel_id
)
info(msg)
await ctx.send(msg)
@commands.command(
name="list_youtube", help="list_youtube_help",
)
async def list_youtube_channels(self, ctx: commands.Context):
"""
List all Youtube channels that are being monitored
"""
guild_data = await get_guild_data(ctx.message.guild.id)
msg = translate("youtube_list_title", await culture(ctx))
for channel_id, channel_data in guild_data.youtube_channels.items():
msg = (
msg
+ f"\n - Channel `{channel_id}` posts in <#{channel_data['text_channel_id']}>, last video ID: `{channel_data['latest_video_id']}`"
)
await ctx.send(msg)
def setup(bot: commands.bot):
bot.add_cog(Youtube(bot))
```
#### File: nerdlandbot/helpers/log.py
```python
import logging
logging.basicConfig(filename='bot.log', level=logging.WARN)
def debug(msg, *args, **kwargs):
print(f'DEBUG |{msg}')
logging.debug(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
print(f'INFO |{msg}')
logging.info(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
print(f'WARNING |{msg}')
logging.warning(msg, *args, **kwargs)
def error(msg, *args, **kwargs):
print(f'ERROR |{msg}')
logging.error(msg, *args, **kwargs)
def fatal(msg, *args, **kwargs):
print(f'FATAL |{msg}')
logging.fatal(msg, *args, **kwargs)
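# Illustrative usage (assumption: run directly, writing bot.log in the cwd):
# each helper echoes to stdout and forwards to the standard logging module,
# which only persists WARNING and above given the basicConfig level.
if __name__ == "__main__":
    info("printed to stdout, below the bot.log threshold")
    error("printed to stdout and written to bot.log")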
```
#### File: nerdlandbot/nerdlandbot/__main__.py
```python
import os
import sys
import discord
from dotenv import load_dotenv
from nerdlandbot.bot import NerdlandBot
from nerdlandbot.helpers.log import info, fatal
from nerdlandbot.translations.Translations import get_text as _
from nerdlandbot.scheduler.YoutubeScheduler import check_and_post_latest_videos
from nerdlandbot.scheduler.PurgeScheduler import purge_messages
from nerdlandbot.commands.GuildData import get_all_guilds_data, GuildData
load_dotenv()
PREFIX = os.getenv("PREFIX")
TOKEN = os.getenv("DISCORD_TOKEN")
if PREFIX:
info("Start bot with prefix '" + PREFIX + "'")
else:
fatal("Please provide a PREFIX in your .env file")
sys.exit()
# load up intents
intents = discord.Intents.all()
bot = NerdlandBot(PREFIX, intents)
# remove default help command
bot.remove_command("help")
# load event handlers
bot.load_extension("nerdlandbot.eventhandlers.onmemberjoin")
bot.load_extension("nerdlandbot.eventhandlers.onready")
bot.load_extension("nerdlandbot.eventhandlers.oncommanderror")
# load commands
bot.load_extension("nerdlandbot.commands.notify")
bot.load_extension("nerdlandbot.commands.help")
bot.load_extension("nerdlandbot.commands.settings")
bot.load_extension("nerdlandbot.commands.membercount")
bot.load_extension("nerdlandbot.commands.random_user")
bot.load_extension("nerdlandbot.commands.wombat")
bot.load_extension("nerdlandbot.commands.youtube")
bot.load_extension("nerdlandbot.commands.poll")
bot.load_extension("nerdlandbot.commands.purger")
bot.load_extension("nerdlandbot.commands.kerk")
# Initialize and start YouTube scheduler
YOUTUBE_TOKEN = os.getenv("YOUTUBE_TOKEN")
@bot.event
async def on_ready():
if YOUTUBE_TOKEN:
info("Starting YouTube scheduler")
check_and_post_latest_videos.start(bot)
else:
fatal(
"Not starting YouTube scheduler. Please provide a YOUTUBE_TOKEN in your .env file"
)
bot.is_purging = {}
purge_messages.start(bot)
bot.run(TOKEN)
```
#### File: nerdlandbot/persistence/abstractconfigstore.py
```python
from abc import ABC, abstractmethod
class ConfigStore(ABC):
data = {}
@abstractmethod
def read(self):
pass
@abstractmethod
def write(self, data: dict):
pass
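# Minimal sketch of a concrete store (illustrative only, not part of the bot):
# a subclass only needs to provide read() and write(data).
class InMemoryConfigStore(ConfigStore):
    def read(self):
        return self.data

    def write(self, data: dict):
        self.data = data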
``` |
{
"source": "JonSnowWhite/discord-soundboard-bot",
"score": 2
} |
#### File: JonSnowWhite/discord-soundboard-bot/soundboard_bot.py
```python
from discord.ext import commands
from cogs.sound_cog import SoundboardCog
from cogs.admin_cog import AdminCog
from cogs.user_cog import UserCog
from discord import opus
command_prefix = '!'
bot = commands.Bot(command_prefix=commands.when_mentioned_or(command_prefix))
sound_folder = '~~your folder~~'
log_channel_id = int('~~your log channel id~~')
standard_activity = command_prefix + 'help'
tag_dict={'tag1': ['name1', 'name2'], # this dict saves the tags and their sounds. Be sure to use lists also when a tag has only 1 name
'tag2': ['name1', 'name2', 'name3']}
bot.add_cog(SoundboardCog(bot=bot, folder=sound_folder, log_channel_id=log_channel_id, tag_dict=tag_dict))
bot.add_cog(AdminCog(bot=bot, log_channel_id=log_channel_id, activity =standard_activity))
bot.add_cog(UserCog(bot=bot, log_channel_id=log_channel_id))
@bot.event
async def on_ready():
print('We have logged in as {0.user}'.format(bot))
bot.run('~~your token~~')
``` |
{
"source": "JonSolow/tvshow_end",
"score": 2
} |
#### File: tvshow_end/src/base.py
```python
from src.settings import *
# local imports
from src.clean_data import *
from src.model import *
from src.visualize import *
# import other custom functions not suitable for other categories.
from src.custom import *
#import make_data
def test_base():
print('Base Module Imported')
print('\nTesting local imports')
test_clean_data()
test_model()
test_viz()
test_custom()
return None
```
#### File: tvshow_end/src/custom.py
```python
def test_custom():
print('In custom module')
return None
``` |
{
"source": "jonson/flapison",
"score": 3
} |
#### File: flapison/flapison/decorators.py
```python
import json
import logging
from functools import wraps
from flask import request, make_response, jsonify, current_app
from flapison.errors import jsonapi_errors
from flapison.exceptions import JsonApiException
from flapison.utils import JSONEncoder
logger = logging.getLogger(__name__)
def check_headers(func):
"""Check headers according to jsonapi reference
:param callable func: the function to decorate
:return callable: the wrapped function
"""
@wraps(func)
def wrapper(*args, **kwargs):
if request.method in ("POST", "PATCH"):
if (
"Content-Type" in request.headers
and "application/vnd.api+json" in request.headers["Content-Type"]
and request.headers["Content-Type"] != "application/vnd.api+json"
):
error = json.dumps(
jsonapi_errors(
[
{
"source": "",
"detail": "Content-Type header must be application/vnd.api+json",
"title": "Invalid request header",
"status": "415",
}
]
),
cls=JSONEncoder,
)
return make_response(
error, 415, {"Content-Type": "application/vnd.api+json"}
)
if "Accept" in request.headers:
flag = False
for accept in request.headers["Accept"].split(","):
if accept.strip() == "application/vnd.api+json":
flag = False
break
if (
"application/vnd.api+json" in accept
and accept.strip() != "application/vnd.api+json"
):
flag = True
if flag is True:
error = json.dumps(
jsonapi_errors(
[
{
"source": "",
"detail": (
"Accept header must be application/vnd.api+json without"
"media type parameters"
),
"title": "Invalid request header",
"status": "406",
}
]
),
cls=JSONEncoder,
)
return make_response(
error, 406, {"Content-Type": "application/vnd.api+json"}
)
return func(*args, **kwargs)
return wrapper
def check_method_requirements(func):
"""Check methods requirements
:param callable func: the function to decorate
:return callable: the wrapped function
"""
@wraps(func)
def wrapper(*args, **kwargs):
error_message = "You must provide {error_field} in {cls} to get access to the default {method} method"
error_data = {
"cls": args[0].__class__.__name__,
"method": request.method.lower(),
}
if request.method != "DELETE":
if not hasattr(args[0], "schema"):
error_data.update({"error_field": "a schema class"})
raise Exception(error_message.format(**error_data))
return func(*args, **kwargs)
return wrapper
def jsonapi_exception_formatter(func):
@wraps(func)
def wrapper(*args, **kwargs):
headers = {"Content-Type": "application/vnd.api+json"}
try:
return func(*args, **kwargs)
except JsonApiException as e:
if isinstance(e.status, str) and e.status and e.status[0] == '5':
logger.exception("Exception while processing request")
elif isinstance(e.status, int) and 500 <= e.status <= 599:
logger.exception("Exception while processing request")
return make_response(
jsonify(jsonapi_errors([e.to_dict()])), e.status, headers
)
except Exception as e:
if current_app.config["DEBUG"] is True:
raise e
if "sentry" in current_app.extensions:
current_app.extensions["sentry"].captureException()
logger.exception('Unhandled exception while processing request')
exc = JsonApiException(
getattr(
e,
"detail",
current_app.config.get("GLOBAL_ERROR_MESSAGE") or str(e),
),
source=getattr(e, "source", ""),
title=getattr(e, "title", None),
status=getattr(e, "status", None),
code=getattr(e, "code", None),
id_=getattr(e, "id", None),
links=getattr(e, "links", None),
meta=getattr(e, "meta", None),
)
return make_response(
jsonify(jsonapi_errors([exc.to_dict()])), exc.status, headers
)
return wrapper
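# Minimal usage sketch (illustrative only; assumes Flask is installed and the
# flapison package is importable): the decorators wrap ordinary view functions,
# e.g. to enforce the JSON:API Content-Type/Accept rules on a POST endpoint.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)

    @app.route("/articles", methods=["POST"])
    @check_headers
    def create_article():
        return jsonify({"data": None}), 201

    app.run()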
``` |
{
"source": "jonsource/docker-spark",
"score": 3
} |
#### File: docker-spark/scripts/spark-jobs.py
```python
import sys
import requests
from bs4 import BeautifulSoup as BS
def findRunningApplicationsTable(soup):
header = soup.find(string=" Running Applications ");
if not header:
return None
rows = header.parent.find_next_sibling("table").find("tbody").find_all("tr")
applications = {}
for row in rows:
id = row.find("td").find("a").string
name = row.find("td").find_next_sibling("td").find("a")
applications[id] = {'name': name.string, 'driver': name['href']}
return applications
def printApps(apps):
if len(apps):
print "ID Name Driver"
for app in apps:
print app+": {name} {driver}".format(**apps[app])
if len(sys.argv) < 2:
print "Usage: spark-jobs.py host:port[,host:port ...]"
exit(1)
masters = sys.argv[1].split(',')
pages = []
for master in masters:
page = BS(requests.get("http://"+master).content,"html5lib")
apps = findRunningApplicationsTable(page)
if len(apps):
print "Master: " + master
printApps(apps)
break
``` |
{
"source": "jonsource/kafka-spark",
"score": 2
} |
#### File: jonsource/kafka-spark/kafkaConnector.py
```python
import logging
from logni import log
from timeout import timeout
from timeout import TimeoutByThreads
from pykafka import KafkaClient
import pykafka
import sys
import traceback
def transformLoggerLevel(level):
lvlMap = {'DEBUG': ('DBG', 3),
'WARNING': ('WARN', 3),
'ERROR': ('ERR', 4),
'INFO': ('INFO', 3),
'EXCEPTION': ('ERR', 4),
'Level 5': ('INFO', 2)
}
if level in lvlMap:
return lvlMap[level]
log.ni("Unknown log level %s", level, INFO=3)
return 'ERR', 3
def createLogniAdapter(module, method=False):
if module:
module = module+': '
else:
module = ''
def loggingLogniAdapter(level, msg, *args, **kwargs):
lvlName, lvlVal = transformLoggerLevel(logging.getLevelName(level))
kwargs[lvlName] = lvlVal
log.ni("%s%s" % (module, msg), *args, offset=3, **kwargs)
def loggingLogniAdapterMethod(self, level, msg, *args, **kwargs):
        # Forward the variadic args/kwargs to the function-style adapter unchanged
        loggingLogniAdapter(level, msg, *args, **kwargs)
if(method):
return loggingLogniAdapterMethod
else:
return loggingLogniAdapter
def plugLogging():
logging.getLogger("pykafka").setLevel(1)
logging.Logger.log = createLogniAdapter('', method=True)
for name in ('pykafka.cluster', 'pykafka.broker', 'pykafka.handlers', 'pykafka.producer', 'pykafka.topic', 'pykafka.connection', 'pykafka.partition'):
module = sys.modules[name]
module.log._log = createLogniAdapter('pykafka')
logging.info("Starting log")
log.stderr(1)
log.mask('I1W1E1D1F1')
class KafkaProducerUnavailable(Exception):
pass
class KafkaConnector(object):
def __init__(self, config):
self.config = config
self.kafkaProducer = None
self._getKafkaProducer()
#@timeout(seconds=2)
@TimeoutByThreads(seconds=0.6)
def _connectKafka(self):
log.ni("KafkaConnector: Connecting to kafka at %s ...", (self.config.get("kafka", "zk_hosts"),), WARN=4)
# debugging only - fake connection latency
# sleep = 1 + random.random()*1
# print "sleep ", sleep
# time.sleep(sleep)
try:
self.kafkaClient = KafkaClient(zookeeper_hosts=self.config.get("kafka", "zk_hosts"), socket_timeout_ms=500, offsets_channel_socket_timeout_ms=10 * 500)
self.kafkaTopic = self.kafkaClient.topics[self.config.get("kafka", "topic")]
self.kafkaProducer = self.kafkaTopic.get_producer(linger_ms=int(self.config.get("kafka", "lingerTimeMs")), min_queued_messages=int(self.config.get("kafka", "minBatchSize")))
log.ni("KafkaConnector: got one", INFO=1)
except Exception as e:
log.ni("KafkaConnector: didn't find one %s", (traceback.print_exc(),), WARN=4)
def _getKafkaProducer(self):
if not self.kafkaProducer:
log.ni("KafkaConnector: no kafka producer", INFO=1)
try:
self._connectKafka()
except Exception as e:
log.ni("KafkaConnector: didn't get producer %s", (traceback.print_exc(),), WARN=4)
if not self.kafkaProducer:
# raise KafkaProducerUnavailable
return None
log.ni("KafkaConnector: got kafka producer", INFO=1)
return self.kafkaProducer
def sendToKafka(self, message):
# try:
# log.ni("KafkaConnector * Send to kafka: %s", (message,), INFO=2)
# self._getKafkaProducer().produce(message)
# except Exception as e:
# log.ni("KafkaConnector * didn't send %s", (e,), WARN=4)
if self.kafkaProducer:
self.kafkaProducer.produce(message)
else:
log.ni("KafkaConnector: sending %s without producer", (message,), ERR=2)
def stopProducer(self):
if self.kafkaProducer:
self.kafkaProducer.stop()
```
#### File: jonsource/kafka-spark/producer.py
```python
from pykafka import KafkaClient
import sys
import time
if len(sys.argv) < 3:
print "Usage: producer.py <kafka_host> <topic> [benchmark]"
exit(-1)
kafkaHost, topic = sys.argv[1:3]
print kafkaHost, topic
client = KafkaClient(hosts=kafkaHost)
topicTest = client.topics[topic]
def benchmark(producer, n=1000):
start = time.time()
count = 0
print "\nbenchmark %s : " % n,
while(count < n):
producer.produce('test message %s' % count)
if not count % 10000:
sys.stdout.write('.')
sys.stdout.flush()
count += 1
producer.stop()
end = time.time()
print end - start
return start, end
if len(sys.argv) > 3 and sys.argv[3] == 'benchmark':
number = 300000
pre = benchmark(topicTest.get_producer(linger_ms=1, min_queued_messages=1), number)
normal = benchmark(topicTest.get_producer(linger_ms=11000, min_queued_messages=20), number)
ack = benchmark(topicTest.get_producer(linger_ms=11000, min_queued_messages=20, required_acks=1), number)
ack = benchmark(topicTest.get_producer(linger_ms=11000, min_queued_messages=20, required_acks=2), number)
# print "\n*****"
# print pre[1]-pre[0]
# print normal[1]-normal[0]
# print ack[1]-ack[0]
else:
print "Connection established (using pykafka). Sending messages.\n(. = 10 messages)"
testProducer = topicTest.get_producer(linger_ms=11000, min_queued_messages=20)
count = 0
while(1):
testProducer.produce('test message %s' % count)
count = count+1
if not count % 10:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.7)
```
#### File: jonsource/kafka-spark/wordCount.py
```python
from __future__ import print_function
import sys
import MySQLdb
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from pyspark.streaming.kafka import TopicAndPartition
from pyspark.sql import SQLContext
def myPrint(msg):
print("\n-------------------------------------------\n %s\n-------------------------------------------\n" % msg)
class saver(object):
def __init__(self, sqlc):
self.sqlc = sqlc
self.connection = MySQLdb.connect(user='root', db='test', host="127.0.0.1", passwd="")
self.cursor = self.connection.cursor()
def saveRdd(self, rdd, moar=None):
if not rdd.count():
myPrint('Empty set - nothing to save!')
return
df = self.sqlc.createDataFrame(rdd, ['word', 'count'])
list = df.collect()
self.cursor.execute("BEGIN")
for x in list:
            # x[0][0]=date, x[0][1]=banner_id, x[1]=impression count
que = "UPDATE test.impressions SET view_count = view_count + %s WHERE banner_id = %s AND view_date = \"%s\"" % (x[1], x[0][1], x[0][0])
print(que)
cnt = self.cursor.execute(que)
if not cnt:
que = "INSERT INTO test.impressions (banner_id, view_date, view_count) VALUES (%s, \"%s\", %s)" % (x[0][1], x[0][0], x[1])
print(que)
self.cursor.execute(que)
myPrint("%s messages" % len(list))
saveStartOffsets("impressions", self.cursor)
self.cursor.execute("COMMIT")
self.connection.commit()
def saveStream(self, dStream):
dStream.foreachRDD(lambda rdd: self.saveRdd(rdd))
offsetRanges = []
def storeOffsetRanges(rdd):
global offsetRanges
offsetRanges = rdd.offsetRanges()
return rdd
def printOffsetRanges(rdd):
for o in offsetRanges:
print("%s %s %s %s" % (o.topic, o.partition, o.fromOffset, o.untilOffset))
def cutSeconds(time):
time = time[:-5]
time +="00:00"
return time
def parse(row):
row = row.split(' ',3)
date = str(row[1]) + " " + str(cutSeconds(row[2]))
try:
bannerId = int(row[3])
except Exception:
bannerId = 0
return ((date, bannerId), 1)
def getStartOffsets(task, topic, partitions):
connection = MySQLdb.connect(user='root', db='test', host="127.0.0.1", passwd="")
cursor = connection.cursor()
que = 'SELECT `partition`, `offset` FROM `test`.`kafka_offsets` WHERE `task`="%s" AND `topic`="%s"' % (task, topic)
print(que)
cnt = cursor.execute(que)
if not cnt:
for p in range(partitions):
que = 'INSERT INTO test.kafka_offsets (`task`,`topic`,`partition`,`offset`) VALUES ("%s","%s",%s,0)' % (task, topic, p)
print(que)
cnt = cursor.execute(que)
connection.commit()
return getStartOffsets(task, topic, partitions)
ret = {}
for row in cursor.fetchall():
ret[TopicAndPartition(topic, row[0])] = long(row[1])
connection.close()
return ret
def saveStartOffsets(task, cursor):
global offsetRanges
for o in offsetRanges:
print("%s %s %s %s" % (o.topic, o.partition, o.fromOffset, o.untilOffset))
que = 'UPDATE test.kafka_offsets SET `offset` = %s WHERE `task`="%s" AND `topic`="%s" AND `partition`=%s' % (o.untilOffset, task, o.topic, o.partition)
print(que)
cnt = cursor.execute(que)
# if not cnt:
# que = 'INSERT INTO test.kafka_offsets (`task`,`topic`,`partition`,`offset`) VALUES ("%s","%s",%s,%s)' % (task, o.topic, o.partition, o.untilOffset)
# print(que)
# cnt = cursor.execute(que)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: kafka_wordcount.py <zk> <topic>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonStreamingKafkaWordCount")
ssc = StreamingContext(sc, 5)
sqlc = SQLContext(sc)
kafkaList, topic = sys.argv[1:]
offsets = getStartOffsets("impressions", topic, 12)
print(offsets)
kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": kafkaList}, fromOffsets=offsets)
#kvs = KafkaUtils.createStream(ssc, zkQuorum, "spark-streaming-consumer", {topic: 12})
kvs.transform(storeOffsetRanges).foreachRDD(printOffsetRanges)
lines = kvs.map(lambda x: x[1])
pairs = lines.map(parse)
    # format is (('2016-01-28 14:06:00', 999), 6)
counts = pairs.reduceByKey(lambda a, b: a+b)
counts.pprint()
s = saver(sqlc)
s.saveStream(counts)
# df = sqlc.createDataFrame(counts, ['word', 'count']);
# print(df)
ssc.start()
ssc.awaitTermination()
``` |
{
"source": "jonspeicher/blinkyfun",
"score": 3
} |
#### File: blinkyfun/blinkytape/color.py
```python
import random, sys
class Color(object):
def __init__(self, red, green, blue):
self._red = self._clamp_color(red)
self._green = self._clamp_color(green)
self._blue = self._clamp_color(blue)
@classmethod
def from_string(cls, string):
return getattr(sys.modules[__name__], string.upper())
@classmethod
def random(cls):
rgb = [random.randint(0, 255) for i in range(3)]
return cls(*rgb)
@classmethod
def scale(cls, color, scale):
return cls(color.red * scale, color.green * scale, color.blue * scale)
@property
def red(self):
return self._red
@property
def green(self):
return self._green
@property
def blue(self):
return self._blue
@property
def rgb(self):
return [self.red, self.green, self.blue]
@property
def raw(self):
clipped_rgb = [min(color, 254.0) for color in self.rgb]
rounded_rgb = [round(color) for color in clipped_rgb]
truncated_rgb = [int(color) for color in rounded_rgb]
return truncated_rgb
def _clamp_color(self, color):
return max(0.0, min(255.0, color))
def __repr__(self):
return str(self.rgb)
BLACK = Color(0, 0, 0)
RED = Color(255, 0, 0)
ORANGE = Color(255, 165, 0)
YELLOW = Color(255, 255, 0)
GREEN = Color(0, 255, 0)
BLUE = Color(0, 0, 255)
PURPLE = Color(255, 0, 255)
WHITE = Color(255, 255, 255)
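# Quick usage sketch (illustrative only): colors can be looked up by name,
# scaled, and converted to the raw integer triple sent to the tape.
if __name__ == '__main__':
    print(Color.from_string('red'))       # [255, 0, 0]
    print(Color.scale(WHITE, 0.5).raw)    # [128, 128, 128]
    print(Color.random().rgb)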
``` |
{
"source": "jonspock/vdf-competition",
"score": 4
} |
#### File: vdf-competition/inkfish/large_proof.py
```python
import math
def approximate_i(T):
"""Performs the approximation from the paper to select a resonable cache
size"""
x = (T / 16) * math.log(2)
w = math.log(x) - math.log(math.log(x)) + 0.25
return round(w / (2 * math.log(2)))
def sum_combinations(numbers):
"""Add all combinations of the given numbers, of at least one number"""
combinations = [0]
for element in numbers:
new_combinations = list(combinations)
for element2 in combinations:
new_combinations.append(element + element2)
combinations = new_combinations
combinations.remove(0) # Remove 0
return combinations
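# Illustrative: sum_combinations([1, 2]) yields the sum of every non-empty
# subset, i.e. [1, 2, 3]; the cache-index computation below builds on this.
assert sorted(sum_combinations([1, 2])) == [1, 2, 3]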
def cache_indeces_for_count(T):
i = approximate_i(T)
# Since T might not be a power of 2, we have to divide and
# add 1 if odd, to calculate all of the indeces that we will cache
curr_T = T
intermediate_Ts = []
for _ in range(i):
curr_T >>= 1
intermediate_Ts.append(curr_T)
if curr_T & 1 == 1:
curr_T += 1
cache_indeces = sorted([s for s in
sum_combinations(intermediate_Ts)])
cache_indeces.append(T)
return cache_indeces
def calculate_final_T(T, delta):
# Based on the number of rounds to skip, calculates the target T that
# we must look for, in order to stop the iteration of the loop
curr_T = T
Ts = []
while curr_T != 2:
Ts.append(curr_T)
curr_T = curr_T >> 1
if curr_T & 1 == 1:
curr_T += 1
Ts += [2, 1] # Add 2, 1 for completion
return Ts[-delta] # return the correct T to look for
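# Illustrative: for T=16 the halving chain is 16, 8, 4, 2, 1, so with delta=2
# the prover stops once the working T reaches 2.
assert calculate_final_T(16, 2) == 2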
def generate_large_proof(x, T, delta, y, powers, reduce_f, identity,
generate_r_value, int_size_bits):
"""
Generate the proof.
Returns a list of elements derived by operations on x.
"""
# Only even values work, since we need to do T/2
if T % 2 != 0:
raise ValueError("T must be even")
i = approximate_i(T)
mus = []
rs = [] # random values generated using hash function
x_p = [x] # x prime in the paper
y_p = [y] # y prime in the paper
curr_T = T
Ts = [] # List of all of the Ts being used, T, then T/2, etc
final_T = calculate_final_T(T, delta)
round_index = 0
while curr_T != final_T:
assert(curr_T & 1 == 0)
half_T = curr_T >> 1
Ts.append(half_T)
denominator = 1 << (round_index + 1) # T/2 for initial round
# use cache for first i rounds for fast computation
if (round_index < i):
mu = identity # Compute product below
# Get each of the cached terms, for round 3, denominator is 8.
# The terms are T/8, 3T/8 5T/8 7T/8. If not a power of two, we
# will not use exactly 3T/8
for numerator in range(1, denominator, 2):
# Number of bits in the denominator, for example 3
# Don't include last r, since not computed yet
num_bits = denominator.bit_length() - 2
# Find out which rs to multiply for this term, based on bit
# composition. For example, for 5T/8, 5 is 101 in bits, so
# multiply r1 (but not r2)
rs_to_mult = [
1 if numerator & (1 << (b + 1))
else rs[num_bits - b - 1]
for b in range(num_bits-1, -1, -1)]
# Multiply rs together
r_prod = 1
for r in rs_to_mult:
r_prod *= r
# Calculates the exact cached power T to use
Ts_to_add = [
Ts[num_bits - b - 1] if numerator & (1 << (b + 1)) else 0
for b in range(num_bits)]
T_sum = half_T
for t in Ts_to_add:
T_sum += t
mu_component = powers[T_sum]
mu = reduce_f(mu * pow(mu_component, r_prod))
mus.append(mu)
else:
# Compute for rounds i + 1 until the end, for low cache storage
mu = x_p[-1]
for _ in range(half_T):
mu = pow(mu, 2)
mus.append(mu)
rs.append(generate_r_value(x, y, mus[-1], int_size_bits))
x_p.append(reduce_f(pow(x_p[-1], rs[-1]) * mu))
y_p.append(reduce_f(pow(mu, rs[-1]) * y_p[-1]))
# Compute the new T, and y. If T is odd, make it even, and adjust
# the y_p accordingly, so that y_p = x_p ^ (2 ^ curr_T)
curr_T = curr_T >> 1
if curr_T & 1 == 1:
curr_T += 1
y_p[-1] = pow(y_p[-1], 2)
round_index += 1
assert(pow(y_p[-1], 1) == pow(x_p[-1], 1 << final_T))
return mus
def verify_large_proof(x_initial, y_initial, proof, T, delta, reduce_f,
generate_r_value, int_size_bits):
# Only even values work, since we need to do T/2
if T % 2 != 0:
raise ValueError("T must be even")
mu = None
x = x_initial
y = y_initial
final_T = calculate_final_T(T, delta)
curr_T = T
for mu in proof:
assert(curr_T & 1 == 0)
r = generate_r_value(x_initial, y_initial, mu, int_size_bits)
x = reduce_f(pow(x, r) * mu)
y = reduce_f(pow(mu, r) * y)
# To guarantee even Ts, add 1 if necessary
curr_T >>= 1
if curr_T & 1 == 1:
curr_T += 1
y = pow(y, 2)
return pow(x, 1 << final_T) == y
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
```
#### File: vdf-competition/tests/test_mod.py
```python
import heapq
import math
import unittest
from inkfish import mod
def primes(max_value=None):
yield 2
heap = [(4, 2)]
c = 2
while max_value is None or c < max_value:
c += 1
n, p = heap[0]
if n > c:
yield c
heapq.heappush(heap, (c+c, c))
while n <= c:
heapq.heapreplace(heap, (n+p, p))
n, p = heap[0]
def primes_3_mod_4(max_value=None):
for p in primes(max_value):
if p & 3 == 3:
yield p
class test_ModArithmetic(unittest.TestCase):
def test_extended_gcd(self):
for a in range(1, 1000, 3):
for b in range(1, 1000, 5):
r, s, t = mod.extended_gcd(a, b)
self.assertEqual(r, math.gcd(a, b))
self.assertEqual(r, a * s + b * t)
def test_inverse(self):
for p in primes(1000):
for a in range(1, p-1):
v = mod.inverse(a, p)
self.assertEqual(a * v % p, 1)
def test_reduce_equivalencies_rp(self):
for a0, m0, a1, m1 in ((2, 5, 1, 2), (1, 6, 7, 10), (1, 6, 2, 10)):
a, m, works = mod.reduce_equivalencies(a0, m0, a1, m1)
self.assertEqual(m, m0 * m1 // math.gcd(m0, m1))
if works:
self.assertEqual(a % m0, a0)
self.assertEqual(a % m1, a1)
def test_crt(self):
for a_list, m_list in [([2, 6], [14, 18]), ([1, 3, 5, 7],
[3, 7, 11, 109])]:
v = mod.crt(a_list, m_list)
for a, m in zip(a_list, m_list):
self.assertEqual(v % m, a)
def test_square_root_mod_p(self):
for p in primes_3_mod_4(1000):
for a in range(1, p):
for t in mod.square_root_mod_p(a, p):
self.assertEqual(t * t % p, a)
def test_square_root_mod_p_list(self):
for p0 in primes_3_mod_4(10):
for p1 in primes_3_mod_4(100):
if p1 <= p0:
continue
for p2 in primes_3_mod_4(100):
if p2 <= p1:
continue
prod = p0 * p1 * p2
for a in range(1, prod):
for t in mod.square_root_mod_p_list(a, [p0, p1, p2]):
print(a, prod, t, t*t % prod)
self.assertEqual(t * t % prod, a)
def test_solve_mod(self):
def check(a, b, c):
r, s = mod.solve_mod(a, b, c)
b %= c
for k in range(50):
a_coefficient = r + s * k
self.assertEqual((a_coefficient * a) % c, b)
check(3, 4, 5)
check(6, 8, 10)
check(12, 30, 7)
check(6, 15, 411)
check(192, 193, 863)
check(-565721958, 740, 4486780496)
check(565721958, 740, 4486780496)
check(-565721958, -740, 4486780496)
check(565721958, -740, 4486780496)
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
``` |
{
"source": "jonssonlab/satlasso",
"score": 3
} |
#### File: satlasso/satlasso/__init__.py
```python
from collections.abc import Iterable
import numpy as np
import cvxpy as cp
import statistics
from sklearn.linear_model._base import _preprocess_data
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array, _deprecate_positional_args
from sklearn.linear_model._base import LinearModel, RegressorMixin, MultiOutputMixin
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.model_selection import KFold
@_deprecate_positional_args
def sat_separate_data(X, y, saturation = 'max'):
"""Used to separate data X and labels y into saturated data and unsaturated data.
Data separated based on value of parameter 'saturation'.
Parameters
----------
X : ndarray of (n_samples, n_features)
Data
y : ndarray of shape (n_samples,) or \
(n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
saturation : float or string, {'mode', 'max'}, default='max'
If float, regressors X with label y equal to float used as saturated data.
If string in {'mode', 'max'}, use regressors X with label y equal to mode or maximum of labels y, respectively, as saturated data.
Returns
----------
Xu : ndarray of (n_unsaturated_samples, n_features)
Unsaturated data according to saturated parameter
Xs : ndarray of (n_saturated_samples, n_features)
Saturated data according to saturated parameter
yu : ndarray of shape (n_unsaturated_samples,) or \
(n_unsaturated_samples, n_targets)
Target of unsaturated data.
ys : ndarray of shape (n_saturated_samples,) or \
(n_saturated_samples, n_targets)
Target of saturated data.
saturation_val : float
value chosen to determine saturation
"""
if isinstance(saturation, float):
if saturation not in y:
error_msg = 'Saturation value passed : ' + str(saturation) + ' not found in y array.'
raise ValueError(error_msg)
saturation_val = saturation
elif isinstance(saturation, str):
if saturation not in ['mode', 'max']:
# Raise ValueError if saturation arg not in accepted args
raise ValueError('Saturation value must be in {"max", "mode"} or a float.')
if saturation == 'max':
saturation_val = max(y)
elif saturation == 'mode':
saturation_val = statistics.mode(y)
else:
# Raise ValueError if saturation arg not in accepted args
raise ValueError('Saturation value must be in {"max", "mode"} or a float.')
# Set unsaturated and saturated data based on saturated value
Xu = X[y != saturation_val]
Xs = X[y == saturation_val]
yu = y[y != saturation_val]
ys = y[y == saturation_val]
return Xu, Xs, yu, ys, saturation_val
def objective_function(coefs, Xu, Xs, yu, ys, lambda_1, lambda_2, lambda_3):
"""Objective function for SatLasso method.
The function returned:
lambda_1 * ||y_u - X_uw||^2_2 + lambda_2 * ||w||_1 + lambda_3 * max(max(y_s-X_sw), 0)
Parameters
----------
Xu : ndarray of (n_unsaturated_samples, n_features)
Unsaturated data according to saturated parameter
Xs : ndarray of (n_saturated_samples, n_features)
Saturated data according to saturated parameter
yu : ndarray of shape (n_unsaturated_samples,) or \
(n_unsaturated_samples, n_targets)
Target of unsaturated data.
ys : ndarray of shape (n_saturated_samples,) or \
(n_saturated_samples, n_targets)
Target of saturated data.
lambda_1 : float
Constant that multiplies the least squares loss.
lambda_2 : float
Constant that multiplies the L1 term.
lambda_3 : float
Constant that multiplies the penalty on saturated data.
"""
# Convert to numpy arrays
Xu = np.asarray(Xu)
Xs = np.asarray(Xs)
yu = np.asarray(yu)
ys = np.asarray(ys)
m = len(Xu)
# Compute and return objective function
# Check unsaturated and saturated data for empty arrays
if yu.size > 0 and ys.size > 0:
# Compute and return objective function with unsaturated and saturated loss and l1-regularization
return lambda_1*(1/m)*cp.norm2(yu - Xu @ coefs)**2+lambda_2*(1/m)*cp.norm1(coefs)+lambda_3*cp.max(cp.hstack([ys - Xs @ coefs, 0]))
elif yu.size > 0 and ys.size == 0:
# Compute and return objective function with unsaturated penalty only and l1-regularization
return lambda_1*(1/m)*cp.norm2(yu - Xu @ coefs)**2+lambda_2*(1/m)*cp.norm1(coefs)
elif yu.size == 0 and ys.size > 0:
# Compute and return objective function with saturated penalty only and l1-regularization
return lambda_2*(1/m)*cp.norm1(coefs)+lambda_3*cp.max(cp.hstack([ys - Xs @ coefs, 0]))
else:
# If unsaturated and saturated data both empty, raise error
raise ValueError('Encountered empty y: ', yu, ys)
@_deprecate_positional_args
def satlasso_cvxopt(Xu, Xs, yu, ys, lambda_1, lambda_2, lambda_3):
"""Compute optimal coefficient vector for saturated lasso problem using convex optimization.
Parameters
----------
Xu : ndarray of (n_unsaturated_samples, n_features)
Unsaturated data according to saturated parameter
Xs : ndarray of (n_saturated_samples, n_features)
Saturated data according to saturated parameter
yu : ndarray of shape (n_unsaturated_samples,) or \
(n_unsaturated_samples, n_targets)
Target of unsaturated data.
ys : ndarray of shape (n_saturated_samples,) or \
(n_saturated_samples, n_targets)
Target of saturated data.
lambda_1 : float
Constant that multiplies the least squares loss.
lambda_2 : float
Constant that multiplies the L1 term.
lambda_3 : float
Constant that multiplies the penalty on saturated data.
Returns
----------
coefs : ndarray of shape (n_features,) or (n_targets, n_features)
parameter vector found by cvxpy
"""
coefs = cp.Variable(len(Xu[0]))
problem = cp.Problem(cp.Minimize(objective_function(coefs, Xu, Xs, yu, ys, lambda_1, lambda_2, lambda_3)))
solvers = [cp.ECOS, cp.SCS, cp.CVXOPT]
for solver_choice in solvers:
try:
problem.solve(solver = solver_choice)
break
except cp.error.SolverError:
continue
return np.asarray(coefs.value)
class SatLasso(MultiOutputMixin, RegressorMixin, LinearModel):
"""Linear Model trained with L1 prior as regularizer (aka Lasso) and penalty on underestimated saturated data
The optimization objective for SatLasso is::
lambda_1 * ||y_u - X_uw||^2_2 + lambda_2 * ||w||_1 + lambda_3 * max(max(y_s-X_sw), 0)
Parameters
----------
lambda_1 : float, default=1.0
Constant that multiplies the least squares loss. Defaults to 1.0.
lambda_2 : float, default=1.0
Constant that multiplies the L1 term. Defaults to 1.0.
``lambda_2 = 0`` is equivalent to an ordinary least square,
with penalty on underestimated saturated data.
lambda_3 : float, default=1.0
Constant that multiplies the penalty on saturated data. Default to 1.0.
saturation : float or string, {'mode', 'max'}, default='max'
If float, regressors X with label y equal to float used as saturated data.
If string in {'mode', 'max'}, use regressors X with label y equal to mode or maximum of labels y, respectively, as saturated data.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float or ndarray of shape (n_targets,)
independent term in decision function.
saturation_val_ : float
value used to determine saturation
"""
def __init__(self, lambda_1 = 1.0, lambda_2 = 1.0, lambda_3 = 1.0, fit_intercept = True, saturation = 'max', normalize = False, copy_X = True):
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.lambda_3 = lambda_3
self.fit_intercept = fit_intercept
self.saturation = saturation
self.normalize = normalize
self.copy_X = copy_X
method = staticmethod(satlasso_cvxopt)
def fit(self, X, y, check_input=True):
"""Fit model with convex optimization.
Parameters
----------
X : ndarray of (n_samples, n_features)
Data. Note: sparse matrix not accepted at this time
y : ndarray of shape (n_samples,) or \
(n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
# Copy X if copy X is True
if self.copy_X:
X = X.copy()
# Check for correct input shape
if check_input:
X, y = check_X_y(X, y, accept_sparse=False)
# Convert to numpy arrays
X = np.asarray(X)
y = np.asarray(y)
# Normalize with mean centering and l2-norm if normalize
# set to true, and fit_intercept set to true
if self.normalize and self.fit_intercept:
# Already copied so do not need to copy again
X, y, X_offset, y_offset, X_scale = _preprocess_data(X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=False)
if isinstance(self.saturation, float):
self.saturation = self.saturation - y_offset
# Separate unsaturated data from saturated data
Xu, Xs, yu, ys, self.saturation_val_ = sat_separate_data(X, y, saturation = self.saturation)
# Use convex optimization to solve for minimized objective function
if self.fit_intercept and not self.normalize:
# Add a bias variable to each data point if fit intercept = True
Xu_with_bias = np.hstack((Xu, [[1] for i in range(0, len(Xu))]))
Xs_with_bias = np.hstack((Xs, [[1] for i in range(0, len(Xs))]))
# Use convex optimization to solve for coefficients
coefs = self.method(Xu_with_bias, Xs_with_bias, yu, ys, self.lambda_1, self.lambda_2, self.lambda_3)
self.coef_ = coefs[:-1]
self.intercept_ = coefs[-1]
else:
# Use convex optimization to solve for coefficients
self.coef_ = self.method(Xu, Xs, yu, ys, self.lambda_1, self.lambda_2, self.lambda_3)
self.intercept_ = 0.
# Set intercept and rescale coefficient if data was normalized
if self.normalize and self.fit_intercept:
self._set_intercept(X_offset, y_offset, X_scale)
self.saturation_val_ = self.saturation_val_ + y_offset
self.is_fitted_ = True
return self
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array of shape (n_samples, n_features)
Returns
-------
T : ndarray of shape (n_samples,)
The predicted decision function
"""
# Check if fit has been called
check_is_fitted(self, 'is_fitted_')
# Check input
X = check_array(X)
return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
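# Minimal usage sketch (illustrative only; synthetic data, requires an
# installed cvxpy solver): fit on data where the largest y value is treated
# as the saturation level, then inspect the recovered coefficients.
if __name__ == '__main__':
    _X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [2.0, 1.0]])
    _y = np.array([1.0, 2.0, 3.0, 3.0])
    _model = SatLasso(lambda_1=1.0, lambda_2=0.1, lambda_3=1.0)
    _model.fit(_X, _y)
    print(_model.coef_, _model.intercept_, _model.saturation_val_)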
@_deprecate_positional_args
def satlassoCV_cvxopt(Xu, Xs, yu, ys, lambda_1s, lambda_2s, lambda_3s, cv=5):
"""Compute optimal coefficient vector for saturated lasso problem using convex optimization.
Parameters
----------
Xu : ndarray of (n_unsaturated_samples, n_features)
Unsaturated data according to saturated parameter
Xs : ndarray of (n_saturated_samples, n_features)
Saturated data according to saturated parameter
yu : ndarray of shape (n_unsaturated_samples,) or \
(n_unsaturated_samples, n_targets)
Target of unsaturated data.
ys : ndarray of shape (n_saturated_samples,) or \
(n_saturated_samples, n_targets)
Target of saturated data.
lambda_1s : ndarray, default=None
List of lambda_1s where to compute the models.
If ``None`` lambda_1s are set automatically
lambda_2s : ndarray, default=None
List of lambda_2s where to compute the models.
If ``None`` lambda_2s are set automatically
lambda_3s : ndarray, default=None
List of lambda_3s where to compute the models.
If ``None`` lambda_3s are set automatically
Returns
----------
lambda_1 : float
The amount of penalization on unsaturated data chosen by cross validation
lambda_2 : float
The amount of l1-norm penalization chosen by cross validation
lambda_3 : float
The amount of penalization on saturated data chosen by cross validation
mse_dict : python dict
keys : tuple of form (lambda_1, lambda_2, lambda_3)
values : mean square error for values of lambda_1, lambda_2, lambda_3
mean square error for the test set on each fold, varying lambda_1, lambda_2, lambda_3
"""
# TO DO: only have one problem
# Concatenate X and y arrays in order to split for KFold cross validation
X = np.vstack((Xu, Xs))
y = np.hstack((yu, ys))
# Create iterable object to split training and test indices
if isinstance(cv, int):
# Check that cv does not exceed size of data
if cv > len(X):
            raise ValueError('Cannot have number of splits cv=' + str(cv) + ' greater than the number of samples: n_samples=' + str(len(X)))
# Create KFold object for iteration if int provided
kfold = KFold(n_splits=cv, shuffle=True, random_state=0)
_ = kfold.get_n_splits(X)
cv_iter = list(kfold.split(X))
elif isinstance(cv, Iterable):
# Use iterable if provided
cv_iter = list(cv)
else:
# Raise ValueError if cv not of accepted type
raise ValueError('Expected cv as an integer, or an iterable.')
# Iterate over possible lambda values and keep track of MSEs
lambda_combns = np.array(np.meshgrid(lambda_1s, lambda_2s, lambda_3s)).T.reshape(-1,3)
mses = {}
for i in range(0, len(lambda_combns)):
sses = []
lambda_1, lambda_2, lambda_3 = lambda_combns[i]
for train_index, test_index in cv_iter:
# Split data
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# Separate saturated and unsaturated data
Xu_train = X_train[np.argwhere(y_train != np.unique(ys).item()).flatten()]
Xs_train = X_train[np.argwhere(y_train == np.unique(ys).item()).flatten()]
yu_train = y_train[np.argwhere(y_train != np.unique(ys).item()).flatten()]
ys_train = y_train[np.argwhere(y_train == np.unique(ys).item()).flatten()]
# Calculate optimal coefficient
coefs = satlasso_cvxopt(Xu_train, Xs_train, yu_train, ys_train, lambda_1, lambda_2, lambda_3)
# Calculate error on test data set
y_predicted = safe_sparse_dot(X_test, coefs.T, dense_output=True)
error = np.sum(np.square(y_test-y_predicted))
sses.append(error)
mses[tuple(lambda_combns[i])] = statistics.mean(sses)
# Retrieve optimal lambda values from mses dictionary
lambda_1, lambda_2, lambda_3 = min(mses, key = mses.get)
return lambda_1, lambda_2, lambda_3, mses
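# Note on cost: the grid above holds len(lambda_1s) * len(lambda_2s) * len(lambda_3s)
# combinations, each refit once per CV fold; the value stored in `mses` is the mean
# over folds of the per-fold sum of squared test errors.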
class SatLassoCV(MultiOutputMixin, RegressorMixin, LinearModel):
"""Lasso linear model with iterative fitting along a regularization path.
See glossary entry for :term:`cross-validation estimator`.
The best model is selected by cross-validation.
The optimization objective for SatLasso is::
lambda_1 * ||y_u - X_uw||^2_2 + lambda_2 * ||w||_1 + lambda_3 * max(max(y_s-X_sw), 0)
Parameters
----------
n_lambdas : int, default=10
Number of lambda_1 values, lambda_2 values, lambda_3 values along the regularization path
lambda_1s : ndarray, default=None
List of lambda_1s where to compute the models.
If ``None`` lambda_1s are set automatically
lambda_2s : ndarray, default=None
List of lambda_2s where to compute the models.
If ``None`` lambda_2s are set automatically
lambda_3s : ndarray, default=None
List of lambda_3s where to compute the models.
If ``None`` lambda_3s are set automatically
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
saturation : float or string, {'mode', 'max'}, default='max'
If float, regressors X with label y equal to float used as saturated data.
If string in {'mode', 'max'}, use regressors X with label y equal to mode or maximum of labels y, respectively, as saturated data.
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, or iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- int, to specify the number of folds.
- An iterable yielding (train, test) splits as arrays of indices.
Attributes
----------
lambda_1_ : float
The amount of penalization on unsaturated data chosen by cross validation
lambda_2_ : float
The amount of l1-norm penalization chosen by cross validation
lambda_3_ : float
The amount of penalization on saturated data chosen by cross validation
mse_dict_ : python dict
keys : tuple of form (lambda_1, lambda_2, lambda_3)
        values : mean over the CV folds of the test-fold sum of squared errors
            obtained with that (lambda_1, lambda_2, lambda_3) combination
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float or ndarray of shape (n_targets,)
independent term in decision function.
    lambda_1s_ : ndarray of shape (n_lambdas,)
        The grid of lambda_1 values used for fitting
    lambda_2s_ : ndarray of shape (n_lambdas,)
        The grid of lambda_2 values used for fitting
    lambda_3s_ : ndarray of shape (n_lambdas,)
        The grid of lambda_3 values used for fitting
saturation_val_ : float
value used to determine saturation
"""
def __init__(self, n_lambdas = 10, lambda_1s = None, lambda_2s = None, lambda_3s = None, fit_intercept = True, saturation = 'max', normalize = False, copy_X = True, cv = 5):
self.n_lambdas=n_lambdas
self.lambda_1s = lambda_1s
self.lambda_2s = lambda_2s
self.lambda_3s = lambda_3s
self.fit_intercept = fit_intercept
self.saturation = saturation
self.normalize = normalize
self.copy_X = copy_X
self.cv = cv
cvmethod = staticmethod(satlassoCV_cvxopt)
method = staticmethod(satlasso_cvxopt)
def fit(self, X, y, check_input=True):
"""Fit model with convex optimization.
Parameters
----------
X : ndarray of (n_samples, n_features)
Data
y : ndarray of shape (n_samples,) or \
(n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
# Copy X if copy X is True
if self.copy_X:
X = X.copy()
# Convert to numpy arrays
X = np.asarray(X)
y = np.asarray(y)
# Check for correct input shape
if check_input:
X, y = check_X_y(X, y, accept_sparse=False)
# Check for lambda values and update if None
self.lambda_1s_ = self.lambda_1s
self.lambda_2s_ = self.lambda_2s
self.lambda_3s_ = self.lambda_3s
if self.lambda_1s_ is None:
            self.lambda_1s_ = np.linspace(start=1, stop=10, num=self.n_lambdas) # TO DO
        if self.lambda_2s_ is None:
            self.lambda_2s_ = np.linspace(start=1, stop=10, num=self.n_lambdas) # TO DO
        if self.lambda_3s_ is None:
            self.lambda_3s_ = np.linspace(start=1, stop=10, num=self.n_lambdas) # TO DO
# Normalize with mean centering and l2-norm if normalize
# set to true, and fit_intercept set to true
if self.normalize and self.fit_intercept:
# Already copied so do not need to copy again
X, y, X_offset, y_offset, X_scale = _preprocess_data(X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=False)
if isinstance(self.saturation, float):
self.saturation = self.saturation - y_offset
# Separate unsaturated data from saturated data
Xu, Xs, yu, ys, self.saturation_val_ = sat_separate_data(X, y, saturation = self.saturation)
# Use convex optimization to solve for minimized objective function
if self.fit_intercept and not self.normalize:
# Add a bias variable to each data point if fit intercept = True
Xu_with_bias = np.hstack((Xu, [[1] for i in range(0, len(Xu))]))
Xs_with_bias = np.hstack((Xs, [[1] for i in range(0, len(Xs))]))
# Use convex optimization to solve for coefficients
self.lambda_1_, self.lambda_2_, self.lambda_3_, self.mse_dict_ = self.cvmethod(Xu_with_bias, Xs_with_bias, yu, ys, self.lambda_1s_, self.lambda_2s_, self.lambda_3s_, cv = self.cv)
coefs = self.method(Xu_with_bias, Xs_with_bias, yu, ys, self.lambda_1_, self.lambda_2_, self.lambda_3_)
self.coef_ = coefs[:-1]
self.intercept_ = coefs[-1]
else:
# Use convex optimization to solve for coefficients
self.lambda_1_, self.lambda_2_, self.lambda_3_, self.mse_dict_ = self.cvmethod(Xu, Xs, yu, ys, self.lambda_1s_, self.lambda_2s_, self.lambda_3s_, cv = self.cv)
self.coef_ = self.method(Xu, Xs, yu, ys, self.lambda_1_, self.lambda_2_, self.lambda_3_)
self.intercept_ = 0.
# Set intercept and rescale coefficient if data was normalized
if self.normalize and self.fit_intercept:
self._set_intercept(X_offset, y_offset, X_scale)
self.saturation_val_ = self.saturation_val_ + y_offset
self.is_fitted_ = True
return self
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array of shape (n_samples, n_features)
Returns
-------
T : ndarray of shape (n_samples,)
The predicted decision function
"""
# Check if fit has been called
check_is_fitted(self, 'is_fitted_')
# Check input
X = check_array(X)
return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
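# Minimal usage sketch (hypothetical data; with the defaults the lambda grids are
# np.linspace(1, 10, n_lambdas)):
# reg = SatLassoCV(n_lambdas=5, saturation='max', cv=3)
# reg.fit(X_train, y_train)
# print(reg.lambda_1_, reg.lambda_2_, reg.lambda_3_)
# y_pred = reg.predict(X_test)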
``` |
{
"source": "JonsSpaghetti/okta-sdk-python",
"score": 2
} |
#### File: okta/models/application_credentials_signing.py
```python
from okta.okta_object import OktaObject
import okta.models.application_credentials_signing_use\
as application_credentials_signing_use
class ApplicationCredentialsSigning(
OktaObject
):
"""
A class for ApplicationCredentialsSigning objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.kid = config["kid"]\
if "kid" in config else None
self.last_rotated = config["lastRotated"]\
if "lastRotated" in config else None
self.next_rotation = config["nextRotation"]\
if "nextRotation" in config else None
self.rotation_mode = config["rotationMode"]\
if "rotationMode" in config else None
if "use" in config:
if isinstance(config["use"],
application_credentials_signing_use.ApplicationCredentialsSigningUse):
self.use = config["use"]
elif config["use"] is not None:
self.use = application_credentials_signing_use.ApplicationCredentialsSigningUse(
config["use"].upper()
)
else:
self.use = None
else:
self.use = None
else:
self.kid = None
self.last_rotated = None
self.next_rotation = None
self.rotation_mode = None
self.use = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"kid": self.kid,
"lastRotated": self.last_rotated,
"nextRotation": self.next_rotation,
"rotationMode": self.rotation_mode,
"use": self.use
}
parent_req_format.update(current_obj_format)
return parent_req_format
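# Illustrative round trip with hypothetical values: build the model from an API-style
# dict, then produce the request payload again.
# signing = ApplicationCredentialsSigning({"kid": "abc123", "rotationMode": "AUTO", "use": "sig"})
# payload = signing.request_format()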
```
#### File: okta/models/catalog_application.py
```python
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
import okta.models.catalog_application_status\
as catalog_application_status
class CatalogApplication(
OktaObject
):
"""
A class for CatalogApplication objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.links = config["links"]\
if "links" in config else None
self.category = config["category"]\
if "category" in config else None
self.description = config["description"]\
if "description" in config else None
self.display_name = config["displayName"]\
if "displayName" in config else None
self.features = OktaCollection.form_list(
config["features"] if "features"\
in config else [],
str
)
self.id = config["id"]\
if "id" in config else None
self.last_updated = config["lastUpdated"]\
if "lastUpdated" in config else None
self.name = config["name"]\
if "name" in config else None
self.sign_on_modes = OktaCollection.form_list(
config["signOnModes"] if "signOnModes"\
in config else [],
str
)
if "status" in config:
if isinstance(config["status"],
catalog_application_status.CatalogApplicationStatus):
self.status = config["status"]
elif config["status"] is not None:
self.status = catalog_application_status.CatalogApplicationStatus(
config["status"].upper()
)
else:
self.status = None
else:
self.status = None
self.verification_status = config["verificationStatus"]\
if "verificationStatus" in config else None
self.website = config["website"]\
if "website" in config else None
else:
self.links = None
self.category = None
self.description = None
self.display_name = None
self.features = []
self.id = None
self.last_updated = None
self.name = None
self.sign_on_modes = []
self.status = None
self.verification_status = None
self.website = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"_links": self.links,
"category": self.category,
"description": self.description,
"displayName": self.display_name,
"features": self.features,
"id": self.id,
"lastUpdated": self.last_updated,
"name": self.name,
"signOnModes": self.sign_on_modes,
"status": self.status,
"verificationStatus": self.verification_status,
"website": self.website
}
parent_req_format.update(current_obj_format)
return parent_req_format
```
#### File: okta/models/group_rule.py
```python
from okta.okta_object import OktaObject
import okta.models.group_rule_action\
as group_rule_action
import okta.models.group_rule_conditions\
as group_rule_conditions
import okta.models.group_rule_status\
as group_rule_status
class GroupRule(
OktaObject
):
"""
A class for GroupRule objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
if "actions" in config:
if isinstance(config["actions"],
group_rule_action.GroupRuleAction):
self.actions = config["actions"]
elif config["actions"] is not None:
self.actions = group_rule_action.GroupRuleAction(
config["actions"]
)
else:
self.actions = None
else:
self.actions = None
if "conditions" in config:
if isinstance(config["conditions"],
group_rule_conditions.GroupRuleConditions):
self.conditions = config["conditions"]
elif config["conditions"] is not None:
self.conditions = group_rule_conditions.GroupRuleConditions(
config["conditions"]
)
else:
self.conditions = None
else:
self.conditions = None
self.created = config["created"]\
if "created" in config else None
self.id = config["id"]\
if "id" in config else None
self.last_updated = config["lastUpdated"]\
if "lastUpdated" in config else None
self.name = config["name"]\
if "name" in config else None
if "status" in config:
if isinstance(config["status"],
group_rule_status.GroupRuleStatus):
self.status = config["status"]
elif config["status"] is not None:
self.status = group_rule_status.GroupRuleStatus(
config["status"].upper()
)
else:
self.status = None
else:
self.status = None
self.type = config["type"]\
if "type" in config else None
else:
self.actions = None
self.conditions = None
self.created = None
self.id = None
self.last_updated = None
self.name = None
self.status = None
self.type = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"actions": self.actions,
"conditions": self.conditions,
"created": self.created,
"id": self.id,
"lastUpdated": self.last_updated,
"name": self.name,
"status": self.status,
"type": self.type
}
parent_req_format.update(current_obj_format)
return parent_req_format
```
#### File: okta/models/ion_form.py
```python
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
import okta.models.ion_field\
as ion_field
class IonForm(
OktaObject
):
"""
A class for IonForm objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.accepts = config["accepts"]\
if "accepts" in config else None
self.href = config["href"]\
if "href" in config else None
self.method = config["method"]\
if "method" in config else None
self.name = config["name"]\
if "name" in config else None
self.produces = config["produces"]\
if "produces" in config else None
self.refresh = config["refresh"]\
if "refresh" in config else None
self.rel = OktaCollection.form_list(
config["rel"] if "rel"\
in config else [],
str
)
self.relates_to = OktaCollection.form_list(
config["relatesTo"] if "relatesTo"\
in config else [],
str
)
self.value = OktaCollection.form_list(
config["value"] if "value"\
in config else [],
ion_field.IonField
)
else:
self.accepts = None
self.href = None
self.method = None
self.name = None
self.produces = None
self.refresh = None
self.rel = []
self.relates_to = []
self.value = []
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"accepts": self.accepts,
"href": self.href,
"method": self.method,
"name": self.name,
"produces": self.produces,
"refresh": self.refresh,
"rel": self.rel,
"relatesTo": self.relates_to,
"value": self.value
}
parent_req_format.update(current_obj_format)
return parent_req_format
```
#### File: okta/models/password_policy_recovery_factors.py
```python
from okta.okta_object import OktaObject
import okta.models.password_policy_recovery_factor_settings\
as password_policy_recovery_factor_settings
import okta.models.password_policy_recovery_email\
as password_policy_recovery_email
import okta.models.password_policy_recovery_question\
as password_policy_recovery_question
class PasswordPolicyRecoveryFactors(
OktaObject
):
"""
A class for PasswordPolicyRecoveryFactors objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
if "oktaCall" in config:
if isinstance(config["oktaCall"],
password_policy_recovery_factor_settings.PasswordPolicyRecoveryFactorSettings):
self.okta_call = config["oktaCall"]
elif config["oktaCall"] is not None:
self.okta_call = password_policy_recovery_factor_settings.PasswordPolicyRecoveryFactorSettings(
config["oktaCall"]
)
else:
self.okta_call = None
else:
self.okta_call = None
if "oktaEmail" in config:
if isinstance(config["oktaEmail"],
password_policy_recovery_email.PasswordPolicyRecoveryEmail):
self.okta_email = config["oktaEmail"]
elif config["oktaEmail"] is not None:
self.okta_email = password_policy_recovery_email.PasswordPolicyRecoveryEmail(
config["oktaEmail"]
)
else:
self.okta_email = None
else:
self.okta_email = None
if "oktaSms" in config:
if isinstance(config["oktaSms"],
password_policy_recovery_factor_settings.PasswordPolicyRecoveryFactorSettings):
self.okta_sms = config["oktaSms"]
elif config["oktaSms"] is not None:
self.okta_sms = password_policy_recovery_factor_settings.PasswordPolicyRecoveryFactorSettings(
config["oktaSms"]
)
else:
self.okta_sms = None
else:
self.okta_sms = None
if "recoveryQuestion" in config:
if isinstance(config["recoveryQuestion"],
password_policy_recovery_question.PasswordPolicyRecoveryQuestion):
self.recovery_question = config["recoveryQuestion"]
elif config["recoveryQuestion"] is not None:
self.recovery_question = password_policy_recovery_question.PasswordPolicyRecoveryQuestion(
config["recoveryQuestion"]
)
else:
self.recovery_question = None
else:
self.recovery_question = None
else:
self.okta_call = None
self.okta_email = None
self.okta_sms = None
self.recovery_question = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"okta_call": self.okta_call,
"okta_email": self.okta_email,
"okta_sms": self.okta_sms,
"recovery_question": self.recovery_question
}
parent_req_format.update(current_obj_format)
return parent_req_format
```
#### File: okta/models/session.py
```python
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
import okta.models.session_authentication_method\
as session_authentication_method
import okta.models.session_identity_provider\
as session_identity_provider
import okta.models.session_status\
as session_status
class Session(
OktaObject
):
"""
A class for Session objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.links = config["links"]\
if "links" in config else None
self.amr = OktaCollection.form_list(
config["amr"] if "amr"\
in config else [],
session_authentication_method.SessionAuthenticationMethod
)
self.created_at = config["createdAt"]\
if "createdAt" in config else None
self.expires_at = config["expiresAt"]\
if "expiresAt" in config else None
self.id = config["id"]\
if "id" in config else None
if "idp" in config:
if isinstance(config["idp"],
session_identity_provider.SessionIdentityProvider):
self.idp = config["idp"]
elif config["idp"] is not None:
self.idp = session_identity_provider.SessionIdentityProvider(
config["idp"]
)
else:
self.idp = None
else:
self.idp = None
self.last_factor_verification = config["lastFactorVerification"]\
if "lastFactorVerification" in config else None
self.last_password_verification = config["lastPasswordVerification"]\
if "lastPasswordVerification" in config else None
self.login = config["login"]\
if "login" in config else None
if "status" in config:
if isinstance(config["status"],
session_status.SessionStatus):
self.status = config["status"]
elif config["status"] is not None:
self.status = session_status.SessionStatus(
config["status"].upper()
)
else:
self.status = None
else:
self.status = None
self.user_id = config["userId"]\
if "userId" in config else None
else:
self.links = None
self.amr = []
self.created_at = None
self.expires_at = None
self.id = None
self.idp = None
self.last_factor_verification = None
self.last_password_verification = None
self.login = None
self.status = None
self.user_id = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"_links": self.links,
"amr": self.amr,
"createdAt": self.created_at,
"expiresAt": self.expires_at,
"id": self.id,
"idp": self.idp,
"lastFactorVerification": self.last_factor_verification,
"lastPasswordVerification": self.last_password_verification,
"login": self.login,
"status": self.status,
"userId": self.user_id
}
parent_req_format.update(current_obj_format)
return parent_req_format
``` |
{
"source": "jonstacks/ilo-utils",
"score": 3
} |
#### File: ilo-utils/ilo_utils/utils.py
```python
import socket
import threading
from xml.etree import ElementTree
import requests
class PortScan(threading.Thread):
def __init__(self, ip, port, timeout=2):
threading.Thread.__init__(self)
self.ip = ip
self.port = port
self.timeout = timeout
self.open = False
def run(self):
sock = socket.socket()
sock.settimeout(self.timeout)
try:
sock.connect((self.ip, self.port))
except socket.error:
self.open = False
else:
sock.close()
self.open = True
class ILOInfo(threading.Thread):
def __init__(self, host, timeout=2):
threading.Thread.__init__(self)
self.host = host
self.timeout = timeout
self.resp = None
self.serial = None
self.model = None
self.ilo_version = None
self.firmware = None
self.success = False
def run(self):
url = 'http://{}/xmldata?item=all'.format(self.host)
try:
self.resp = requests.get(url, timeout=self.timeout)
except requests.exceptions.Timeout:
pass
except requests.exceptions.ConnectionError:
pass
else:
if self.resp.status_code == requests.codes.ok:
self.success = True
tree = ElementTree.fromstring(self.resp.content)
hsi = tree.find('HSI')
if hsi is not None:
self.serial = hsi.find('SBSN').text.strip()
self.model = hsi.find('SPN').text.strip()
mp = tree.find('MP')
if mp is not None:
self.ilo_version = mp.find('PN').text.strip()
self.firmware = mp.find('FWRI').text.strip()
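# Example usage sketch (hypothetical address; both helpers are threads, so start() then join()):
# scan = PortScan("192.168.1.50", 443)
# scan.start(); scan.join()
# if scan.open:
#     info = ILOInfo("192.168.1.50")
#     info.start(); info.join()
#     if info.success:
#         print(info.model, info.serial, info.firmware)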
```
#### File: ilo-utils/tests/constants.py
```python
import unittest
from ilo_utils.constants import ILO_PORT
class ConstantsTest(unittest.TestCase):
def setUp(self):
pass
def test_ilo_port_is_a_number(self):
self.assertIsInstance(ILO_PORT, int)
def tearDown(self):
pass
``` |
{
"source": "jonsteingass/OpenUxAS",
"score": 2
} |
#### File: docker/ContainerScriptsAndFiles/buildUxAS.py
```python
import time
import sys
import os
import subprocess
from subprocess import call
def callWithShell(cmd):
process = subprocess.Popen(cmd,shell=True)
process.wait()
startAllTime = time.time()
print("\n*** Sync Files ### \n")
sys.stdout.flush()
callWithShell("python /UxASDev/OpenUxAS/docker/ContainerScriptsAndFiles/syncUxASFiles.py")
print("\n#### RUNNING MESON ####\n")
sys.stdout.flush()
mesonStartTime = time.time()
# 1 - change to the directory: OpenUxAS
os.chdir("/tmp_build")
# 2
# if "build" exists the just run Ninja
if(not os.path.isdir("build")):
print("##### NEW MESON BUILD DIRECTORY #####")
callWithShell("meson build --buildtype=release")
print("\n#### FINISHED RUNNING MESON [{}] ####\n".format(time.time() - mesonStartTime))
sys.stdout.flush()
print("\n#### RUNNING NINJA ####\n")
sys.stdout.flush()
ninjaStartTime = time.time()
# 3 - compile the code
callWithShell("ninja -C build all")
# report the elapsed Ninja build time
print("\n#### FINISHED-RUNNING NINJA [{}] ####\n".format(time.time() - ninjaStartTime))
sys.stdout.flush()
callWithShell("mkdir -p /UxASDev/OpenUxAS/docker/tmp")
callWithShell("cp /tmp_build/build/uxas /UxASDev/OpenUxAS/docker/tmp/uxas")
print("\n#### FINISHED! Total Time [{}] ####\n".format(time.time() - startAllTime))
sys.stdout.flush()
``` |
{
"source": "JonStewart51/Prediction-Models",
"score": 3
} |
#### File: JonStewart51/Prediction-Models/gated predictive wavenet.py
```python
# Assumed imports so this snippet is self-contained (Keras 2-style functional API).
# Note: the Conv1D calls below still use the legacy `atrous_rate` keyword for dilated
# convolutions; current Keras expects an explicit kernel_size plus `dilation_rate`.
from keras.layers import Input, Conv1D, Dense, Flatten, Activation, Multiply, Add, BatchNormalization, GaussianDropout
from keras.models import Model
from keras.optimizers import Adam
from keras import backend as K
def RMSLE(y, pred):
    # Despite the name this is plain RMSE, written with backend ops so it can be used as a Keras loss
    return K.sqrt(K.mean(K.square(pred - y)))
def wavenet1(inputshape):
input = Input(shape = inputshape, name = 'input')
input1 = GaussianDropout(.03)(input)
#res block
tanh_out = Conv1D(3, atrous_rate=2, padding = 'causal', name = 'a1')(input1)#use gated convolution here
tanh_out1 = BatchNormalization()(tanh_out)
tanh_out2 = Activation('tanh')(tanh_out) #apply gated convolution
tanh_out3 = Multiply()([tanh_out1, tanh_out2])
sig_out = Conv1D(3, atrous_rate=2, padding = 'causal', name = 'a2')(input1)
sig_out1 = BatchNormalization()(sig_out)
sig_out2 = Activation('sigmoid')(sig_out)
sig_out3 = Multiply()([sig_out1, sig_out2])
    Merge1 = Multiply()([sig_out3, tanh_out3])
Res1 = Conv1D(3, atrous_rate=2, padding = 'causal', name = 'a3')(Merge1)
Res1 = BatchNormalization()(Res1)
Merge2 = Activation('tanh')(Res1)
Merge2 = Multiply()([Res1, Merge2])
Merge2a = Add()([Merge2, input1])
tanh_out = Conv1D(3, atrous_rate=4, padding = 'causal', name = 'b1')(Merge2a)
tanh_out1 = BatchNormalization()(tanh_out)
tanh_out2 = Activation('tanh')(tanh_out)
tanh_out3 = Multiply()([tanh_out1, tanh_out2])
sig_out = Conv1D(3, atrous_rate=4, padding = 'causal', name = 'b2')(Merge2a)
sig_out1 = BatchNormalization()(sig_out)
sig_out2 = Activation('sigmoid')(sig_out)
sig_out3 = Multiply()([sig_out1, sig_out2])
    Merge1 = Multiply()([sig_out3, tanh_out3])
Res1 = Conv1D(3, atrous_rate=4, padding = 'causal', name = 'b3')(Merge1)
Res1 = BatchNormalization()(Res1)
Merge2 = Activation('tanh')(Res1)
Merge2 = Multiply()([Res1, Merge2])
Merge2b = Add()([Merge2a, Merge2])
tanh_out = Conv1D(3, atrous_rate=8, padding = 'causal', name = 'c1')(Merge2b)
tanh_out1 = BatchNormalization()(tanh_out)
tanh_out2 = Activation('tanh')(tanh_out)
tanh_out3 = Multiply()([tanh_out1, tanh_out2])
sig_out = Conv1D(3, atrous_rate=8, padding = 'causal', name = 'c2')(Merge2b)
sig_out1 = BatchNormalization()(sig_out)
sig_out2 = Activation('sigmoid')(sig_out)
sig_out3 = Multiply()([sig_out1, sig_out2])
    Merge1 = Multiply()([sig_out3, tanh_out3])
Res1 = Conv1D(3, atrous_rate=8, padding = 'causal', name = 'c3')(Merge1)
Res1 = BatchNormalization()(Res1)
Merge2 = Activation('tanh')(Res1)
Merge2 = Multiply()([Res1, Merge2])
Merge2c = Add()([Merge2b, Merge2])
tanh_out = Conv1D(3, atrous_rate=16, padding = 'causal', name = 'd1')(Merge2c)
tanh_out1 = BatchNormalization()(tanh_out)
tanh_out2 = Activation('tanh')(tanh_out) #apply gated convolution
tanh_out3 = Multiply()([tanh_out1, tanh_out2])
sig_out = Conv1D(3, atrous_rate=16, padding = 'causal', name = 'd2')(Merge2c)
sig_out1 = BatchNormalization()(sig_out)
sig_out2 = Activation('sigmoid')(sig_out) #apply gated convolution
sig_out3 = Multiply()([sig_out1, sig_out2])
    Merge1 = Multiply()([sig_out3, tanh_out3])
Res1 = Conv1D(3, atrous_rate=16, padding = 'causal', name = 'd3')(Merge1)
Res1 = BatchNormalization()(Res1)
Merge2 = Activation('tanh')(Res1)
Merge2 = Multiply()([Res1, Merge2])
Merge2d = Add()([Merge2c, Merge2])
tanh_out = Conv1D(3, atrous_rate=32, padding = 'causal', name = 'e1')(Merge2d)
tanh_out1 = BatchNormalization()(tanh_out)
tanh_out2 = Activation('tanh')(tanh_out) #apply gated convolution
tanh_out3 = Multiply()([tanh_out1, tanh_out2])
sig_out = Conv1D(3, atrous_rate=32, padding = 'causal', name = 'e2')(Merge2d)
sig_out1 = BatchNormalization()(sig_out)
sig_out2 = Activation('sigmoid')(sig_out)
sig_out3 = Multiply()([sig_out1, sig_out2])
    Merge1 = Multiply()([sig_out3, tanh_out3])
Res1 = Conv1D(3, atrous_rate=32, padding = 'causal', name = 'e3')(Merge1)
Res1 = BatchNormalization()(Res1)
Merge2 = Activation('tanh')(Res1)
Merge2 = Multiply()([Res1, Merge2])
    Merge2e = Add()([Merge2d, Merge2])
    xout = Flatten()(Merge2e)
    xout = Dense(1, activation = 'linear', name = 'out1')(xout)
model = Model(input, xout)
    ADM = Adam(lr=0.0015, beta_1=0.9, beta_2=0.999, decay=0.00001, clipnorm=1.)
model.compile(loss=RMSLE, optimizer=ADM) #custom loss function above
return model
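# Usage sketch (hypothetical shapes): the input is one (timesteps, features) window of
# the series and the network regresses a single value with the custom loss above.
# model = wavenet1((64, 1))
# model.fit(X_train, y_train, epochs=10, batch_size=32)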
``` |
{
"source": "jonsth131/aoc",
"score": 4
} |
#### File: aoc/aoc2021/day1.py
```python
import fileutils
def part1(lst):
return measure(window(lst, 1))
def part2(lst):
return measure(window(lst, 3))
def measure(windows):
increases = 0
prev = None
for value in [sum(x) for x in windows]:
if prev is not None and value > prev:
increases += 1
prev = value
return increases
def window(lst, size):
for i in range(len(lst) - size + 1):
yield lst[i:i + size]
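# Example: list(window([1, 2, 3, 4], 3)) -> [[1, 2, 3], [2, 3, 4]]; part2 sums each
# 3-wide window and counts how often that sum increases.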
if __name__ == "__main__":
data = fileutils.read_as_ints("inputs/day1.txt")
print("=== Day 1 ===")
print("Part 1:", part1(data))
print("Part 2:", part2(data))
```
#### File: aoc/aoc2021/day3.py
```python
import fileutils
def part1(lst):
(bit_length, data) = gen_data(lst)
gamma_rate = get_max(bit_length, data)
epsilon_rate = ~gamma_rate & (pow(2, bit_length) - 1)
return gamma_rate * epsilon_rate
def part2(lst):
(bit_length, data) = gen_data(lst)
(oxygen, co2) = get_life_support_rating(bit_length, data)
return oxygen * co2
def get_life_support_rating(bit_length, lst):
oxygen = lst
co2 = lst
for pos in range(bit_length - 1, -1, -1):
mask = pow(2, pos)
if len(oxygen) != 1:
oxygen_check_val = get_max(bit_length, oxygen) & mask
oxygen = [i for i in oxygen if i & mask == oxygen_check_val]
if len(co2) != 1:
co2_check_val = get_max(bit_length, co2) & mask
co2 = [i for i in co2 if i & mask != co2_check_val]
return oxygen[0], co2[0]
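# Bit-filtering example: with the 5-bit test input the first pass uses mask 0b10000;
# get_max() gives the most common value of that bit, oxygen keeps the numbers matching
# it, CO2 keeps the complement, and the loop repeats with smaller masks until a single
# candidate remains in each list.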
def get_max(bit_length, data):
max_value = 0
for i in range(bit_length - 1, -1, -1):
mask = pow(2, i)
ones = len([i for i in data if i & mask == mask])
zeros = len(data) - ones
        if ones >= zeros:
            max_value += mask
return max_value
def gen_data(data):
return len(data[0]), [int(i, 2) for i in data]
if __name__ == "__main__":
challenge_input = fileutils.read_lines("inputs/day3.txt")
print("=== Day 3 ===")
print("Part 1:", part1(challenge_input))
print("Part 2:", part2(challenge_input))
```
#### File: aoc/aoc2021/day7.py
```python
import fileutils
def part1(lst):
positions = parse_positions(lst)
def calc(x, i):
return abs(x - i)
return get_min_fuel(positions, calc)
def part2(lst):
positions = parse_positions(lst)
def calc(x, i):
n = abs(x - i)
return n * (n + 1) / 2
return get_min_fuel(positions, calc)
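# Part 2 cost is the triangular number n * (n + 1) / 2: e.g. a crab moving 4 steps
# burns 1 + 2 + 3 + 4 = 10 fuel.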
def get_min_fuel(positions, calculation):
fuel = [None] * max(positions.keys())
for i in range(len(fuel)):
value = 0
for x in positions.keys():
value += calculation(x, i) * positions.get(x)
fuel[i] = int(value)
return min(fuel)
def parse_positions(data):
positions = {}
for value in [int(x) for x in data.split(',')]:
existing = positions.get(value)
if existing is None:
positions.update({value: 1})
else:
positions.update({value: existing + 1})
return positions
if __name__ == "__main__":
challenge_input = fileutils.read_lines("inputs/day7.txt")[0]
print("=== Day 7 ===")
print("Part 1:", part1(challenge_input))
print("Part 2:", part2(challenge_input))
```
#### File: aoc/aoc2021/test_day10.py
```python
from day10 import part1, part2
test_input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]""".splitlines()
def test_part1():
assert part1(test_input) == 26397
def test_part2():
assert part2(test_input) == 288957
```
#### File: aoc/aoc2021/test_day1.py
```python
from day1 import part1, part2
test_input = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
def test_part1():
assert part1(test_input) == 7
def test_part2():
assert part2(test_input) == 5
```
#### File: aoc/aoc2021/test_day3.py
```python
from day3 import part1, part2
test_input = ["00100", "11110", "10110", "10111", "10101", "01111", "00111", "11100", "10000", "11001", "00010",
"01010"]
def test_part1():
assert part1(test_input) == 198
def test_part2():
assert part2(test_input) == 230
```
#### File: aoc/aoc2021/test_day9.py
```python
from day9 import part1, part2
test_input = """2199943210
3987894921
9856789892
8767896789
9899965678""".splitlines()
def test_part1():
assert part1(test_input) == 15
def test_part2():
assert part2(test_input) == 1134
``` |
{
"source": "jonstites/blackjack",
"score": 3
} |
#### File: blackjack/tests/test_blackjack.py
```python
import pytest
import blackjack.core
class TestCard:
    def test_suit_hearts(self):
        card = blackjack.core.Card(1, 1)
        assert card.get_suit() == "hearts"
    def test_suit_diamonds(self):
        card = blackjack.core.Card(1, 2)
        assert card.get_suit() == "diamonds"
    def test_suit_fail(self):
        with pytest.raises(ValueError):
            blackjack.core.Card(1, 5)
    def test_rank_ace(self):
        card = blackjack.core.Card(1, 1)
        assert card.get_rank() == "ace"
    def test_rank_king(self):
        card = blackjack.core.Card(13, 1)
        assert card.get_rank() == "king"
    def test_rank_fail(self):
        with pytest.raises(ValueError):
            blackjack.core.Card(14, 1)
    def test_print(self):
        card = blackjack.core.Card(13, 1)
        assert str(card) == "K" + u"\u2665"
class TestDeck:
    def test_create_deck(self):
        deck = blackjack.core.Deck()
        assert len(deck.cards) == 52
    def test_first_card(self):
        deck = blackjack.core.Deck(shuffled=False)
        assert deck.cards[0] == blackjack.core.Card(1, 1)
    def test_last_card(self):
        deck = blackjack.core.Deck(shuffled=False)
        assert deck.cards[-1] == blackjack.core.Card(13, 4)
    def test_shuffle(self):
        deck = blackjack.core.Deck(shuffled=False)
        deck2 = blackjack.core.Deck()
        assert deck != deck2
    def test_print_deck(self):
        deck = blackjack.core.Deck()
        for card in deck.cards:
            str(card)
    def test_pop_card(self):
        deck = blackjack.core.Deck(shuffled=False)
        card = deck.get_top_card()
        assert card == blackjack.core.Card(13, 4)
        assert len(deck.cards) == 51
class TestHand:
    def test_create_hand(self):
        hand = blackjack.core.Hand()
        assert len(hand.cards) == 0
    def test_add_card(self):
        card = blackjack.core.Card(1, 1)
        hand = blackjack.core.Hand()
        hand.add_card(card)
        assert len(hand.cards) == 1
    def test_score_card(self):
        card = blackjack.core.Card(5, 1)
        hand = blackjack.core.Hand()
        hand.add_card(card)
        assert hand.score_cards() == 5
    def test_score_cards(self):
        card1 = blackjack.core.Card(5, 1)
        card2 = blackjack.core.Card(6, 1)
        hand = blackjack.core.Hand()
        hand.add_cards([card1, card2])
        assert hand.score_cards() == 11
    def test_score_aces_low(self):
        card1 = blackjack.core.Card(9, 1)
        card2 = blackjack.core.Card(11, 1)
        card3 = blackjack.core.Card(1, 1)
        hand = blackjack.core.Hand()
        hand.add_cards([card1, card2, card3])
        assert hand.score_cards() == 20
    def test_score_aces_high(self):
        card1 = blackjack.core.Card(9, 1)
        card2 = blackjack.core.Card(1, 1)
        card3 = blackjack.core.Card(1, 1)
        hand = blackjack.core.Hand()
        hand.add_cards([card1, card2, card3])
        assert hand.score_cards() == 21
    def test_print_hand(self):
        card1 = blackjack.core.Card(9, 1)
        card2 = blackjack.core.Card(1, 2)
        card3 = blackjack.core.Card(3, 3)
        card4 = blackjack.core.Card(13, 4)
        hand = blackjack.core.Hand()
        hand.add_cards([card1, card2, card3, card4])
        correct_str = " ".join([
            "9" + u"\u2665",
            "A" + u"\u2666",
            "3" + u"\u2660",
            "K" + u"\u2663"
        ])
        assert str(hand) == correct_str
``` |
{
"source": "jonstites/comment_classifier",
"score": 3
} |
#### File: comment_classifier/bin/commenter.py
```python
import argh
import bz2
from comment_generator.core import MarkovModel, CommentReader
import numpy
def run(comment_file, test_file, subreddit_counts, ngram_size):
model = MarkovModel(ngram_size, subreddit_counts, max_size=2**30)
num = 0
with bz2.BZ2File(comment_file, "rb") as input_handle:
with bz2.BZ2File(test_file, "rb") as test_handle:
while input_handle.peek() and test_handle.peek():
model.train(input_handle, subreddit_counts, num_batches=3, batch_size=50000)
f1 = model.f1_macro(test_handle, subreddit_counts, num_batches=1, batch_size=10000)
print("{} f1: {}".format(num, f1), flush=True)
num += 1
def get_num_features(subreddits, max_size=2**30):
num_subreddits = len(subreddits.keys())
return int(max_size / num_subreddits)
def get_subreddit_counts(subreddit_file, top):
subreddits = {}
with open(subreddit_file) as handle:
for line_num, line in enumerate(handle):
subreddit = line.split()[0]
count = int(line.split()[1])
if line_num >= top:
break
subreddits[subreddit] = count
return subreddits
def main(comment_file, test_file, subreddit_file, top=100, ngram_size=3):
subreddit_counts = get_subreddit_counts(subreddit_file, top)
run(comment_file, test_file, subreddit_counts, ngram_size)
if __name__ == "__main__":
argh.dispatch_command(main)
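# Example invocation sketch (hypothetical file names; argh maps main()'s parameters to
# positional arguments and --flags):
# python commenter.py comments.bz2 test.bz2 subreddit_counts.txt --top 50 --ngram-size 3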
``` |
{
"source": "Jontahan/kvad",
"score": 2
} |
#### File: Jontahan/kvad/food_exp_ql.py
```python
import numpy as np
from gw_collect import Gridworld
import pygame as pg
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from vicero.algorithms.qlearning import Qlearning
#env = Gridworld(width=6, height=6, cell_size=32, agent_pos=(0, 3), food_pos=[(0, 0), (3, 3), (4, 5), (2, 0)])
env = Gridworld(width=4, height=4, cell_size=32, agent_pos=(0, 0), food_pos=[(0, 3), (3, 3)])
pg.init()
screen = pg.display.set_mode((env.cell_size * env.width, env.cell_size * env.height))
env.screen = screen
clock = pg.time.Clock()
def plot(history):
plt.figure(2)
plt.clf()
durations_t = torch.FloatTensor(history)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy(), c='lightgray', linewidth=1)
his = 50
if len(durations_t) >= his:
means = durations_t.unfold(0, his, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(his - 1), means))
plt.plot(means.numpy(), c='green')
plt.pause(0.001)
class DenseNet(nn.Module):
def __init__(self, input_size, layer_sizes):
super(DenseNet, self).__init__()
self.layers = [nn.Linear(input_size, layer_sizes[0])]
for i in range(1, len(layer_sizes)):
self.layers.append(nn.ReLU())
self.layers.append(nn.Linear(layer_sizes[i - 1], layer_sizes[i]))
self.nn = nn.Sequential(*self.layers)
def forward(self, x):
x = torch.flatten(x)
return self.nn.forward(x)
gamma = .95
alpha = .002
all_mean_diffs = []
all_states = env.get_all_states()
ql_a = Qlearning(env, n_states=len(all_states), n_actions=env.action_space.n, plotter=plot, epsilon=1.0, epsilon_decay=lambda e, i: e * .998)
ql_b = Qlearning(env, n_states=len(all_states), n_actions=env.action_space.n, plotter=plot, epsilon=1.0, epsilon_decay=lambda e, i: e * .998)
for ne in range(0, 10):
np.random.seed(10)
num_episodes = 100 #* ne
convergence_durations = []
ql_agents = []
for i in range(2):
#print('Simulation {}/{}'.format(i, 50))
ql = Qlearning(env, n_states=len(all_states), n_actions=env.action_space.n, plotter=plot, epsilon=1.0, epsilon_decay=lambda e, i: e * .998)
#dqn = DQN(env, qnet=net, plotter=plot, render=True, memory_length=2000, gamma=gamma, alpha=alpha, epsilon_start=0.3, caching_interval=3000)
for e in range(num_episodes):
ql.train(1)
#dqn.train_episode(e, num_episodes, 16, plot=True, verbose=False)
#if e > 50 and np.std(ql.history[-50:]) < 1:
# print('Early stop after {} iterations'.format(e))
# convergence_durations.append(e)
# break
ql_agents.append(ql)
env_diffs = []
total_visits = []
for i in range(len(ql_agents[0].Q)):
total_visits.append(ql_agents[0].state_visits[i] + ql_agents[1].state_visits[i])
normalized_visits = total_visits / np.linalg.norm(total_visits)
#normalized_visits = nn.Softmax(dim=-1)(torch.tensor(total_visits))
#for i in range(len(normalized_visits)):
# print('{} -> {}'.format(total_visits[i], normalized_visits[i]))
for i in range(len(ql_agents[0].Q)):
for a in range(env.action_space.n):
env_diffs.append(normalized_visits[i] * abs(ql_agents[0].Q[i][a] - ql_agents[1].Q[i][a]))
#env_diffs.append(normalized_visits[i] * abs(ql_agents[0].Q[i] - ql_agents[1].Q[i]))
print('mean difference: {}'.format(np.mean(env_diffs)))
all_mean_diffs.append(np.mean(env_diffs))
plt.close()
plt.plot(all_mean_diffs)
plt.show()
#print('mean duration: {}'.format(np.mean(convergence_durations)))
# std < 1 for this config
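# The mean-difference metric above compares the Q-tables of two independently trained
# agents, weighting each state-action disagreement by how often that state was visited,
# giving a rough measure of how consistently this environment configuration is learned.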
``` |
{
"source": "Jontahan/ris",
"score": 2
} |
#### File: Jontahan/ris/riskrig.py
```python
import pygame as pg
from ris import Ris
import numpy as np
#cell_size = 24
class Riskrig:
def __init__(self, cell_size, width, height):
self.ris_a = Ris(cell_size, height=height, width=width)#, piece_set='koktris')
self.ris_b = Ris(cell_size, height=height, width=width)#, piece_set='koktris')
self.board = self.ris_a.board
self.action_space = self.ris_a.action_space
self.screen = None
self.cell_size = cell_size
def step(self, action):
state_a, reward_a, done_a, info_a = self.ris_a.step(action)
state_b, reward_b, done_b, info_b = self.ris_b.step(self.ris_b.action_space.sample())
self.ris_b.incoming_garbage += info_a['lines_cleared']
self.ris_a.incoming_garbage += info_b['lines_cleared']
multi_state_a = np.array([np.vstack((state_a[0][:3], state_b[0]))])
multi_state_b = np.array([np.vstack((state_b[0][:3], state_a[0]))])
return multi_state_a, reward_a, done_a or done_b, {}
def reset(self):
state_a = self.ris_a.reset()
state_b = self.ris_b.reset()
multi_state_a = np.array([np.vstack((state_a[0][:3], state_b[0]))])
multi_state_b = np.array([np.vstack((state_b[0][:3], state_a[0]))])
return multi_state_a
def render(self):
cell_size = self.cell_size
self.screen.fill((0,0,0))
screen_a = pg.Surface((cell_size * len(self.ris_a.board[0]), cell_size * len(self.ris_a.board)))
self.ris_a.draw(screen_a)
self.screen.blit(screen_a, (0, 0))
screen_b = pg.Surface((cell_size * len(self.ris_b.board[0]), cell_size * len(self.ris_b.board)))
self.ris_b.draw(screen_b)
self.screen.blit(screen_b, (cell_size * (1 + len(self.ris_b.board[0])), 0))
for i in range(self.ris_a.incoming_garbage):
cell = pg.Rect(cell_size * len(self.ris_a.board[0]), cell_size * (len(self.ris_a.board) - 1 - i), cell_size, cell_size)
pg.draw.rect(self.screen, (100, 0, 0), cell)
pg.draw.rect(self.screen, (90, 0, 0), cell, 1)
for i in range(self.ris_b.incoming_garbage):
cell = pg.Rect(cell_size + 2 * cell_size * len(self.ris_a.board[0]), cell_size * (len(self.ris_a.board) - 1 - i), cell_size, cell_size)
pg.draw.rect(self.screen, (100, 0, 0), cell)
pg.draw.rect(self.screen, (90, 0, 0), cell, 1)
pg.display.flip()
if __name__ == "__main__":
cell_size = 24
rk = Riskrig(cell_size, 7, 14)
env = rk.ris_a
screen = pg.display.set_mode((2 * cell_size + 2 * cell_size * (len(env.board[0])), cell_size * len(env.board)))
rk.screen = screen
clock = pg.time.Clock()
while True:
_,_,done,_ = rk.step(rk.ris_a.action_space.sample())
if done: rk.reset()
rk.render()
pg.display.flip()
#clock.tick(30)
```
#### File: Jontahan/ris/ris.py
```python
import numpy as np
import pygame as pg
import gym
from gym import spaces
class Ris(gym.Env):
NOP, LEFT, RIGHT, ROT = range(4)
def __init__(self, scale, width=8, height=16, piece_set='lettris'):
self.action_space = spaces.Discrete(4)
board = np.zeros((height, width))
self.width = width
self.height = height
self.time = 0
self.cutoff = 4000
self.board = np.array(board, dtype=int)
self.size = len(board)
self.cell_size = scale
self.piece_types = Ris.piece_sets[piece_set]
self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
self.falling_piece_shape = self.piece_types[np.random.randint(0, len(self.piece_types))]
self.subframe = 0
self.subframes = 5
self.screen = None
self.incoming_garbage = 0
def reset(self):
board = np.zeros((self.height, self.width))
self.board = np.array(board, dtype=int)
self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
self.falling_piece_shape = self.piece_types[np.random.randint(0, len(self.piece_types))]
self.subframe = 0
piece = np.array(np.zeros((self.height, self.width)))
for i in range(4):
for j in range(4):
if self.falling_piece_shape[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
piece[pos[1]][pos[0]] = 1
self.time = 0
state = np.array([[
self.board,
piece,
np.zeros((self.height, self.width)),
np.zeros((self.height, self.width))
]])
return state
def resolve_lines(self):
removed = 0
for i in range(len(self.board)):
line = self.board[i]
if all(x == 1 for x in line):
removed = removed + 1
for j in range(i - 1):
self.board[i - j] = self.board[i - j - 1]
return removed
def apply_garbage(self, n_lines):
done = False
if n_lines > 0:
if np.any(self.board[n_lines - 1]):
done = True
else:
self.board = np.roll(self.board, -n_lines, axis=0)
for i in range(n_lines):
garbage_line = np.ones(self.width)
garbage_line[np.random.randint(0, self.width)] = 0
self.board[self.height - 1 - i] = garbage_line
def step(self, action):
self.time = self.time + 1
self.subframe = self.subframe + 1
done = False
reward = 0
lines_cleared = 0
if action == Ris.LEFT:
coll = False
for i in range(4):
for j in range(4):
if not coll and self.falling_piece_shape[j][i] == 1:
pos_left = (i + self.falling_piece_pos[0] - 1, j + self.falling_piece_pos[1])
if pos_left[0] < 0 or self.board[pos_left[1]][pos_left[0]] != 0:
coll = True
if not coll:
self.falling_piece_pos = (self.falling_piece_pos[0] - 1, self.falling_piece_pos[1])
if action == Ris.RIGHT:
coll = False
for i in range(4):
for j in range(4):
if not coll and self.falling_piece_shape[j][i] == 1:
pos_left = (i + self.falling_piece_pos[0] + 1, j + self.falling_piece_pos[1])
if pos_left[0] >= len(self.board[0]) or self.board[pos_left[1]][pos_left[0]] != 0:
coll = True
if not coll:
self.falling_piece_pos = (self.falling_piece_pos[0] + 1, self.falling_piece_pos[1])
if action == Ris.ROT:
rotated = np.rot90(self.falling_piece_shape)
coll = False
for i in range(4):
for j in range(4):
if not coll and rotated[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
if pos[0] not in range(0, len(self.board[0])) or \
pos[1] not in range(0, len(self.board)) or \
self.board[pos[1]][pos[0]] != 0:
coll = True
if not coll:
self.falling_piece_shape = rotated
if self.subframe == self.subframes - 1:
self.subframe = 0
coll = False
for i in range(4):
for j in range(4):
if not coll and self.falling_piece_shape[j][i] == 1:
pos_below = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1] + 1)
if pos_below[1] >= len(self.board) or self.board[pos_below[1]][pos_below[0]] != 0:
coll = True
if coll:
bottom = False
for i in range(4):
for j in range(4):
if self.falling_piece_shape[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
self.board[pos[1]][pos[0]] = 1
if pos[1] > (len(self.board) // 2):
bottom = True
lines_cleared = self.resolve_lines()
if lines_cleared > 0:
reward = (2 + lines_cleared) ** 2
else:
self.apply_garbage(self.incoming_garbage)
self.incoming_garbage = 0
if self.falling_piece_pos[1] == 0:
done = True
reward = -10
self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
self.falling_piece_shape = np.rot90(self.piece_types[np.random.randint(0, len(self.piece_types))], k=np.random.randint(0, 4))
else:
self.falling_piece_pos = (self.falling_piece_pos[0], self.falling_piece_pos[1] + 1)
piece = np.array(np.zeros((self.height, self.width)))
for i in range(4):
for j in range(4):
if self.falling_piece_shape[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
piece[pos[1]][pos[0]] = 1
timing_layer = np.zeros((self.height, self.width))
if self.subframe == self.subframes - 2:
timing_layer = np.ones((self.height, self.width))
garbage_layer = np.zeros((self.height, self.width))
for i in range(self.incoming_garbage):
garbage_layer[self.height - 1 - i] = np.ones(self.width)
state = np.array([[
self.board,
piece,
garbage_layer,
timing_layer
]])
if self.time > self.cutoff:
done = True
return state, reward, done, { 'lines_cleared' : lines_cleared }
def step_new(self, action):
self.time = self.time + 1
self.subframe = self.subframe + 1
done = False
reward = 0
lines_cleared = 0
dynamic_layer = np.array(np.zeros((self.height, self.width)), dtype=int)
lbound, rbound, ubound, dbound = (self.width, 0, self.height, 0)
for c in range(4):
for r in range(4):
if self.falling_piece_shape[r][c] == 1:
pos = (c + self.falling_piece_pos[0], r + self.falling_piece_pos[1])
dynamic_layer[pos[1]][pos[0]] = 1
if pos[0] < lbound: lbound = pos[0]
if pos[0] > rbound: rbound = pos[0]
if pos[1] < ubound: ubound = pos[1]
if pos[1] > dbound: dbound = pos[1]
if action == Ris.LEFT:
if lbound > 0:
preview_layer = np.roll(dynamic_layer, -1)
if not np.all(np.bitwise_xor(self.board, preview_layer)):
self.falling_piece_pos = (self.falling_piece_pos[0] - 1, self.falling_piece_pos[1])
if action == Ris.RIGHT:
if rbound < self.width - 1:
preview_layer = np.roll(dynamic_layer, 1)
if not np.all(np.bitwise_xor(self.board, preview_layer)):
self.falling_piece_pos = (self.falling_piece_pos[0] + 1, self.falling_piece_pos[1])
if action == Ris.ROT:
rotated = np.rot90(self.falling_piece_shape)
coll = False
for i in range(4):
for j in range(4):
if not coll and rotated[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
if pos[0] not in range(0, len(self.board[0])) or \
pos[1] not in range(0, len(self.board)) or \
self.board[pos[1]][pos[0]] != 0:
coll = True
if not coll:
self.falling_piece_shape = rotated
if self.subframe == self.subframes - 1:
self.subframe = 0
coll = False
for i in range(4):
for j in range(4):
if not coll and self.falling_piece_shape[j][i] == 1:
pos_below = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1] + 1)
if pos_below[1] >= len(self.board) or self.board[pos_below[1]][pos_below[0]] != 0:
coll = True
if coll:
bottom = False
for i in range(4):
for j in range(4):
if self.falling_piece_shape[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
self.board[pos[1]][pos[0]] = 1
if pos[1] > (len(self.board) // 2):
bottom = True
lines_cleared = self.resolve_lines()
if lines_cleared > 0:
reward = (2 + lines_cleared) ** 2
else:
self.apply_garbage(self.incoming_garbage)
self.incoming_garbage = 0
if self.falling_piece_pos[1] == 0:
done = True
reward = -10
self.falling_piece_pos = (np.random.randint(0, self.width - 3), 0)
self.falling_piece_shape = np.rot90(self.piece_types[np.random.randint(0, len(self.piece_types))], k=np.random.randint(0, 4))
else:
self.falling_piece_pos = (self.falling_piece_pos[0], self.falling_piece_pos[1] + 1)
piece = np.array(np.zeros((self.height, self.width)))
for i in range(4):
for j in range(4):
if self.falling_piece_shape[j][i] == 1:
pos = (i + self.falling_piece_pos[0], j + self.falling_piece_pos[1])
piece[pos[1]][pos[0]] = 1
timing_layer = np.zeros((self.height, self.width))
if self.subframe == self.subframes - 2:
timing_layer = np.ones((self.height, self.width))
garbage_layer = np.zeros((self.height, self.width))
for i in range(self.incoming_garbage):
garbage_layer[self.height - 1 - i] = np.ones(self.width)
state = np.array([[
self.board,
piece,
garbage_layer,
timing_layer
]])
if self.time > self.cutoff:
done = True
return state, reward, done, { 'lines_cleared' : lines_cleared }
def draw(self, screen, heatmap=None):
# draw static pieces
for i in range(len(self.board[0])):
for j in range(len(self.board)):
cell = pg.Rect(self.cell_size * i, self.cell_size * j, self.cell_size, self.cell_size)
if self.board[j][i] == 1:
pg.draw.rect(screen, (0, 100, 0), cell)
pg.draw.rect(screen, (0, 90, 0), cell, 1)
else:
pg.draw.rect(screen, (64, 64, 64), cell)
pg.draw.rect(screen, (58, 58, 58), cell, 1)
# draw falling piece
for i in range(4):
for j in range(4):
cell = pg.Rect(self.cell_size * (i + self.falling_piece_pos[0]), self.cell_size * (j + self.falling_piece_pos[1]), self.cell_size, self.cell_size)
if self.falling_piece_shape[j][i] == 1:
pg.draw.rect(screen, (0, 120, 0), cell)
pg.draw.rect(screen, (0, 110, 0), cell, 1)
def render(self, mode=''):
self.draw(self.screen)
pg.display.flip()
def play(self, framerate=30):
clock = pg.time.Clock()
while True:
self.render()
action = Ris.NOP
events = pg.event.get()
for event in events:
if event.type == pg.KEYDOWN:
if event.key == pg.K_LEFT:
action = Ris.LEFT
if event.key == pg.K_RIGHT:
action = Ris.RIGHT
if event.key == pg.K_UP:
action = Ris.ROT
state, reward, done, _ = self.step(action)
if done: self.reset()
clock.tick(int(framerate))
piece_sets = {
'lettris' : [
[[0,0,0,0],
[0,0,0,0],
[0,1,1,0],
[0,0,0,0]]
],
'koktris' : [
[[0,0,0,0],
[0,0,1,0],
[1,1,1,0],
[0,0,0,0]],
[[0,0,0,0],
[0,1,0,0],
[0,1,1,1],
[0,0,0,0]],
[[0,0,0,0],
[0,1,1,0],
[0,1,1,0],
[0,0,0,0]],
[[0,0,0,0],
[0,1,0,0],
[1,1,1,0],
[0,0,0,0]],
[[0,0,0,0],
[1,1,0,0],
[0,1,1,0],
[0,0,0,0]],
[[0,0,0,0],
[0,0,1,1],
[0,1,1,0],
[0,0,0,0]],
[[0,0,0,0],
[0,0,0,0],
[1,1,1,1],
[0,0,0,0]],
]
}
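# Tip: instead of the random-action loop below, an interactive game with the arrow keys
# can be run via env.play(framerate=10) once env.screen has been assigned.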
if __name__ == "__main__":
cell_size = 24
env = Ris(cell_size, 10, 20, piece_set='koktris')
screen = pg.display.set_mode((cell_size * (len(env.board[0])), cell_size * len(env.board)))
env.screen = screen
clock = pg.time.Clock()
while True:
_,_,done,_ = env.step(env.action_space.sample())
if done: env.reset()
env.render()
pg.display.flip()
#clock.tick(30)
``` |
{
"source": "Jontahan/vicero",
"score": 3
} |
#### File: vicero/examples/dynaq_test.py
```python
from vicero.algorithms.dynaq import DynaQ
import pygame as pg
import environments.maze as maze
#from vicero.algorithms.qlearning import Qlearning
import numpy as np
import matplotlib.pyplot as plt
# [Work in Progress]
np.random.seed()
board = [[0 ,0 ,0 ,0 ,10,0 ,0 ,0 ],
[0 ,0 ,-1,-1,-1,0 ,0 ,0 ],
[0 ,0 ,-1,0 ,0 ,-1,0 ,0 ],
[0 ,0 ,0 ,0 ,0 ,0 ,0 ,-1],
[10,0 ,0 ,0 ,0 ,0 ,0 ,0 ],
[0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ],
[0 ,0 ,0 ,-1,-1,0 ,0 ,0 ],
[0 ,0 ,0 ,1 ,0 ,0 ,0 ,0 ]]
cell_size = 32 # the size of one game cell, in pixels
pad_cells = 1 # padding between the visualizations
framerate = 60 # frames per second
# pygame setup
pg.init()
screen = pg.display.set_mode((cell_size * len(board[0]), cell_size * len(board)))
clock = pg.time.Clock()
class GameInstance:
def __init__(self, env, model, offset):
self.env = env
self.model = model
self.offset = offset
# steps_taken is the number of steps taken before a completion of the maze
# Iteration is the number of times the agent has completed the environment
self.info = {'pos': (3, 7), 'steps_taken': 0}
self.step_history = []
def get_epsilon(self):
return self.model.get_epsilon()
def game_step(self):
# discretize current game state
self.info['steps_taken'] += 1
state = self.env.state
# let the model choose an action
action = self.model.exploratory_action(state)
# run one step in the simulation
new_state, reward, done, self.board = self.env.step(action)
# update the Q table based on the observation
self.model.update_q(state, action, reward, new_state)
# visualize the new state
self.draw_world()
# if in goal state, restart
if done:
self.env.reset()
self.step_history.append(self.info['steps_taken'])
self.info['steps_taken'] = 0
info = {'x' : 3, 'y' : 7}
return self.step_history, True
return None, False
def draw_world(self):
for i in range(len(self.board[0])):
for j in range(len(self.board)):
#qval = model.Q[discretize(i, j)][np.argmax(model.Q[discretize(i, j)])]
qval = np.average(self.model.Q[discretize((i, j))])
qval = 64 + 50 * qval
pg.draw.rect(screen, (np.clip(qval, 0, 220) , 70, 20), pg.Rect(self.offset[0] + cell_size * i, self.offset[1] + cell_size * j, cell_size, cell_size))
if self.board[j][i] == -1:
pg.draw.rect(screen, (64, 64, 64), pg.Rect(self.offset[0] + cell_size * i, self.offset[1] + cell_size * j, cell_size, cell_size))
if self.board[j][i] == 1:
pg.draw.ellipse(screen, (100, 24, 24), pg.Rect(self.offset[0] + cell_size * i, self.offset[1] + cell_size * j, cell_size, cell_size))
if self.board[j][i] == 10:
pg.draw.rect(screen, (180, 180, 64), pg.Rect(self.offset[0] + cell_size * i, self.offset[1] + cell_size * j, cell_size, cell_size))
env = maze.MazeEnvironment(board)
def discretize(state):
return state[1] * env.size + state[0]
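# e.g. with the 8x8 board above, the starting cell (x=3, y=7) maps to index 7 * 8 + 3 = 59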
ql = DynaQ(env, len(board) ** 2, len(maze.MazeEnvironment.action_space), epsilon=0.1, discretize=discretize)
#ql.train(10000)
game = GameInstance(env, ql, (0, 0))
def plot_durations(steps):
plt.figure(1)
plt.clf()
plt.title('Training...')
plt.xlabel('Iteration')
plt.ylabel('Steps')
plt.plot(steps)
plt.pause(0.001) # To update plots
while True:
i = 0
step_list, finished = game.game_step()
if finished:
plot_durations(step_list)
pg.display.flip()
clock.tick(framerate)
```
#### File: examples/environments/hex.py
```python
import numpy as np
import math
import pydot as pd
# player id is 1 (blue) or 2 (red)
# player 1 wins by bridging top-left to bottom-right or vice versa (on the visualized diamond form)
# player 2 wins by bridging top-right to bottom-left or vice versa (on the visualized diamond form)
class HexSim:
class Node:
def __init__(self, list_index):
self.list_index = list_index
self.neighbors = []
self.border_identities = []
def __repr__(self):
return str(self.list_index)
def init_edges(self, game_graph):
N = int(math.sqrt(len(game_graph)))
nr = range(N)
r, c = int(self.list_index // N), int(self.list_index % N)
def rc_i(row, col):
return row * N + col
if r + 1 in nr: self.neighbors.append(game_graph[rc_i(r + 1, c)])
if r - 1 in nr: self.neighbors.append(game_graph[rc_i(r - 1, c)])
if c + 1 in nr: self.neighbors.append(game_graph[rc_i(r, c + 1)])
if c - 1 in nr: self.neighbors.append(game_graph[rc_i(r, c - 1)])
if r - 1 in nr and c + 1 in nr: self.neighbors.append(game_graph[rc_i(r - 1, c + 1)])
if r + 1 in nr and c - 1 in nr: self.neighbors.append(game_graph[rc_i(r + 1, c - 1)])
if r == 0: self.border_identities.append(-2)
if c == 0: self.border_identities.append(-1)
if r == N - 1: self.border_identities.append(2)
if c == N - 1: self.border_identities.append(1)
def __init__(self, N, starting_player):
self.N = N
self.board = np.zeros(self.N * self.N)
# list of references to all the nodes, nodes contain info about neighbor cells
self.game_graph = [HexSim.Node(i) for i in range(self.N * self.N)]
# after all nodes exist, connect them
for node in self.game_graph:
node.init_edges(self.game_graph)
self.action_space = [i for i in range(self.N * self.N)]
self.next_player_id = starting_player
# number of players, defined to avoid constants in the code
self.P = 2
self.flat_state_size = N * N + 1
self.state_flattener = lambda state : np.array([state[0]] + list(state[1]))
state = property(fget=lambda self : (self.next_player_id, self.board))
def is_winning_move(self, state, action):
player, board = state
def bfs(graph, start):
visited, queue = set(), [start]
while queue:
node = queue.pop(0)
if node not in visited:
for neighbor in node.neighbors:
if board[neighbor.list_index] == player:
queue.append(neighbor)
visited.add(node)
return visited
connected_nodes = bfs(self.game_graph, self.game_graph[action])
ends = 0
for node in connected_nodes:
if player in node.border_identities:
ends += 1
break
for node in connected_nodes:
if player * -1 in node.border_identities:
ends += 1
break
return ends == 2
# one step in the actual game, assumes legal action
def step(self, action):
self.board[action] = self.next_player_id
done = self.is_winning_move(self.state, action)
self.next_player_id = 1 + ((self.next_player_id + 2) % self.P)
return self.state, done
def simulate(self, state, action):
player, board = state
board = np.array(board)
board[action] = player
done = self.is_winning_move(state, action)
player = 1 + ((player + 2) % self.P)
return (player, board), done
def is_legal_action(self, state, action):
if action not in self.action_space:
return False
return state[1][action] == 0
def reset(self, starting_player):
self.board = np.zeros(self.N * self.N)
self.next_player_id = starting_player
return self.state
def get_winner(self, state):
return 1 + ((state[0] + 2) % self.P)
def visualize(self, fname):
color_map = {
0: 'gray',
1: 'blue',
2: 'red'
}
graph = pd.Dot(graph_type='graph', nodesep=0.3)
for node in self.game_graph:
pd_node = pd.Node(node.list_index, shape='hexagon', style='filled', fillcolor=color_map[self.board[node.list_index]])#, label='{}, {}, {}'.format(hnode.listform_index, hnode.color, hnode.connection_end))
graph.add_node(pd_node)
for neighbor in node.neighbors:
graph.add_edge(pd.Edge(pd_node, neighbor.list_index))
graph.write_png(fname, prog='neato')
# test
"""
hs = HexSim(5, 1)
for action in hs.action_space:
assert hs.is_legal_action(hs.state, action) == True
for action in hs.action_space:
hs.board[action] = 1
assert hs.is_legal_action(hs.state, action) == False
for action in hs.action_space:
assert hs.is_legal_action(hs.state, action) == False
player = 1
hs.reset(player)
for action in hs.action_space:
assert hs.is_legal_action(hs.state, action) == True
new_state, done = hs.step(action)
assert new_state[0] != player
player = new_state[0]
state, done = hs.reset(player), False
while not done:
hs.visualize('before_win.png')
action = np.random.choice([action for action in hs.action_space if hs.is_legal_action(state, action)])
state, done = hs.step(action)
print(hs.get_winner(state))
hs.visualize('win.png')
"""
```
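The `border_identities` assigned in `init_edges` are exactly what `is_winning_move` looks for: player 1 has to connect the c == 0 and c == N-1 borders (identities -1 and +1), player 2 the r == 0 and r == N-1 borders (identities -2 and +2). A minimal sketch on a 3x3 board, using the row-major cell indices produced by `rc_i`:
```python
hs = HexSim(3, starting_player=1)

# player 1 builds the top row (cells 0, 1, 2); player 2 answers in the row below
for p1_cell, p2_cell in [(0, 3), (1, 4)]:
    hs.step(p1_cell)
    hs.step(p2_cell)

state, done = hs.step(2)   # completes player 1's left-to-right bridge
assert done and hs.get_winner(state) == 1
```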
#### File: vicero/examples/mcts_nim.py
```python
import vicero.algorithms.mcts as mcts
import environments.nimsim as nimsim
import numpy as np
class GameAgent:
def __init__(self, env, algorithm=None):
self.env = env
self.algorithm = algorithm
def pick_action(self, state, viz=False):
if self.algorithm:
return self.algorithm.pick_action(state, viz)
return np.random.choice(self.env.action_space)
N, K, M = 4, 2, 1000
player_id, evil_id = 0, 1
starting_player = player_id
ns = nimsim.NimSim(N, K, starting_player=starting_player)
player_agent = GameAgent(ns, algorithm=mcts.MCTS(ns, M))
evil_agent = GameAgent(ns, algorithm=mcts.MCTS(ns, M))
n_games = 10
wins = 0
for i in range(n_games): # for each game
print('game', i)
ns.reset(starting_player)
done = False
state = ns.state
i = 0
while not done: # for each turn
        if state[0] == player_id: # simple turn loop, kept readable
action = player_agent.pick_action(ns.state, viz=(i == 0))
else: # opponent move
action = evil_agent.pick_action(ns.state)
i += 1
state, done = ns.step(action)
if state[0] != player_id:
wins += 1
print('wins: {}/{}'.format(wins, n_games))
```
#### File: vicero/algorithms/dynaq.py
```python
from vicero.algorithms.qlearning import Qlearning
import numpy as np
# DynaQ is an algorithm that is very similar to
# classical tabular Q-learning, with the one difference being that it
# keeps an internal model that simulates the consequences of actions
# based entirely on experience
# More details: S&B18 Chapter 8
class DynaQ(Qlearning):
def __init__(self, env, n_states, n_actions, epsilon, discretize, planning_steps=0):
super(DynaQ, self).__init__(env, n_states, n_actions, epsilon=epsilon, discretize=discretize)
self.model = {} # internal model for simulation, built from experience / exploration
self.planning_steps = planning_steps
def train(self, iterations):
print(':D')
for _ in range(iterations):
state_old = self.env.state
action = self.exploratory_action(self.env.state)
state, reward, done, board = self.env.step(action)
self.update_q(state_old, action, reward, state)
self.model[(state_old, action)] = (reward, state)
for _ in range(self.planning_steps):
                # sample a previously observed (state, action) pair from the learned model
                sample_state_old, sample_action = list(self.model.keys())[np.random.randint(len(self.model))]
                sample_reward, sample_state = self.model[(sample_state_old, sample_action)]
self.update_q(sample_state_old, sample_action, sample_reward, sample_state)
if done:
self.env.reset()
``` |
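A minimal usage sketch, mirroring the maze example earlier in this collection. The `environments.maze` import path is an assumption, and `planning_steps > 0` is the only thing separating this run from plain tabular Q-learning:
```python
from vicero.algorithms.dynaq import DynaQ
import environments.maze as maze   # hypothetical path, mirrors the other examples

board = [[0, 0, 10],
         [0, -1, 0],
         [1, 0, 0]]    # tiny reward grid: 10 = goal, -1 = wall, 1 = start

env = maze.MazeEnvironment(board)
discretize = lambda pos: pos[1] * env.size + pos[0]    # (x, y) -> Q-table row

agent = DynaQ(env, n_states=env.size ** 2,
              n_actions=len(maze.MazeEnvironment.action_space),
              epsilon=0.1, discretize=discretize, planning_steps=5)
agent.train(10000)   # each real step is followed by 5 simulated updates from the model
```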
{
"source": "jontambi/kubernetes-ingress",
"score": 2
} |
#### File: tests/suite/custom_resources_utils.py
```python
import pytest
import yaml
import logging
from pprint import pprint
from kubernetes.client import CustomObjectsApi, ApiextensionsV1beta1Api, CoreV1Api
from kubernetes import client
from kubernetes.client.rest import ApiException
from suite.resources_utils import ensure_item_removal, get_file_contents
def create_crd(api_extensions_v1_beta1: ApiextensionsV1beta1Api, body) -> None:
"""
Create a CRD based on a dict
:param api_extensions_v1_beta1: ApiextensionsV1beta1Api
:param body: a dict
"""
try:
api_extensions_v1_beta1.create_custom_resource_definition(body)
except ApiException as api_ex:
raise api_ex
# ApiException(f"An unexpected exception occurred: {api_ex}", reason=api_ex.reason)
except Exception as ex:
# https://github.com/kubernetes-client/python/issues/376
if ex.args[0] == "Invalid value for `conditions`, must not be `None`":
print("There was an insignificant exception during the CRD creation. Continue...")
else:
pytest.fail(f"An unexpected exception {ex} occurred. Exiting...")
def create_crd_from_yaml(
api_extensions_v1_beta1: ApiextensionsV1beta1Api, name, yaml_manifest
) -> None:
"""
Create a specific CRD based on yaml file.
:param api_extensions_v1_beta1: ApiextensionsV1beta1Api
:param name: CRD name
:param yaml_manifest: an absolute path to file
"""
print(f"Create a CRD with name: {name}")
with open(yaml_manifest) as f:
docs = yaml.safe_load_all(f)
for dep in docs:
if dep["metadata"]["name"] == name:
create_crd(api_extensions_v1_beta1, dep)
print("CRD was created")
def delete_crd(api_extensions_v1_beta1: ApiextensionsV1beta1Api, name) -> None:
"""
Delete a CRD.
:param api_extensions_v1_beta1: ApiextensionsV1beta1Api
:param name:
:return:
"""
print(f"Delete a CRD: {name}")
delete_options = client.V1DeleteOptions()
api_extensions_v1_beta1.delete_custom_resource_definition(name, delete_options)
ensure_item_removal(api_extensions_v1_beta1.read_custom_resource_definition, name)
print(f"CRD was removed with name '{name}'")
def read_crd(custom_objects: CustomObjectsApi, namespace, plural, name) -> object:
"""
Get CRD information (kubectl describe output)
:param custom_objects: CustomObjectsApi
:param namespace: The custom resource's namespace
:param plural: the custom resource's plural name
:param name: the custom object's name
:return: object
"""
print(f"Getting info for {name} in namespace {namespace}")
try:
response = custom_objects.get_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, plural, name
)
pprint(response)
return response
except ApiException as ex:
        logging.exception(f"Exception occurred: {ex} while reading CRD")
raise
def create_policy_from_yaml(
custom_objects: CustomObjectsApi, yaml_manifest, namespace
) -> str:
"""
Create a Policy based on yaml file.
:param custom_objects: CustomObjectsApi
:param yaml_manifest: an absolute path to file
:param namespace:
:return: str
"""
print("Create a Policy:")
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
try:
custom_objects.create_namespaced_custom_object(
"k8s.nginx.org", "v1alpha1", namespace, "policies", dep
)
print(f"Policy created with name '{dep['metadata']['name']}'")
return dep["metadata"]["name"]
except ApiException as ex:
logging.exception(
f"Exception: {ex} occured while creating Policy: {dep['metadata']['name']}"
)
raise
def delete_policy(custom_objects: CustomObjectsApi, name, namespace) -> None:
"""
Delete a Policy.
:param custom_objects: CustomObjectsApi
:param namespace: namespace
:param name:
:return:
"""
print(f"Delete a Policy: {name}")
delete_options = client.V1DeleteOptions()
custom_objects.delete_namespaced_custom_object(
"k8s.nginx.org", "v1alpha1", namespace, "policies", name, delete_options
)
ensure_item_removal(
custom_objects.get_namespaced_custom_object,
"k8s.nginx.org",
"v1alpha1",
namespace,
"policies",
name,
)
print(f"Policy was removed with name '{name}'")
def read_policy(custom_objects: CustomObjectsApi, namespace, name) -> object:
"""
Get policy information (kubectl describe output)
:param custom_objects: CustomObjectsApi
:param namespace: The policy's namespace
:param name: policy's name
:return: object
"""
print(f"Getting info for policy {name} in namespace {namespace}")
try:
response = custom_objects.get_namespaced_custom_object(
"k8s.nginx.org", "v1alpha1", namespace, "policies", name
)
pprint(response)
return response
except ApiException as ex:
        logging.exception(f"Exception occurred: {ex} while reading Policy")
raise
def create_virtual_server_from_yaml(
custom_objects: CustomObjectsApi, yaml_manifest, namespace
) -> str:
"""
Create a VirtualServer based on yaml file.
:param custom_objects: CustomObjectsApi
:param yaml_manifest: an absolute path to file
:param namespace:
:return: str
"""
print("Create a VirtualServer:")
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
try:
custom_objects.create_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualservers", dep
)
print(f"VirtualServer created with name '{dep['metadata']['name']}'")
return dep["metadata"]["name"]
except ApiException as ex:
logging.exception(
f"Exception: {ex} occured while creating VirtualServer: {dep['metadata']['name']}"
)
raise
def delete_virtual_server(custom_objects: CustomObjectsApi, name, namespace) -> None:
"""
Delete a VirtualServer.
:param custom_objects: CustomObjectsApi
:param namespace: namespace
:param name:
:return:
"""
print(f"Delete a VirtualServer: {name}")
delete_options = client.V1DeleteOptions()
custom_objects.delete_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualservers", name, delete_options
)
ensure_item_removal(
custom_objects.get_namespaced_custom_object,
"k8s.nginx.org",
"v1",
namespace,
"virtualservers",
name,
)
print(f"VirtualServer was removed with name '{name}'")
def patch_virtual_server_from_yaml(
custom_objects: CustomObjectsApi, name, yaml_manifest, namespace
) -> None:
"""
Patch a VS based on yaml manifest
:param custom_objects: CustomObjectsApi
:param name:
:param yaml_manifest: an absolute path to file
:param namespace:
:return:
"""
print(f"Update a VirtualServer: {name}")
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
try:
custom_objects.patch_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualservers", name, dep
)
print(f"VirtualServer updated with name '{dep['metadata']['name']}'")
except ApiException as ex:
logging.exception(f"Failed with exception {ex} while patching VirtualServer: {name}")
raise
def patch_virtual_server(custom_objects: CustomObjectsApi, name, namespace, body) -> str:
"""
Update a VirtualServer based on a dict.
:param custom_objects: CustomObjectsApi
:param name:
:param body: dict
:param namespace:
:return: str
"""
print("Update a VirtualServer:")
custom_objects.patch_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualservers", name, body
)
print(f"VirtualServer updated with a name '{body['metadata']['name']}'")
return body["metadata"]["name"]
def patch_v_s_route_from_yaml(
custom_objects: CustomObjectsApi, name, yaml_manifest, namespace
) -> None:
"""
Update a VirtualServerRoute based on yaml manifest
:param custom_objects: CustomObjectsApi
:param name:
:param yaml_manifest: an absolute path to file
:param namespace:
:return:
"""
print(f"Update a VirtualServerRoute: {name}")
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
try:
custom_objects.patch_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualserverroutes", name, dep
)
print(f"VirtualServerRoute updated with name '{dep['metadata']['name']}'")
except ApiException as ex:
logging.exception(f"Failed with exception {ex} while patching VirtualServerRoute: {name}")
raise
def get_vs_nginx_template_conf(
v1: CoreV1Api, vs_namespace, vs_name, pod_name, pod_namespace
) -> str:
"""
Get contents of /etc/nginx/conf.d/vs_{namespace}_{vs_name}.conf in the pod.
:param v1: CoreV1Api
:param vs_namespace:
:param vs_name:
:param pod_name:
:param pod_namespace:
:return: str
"""
file_path = f"/etc/nginx/conf.d/vs_{vs_namespace}_{vs_name}.conf"
return get_file_contents(v1, file_path, pod_name, pod_namespace)
def create_v_s_route_from_yaml(custom_objects: CustomObjectsApi, yaml_manifest, namespace) -> str:
"""
Create a VirtualServerRoute based on a yaml file.
:param custom_objects: CustomObjectsApi
:param yaml_manifest: an absolute path to a file
:param namespace:
:return: str
"""
print("Create a VirtualServerRoute:")
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
custom_objects.create_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualserverroutes", dep
)
print(f"VirtualServerRoute created with a name '{dep['metadata']['name']}'")
return dep["metadata"]["name"]
def patch_v_s_route(custom_objects: CustomObjectsApi, name, namespace, body) -> str:
"""
Update a VirtualServerRoute based on a dict.
:param custom_objects: CustomObjectsApi
:param name:
:param body: dict
:param namespace:
:return: str
"""
print("Update a VirtualServerRoute:")
custom_objects.patch_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualserverroutes", name, body
)
print(f"VirtualServerRoute updated with a name '{body['metadata']['name']}'")
return body["metadata"]["name"]
def delete_v_s_route(custom_objects: CustomObjectsApi, name, namespace) -> None:
"""
Delete a VirtualServerRoute.
:param custom_objects: CustomObjectsApi
:param namespace: namespace
:param name:
:return:
"""
print(f"Delete a VirtualServerRoute: {name}")
delete_options = client.V1DeleteOptions()
custom_objects.delete_namespaced_custom_object(
"k8s.nginx.org", "v1", namespace, "virtualserverroutes", name, delete_options
)
ensure_item_removal(
custom_objects.get_namespaced_custom_object,
"k8s.nginx.org",
"v1",
namespace,
"virtualserverroutes",
name,
)
print(f"VirtualServerRoute was removed with the name '{name}'")
def generate_item_with_upstream_options(yaml_manifest, options) -> dict:
"""
Generate a VS/VSR item with an upstream option.
Update all the upstreams in VS/VSR
:param yaml_manifest: an absolute path to a file
:param options: dict
:return: dict
"""
with open(yaml_manifest) as f:
dep = yaml.safe_load(f)
for upstream in dep["spec"]["upstreams"]:
upstream.update(options)
return dep
```
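A minimal sketch of how these helpers compose outside the pytest fixtures. Assumptions: a reachable cluster with the k8s.nginx.org CRDs installed, a local kubeconfig, and a hypothetical `vs.yaml` manifest; the `max-fails` upstream option is only illustrative:
```python
from kubernetes import client, config

config.load_kube_config()
custom_objects = client.CustomObjectsApi()

# create a VirtualServer, patch its upstream options, then clean up
name = create_virtual_server_from_yaml(custom_objects, "vs.yaml", "default")
patched = generate_item_with_upstream_options("vs.yaml", {"max-fails": 3})
patch_virtual_server(custom_objects, name, "default", patched)
delete_virtual_server(custom_objects, name, "default")
```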
#### File: tests/suite/test_virtual_server_error_pages.py
```python
import pytest
import json
import requests
from kubernetes.client.rest import ApiException
from suite.custom_assertions import wait_and_assert_status_code, assert_vs_conf_not_exists, \
assert_event_starts_with_text_and_contains_errors
from settings import TEST_DATA
from suite.custom_resources_utils import patch_virtual_server_from_yaml, get_vs_nginx_template_conf
from suite.resources_utils import wait_before_test, get_first_pod_name, get_events
@pytest.mark.vs
@pytest.mark.parametrize('crd_ingress_controller, virtual_server_setup',
[({"type": "complete", "extra_args": [f"-enable-custom-resources"]},
{"example": "virtual-server-error-pages", "app_type": None})],
indirect=True)
class TestVSErrorPages:
def test_redirect_strategy(self, kube_apis, crd_ingress_controller, virtual_server_setup):
wait_and_assert_status_code(307, virtual_server_setup.backend_1_url,
virtual_server_setup.vs_host, allow_redirects=False)
resp = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host}, allow_redirects=False)
assert f'http://{virtual_server_setup.vs_host}/error.html' in resp.next.url
def test_return_strategy(self, kube_apis, crd_ingress_controller, virtual_server_setup):
wait_and_assert_status_code(207, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host)
resp = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
resp_content = json.loads(resp.content)
assert resp_content['status'] == '502' \
and resp_content['message'] == 'Forbidden' \
and resp.headers.get('x-debug-original-status') == '502'
def test_virtual_server_after_update(self, kube_apis, crd_ingress_controller, virtual_server_setup):
patch_virtual_server_from_yaml(kube_apis.custom_objects, virtual_server_setup.vs_name,
f"{TEST_DATA}/virtual-server-error-pages/virtual-server-updated.yaml",
virtual_server_setup.namespace)
wait_and_assert_status_code(301, virtual_server_setup.backend_1_url,
virtual_server_setup.vs_host, allow_redirects=False)
resp = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host, "x-forwarded-proto": "http"},
allow_redirects=False)
assert f'http://{virtual_server_setup.vs_host}/error_http.html' in resp.next.url
wait_and_assert_status_code(502, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host)
resp = requests.get(virtual_server_setup.backend_2_url,
headers={"host": virtual_server_setup.vs_host})
resp_content = resp.content.decode('utf-8')
assert resp_content == 'Hello World!\n'
def test_validation_event_flow(self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller,
virtual_server_setup):
invalid_fields = [
"spec.routes[0].errorPages[0].redirect.url: Invalid value",
"spec.routes[0].errorPages[0].redirect.code: Invalid value: 101",
"spec.routes[1].errorPages[0].return.body: Invalid value: \"status\"",
"spec.routes[1].errorPages[0].return.code: Invalid value: 100",
"spec.routes[1].errorPages[0].return.headers[0].value: Invalid value: \"schema\""
]
text = f"{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}"
vs_event_text = f"VirtualServer {text} is invalid and was rejected: "
vs_file = f"{TEST_DATA}/virtual-server-error-pages/virtual-server-invalid.yaml"
patch_virtual_server_from_yaml(kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_file,
virtual_server_setup.namespace)
wait_before_test(2)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
vs_events = get_events(kube_apis.v1, virtual_server_setup.namespace)
assert_event_starts_with_text_and_contains_errors(vs_event_text, vs_events, invalid_fields)
assert_vs_conf_not_exists(kube_apis, ic_pod_name, ingress_controller_prerequisites.namespace,
virtual_server_setup)
def test_openapi_validation_flow(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, virtual_server_setup):
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config_old = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
vs_file = f"{TEST_DATA}/virtual-server-error-pages/virtual-server-invalid-openapi.yaml"
try:
patch_virtual_server_from_yaml(kube_apis.custom_objects, virtual_server_setup.vs_name, vs_file,
virtual_server_setup.namespace)
except ApiException as ex:
assert ex.status == 422 \
and "spec.routes.errorPages.codes" in ex.body \
and "spec.routes.errorPages.redirect.code" in ex.body \
and "spec.routes.errorPages.redirect.url" in ex.body \
and "spec.routes.errorPages.return.code" in ex.body \
and "spec.routes.errorPages.return.type" in ex.body \
and "spec.routes.errorPages.return.body" in ex.body \
and "spec.routes.errorPages.return.headers.name" in ex.body \
and "spec.routes.errorPages.return.headers.value" in ex.body
except Exception as ex:
pytest.fail(f"An unexpected exception is raised: {ex}")
else:
pytest.fail("Expected an exception but there was none")
wait_before_test(1)
config_new = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert config_old == config_new, "Expected: config doesn't change"
@pytest.mark.parametrize('v_s_data', [
{"src": "virtual-server-splits.yaml", "expected_code": 308},
{"src": "virtual-server-matches.yaml", "expected_code": 307}
])
def test_splits_and_matches(self, kube_apis, crd_ingress_controller, virtual_server_setup, v_s_data):
patch_virtual_server_from_yaml(kube_apis.custom_objects, virtual_server_setup.vs_name,
f"{TEST_DATA}/virtual-server-error-pages/{v_s_data['src']}",
virtual_server_setup.namespace)
wait_and_assert_status_code(v_s_data['expected_code'], virtual_server_setup.backend_1_url,
virtual_server_setup.vs_host, allow_redirects=False)
resp = requests.get(virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host}, allow_redirects=False)
assert f'http://{virtual_server_setup.vs_host}/error.html' in resp.next.url
``` |
{
"source": "jontateixeira/Hydrogeology_SALOME_Plugin",
"score": 2
} |
#### File: Hydrogeology_SALOME_Plugin/hydrogeo_salome/utilities.py
```python
import os
import numpy as np
# ------------------------------------------------------------------------------
def CartGrid(x, y, z=None):
    """Build cartesian grid data (nodes and connections). Returns a tuple:
    (ndarray of node coordinates, ndarray of cell connectivities)"""
if z is None:
nodes = np.array([[i, j, 0.] for j in y for i in x])
nx = x.size
ny = y.size
i, j = np.mgrid[0:nx, 0:ny]
ij = np.ravel_multi_index(
[list(i.ravel()), list(j.ravel())], (nx+1, ny+1), order='F')
cells = np.array([[i, i+1, i+1+nx+1, i+nx+1]
for i in ij], dtype='uint64')
else:
nodes = np.array([[i, j, k] for k in z for j in y for i in x])
nx = x.size - 1
ny = y.size - 1
nz = z.size - 1
i, j, k = np.mgrid[0:nx, 0:ny, 0:nz]
ijk = np.ravel_multi_index(
[list(i.ravel()), list(j.ravel()), list(
k.ravel())], (nx + 1, ny + 1, nz + 1),
order='F')
cells = np.array([[i, i+1, i+1+(nx+1), i+(nx+1),
i+(nx+1)*(ny+1), i+1+(nx+1) *
(ny+1), i+1+(nx+1)+(nx+1)*(ny+1),
i+(nx+1)+(nx+1)*(ny+1)]
for i in ijk], dtype='uint64')
return (nodes, cells)
# ------------------------------------------------------------------------------
def find_indexes(b):
    """This function is similar to MATLAB's 'find' function"""
return [i for (i, vals) in enumerate(b) if vals]
# ------------------------------------------------------------------------------
def write_unv(fname, nodes, cells, mat=None):
"""
Write the UNV (Universal) file dataset format
reference in: https://docs.plm.automation.siemens.com/tdoc/nx/12/nx_help#uid:xid1128419:index_advanced:xid1404601:xid1404604
"""
# consts
sep = " -1"
si, coordsys, vertices, elements = 164, 2420, 2411, 2412
# settings
if mat is None:
mat = np.zeros((cells.shape[0],), dtype=np.int64) + 1
# write unv file
# print("-- writing file: {}".format(fname))
with open(fname, "w") as unv:
# unit system (164)
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(si)) # unv code
unv.write('{:10d}{:20s}{:10d}\n'.format(1, "SI: Meters (newton)", 2))
unv.write('{:25.17E}{:25.17E}{:25.17E}\n{:25.17E}\n'.format(
1, 1, 1, 273.15))
unv.write('{}\n'.format(sep))
# coordinate system (2420)
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(coordsys)) # unv code
unv.write('{:10d}\n'.format(1)) # coordsys label (uid)
unv.write('{:40s}\n'.format("SMESH_Mesh from Salome Geomechanics"))
# coordsys label, coordsys type (0: cartesian), coordsys color
unv.write('{:10d}{:10d}{:10d}\n'.format(1, 0, 0))
unv.write('{:40s}\n'.format("Global cartesian coord. system"))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(1, 0, 0))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 1, 0))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 0, 1))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 0, 0))
unv.write('{}\n'.format(sep))
# write nodes coordinates
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(vertices)) # unv code
for n in range(nodes.shape[0]):
# node-id, coordinate system label, displ. coord. system, color(11)
unv.write('{:10d}{:10d}{:10d}{:10d}\n'.format(n + 1, 1, 1, 11))
unv.write('{:25.16E}{:25.16E}{:25.16E}'.format(
nodes[n, 0], nodes[n, 1], nodes[n, 2]))
unv.write('\n')
unv.write('{}\n'.format(sep))
# write cells connectivities
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(elements)) # unv code
for c in range(cells.shape[0]):
# node-id, coordinate system label, displ. coord. system, color(11)
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(
c + 1, 115, mat[c], mat[c], mat[c], 8))
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}'.format(
cells[c, 0], cells[c, 1], cells[c, 2], cells[c, 3],
cells[c, 4], cells[c, 5], cells[c, 6], cells[c, 7]))
unv.write('\n')
unv.write('{}\n'.format(sep))
# write cells regions
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(2467)) # unv code
regions = np.unique(mat)
for region in regions:
ind = find_indexes(mat == region)
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(
region, 0, 0, 0, 0, 0, 0, len(ind)))
unv.write('Region_{}\n'.format(region))
i = 0
for c in range(len(ind)):
unv.write('{:10d}{:10d}{:10d}{:10d}'.format( 8, ind[c] + 1, 0, 0))
i += 1
if i == 2:
i = 0
unv.write('\n')
if i == 1:
unv.write('\n')
unv.write('{}\n'.format(sep))
# ------------------------------------------------------------------------------
def write_mesh(fname, smesh, boundaries=None, mat=None):
"""
Write the mesh file format (mfem). Only works for hexahedron (cube)
TODO: impl. other finite elements
"""
import SMESH
# consts
header = """# automatically generated by hydrogeo_salome plugin
MFEM mesh v1.0
#
# MFEM Geometry Types (see mesh/geom.hpp):
#
# POINT = 0
# SEGMENT = 1
# TRIANGLE = 2
# SQUARE = 3
# TETRAHEDRON = 4
# CUBE = 5
#
"""
# settings
ncells = smesh.NbHexas()
nnodes = smesh.NbNodes()
dim = 3
if mat is None:
mat = np.ones((ncells,), dtype=np.int64)
else:
assert mat.shape[0] == ncells, "mismatch length between mat and cells"
cells = smesh.GetElementsByType(SMESH.VOLUME)
# write mesh file
with open(fname + '.mesh', "w") as mesh:
# header
mesh.write('{}'.format(header))
# dimension
mesh.write('dimension\n{}\n\n'.format(dim))
# elements connectivities
mesh.write('elements\n{}\n'.format(ncells))
for i in range(ncells):
# region-id, element-type, connectivities
mesh.write('{} {}'.format(mat[i], 5))
for n in smesh.GetElemNodes(cells[i]):
mesh.write(' {}'.format(n))
mesh.write('\n')
mesh.write('\n')
# boundaries
if boundaries is not None:
count = 0
for b in boundaries:
count += len(b.GetIDs())
mesh.write('boundary\n{}\n'.format(count))
count = 1
for bdr in boundaries:
for b in bdr.GetIDs():
# region-id, element-type, connectivities
mesh.write('{} {}'.format(count, 3))
for n in smesh.GetElemNodes(b):
mesh.write(' {}'.format(n))
mesh.write('\n')
count += 1
mesh.write('\n')
# vertices
mesh.write('vertices\n{}\n{}\n'.format(nnodes, dim))
for n in smesh.GetNodesId():
# x y z
for x in smesh.GetNodeXYZ(n):
mesh.write(' {}'.format(x))
mesh.write('\n')
mesh.write('\n')
# ------------------------------------------------------------------------------
def write_coords_lnods(fname, smesh, boundaries=None, mat=None):
"""
Write the datablock mesh file format (coords and lnodes). Only works for hexahedron (cube)
TODO: impl. other finite elements
"""
import SMESH
# consts
header1 = "# node-ID x y z bdr-ID\n"
header2 = "# elem-ID mat elem-type conn... \n"
# settings
ncells = smesh.NbHexas()
nnodes = smesh.NbNodes()
dim = 3
if mat is None:
mat = np.ones((ncells,), dtype=np.int64)
else:
assert mat.shape[0] == ncells, "mismatch length between mat and cells"
cells = smesh.GetElementsByType(SMESH.VOLUME)
# write mesh file
with open(fname + '.coords', "w") as coords:
coords.write(header1)
# boundaries
bdr = np.zeros((nnodes,), dtype=np.int64)
count = 1
if boundaries is not None:
for b in boundaries:
for id in b.GetIDs():
bdr[np.asarray(smesh.GetElemNodes(id)) - 1] = count
count += 1
count = 0
for n in smesh.GetNodesId():
coords.write('{}'.format(count + 1))
# x y z
for x in smesh.GetNodeXYZ(n):
coords.write(' {}'.format(x))
coords.write(' {}\n'.format(bdr[count]))
count += 1
with open(fname + '.lnods', "w") as lnods:
lnods.write(header2)
# elements connectivities
for i in range(ncells):
# region-id, element-type, connectivities
lnods.write('{} {} hex'.format(i + 1, mat[i]))
for n in smesh.GetElemNodes(cells[i]):
lnods.write(' {}'.format(n))
lnods.write('\n')
lnods.write('\n')
# ------------------------------------------------------------------------------
def write_vtk(fname, smesh, boundaries=None, mat=None):
"""
Write the vtk legacy format (vtk). Only works for hexahedron (cube)
TODO: impl. other finite elements
"""
import SMESH
# consts
header = """# vtk DataFile Version 2.0
meshfile created by hydrogeo_salome plugins
ASCII
DATASET UNSTRUCTURED_GRID
FIELD FieldData 2
TIME 1 1 float
0
CYCLE 1 1 int
0
"""
# settings
ncells = smesh.NbHexas()
nnodes = smesh.NbNodes()
dim = 3
if mat is None:
mat = np.ones((ncells,), dtype=np.int64)
else:
assert mat.shape[0] == ncells, "mismatch length between mat and cells"
cells = smesh.GetElementsByType(SMESH.VOLUME)
# write mesh file
with open(fname + '.vtk', "w") as mesh:
# header
mesh.write('{}'.format(header))
# vertices
mesh.write('POINTS {} float\n'.format(nnodes))
for n in smesh.GetNodesId():
# x y z
for x in smesh.GetNodeXYZ(n):
mesh.write(' {}'.format(x))
mesh.write('\n')
mesh.write('\n')
# elements connectivities
mesh.write('CELLS {} {}\n'.format(ncells, ncells + 8*ncells))
for i in range(ncells):
# region-id, element-type, connectivities
mesh.write('8 ')
for n in smesh.GetElemNodes(cells[i]):
mesh.write(' {}'.format(n - 1))
mesh.write('\n')
mesh.write('\n')
# elements type
mesh.write('CELL_TYPES {}\n'.format(ncells))
for i in range(ncells):
# region-id, element-type, connectivities
mesh.write('12\n')
mesh.write('\n')
# boundaries and materials
if boundaries is not None:
bdr = np.zeros((nnodes,), dtype=np.int64)
count = 1
for b in boundaries:
for id in b.GetIDs():
bdr[np.asarray(smesh.GetElemNodes(id)) - 1] = count
count += 1
mesh.write('POINT_DATA {}\n'.format(nnodes))
mesh.write('SCALARS bdr float\n')
mesh.write('LOOKUP_TABLE default\n')
for i in range(nnodes):
mesh.write('{}\n'.format(bdr[i]))
mesh.write('\n')
mesh.write('CELL_DATA {}\n'.format(ncells))
mesh.write('SCALARS materials float\n')
mesh.write('LOOKUP_TABLE default\n')
for i in range(ncells):
mesh.write('{}\n'.format(mat[i]))
mesh.write('\n')
# ------------------------------------------------------------------------------
def volume_hexahedron(nodes):
"""This function compute hexahedron volume"""
volelm = 0
x1 = nodes[0, 0]
y1 = nodes[0, 1]
z1 = nodes[0, 2]
x2 = nodes[1, 0]
y2 = nodes[1, 1]
z2 = nodes[1, 2]
x3 = nodes[2, 0]
y3 = nodes[2, 1]
z3 = nodes[2, 2]
x4 = nodes[3, 0]
y4 = nodes[3, 1]
z4 = nodes[3, 2]
x5 = nodes[4, 0]
y5 = nodes[4, 1]
z5 = nodes[4, 2]
x6 = nodes[5, 0]
y6 = nodes[5, 1]
z6 = nodes[5, 2]
x7 = nodes[6, 0]
y7 = nodes[6, 1]
z7 = nodes[6, 2]
x8 = nodes[7, 0]
y8 = nodes[7, 1]
z8 = nodes[7, 2]
chi1 = -1.0
eta1 = -1.0
tet1 = -1.0
chi2 = +1.0
eta2 = -1.0
tet2 = -1.0
chi3 = +1.0
eta3 = +1.0
tet3 = -1.0
chi4 = -1.0
eta4 = +1.0
tet4 = -1.0
chi5 = -1.0
eta5 = -1.0
tet5 = +1.0
chi6 = +1.0
eta6 = -1.0
tet6 = +1.0
chi7 = +1.0
eta7 = +1.0
tet7 = +1.0
chi8 = -1.0
eta8 = +1.0
tet8 = +1.0
# 8 integration points
chi01 = -.577350269189626
eta01 = -.577350269189626
tet01 = -.577350269189626
chi02 = +.577350269189626
eta02 = -.577350269189626
tet02 = -.577350269189626
chi03 = +.577350269189626
eta03 = +.577350269189626
tet03 = -.577350269189626
chi04 = -.577350269189626
eta04 = +.577350269189626
tet04 = -.577350269189626
chi05 = -.577350269189626
eta05 = -.577350269189626
tet05 = +.577350269189626
chi06 = +.577350269189626
eta06 = -.577350269189626
tet06 = +.577350269189626
chi07 = +.577350269189626
eta07 = +.577350269189626
tet07 = +.577350269189626
chi08 = -.577350269189626
eta08 = +.577350269189626
tet08 = +.577350269189626
nval = 8
weight = 1.0
for ival in range(0, nval):
if ival == 0:
chi = chi01
eta = eta01
tet = tet01
elif ival == 1:
chi = chi02
eta = eta02
tet = tet02
elif ival == 2:
chi = chi03
eta = eta03
tet = tet03
elif ival == 3:
chi = chi04
eta = eta04
tet = tet04
elif ival == 4:
chi = chi05
eta = eta05
tet = tet05
elif ival == 5:
chi = chi06
eta = eta06
tet = tet06
elif ival == 6:
chi = chi07
eta = eta07
tet = tet07
elif ival == 7:
chi = chi08
eta = eta08
tet = tet08
dn1dchi = chi1*(1.0+eta*eta1)*(1.0+tet*tet1)/8.
dn1deta = eta1*(1.0+chi*chi1)*(1.0+tet*tet1)/8.
dn1dtet = tet1*(1.0+chi*chi1)*(1.0+eta*eta1)/8.
dn2dchi = chi2*(1.0+eta*eta2)*(1.0+tet*tet2)/8.
dn2deta = eta2*(1.0+chi*chi2)*(1.0+tet*tet2)/8.
dn2dtet = tet2*(1.0+chi*chi2)*(1.0+eta*eta2)/8.
dn3dchi = chi3*(1.0+eta*eta3)*(1.0+tet*tet3)/8.
dn3deta = eta3*(1.0+chi*chi3)*(1.0+tet*tet3)/8.
dn3dtet = tet3*(1.0+chi*chi3)*(1.0+eta*eta3)/8.
dn4dchi = chi4*(1.0+eta*eta4)*(1.0+tet*tet4)/8.
dn4deta = eta4*(1.0+chi*chi4)*(1.0+tet*tet4)/8.
dn4dtet = tet4*(1.0+chi*chi4)*(1.0+eta*eta4)/8.
dn5dchi = chi5*(1.0+eta*eta5)*(1.0+tet*tet5)/8.
dn5deta = eta5*(1.0+chi*chi5)*(1.0+tet*tet5)/8.
dn5dtet = tet5*(1.0+chi*chi5)*(1.0+eta*eta5)/8.
dn6dchi = chi6*(1.0+eta*eta6)*(1.0+tet*tet6)/8.
dn6deta = eta6*(1.0+chi*chi6)*(1.0+tet*tet6)/8.
dn6dtet = tet6*(1.0+chi*chi6)*(1.0+eta*eta6)/8.
dn7dchi = chi7*(1.0+eta*eta7)*(1.0+tet*tet7)/8.
dn7deta = eta7*(1.0+chi*chi7)*(1.0+tet*tet7)/8.
dn7dtet = tet7*(1.0+chi*chi7)*(1.0+eta*eta7)/8.
dn8dchi = chi8*(1.0+eta*eta8)*(1.0+tet*tet8)/8.
dn8deta = eta8*(1.0+chi*chi8)*(1.0+tet*tet8)/8.
dn8dtet = tet8*(1.0+chi*chi8)*(1.0+eta*eta8)/8.
a11 = x1*dn1dchi+x2*dn2dchi+x3*dn3dchi+x4*dn4dchi + \
x5*dn5dchi+x6*dn6dchi+x7*dn7dchi+x8*dn8dchi
a12 = y1*dn1dchi+y2*dn2dchi+y3*dn3dchi+y4*dn4dchi + \
y5*dn5dchi+y6*dn6dchi+y7*dn7dchi+y8*dn8dchi
a13 = z1*dn1dchi+z2*dn2dchi+z3*dn3dchi+z4*dn4dchi + \
z5*dn5dchi+z6*dn6dchi+z7*dn7dchi+z8*dn8dchi
a21 = x1*dn1deta+x2*dn2deta+x3*dn3deta+x4*dn4deta + \
x5*dn5deta+x6*dn6deta+x7*dn7deta+x8*dn8deta
a22 = y1*dn1deta+y2*dn2deta+y3*dn3deta+y4*dn4deta + \
y5*dn5deta+y6*dn6deta+y7*dn7deta+y8*dn8deta
a23 = z1*dn1deta+z2*dn2deta+z3*dn3deta+z4*dn4deta + \
z5*dn5deta+z6*dn6deta+z7*dn7deta+z8*dn8deta
a31 = x1*dn1dtet+x2*dn2dtet+x3*dn3dtet+x4*dn4dtet + \
x5*dn5dtet+x6*dn6dtet+x7*dn7dtet+x8*dn8dtet
a32 = y1*dn1dtet+y2*dn2dtet+y3*dn3dtet+y4*dn4dtet + \
y5*dn5dtet+y6*dn6dtet+y7*dn7dtet+y8*dn8dtet
a33 = z1*dn1dtet+z2*dn2dtet+z3*dn3dtet+z4*dn4dtet + \
z5*dn5dtet+z6*dn6dtet+z7*dn7dtet+z8*dn8dtet
det = a11*a22*a33+a12*a23*a31+a21*a32*a13-a13*a22*a31 - \
a12*a21*a33-a23*a32*a11
volelm = volelm+det*weight
return volelm
# ------------------------------------------------------------------------------
def GetModulePath():
"""This function returns the absolute path to the module"""
return os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# ------------------------------------------------------------------------------
def GetAbsPathInModule(*paths):
"""This function prepends the path to the module to a path given in the input"""
return os.path.join(GetModulePath(), *paths)
``` |
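A minimal sketch pairing `CartGrid` with `write_unv`; the import path is assumed from the repository layout:
```python
import numpy as np
from hydrogeo_salome.utilities import CartGrid, write_unv   # path assumed

x = np.linspace(0.0, 10.0, 6)      # 5 cells along x
y = np.linspace(0.0, 10.0, 6)      # 5 cells along y
z = np.linspace(0.0, 2.0, 3)       # 2 layers along z
nodes, cells = CartGrid(x, y, z)   # 108 nodes, 50 hexahedra
write_unv("grid.unv", nodes, cells)
```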
{
"source": "jontaydev/sublate",
"score": 3
} |
#### File: jontaydev/sublate/sublate.py
```python
import argparse
import fnmatch
import json
import os
import shutil
import types
import jinja2
import yaml
def main():
parser = argparse.ArgumentParser(prog='sublate')
parser.add_argument('path', metavar='path', type=str, nargs='?', default='.', help='path to project')
parser.add_argument('--data', metavar='data', type=str, nargs='*', help='path to data')
parser.add_argument('--output', metavar='output', type=str, nargs='?', default='output', help='path to output')
parser.add_argument('--render', metavar='render', type=str, nargs='*', help='files to render')
parser.add_argument('--remove', metavar='remove', type=str, nargs='*', help='files to remove')
args = parser.parse_args()
data = {}
    if args.data:
        for data_path in args.data:
            data.update(get_project_data(data_path))
    build(args.path, args.output, data, args.render, args.remove)
# TODO: args.output should overwrite root sublate.yaml
def build(path, output_path, data=None, render=None, remove=None):
if data is None:
data = {}
if render is None:
render = []
if remove is None:
remove = []
_build(path, output_path, data, render, remove, path)
def _build(path, output_path, data, render, remove, root_path):
local_data = get_sublate_data(path)
if "path" in local_data:
path = os.path.join(path, local_data["path"])
if "output" in local_data:
if local_data["output"][0] == "/":
output_path = local_data["output"]
else:
output_path = os.path.join(path, local_data["output"])
if "data" in local_data:
if type(local_data["data"]) is list:
            for d in local_data["data"]:
                if d[0] == "/":
                    data_path = d
                else:
                    data_path = os.path.join(root_path, d)
                data.update(get_project_data(data_path))
        else:
            if local_data["data"][0] == "/":
                data_path = local_data["data"]
            else:
                data_path = os.path.join(root_path, local_data["data"])
            data.update(get_project_data(data_path))
if "remove" in local_data:
if type(local_data["remove"]) is list:
remove = local_data["remove"]
else:
remove = [local_data["remove"]]
if "render" in local_data:
if type(local_data["render"]) is list:
render = local_data["render"]
else:
render = [local_data["render"]]
root_path = path
if os.path.exists(output_path):
shutil.rmtree(output_path)
print(f"Building: {path}")
os.mkdir(output_path)
if "path" in local_data and local_data["path"] not in [".", "./"]:
_build(path, output_path, data, render, remove, root_path)
return
# TODO: skip over data and output paths
for filename in os.listdir(path):
if filename.startswith("sublate."):
continue
if filename in remove:
continue
full_path = os.path.join(path, filename)
full_output_path = os.path.join(output_path, filename)
if os.path.isdir(full_path):
local_render = []
for r in render:
parts = r.split(os.sep)
if filename == parts[0]:
local_render.append(os.path.join(*parts[1:]))
_build(full_path, full_output_path, data, local_render, remove, root_path)
else:
for r in render:
if fnmatch.fnmatch(filename, r):
env = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader(searchpath=path),
jinja2.FileSystemLoader(searchpath=root_path)
]))
try:
template = env.get_template(filename)
except UnicodeDecodeError:
print(f"Cannot render binary file: {full_path}")
continue
output = template.render(**data).strip()
with open(full_output_path, 'w+') as f:
f.write(output)
break
else:
shutil.copy(full_path, full_output_path)
def get_project_data(path):
data = {}
if os.path.isdir(path):
for filename in os.listdir(path):
data.update(load_path(os.path.join(path, filename)))
else:
data.update(load_path(path))
return data
def get_sublate_data(path):
data = { }
if os.path.isdir(path):
for filename in os.listdir(path):
if filename.startswith("sublate."):
data.update(load_path(os.path.join(path, filename)))
else:
data.update(load_path(path))
return data
def load_path(path):
    if path.endswith(".json"):
        return load_json(path)
    if path.endswith(".yaml"):
        return load_yaml(path)
    if path.endswith(".py"):
        return load_py(path)
    return {}  # unrecognized extensions contribute no data
def load_json(path):
with open(path) as f:
return json.loads(f.read())
def load_yaml(path):
with open(path) as f:
return yaml.load(f.read(), Loader=yaml.FullLoader)
def load_py(path):
with open(path) as f:
module = types.ModuleType('module')
cwd = os.getcwd()
os.chdir(os.path.dirname(path))
exec(f.read(), module.__dict__)
os.chdir(cwd)
data = {}
for k, v in module.__dict__.items():
if k[:2] != "__" and k.isupper():
data[k.lower()] = v
return data
if __name__ == "__main__":
main()
``` |
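A minimal sketch of driving sublate programmatically instead of via the CLI (the `site/` layout and data values are hypothetical). Files matching the `render` patterns are passed through Jinja2 with the supplied data, everything else is copied verbatim, and the output directory is recreated on each run:
```python
from sublate import build

build("site", "public",
      data={"title": "Demo", "items": ["a", "b"]},
      render=["*.html"],
      remove=[".git"])
```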
{
"source": "JonTChrome/SDC_SystemIntegrationCapstone",
"score": 3
} |
#### File: src/waypoint_updater/waypoint_updater.py
```python
import numpy as np
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 20 # Number of waypoints we will publish. You can change this number
OFFSET = 5
DECEL_RATE = 0.3
STOP_COUNTER_THRESHOLD = OFFSET + LOOKAHEAD_WPS
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
self.light_idx = None
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.light_wp = None
self.current_vel = None
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.base_waypoints_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.loop()
rospy.spin()
def loop(self):
rate = rospy.Rate(20)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints and self.waypoint_tree:
current_idx = self.set_closest_waypoint_idx()
self.publish_waypoints(current_idx, self.light_idx)
rate.sleep()
def red_light_ahead(self, current_idx, light_idx):
if not light_idx:
return False
elif light_idx >= len(self.base_waypoints.waypoints):
return True
elif light_idx == -1:
return False
else:
if light_idx > current_idx:
return True
else:
return False
def set_closest_waypoint_idx(self):
x = self.pose.position.x
y = self.pose.position.y
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
closest_coord = self.waypoints_2d[closest_idx]
if self.ahead_of(closest_coord, [x, y]):
return closest_idx
else:
return (closest_idx + 1) % len(self.waypoints_2d)
def ahead_of(self, wp1, wp2):
x = self.pose.position.x
y = self.pose.position.y
cl_vect = np.array(wp1)
prev_vect = np.array(wp2)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
if val > 0:
return True
else:
return False
    def generate_temp_waypoints(self):
        lane = Lane()
        if self.pose is None or self.base_waypoints is None or self.waypoint_tree is None:
            return lane
        closest_idx = self.set_closest_waypoint_idx()
        last_index = min(len(self.base_waypoints.waypoints), closest_idx + LOOKAHEAD_WPS + OFFSET)
        lane.waypoints = self.base_waypoints.waypoints[closest_idx + OFFSET:last_index]
        return lane
def publish_waypoints(self, current_idx, light_idx):
final_lane = self.generate_lane(current_idx, light_idx)
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self, current_idx, light_idx):
lane = Lane()
farthest_idx = min(len(self.base_waypoints.waypoints), current_idx + LOOKAHEAD_WPS + OFFSET)
current_waypoints = self.base_waypoints.waypoints[current_idx + OFFSET:farthest_idx]
light_ahead = self.red_light_ahead(current_idx, light_idx)
if light_ahead:
lane.waypoints = self.decelerate_waypoints(current_waypoints, current_idx, light_idx)
else:
lane.waypoints = current_waypoints
return lane
def decelerate_waypoints(self, waypoints, current_idx, light_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(light_idx - current_idx - 2, 0)
dist = self.distance(i, stop_idx)
vel = self.current_vel
if i >= stop_idx:
vel = 0
elif dist < 25:
vel = DECEL_RATE * dist
if vel < 1:
vel = 0
p.twist.twist.linear.x = vel
temp.append(p)
return temp
def pose_cb(self, msg):
self.pose = msg.pose
pass
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
if self.waypoints_2d == None:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in self.base_waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
self.base_waypoints_sub.unregister()
def traffic_cb(self, msg):
self.light_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def velocity_cb(self, velocity):
self.current_vel = velocity.twist.linear.x
def distance(self, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(self.base_waypoints.waypoints[wp1].pose.pose.position, self.base_waypoints.waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
``` |
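A standalone sketch of the velocity taper applied in `decelerate_waypoints`, with 11 m/s as an assumed cruise speed: within 25 m of the stop index the target speed falls linearly at `DECEL_RATE`, and anything below 1 m/s is clamped to a full stop.
```python
DECEL_RATE = 0.3
current_vel = 11.0   # m/s, assumed cruise speed

def taper(dist_to_stop):
    # mirrors the logic in decelerate_waypoints for a single waypoint
    if dist_to_stop <= 0:
        return 0.0
    vel = current_vel
    if dist_to_stop < 25:
        vel = DECEL_RATE * dist_to_stop
        if vel < 1:
            vel = 0.0
    return vel

for d in (40, 20, 10, 3, 0):
    print(d, taper(d))   # 40 -> 11.0, 20 -> 6.0, 10 -> 3.0, 3 -> 0.0, 0 -> 0.0
```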
{
"source": "jonte/javel",
"score": 3
} |
#### File: tests/component-tests/commit-test.py
```python
import unittest
import os
import tempfile
from jvl_test import JvlTest, TmpGitDir
import re
class CommitTest(JvlTest):
def test_commit_on_repo_initialized_by_jvl(self):
with TmpGitDir(self) as d:
self.e("echo test > test_file")
self.jvl_e("commit \"Test commit\"")
hash_jvl = self.jvl_e("show")
hash_git = self.e("git rev-parse HEAD")
self.assertEqual(re.split(" |\n", hash_jvl)[1], hash_git.strip())
def test_commit_on_repo_initialized_by_git(self):
with TmpGitDir(self) as d:
self.e("echo test > test_file")
self.e("echo test2 > test_file2")
self.e("git add test_file")
self.e("git commit -m \"Test commit\"")
self.jvl_e("commit \"Test commit2\"")
hash_jvl = self.jvl_e("show")
hash_git = self.e("git rev-parse HEAD")
self.assertEqual(re.split(" |\n", hash_jvl)[1], hash_git.strip())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonte/tempserver",
"score": 3
} |
#### File: tempserver/tempserver/heater.py
```python
import logging
import os
from enum import Enum
if os.environ.get("DUMMY", False):
from tempserver.gpiozero_dummy import PWMOutputDevice
else:
from gpiozero import PWMOutputDevice
class HeaterMode(Enum):
PID = "PID"
OFF = "OFF"
ON = "ON"
class Heater:
def __init__(self, gpio_pin, name="Unknown", is_manual=False, sensor=None, pid=None, scheduler=None,
notify_change=None, id_=None):
self.heating = False
self.mode = HeaterMode.OFF
self.name = name
self.is_manual = is_manual
self.heating_element = PWMOutputDevice(gpio_pin, frequency=1)
self.sensor = sensor
self.pid = pid
self.scheduler = scheduler
self.notify_change = notify_change
self.id = id_
self.scheduler.add_job(self.process_pid, 'interval', seconds=1, id="pid_iteration %s" % name)
self.previous_level = -1
def _set_mode(self, mode):
if mode == HeaterMode.OFF:
self.stop_heating()
self.mode = mode
self.publish_state()
def enable_pid(self):
self._set_mode(HeaterMode.PID)
def enable(self):
self._set_mode(HeaterMode.ON)
def disable(self):
self._set_mode(HeaterMode.OFF)
def _set_heating_level(self, level):
if (self.mode == HeaterMode.OFF) and level > 0:
return "Heater not enabled"
self.heating_element.value = level
self.publish_state()
logging.info("Heating element %s level: %f" % (self.name, level))
self.heating = (level > 0)
self.previous_level = level
def stop_heating(self):
return self._set_heating_level(0)
def start_heating(self, level):
return self._set_heating_level(level)
def process_pid(self):
self._set_heating_level(self.pid.output / 100.0)
def publish_state(self):
if self.notify_change:
self.notify_change(("vessel-power-" + self.id,
self.heating_element.value * 100))
self.notify_change(("vessel-heater-" + self.id, self))
```
#### File: tempserver/tempserver/power.py
```python
class Power:
def __init__(self, power):
self.power = power
```
#### File: tempserver/tempserver/vessel.py
```python
class Vessel:
def __init__(self, id_, name, sensor=None, pid=None, heater=None):
self.id = id_
self.name = name
self.sensor = sensor
self.pid = pid
self.heater = heater
``` |
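A minimal wiring sketch for `Heater`. Assumptions: APScheduler supplies the scheduler, the PID object only needs the `output` attribute read by `process_pid`, and setting `DUMMY=1` before import selects the bundled gpiozero stub so no real GPIO is touched:
```python
import os
os.environ["DUMMY"] = "1"   # must be set before the import so the gpiozero stub is used

from apscheduler.schedulers.background import BackgroundScheduler
from tempserver.heater import Heater

class FakePid:              # hypothetical stand-in; Heater only reads .output
    output = 42.0

scheduler = BackgroundScheduler()
heater = Heater(17, name="HLT", pid=FakePid(), scheduler=scheduler,
                notify_change=print, id_="hlt")
scheduler.start()
heater.enable_pid()         # process_pid now drives the element at pid.output / 100
```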
{
"source": "jontev/postnord-optimization",
"score": 3
} |
#### File: jontev/postnord-optimization/gui.py
```python
import tkinter as tk
import sys
from tkinter import filedialog
import random
import numpy as np
import pandas as pd
import math
import seaborn as sns
sys.path.append('Portplanering')
sys.path.append('Bilbokning/src')
from bilbokning import calculate_carriages
HEURISTICS = ['local_search',
'simulated_annealing',
'variable_neighbourhood_search',
'tabu_search']
NEIGHBOURHOODS = ['swap_port',
'swap_time',
'move_port',
'move_time']
zone_dict = {
0: 'TÄLT ',
1: 'FRIST ',
2: 'MPFTG\t',
3: 'MPBVV\t',
4: 'MPJÄR\t',
5: 'HPALL\t',
6: 'ADR ',
7: 'ENTEB\t',
8: 'ENTST\t'
}
# Function to change the order file; not to be used during testing
def browse_files():
filename = filedialog.askopenfilename(title = "Select a File",
filetypes = (("Text files",
"*.csv*"),
("all files",
"*.*")))
w.configure(text="File Opened: "+filename)
#----------------------------------FUNCTIONS-----------------------------------
# Global variables to be used for prints etc.
global running
global best_cost
global best_solution
global COSTS1
global COSTS2
running = False
# Runs the port planning heuristic loop while the global 'running' flag is True
def portplanering():
global running
global best_cost
global best_solution
global COSTS1
global COSTS2
COSTS1 = []
COSTS2 = []
from heuristic import run_heuristics
from generate_solution import generate_random_solution
from transportproblem_pulp import transportproblem
# Generate a feasible solution
y = generate_random_solution()
# Calculate the current cost
cost = transportproblem(y)[0]
best_cost = cost
best_solution = np.copy(y)
# Initial constans for SA and Tabu search
temp = 1000
tabu_list_max_len = 10
# Initial Tabu list for tabusearch
tabu_list = []
# Insert an initial word into the text
T.insert(tk.END, 10)
# Set neighbour to the chosen one through gui.
neighbour = chosen_neighbour.get()
local_opt = False
# running == True whenever the search for a heuristic is on
ctr = 0
while running:
ctr += 1
# Start a heuristic iteration
cost, y, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len, neighbour = \
run_heuristics(y, cost, chosen_heuristic.get(), neighbour, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len)
# Remove the previous output and insert the current cost
T.delete("1.0", "end")
T.insert(tk.END, cost)
# Generate a new random neighbourhood is condition is fulfilled.
if local_opt:
neighbour = NEIGHBOURHOODS[random.randrange(2)]
local_opt = False
m.update()
        if ctr == 200:
            running = False
            break
def save_pic(cos, colname, filename):
df = pd.DataFrame([cos])
df = df.T
df.columns = colname
a = sns.lineplot(data=df[0:199])
figure = a.get_figure()
figure.savefig(filename+'.pdf')
figure.savefig(filename+'.png')
# function destroys window
def destroy_window():
m.destroy()
# If both Bilbokning and Portplanering are checked, Bilbokning runs first
# and Portplanering runs after it.
def run_program():
# If Bilbokning is checked, it starts bilbokning
if bilv.get() == 1:
date=T.get("1.0", 'end-1c')
calculate_carriages(slid.get(), date)
d.configure(text="Date: "+date)
# If Portplanering is checked, it starts portplanering
if portv.get() == 1:
global running
        # Sets the global variable to True, meaning the heuristic is running.
running = True
portplanering()
# Stop-button will not stop Bilbokning. Only heuristic search.
def stop_program():
from transportproblem_pulp import transportproblem
global running
global best_solution
if portv.get() == 1:
running = False
T.delete("1.0", "end")
# Calculate the cost of the best_solution found so far.
cost, x = transportproblem(best_solution)
# Print it in window and run solution_to_txt
T.insert(tk.END, 'Best solution found: ' + str(cost))
solution_to_txt(cost, x)
#------------------------------------------------------------------------------
# -------------------------------Window----------------------------------------
# Creates a window with every orders assigned ports
def view_solution():
L = pd.read_csv('Portplanering/Lj.csv')
number_of_orders = len(L)
J = range(number_of_orders)
import csv
def showSol():
top2 = tk.Toplevel()
with open('solution/'+str(chosen_order_list.get())+'.csv', newline='') as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
label = tk.Label(top2,
width = 10,
height = 2,
text = row,
relief = tk.RIDGE)
label.grid(row = r, column = c)
c += 1
r += 1
# Define buttons
top = tk.Toplevel()
top.title('Solution window')
chosen_order_list = tk.StringVar(top)
chosen_order_list.set(J[0])
op_menu_order = tk.OptionMenu(top, chosen_order_list, *J)
op_menu_order.pack()
button_open_solution = tk.Button(top,
text='Show solution',
command = showSol)
button_open_solution.pack()
# function creates a txtfile to view the current output in a textfile
def solution_to_txt(cost, x):
L = pd.read_csv('Portplanering/Lj.csv')
S = pd.read_csv('Portplanering/Sj.csv')
dij = pd.read_csv('Portplanering/dij.csv')
mj = pd.read_csv('Portplanering/mj.csv')
a = pd.read_csv('Portplanering/aip.csv')
a = np.array(a)
a = a.T
NUMBER_OF_PORTS = 40
list_of_vehicles = L+S
list_of_vehicles = list_of_vehicles.values.tolist()
list_of_vehicles = [val for sublist in list_of_vehicles for val in sublist]
number_of_orders = len(L)
# ------------------
# Functions for the solution window
# Sort x so its sorted for i(zone) -> p(port) -> j(order), from the LP-solver PuLP
x_sorted=[]
for i in range(9):
for p in range(40):
for j in range(number_of_orders):
this_index = np.where(x == 'x_'+str(i)+'_'+str(p)+'_'+str(j))[0]
x_sorted.append(int(float(x[this_index][0][1])))
# Getters for x_index
def get_zone(x_index):
return math.floor(x_index/(number_of_orders*NUMBER_OF_PORTS))
def get_port(x_index):
return math.floor((x_index % (number_of_orders*NUMBER_OF_PORTS)) / number_of_orders)
def get_order(x_index):
return (x_index % (number_of_orders*NUMBER_OF_PORTS)) % number_of_orders
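    # Worked example of the flat-index arithmetic (illustrative numbers): with
    # number_of_orders = 5, index 438 = 2*(40*5) + 7*5 + 3 encodes zone 2, port 7,
    # order 3, and get_zone/get_port/get_order recover exactly that triple.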
x_sorted=np.array(x_sorted)
ny=[]
x_sorted_index = np.where(x_sorted != 0)[0]
for i in x_sorted_index:
ny.append([get_order(i), get_zone(i), get_port(i), x_sorted[i]])
# Creates CSV-files for each order, with port and transportation data.
for order in range(number_of_orders):
d = pd.DataFrame(np.zeros((9,0)))
for i in ny:
if i[0] == order:
d.at[i[1],i[2]] = i[3]
d.to_csv('solution/'+str(order)+'.csv', index=False)
# --------------------------TO TXT---------------------------
# Constants
ORDER_STEP_LENGTH = 160
TIME_STEP_LENGTH = 80
VEHICLE_STEP_LENGTH = 40
def get_order_yindex(vehicle_index):
order = math.floor(vehicle_index / ORDER_STEP_LENGTH)
return order
def get_time_yindex(vehicle_index):
time = math.floor((vehicle_index % ORDER_STEP_LENGTH) / TIME_STEP_LENGTH)
return time
def get_port_yindex(vehicle_index):
port = ((vehicle_index % ORDER_STEP_LENGTH) % TIME_STEP_LENGTH) % VEHICLE_STEP_LENGTH
return port
def get_vehicle_type_yindex(vehicle_index):
return math.floor(((vehicle_index % ORDER_STEP_LENGTH) % TIME_STEP_LENGTH) / VEHICLE_STEP_LENGTH)
# Number of timeslot used for this order
num_of_times = int(max(np.array(mj)))+1
# Get y_index
y_index = np.where(best_solution != 0)[0]
# y_index split for each timeslot
y_index_time = [[] for i in range(num_of_times)]
time_order = [[] for i in range(num_of_times)]
# time_order contains all the orders at a specific time.
# y_index_time contains the y_index at a specific time.
for i in y_index:
for j in range(num_of_times):
if get_time_yindex(i) == j:
y_index_time[j].append(i)
time_order[j].append(get_order_yindex(i))
for i in range(len(time_order)):
time_order[i] = list(set(time_order[i]))
time_order[i] = [int(x) for x in time_order[i]]
time_order[i].sort()
    # Recompute the cost as the real cost:
cost = 0
for j in range(number_of_orders):
for p in range(NUMBER_OF_PORTS):
for i in range(9):
cost += a[i,p] * x_sorted[i*NUMBER_OF_PORTS*number_of_orders + p*number_of_orders + j]
# Writes this data to a .txt
with open('solution.txt', 'w') as file:
# This 'Datum' has to be set if you create for a certain date
file.write('------------------------------------------------------------\n')
file.write('Datum: XXXX-XX-XX Tidsintervall: '+str(num_of_times)+'\n')
file.write('----------------------------------------------------------\n')
# cost = best_cost found so far
file.write('Total sträcka: '+str(cost)+'\n')
file.write('Ordrar\n')
# Shows on what time slot the orders have been set
for t in range(num_of_times):
file.write(str(t)+': ')
for i in time_order[t]:
file.write(str(i)+', ')
file.write(' \n')
file.write('------------------------------------------------------------\n\n')
file.write('------------------------------------------------------------\n')
file.write('Port\tT = 1\t\t\tT = 2\n')
        # Shows, for each port, which orders are set for each time slot
for p in range(40):
first_time='--------'
second_time='--------'
for i in y_index_time[0]:
if get_time_yindex(i)==0 and get_port_yindex(i) == p:
if get_vehicle_type_yindex(i) == 0:
amount = '(18)'
else:
amount = '(30)'
first_time = str(get_order_yindex(i))+' '+amount
for i in y_index_time[1]:
if get_time_yindex(i)==1 and get_port_yindex(i) == p:
if get_vehicle_type_yindex(i) == 0:
amount = '(18)'
else:
amount = '(30)'
second_time = str(get_order_yindex(i))+' '+amount
file.write(str(p+1)+'\t'+first_time+'\t\t'+ second_time+'\n')
        # Shows, for each time slot, which orders are set for each port
for t in range(num_of_times):
file.write('\n\nTidsintervall: ' + str(t) + '\n')
file.write('------------------------------------------------------------\n')
file.write('ORDER\t\t TOT\t BIL+SLÄP\t\t PORT (#PALL)\n')
order = -1
for j in y_index_time[t]:
if order==get_order_yindex(j):
port = get_port_yindex(j)
num_of_pallets = 0
for i in range(9):
num_of_pallets += x_sorted[order + number_of_orders*port + i*(40*number_of_orders)]
file.write(' & '+str(port+1)+' ('+str(num_of_pallets)+')')
else:
order = get_order_yindex(j)
tot = dij.sum(axis=0)[order]
fordon = str(L.at[order,'0'])+' + '+str(S.at[order,'0'])
port = get_port_yindex(j)
num_of_pallets = 0
for i in range(9):
num_of_pallets += x_sorted[order + number_of_orders * port + i*(40*number_of_orders)]
file.write('\n'+str(order)+'\t\t'+str(tot)+'\t\t'+str(fordon)+'\t\t'+str(port+1)+' ('+str(num_of_pallets)+')')
        # Creates specific data for each order.
for j in range(number_of_orders):
file.write('\n------------------------------------------------------------\n\n')
file.write('------------------------------------------------------------\n')
vehicles =[]
for j2 in y_index:
if get_order_yindex(j2) == j:
vehicles.append(j2)
#print(j)
#print(y_index)
file.write('Order\t'+str(j)+' '+'\tTidsintervall: '+str(get_time_yindex(vehicles[0]))+'\n\n')
file.write('Bil')
for v in vehicles:
if get_vehicle_type_yindex(v) == 0:
file.write('\t\t18')
elif get_vehicle_type_yindex(v) == 1:
if len(vehicles) == 2:
file.write('\t30')
else:
file.write('\t\t30')
file.write('\nPort\t\t')
for v in vehicles:
file.write(str(get_port_yindex(v))+'\t')
file.write('\n------------------------------------------------------------')
for i in range(9):
file.write('\n'+zone_dict[i]+'\t')
for v in vehicles:
port = get_port_yindex(v)
order = get_order_yindex(v)
file.write(str(x_sorted[order + number_of_orders * port + i*(40*number_of_orders)])+'\t')
# ------------------------------------------------------------------------------
# Creates the gui window
m = tk.Tk()
m.geometry('600x400')
m.title(' xXx Bilbokning | Portplanering xXx')
# Define frames
top_frame = tk.Frame(m)
top_frame.pack(side=tk.TOP)
left_frame = tk.Frame(m)
left_frame.pack(side=tk.LEFT)
right_frame = tk.Frame(m)
right_frame.pack(side=tk.RIGHT)
bottom_frame=tk.Frame(m)
bottom_frame.pack(side=tk.BOTTOM)
w = tk.Label(top_frame, text='No file chosen', font = '100')
d = tk.Label(top_frame, text='No date chosen', font = '100')
#------------------------------------------------------------------------------
#----------------------------------Slider--------------------------------------
#Define a slider to change packing factor, DEFAULT=0.8
slid = tk.Scale(left_frame, from_=0.20, to=1.0, orient=tk.HORIZONTAL, resolution=0.05)
slid.set(0.80)
slid.pack()
#------------------------------------------------------------------------------
#---------------------------Options Meny for heuristics------------------------
# Option menu for heuristics
chosen_heuristic = tk.StringVar(m)
chosen_heuristic.set(HEURISTICS[0])
opmenu = tk.OptionMenu(right_frame, chosen_heuristic, *HEURISTICS)
# Option menu for starting neighbourhood
chosen_neighbour = tk.StringVar(m)
chosen_neighbour.set(NEIGHBOURHOODS[0])
opmenu_n = tk.OptionMenu(right_frame, chosen_neighbour, *NEIGHBOURHOODS)
#--------------------------------Buttons etc-----------------------------------
bilv=tk.IntVar()
portv=tk.IntVar()
stapling=tk.IntVar()
#Checkbuttons to choose which script should run. Bilbokning or Portplanering
check_bilbokning = tk.Checkbutton(left_frame, text = 'Bilbokning',
variable=bilv,
onvalue=1,
offvalue=0,
height=2,
width=15,
state=tk.NORMAL)
check_bilbokning.pack(side=tk.TOP)
check_portplanering=tk.Checkbutton(left_frame, text = 'Portplanering',
variable = portv,
onvalue = 1,
offvalue = 0,
height = 2,
width = 15,
state = tk.NORMAL)
check_portplanering.pack(side=tk.BOTTOM)
# A check if stackability should be used
check_staplingsbarhet=tk.Checkbutton(right_frame,text='Staplingsbarhet',
variable=stapling,
onvalue=1,
offvalue=0,
height=2,
width=15,
state=tk.DISABLED)
check_staplingsbarhet.pack(side=tk.TOP)
#if bilv != 1:
# check_staplingsbarhet.config(state=tk.DISABLED)
#else:
# check_staplingsbarhet.config(state=tk.NORMAL)
# Pack the option menus
opmenu_n.pack()
opmenu.pack()
# Stop button to stop the heuristic search
button_stop = tk.Button(right_frame,
text = 'Stopp',
command = stop_program)
button_stop.pack(side=tk.RIGHT)
# Start button to run heuristic or bilbokning
button_run = tk.Button(right_frame,
text = 'Kör',
command = run_program)
button_run.pack(side=tk.RIGHT)
button_solution = tk.Button(top_frame,
text = 'Lösning',
command = view_solution)
button_solution.pack()
# Button to exit the window
button_exit = tk.Button(top_frame,
text = 'Avsluta',
command = destroy_window)
# Button to explore a file
button_explore = tk.Button(top_frame,
text = 'Bläddra',
command = browse_files)
button_explore.pack(side=tk.TOP)
button_exit.pack(side=tk.TOP)
w.pack()
d.pack()
# Text frame to show the current objective value and to enter a valid date to test
T = tk.Text(bottom_frame, height = 1, width = 100)
T.pack()
T.insert(tk.END, '2021-03-16')
#------------------------------------------------------------------------------
# Loop the window
m.mainloop()
```
#### File: postnord-optimization/Portplanering/generate_solution.py
```python
import numpy as np
from numpy import random
import pandas as pd
this_path = 'Portplanering/'
NUMBER_OF_PORTS = 40
NUMBER_OF_TIMES = 2
NUMBER_OF_VEHICLES = 2
order_step_length = NUMBER_OF_PORTS*NUMBER_OF_VEHICLES*NUMBER_OF_TIMES
time_step_length = NUMBER_OF_PORTS*NUMBER_OF_VEHICLES
vehicle_step_length = NUMBER_OF_PORTS
P = list(range(NUMBER_OF_PORTS))
T = list(range(NUMBER_OF_TIMES))
V = list(range(NUMBER_OF_VEHICLES)) # 0 = lastbil, 1 = släp
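# Layout note: y is a flat 0/1 vector indexed as
# order*order_step_length + time*time_step_length + vehicle*vehicle_step_length + port,
# which is exactly how ok_to_place() and generate_random_solution() compute indices below.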
#Checks if a vehicle is ok to place at a given port at a given time for a given order.
#Divided into 3 parts, each creating a boolean and every boolean has to be true to place a vehicle.
def ok_to_place(y, order, time, vehicle, port, vehicle_counter, schedule, NUMBER_OF_ORDERS, vehicle_cap, L, S, m):
# Can't use more vehicles than vehicle capacity for the order (At most 1 truck and 1 trailer)
available_vehicles = (vehicle_counter[vehicle,order] < vehicle_cap[vehicle,order])[0]
# Checks if there are enough slots for all the vehicles at the time
    # This is only checked when placing the first vehicle; if two are needed, the second will always be OK
available_timeslot = False
#Nothing scheduled
if sum(schedule[order,:]) == 0:
occupied_ports = 0
for k in range(NUMBER_OF_ORDERS):
occupied_ports += sum(y[(k*order_step_length + time*time_step_length) :
((k*order_step_length + (time + 1)*time_step_length))])
num_available_ports = NUMBER_OF_PORTS - occupied_ports
#Is the amount of vehicles in this order less than number of available ports
if (L[order] + S[order]) <= num_available_ports and m[order] >= time:
available_timeslot = True
schedule[order,time] = 1
#If first vehicle is already placed, the next vehicle must be ok to place at same time slot.
elif schedule[order,time] == 1:
available_timeslot = True
# Port can't be occupied by any type of vehicle for any other order at the same time interval
available_port = True
for orders in range(NUMBER_OF_ORDERS):
for vehicles in range(NUMBER_OF_VEHICLES):
idx = orders*order_step_length + time*time_step_length + vehicles*vehicle_step_length + port
if y[idx] != 0:
available_port = False
return available_port and available_timeslot and available_vehicles
def generate_random_solution():
S = pd.read_csv(this_path+'Sj.csv')
L = pd.read_csv(this_path+'Lj.csv')
m = pd.read_csv(this_path+'mj.csv')
dij = pd.read_csv(this_path+'dij.csv')
S = S.to_numpy()
L = L.to_numpy()
m = m.to_numpy()
dij = dij.to_numpy()
NUMBER_OF_ORDERS = len(L)
#timeslot 0, vehicle 1 and port 2
J = list(range(NUMBER_OF_ORDERS))
vehicle_cap = np.array((L,S)) #Matrix with first row L second row S
vehicle_counter = np.zeros([NUMBER_OF_VEHICLES, NUMBER_OF_ORDERS])
y = np.zeros(NUMBER_OF_ORDERS*NUMBER_OF_TIMES*NUMBER_OF_VEHICLES*NUMBER_OF_PORTS) #Vectorize y variable
schedule = np.zeros([NUMBER_OF_ORDERS, NUMBER_OF_TIMES])
    # Sort orders so that morning orders are prioritized
m_n = np.array(m)
m_n = [val for sublist in m_n for val in sublist]
J = [x for _,x in sorted(zip(m_n,J))]
# randomly order sets to avoid any bias
random.shuffle(P); random.shuffle(V); random.shuffle(T)
for order in J:
random.shuffle(T) #shuffle again to avoid bias
for time in T:
random.shuffle(V)
            for vehicle in V: # randomly ordered vehicles
random.shuffle(P)
for port in P: # randomly ordered ports
index = order*order_step_length + time*time_step_length + vehicle*vehicle_step_length + port
if ok_to_place(y, order, time, vehicle, port, vehicle_counter, schedule, NUMBER_OF_ORDERS, vehicle_cap, L, S, m):
y[index] = 1 #place a vehicle
vehicle_counter[vehicle,order] += 1 #a vehicle of type vehicle has been used for order order
return y
``` |
{
"source": "JonthanLT/ts-slack-twitter",
"score": 3
} |
#### File: JonthanLT/ts-slack-twitter/ts_bot_stream.py
```python
import time
import json
import sys
import tweepy
import slackweb
import configparser
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
def read_config(filename = 'twitter.ini'):
config = configparser.ConfigParser()
config.read(filename)
data = {}
for key in config['config']:
data[key] = config['config'][key]
return data
def read_twitter_id(filename = 'twitter.ini'):
config = configparser.ConfigParser()
config.read(filename)
data = []
for key in config['id']:
data.append(config['id'][key])
return data
def get_auth():
cfg = read_config()
auth = tweepy.OAuthHandler(cfg['consumer_key'], cfg['consumer_secret'])
auth.set_access_token(cfg['access_token'], cfg['access_token_secret'])
return auth
def get_api():
return tweepy.API(get_auth())
def get_slack():
return slackweb.Slack(url=read_config('slack.ini')['webhook'])
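# For reference, an assumed layout of the two ini files, inferred from the keys read above:
# twitter.ini:
#   [config]
#   consumer_key = ...
#   consumer_secret = ...
#   access_token = ...
#   access_token_secret = ...
#   [id]
#   some_account = 123456789
# slack.ini only needs a [config] section with a single 'webhook' URL entry.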
class StdOutListener(StreamListener):
def on_data(self, data):
if 'delete' in json.loads(data):
return True
if 'RT' in json.loads(data)['text']:
return True
if str(json.loads(data)['user']['id']) in ids:
slack = get_slack()
print(data)
slack.notify(text=json.loads(data)['text'])
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
    ids = read_twitter_id()  # module-level name used by StdOutListener.on_data to filter authors
    l = StdOutListener()
    stream = Stream(get_auth(), l)
    stream.filter(follow=ids)
``` |
{
"source": "jonthdiaz/django-copy",
"score": 2
} |
#### File: django-copy/kopy/models.py
```python
from django.db import models
#from datatrans.utils import register
class Copy(models.Model):
key = models.CharField(max_length=500, unique=True, editable=True)
text = models.TextField()
def __unicode__(self):
return u'%s' % self.text
class Meta:
db_table = 'copy_copy'
#class CopyTranslation(object):
#fields = ('text',)
#register(Copy, CopyTranslation)
``` |
{
"source": "jonthebeach/estimator-bot",
"score": 3
} |
#### File: src/messages/excuses_message.py
```python
from jira_client import JiraClient
import random
class ExcusesMessage:
def __init__(self, bot):
self.bot = bot
self.words = [':excuse']
        self.excuses = ["What about security?", "Consider caching issues", "Browser compatibility may be a concern", "Documentation can be hard to write", "It might require hard work at the integration level",
                        "Accessibility has to be taken into account", "It might require reinventing the wheel", "Have you considered making a POC?", "Is scaffolding required?"]
def listenTo(self, channel, message, event):
return any([word in message.lower() for word in self.words])
def reactTo(self, channel, message, event):
self.bot.sendMessage(channel, random.choice(self.excuses))
``` |
{
"source": "JonTheBurger/python_class",
"score": 4
} |
#### File: chapter 3/lessons/function_parameters.py
```python
def times2(x):
return x * 2
# Functions can also be passed into functions. Here, times2 is passed in as the "transformation" parameter.
def transform(items, transformation):
for i in range(len(items)):
items[i] = transformation(items[i])
numbers = [1, 2, 3, 4, 5]
transform(numbers, times2)
for num in numbers:
print(num)
```
#### File: chapter 3/lessons/functions.py
```python
def squared(x):
# The "return" statement defines the output of a function.
return x ** 2
# A function input that satisfies a parameter is called an "argument".
# Many people use argument and parameter interchangeably, which is fine.
print(squared(2))
def say_hello():
# If a function contains no return statement, it returns None.
print('hello')
output = say_hello()
print(output)
# Functions can call other functions
def cubed(x):
return squared(x) * x
# print() is a function provided by Python!
print(cubed(3))
```
#### File: chapter 4/lessons/classes.py
```python
class Rectangle:
# Functions that begin and end with __ are "magic methods" reserved by Python.
    # The __init__ function initializes a new instance of your Rectangle class (calling Rectangle(...) returns that instance).
# self refers to the current instance of Rectangle.
# (self is just a naming convention. In reality, the first parameter is what refers to the current instance.)
def __init__(self, length, width):
self.length = length # Create member variables length and width.
self.width = width
# Create member function area. Notice that self (the current rectangle) is always passed as the first parameter.
def area(self):
return self.length * self.width
rect = Rectangle(2, 3)
print(rect)
print(rect.area())
```
#### File: chapter 4/lessons/privates.py
```python
class RestroomStall:
def __init__(self):
self._occupants = 0 # protected variables discourage use
self.__bathroom_cam = 0 # private variables prevent use
def is_occupied(self):
return self._occupants != 0
def enter(self):
if not self.is_occupied():
print('You have entered the bathroom')
self._occupants += 1
else:
print("You can't enter the bathroom, it's occupied!")
def exit(self):
if self.is_occupied():
print("You're exiting the bathroom")
self._occupants -= 1
else:
print('ERROR! Attempted restroom exit with 0 occupants!')
stall = RestroomStall()
stall.enter()
stall.exit()
stall.enter()
stall.enter()
# If we really need to, we can access protected variables,
# but the class author is trying to tell you that modification is dangerous!
stall._occupants = 0
stall.enter()
# However, we cannot access private variables! (This line raises an AttributeError because of Python's name mangling.)
stall.__bathroom_cam
``` |
{
"source": "JonTheNiceGuy/fortios-ansible-generator",
"score": 3
} |
#### File: fortios-ansible-generator/scripts/generate_modules.py
```python
from jinja2 import Template, Environment, FileSystemLoader
import json
import autopep8
import os
import re
import sys
def replaceSpecialChars(str):
return str.replace('-', '_').replace('.', '_').replace('+', 'plus')
def getModuleName(path, name):
return replaceSpecialChars(path.lower()) + "_" + replaceSpecialChars(name.lower())
def searchProperBreakableChar(line, startingPosition):
breakableChars = " :.,;"
for i in reversed(range(0, startingPosition)):
if line[i] in breakableChars:
return i
return startingPosition
def numberOfInitialSpaces(line):
return len(line) - len(line.lstrip()) + 2
def splitLargeLines(output):
output = output.splitlines()
for i in range(0, len(output)):
line = output[i]
if len(line) > 159:
position = searchProperBreakableChar(line, 159)
initialSpaces = " " * numberOfInitialSpaces(output[i])
output.insert(i + 1, initialSpaces + line[position:])
output[i] = output[i][:position]
output = '\n'.join(output)
return output
def calculateFullPath(parent_attrs, attribute_name):
return attribute_name if not parent_attrs else parent_attrs + ',' + attribute_name
def hyphenToUnderscore(data):
if isinstance(data, list):
for elem in data:
elem = hyphenToUnderscore(elem)
return data
elif isinstance(data, dict):
for k, v in data.items():
if not (len(data) == 2 and 'name' in data and 'help' in data) and \
k != 'help':
# Only change hyphens for names and complex types.
# Simple types (enums) only contain name and help
# Also, avoid changing hyphens in 'help' attributes
data[k] = hyphenToUnderscore(v)
return data
elif isinstance(data, str):
return data.replace('-', '_')
elif isinstance(data, unicode):
return data.encode('utf-8').replace('-', '_')
else:
return data
def invalid_attr_to_valid_attr(data, valid_identifiers):
    if data in valid_identifiers:
return valid_identifiers[data]
return data
def invalid_attr_to_valid_attrs(data, valid_identifiers, valid_identifiers_module):
if isinstance(data, list):
for elem in data:
elem = invalid_attr_to_valid_attrs(elem, valid_identifiers, valid_identifiers_module)
return data
elif isinstance(data, dict):
for k, v in data.items():
if not (len(data) == 2 and 'name' in data and 'help' in data) and \
k != 'help':
data[k] = invalid_attr_to_valid_attrs(v, valid_identifiers, valid_identifiers_module)
return data
elif isinstance(data, str):
data1 = invalid_attr_to_valid_attr(data, valid_identifiers)
if data1 != data:
valid_identifiers_module[data] = valid_identifiers[data]
return data1
elif isinstance(data, unicode):
data1 = invalid_attr_to_valid_attr(data.encode('utf-8'), valid_identifiers)
if data1 != data:
valid_identifiers_module[data] = valid_identifiers[data]
return data1
else:
return data
def removeDefaultCommentsInFGTDoc(str):
regex = r"(-.*\(.*?)(, ){0,1}([D|d]efault[ |:|=\n]+.*)(\))"
str = re.sub(regex, r"\g<1>\g<4>", str)
regex = r"(-.*)\(\)"
str = re.sub(regex, r"\g<1>", str)
return str
def renderModule(schema, version, special_attributes, valid_identifiers, version_added, supports_check_mode, movable=False):
# Generate module
file_loader = FileSystemLoader('ansible_templates')
env = Environment(loader=file_loader,
lstrip_blocks=False, trim_blocks=False)
if 'children' not in schema['schema']:
print('warning: not a valid schema, skip.')
return
schema['schema'] = hyphenToUnderscore(schema['schema'])
valid_identifiers_module = {}
schema['schema']['children'] = invalid_attr_to_valid_attrs(schema['schema']['children'], valid_identifiers, valid_identifiers_module)
short_description = schema['schema']['help'][:-1] + " in Fortinet's FortiOS and FortiGate."
description = ""
original_path = schema['path']
original_name = schema['name']
path = replaceSpecialChars(original_path).lower()
name = replaceSpecialChars(original_name).lower()
module_name = "fortios_" + path + "_" + name
if module_name in version_added:
module_version_added = version_added[module_name]
else:
module_version_added = '2.10'
#try:
# module_version_added = version_added[module_name]
#except KeyError:
# print("cannot find", module_name)
# return
mkeyname = None
if 'mkey' in schema['schema']:
mkeyname = schema['schema']['mkey']
special_attributes_flattened = [','.join(x for x in elem) for elem in special_attributes]
template = env.get_template('doc.j2')
output = template.render(calculateFullPath=calculateFullPath, **locals())
template = env.get_template('examples.j2')
output += template.render(**locals())
template = env.get_template('return.j2')
output += template.render(**locals())
template = env.get_template('code.j2')
output += template.render(calculateFullPath=calculateFullPath, vi=valid_identifiers_module, **locals())
dir = 'output/' + version + '/' + path
if not os.path.exists(dir):
os.makedirs(dir)
file = open('output/' + version + '/' + path + '/fortios_' + path + '_' + name + '.py', 'w')
output = removeDefaultCommentsInFGTDoc(output)
output = splitLargeLines(output)
file.write(output)
file.close()
# Generate example
file_example = open('output/' + version + '/' + path + '/fortios_' + path +
'_' + name + '_example.yml', 'w')
template = env.get_template('examples.j2')
output = template.render(**locals())
lines = output.splitlines(True)
file_example.writelines(lines[2:-1])
file_example.close()
# Generate test
file_example = open('output/' + version + '/' + path + '/test_fortios_' + path +
'_' + name + '.py', 'w')
template = env.get_template('tests.j2')
output = template.render(**locals())
lines = output.splitlines(True)
file_example.writelines(lines)
file_example.close()
print("\033[0mFile generated: " + 'output/' + version + '/\033[37mfortios_' + path + '_' + name + '.py')
print("\033[0mFile generated: " + 'output/' + version + '/\033[37mfortios_' + path + '_' + name + '_example.yml')
print("\033[0mFile generated: " + 'output/' + version + '/\033[37mtest_fortios_' + path + '_' + name + '.py')
def convert_mkey_type(mkey_type):
if mkey_type is None:
return None
if mkey_type == 'integer':
return 'int'
return 'str'
def renderFactModule(schema_results, version):
# Generate module
file_loader = FileSystemLoader('ansible_templates')
env = Environment(loader=file_loader,
lstrip_blocks=False, trim_blocks=False)
template = env.get_template('fact.j2')
selector_definitions = {
schema_result['path'] + "_" + schema_result['name']: {
'mkey': schema_result['schema'].get('mkey', None),
'mkey_type': convert_mkey_type(schema_result['schema'].get('mkey_type', None)),
}
for schema_result in schema_results
if 'diagnose' not in schema_result['path'] and 'execute' not in schema_result['path']
}
output = template.render(**locals())
output_path = 'output/' + version + '/fortios_configuration_fact.py'
file = open(output_path, 'w')
output = splitLargeLines(output)
file.write(output)
file.close()
print('generated config fact in ' + output_path)
return output_path
def jinjaExecutor(number=None):
fgt_schema_file = open('fgt_schema.json').read()
fgt_schema = json.loads(fgt_schema_file)
fgt_sch_results = fgt_schema['results']
special_attributes_file = open('special_attributes.lst').read()
special_attributes = json.loads(special_attributes_file)
valid_identifiers_file = open('valid_identifiers.lst').read()
valid_identifiers = json.loads(valid_identifiers_file)
version_added_file = open('version_added.json').read()
version_added_json = json.loads(version_added_file)
check_mode_support_file = open('check_mode_support.txt').read()
check_mode_support_set = set(check_mode_support_file.split('\n'))
movable_modules_file = open('movable_modules.lst').read()
movable_modules = json.loads(movable_modules_file)
autopep_files = './output/' + fgt_schema['version']
if not number:
real_counter = 0
for i, pn in enumerate(fgt_sch_results):
if 'diagnose' not in pn['path'] and 'execute' not in pn['path']:
module_name = getModuleName(pn['path'], pn['name'])
print('\n\033[0mParsing schema:')
print('\033[0mModule name: \033[92m' + module_name)
print('\033[0mIteration:\033[93m' + str(real_counter) + "\033[0m, Schema position: \033[93m" + str(i))
renderModule(fgt_sch_results[i],
fgt_schema['version'],
special_attributes[module_name] if module_name in special_attributes else [],
valid_identifiers,
version_added_json,
module_name in check_mode_support_set,
module_name in movable_modules)
real_counter += 1
else:
module_name = getModuleName(fgt_sch_results[number]['path'], fgt_sch_results[number]['name'])
renderModule(fgt_sch_results[number],
fgt_schema['version'],
special_attributes[module_name] if module_name in special_attributes else [],
valid_identifiers,
version_added_json,
module_name in check_mode_support_set)
autopep_files = './output/' + \
fgt_schema['version'] + '/' + \
replaceSpecialChars(fgt_sch_results[number]['path']) + \
'/fortios_' + replaceSpecialChars(fgt_sch_results[number]['path']) + '_' + replaceSpecialChars(fgt_sch_results[number]['name']) + '.py'
autopep_files += ' ./output/' + \
fgt_schema['version'] + '/' + \
replaceSpecialChars(fgt_sch_results[number]['path']) + \
'/test_fortios_' + replaceSpecialChars(fgt_sch_results[number]['path']) + '_' + replaceSpecialChars(fgt_sch_results[number]['name']) + '.py'
autopep_files += ' ' + renderFactModule(fgt_sch_results, fgt_schema['version'])
# there is an escape letter in fortios_vpn_ssl_settings.py, replace it.
os.popen("sed -i 's/Encode \\\\2F sequence/Encode 2F sequence/g' ./output/" + fgt_schema['version'] + "/vpn_ssl/fortios_vpn_ssl_settings.py")
# copy licence modules
licence_output_folder = './output/' + fgt_schema['version'] + '/licence'
os.popen('mkdir -p ' + licence_output_folder)
os.popen('cp ./galaxy_templates/licence_modules/* ' + licence_output_folder)
from generate_modules_utility import generate_cofiguration_fact_rst
generate_cofiguration_fact_rst(fgt_sch_results, fgt_schema['version'])
print("\n\n\033[0mExecuting autopep8 ....")
# Note this is done with popen and not with autopep8.fix_code in order to get the multiprocessig optimization, only available from CLI
os.popen('autopep8 --aggressive --max-line-length 160 --jobs 8 --ignore E402 --in-place --recursive ' + autopep_files)
# Avoid this check since it conflicts with Ansible guidelines:
# E402 - Fix module level import not at top of file
# Fix exceptional issues due to bugs in autopep
# Using os.popen for quick edit and modification. Should be replaced by proper Python calls
print("\n\n\033[0mFinal fixes ....")
os.popen("sed -i 's/filtered_data =/filtered_data = \\\/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_ip_address_type.py")
os.popen("sed -i 's/filtered_data =/filtered_data = \\\/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_network_auth_type.py")
os.popen("sed -i 's/filtered_data =/filtered_data = \\\/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_roaming_consortium.py")
os.popen("sed -i 's/filtered_data =/filtered_data = \\\/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_h2qp_conn_capability.py")
os.popen("sed -i 's/ underscore_to_hyphen/ underscore_to_hyphen/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_ip_address_type.py")
os.popen("sed -i 's/ underscore_to_hyphen/ underscore_to_hyphen/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_network_auth_type.py")
os.popen("sed -i 's/ underscore_to_hyphen/ underscore_to_hyphen/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_anqp_roaming_consortium.py")
os.popen("sed -i 's/ underscore_to_hyphen/ underscore_to_hyphen/' ./output/" + fgt_schema['version'] + "/wireless_controller_hotspot20/fortios_wireless_controller_hotspot20_h2qp_conn_capability.py")
os.popen("find . -name 'test_fortios_router_bfd*.py' -exec rm {} \\;")
if __name__ == "__main__":
print("args " + str(sys.argv))
arg = int(sys.argv[1]) if len(sys.argv) > 1 else None
jinjaExecutor(arg)
``` |
{
"source": "jonthesquirrel/colorstorage",
"score": 3
} |
#### File: jonthesquirrel/colorstorage/main.py
```python
from numpy import interp
from os import listdir
from PIL import Image, ImageStat
# Directory for block textures extracted from version jar
textures = 'assets/minecraft/textures/block'
# Special case: animated blocks like crimson_stem are
# taller than 64px: crop when compositing later?
# List of blocks to allow loading
# > Change this file for different lists
with open('blocks_full.txt') as reader:
allow_blocks = reader.read().splitlines()
# Unused because redundant
# # List of blocks to deny loading
# with open('blocks_deny.txt') as reader:
# deny_blocks = reader.read().splitlines()
# Find png filenames in textures directory and remove .png extension
# (Create list of all blocks)
block_ids = [filename[:-4] for filename in listdir(textures) if filename.endswith('.png')]
# Remove all blocks except those in allow list from block id list
block_ids = [id for id in block_ids if id in allow_blocks]
# Unused because redundant
# # Remove blocks in deny list from block id list
# block_ids = [id for id in block_ids if not id in deny_blocks]
# Convert HSV into hsv(360°, 100%, 100%) color code string
def hsv_string(h, s, v):
    return f'hsv({round(h)}, {round(s)}%, {round(v)}%)'
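# For example, hsv_string(210.0, 50.0, 75.0) returns 'hsv(210, 50%, 75%)'.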
# Get average HSV color from image
def avg_hsv(block_id):
# Open Minecraft texture as RGBA image
im = Image.open(f'{textures}/{block_id}.png')
# Convert RGBA image into HSV (Hue, Saturation, Value) image
im = im.convert('HSV')
# Split HSV into separate channels
hue_channel = im.getchannel('H')
sat_channel = im.getchannel('S')
val_channel = im.getchannel('V')
# Get average of each channel
h = ImageStat.Stat(hue_channel).mean
s = ImageStat.Stat(sat_channel).mean
v = ImageStat.Stat(val_channel).mean
# Scale from 8-bit channel range (255, 255, 255) to hsv(360°, 100%, 100%) range
# These are converted to floats
h = interp(h, [0, 255], [0, 360])[0]
s = interp(s, [0, 255], [0, 100])[0]
v = interp(v, [0, 255], [0, 100])[0]
# Collect this block's data in a dictionary
return {'block_id': block_id, 'hue': h, 'sat': s, 'val': v, 'hsv_string': hsv_string(h, s, v)}
# Make a list of blocks and their average colors
blocks = map(avg_hsv, block_ids)
# Sort blocks by hue, then saturation, then value
blocks = sorted(blocks, key = lambda block: (block['hue'], block['sat'], block['val']))
# Print blocks and their color
for block in blocks:
print(f"{block['block_id']} : {block['hsv_string']}")
``` |
{
"source": "JonTheWong/pySnippets",
"score": 4
} |
#### File: pySnippets/code/readcsv.py
```python
import csv
# Documentation
# https://docs.python.org/3.7/library/csv.html
# csv.reader already splits each row into a list, so data[0] works out of the box.
def readcsv(filepath):
'''
Read file in filepath, return all rows as a list type.
Call function: ```readcsv('data/file0.csv')```
'''
with open(filepath, newline='', encoding='utf-8') as data:
data = csv.reader(data, delimiter=',', quotechar='|')
rowofdata = []
for row in data:
rowofdata.append(row)
return rowofdata
# print(readcsv('data/file0.csv')) # expected output [['file0,line0'], ['file0,line1'], ['file0,line2'], ['file0,line3'], ['file0,line4'], ['file0,line5']]
``` |
{
"source": "jonthierry/SNS-CF",
"score": 2
} |
#### File: trackers/SiamMask/siammask.py
```python
from __future__ import division
import argparse
import logging
import numpy as np
import cv2
from PIL import Image
from os import makedirs
from os.path import join, isdir, isfile
import torch
from torch.autograd import Variable
import torch.nn.functional as F
# relative imports
from .utils.log_helper import init_log, add_file_handler
from .utils.bbox_helper import get_axis_aligned_bbox, cxy_wh_2_rect
from .utils.anchors import Anchors, generate_anchor
from .utils.tracker_config import TrackerConfig
from .utils.tracking_utils import get_subwindow_tracking
def SiamMask_init(im, target_pos, target_sz, model, hp=None):
state = dict()
state['im_h'] = im.shape[0]
state['im_w'] = im.shape[1]
p = TrackerConfig()
p.update(hp, model.anchors)
p.renew()
p.scales = model.anchors['scales']
p.ratios = model.anchors['ratios']
p.anchor_num = len(p.ratios) * len(p.scales)
p.anchor = generate_anchor(model.anchors, p.score_size)
avg_chans = np.mean(im, axis=(0, 1))
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
elif p.windowing == 'uniform':
window = np.ones((p.score_size, p.score_size))
window = np.tile(window.flatten(), p.anchor_num)
use_cuda = torch.cuda.is_available()
state['device'] = torch.device("cuda" if use_cuda else "cpu")
state['p'] = p
state['model'] = model
state['avg_chans'] = avg_chans
state['window'] = window
state['score'] = 1.0
state['target_pos'] = target_pos
state['target_sz'] = target_sz
return state
def SiamMask_track(state, im, temp_mem):
p = state['p']
avg_chans = state['avg_chans']
window = state['window']
old_pos = state['target_pos']
old_sz = state['target_sz']
dev = state['device']
# get search area
wc_x = old_sz[1] + p.context_amount * sum(old_sz)
hc_x = old_sz[0] + p.context_amount * sum(old_sz)
s_z = np.sqrt(wc_x * hc_x)
scale_x = p.exemplar_size / s_z
d_search = (p.instance_size - p.exemplar_size) / 2
pad = d_search / scale_x
s_x = s_z + 2 * pad
crop_box = [old_pos[0] - round(s_x) / 2, old_pos[1] - round(s_x) / 2, round(s_x), round(s_x)]
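    # Descriptive note: wc_x/hc_x pad the previous target size with a context margin, s_z is the
    # side of the square exemplar region in image pixels, scale_x maps it to the network's
    # exemplar input size, and s_x/crop_box enlarge that region to the search window cropped
    # around the previous target position.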
# extract scaled crops for search region x at previous target position
x_crop = Variable(get_subwindow_tracking(im, old_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))
# track
target_pos, target_sz, score, best_id = temp_mem.batch_evaluate(x_crop.to(dev), old_pos,
old_sz, window,
scale_x, p)
# mask refinement
best_pscore_id_mask = np.unravel_index(best_id, (5, p.score_size, p.score_size))
delta_x, delta_y = best_pscore_id_mask[2], best_pscore_id_mask[1]
mask = state['model'].track_refine((delta_y, delta_x)).to(dev).sigmoid().squeeze().view(
p.out_size, p.out_size).cpu().data.numpy()
def crop_back(image, bbox, out_sz, padding=-1):
a = (out_sz[0] - 1) / bbox[2]
b = (out_sz[1] - 1) / bbox[3]
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz[0], out_sz[1]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=padding)
return crop
s = crop_box[2] / p.instance_size
sub_box = [crop_box[0] + (delta_x - p.base_size / 2) * p.total_stride * s,
crop_box[1] + (delta_y - p.base_size / 2) * p.total_stride * s,
s * p.exemplar_size, s * p.exemplar_size]
s = p.out_size / sub_box[2]
back_box = [-sub_box[0] * s, -sub_box[1] * s, state['im_w'] * s, state['im_h'] * s]
mask_in_img = crop_back(mask, back_box, (state['im_w'], state['im_h']))
target_mask = (mask_in_img > p.seg_thr).astype(np.uint8)
if cv2.__version__[-5] == '4':
contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
else:
_, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnt_area = [cv2.contourArea(cnt) for cnt in contours]
if len(contours) != 0 and np.max(cnt_area) > 100:
contour = contours[np.argmax(cnt_area)] # use max area polygon
polygon = contour.reshape(-1, 2)
prbox = cv2.boxPoints(cv2.minAreaRect(polygon)) # Rotated Rectangle
rbox_in_img = prbox
else: # empty mask
location = cxy_wh_2_rect(target_pos, target_sz)
rbox_in_img = np.array([[location[0], location[1]],
[location[0] + location[2], location[1]],
[location[0] + location[2], location[1] + location[3]],
[location[0], location[1] + location[3]]])
state['mask'] = mask_in_img
state['polygon'] = rbox_in_img
# clip in min and max of the bb
target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
state['target_pos'] = target_pos
state['target_sz'] = target_sz
state['score'] = score
state['crop'] = x_crop
return state
``` |
{
"source": "jonthierry/Tkinter",
"score": 3
} |
#### File: jonthierry/Tkinter/btn_command.py
```python
import random
import tkinter as tk
def increase():
value = int(lbl_value["text"])
lbl_value["text"] = f"{value + 1}"
def decrease():
value = int(lbl_value["text"])
lbl_value["text"] = f"{value - 1}"
def roll():
value = int(lbl_value["text"])
lbl_value["text"] = str(random.randint(1, 6))
def both_decrease(event):
value = int(lbl_value["text"])
lbl_value["text"] = f"{value - 1}"
def both_increase(event):
value = int(lbl_value["text"])
lbl_value["text"] = f"{value + 1}"
window = tk.Tk()
window.rowconfigure([0, 1, 2], minsize=50, weight=1)
window.columnconfigure([0, 1, 2], minsize=50, weight=1)
btn_decrease = tk.Button(master=window, text="-", command=decrease)
btn_decrease.grid(row=0, column=0, sticky="nsew")
lbl_value = tk.Label(master=window, text="0")
lbl_value.grid(row=0, column=1)
btn_increase = tk.Button(master=window, text="+", command=increase)
btn_increase.grid(row=0, column=2, sticky="nsew")
btn_both = tk.Button(master=window, text="+/-")
btn_both.grid(row=1, column=1, sticky="nsew")
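# In Tkinter's event syntax, <Button-1> is the left mouse button and <Button-3> the right one.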
btn_both.bind("<Button-1>", both_decrease)
btn_both.bind("<Button-3>", both_increase)
btn_roll = tk.Button(master=window, text="Roll", command=roll)
btn_roll.grid(row=2, column=1, sticky="nsew")
window.mainloop()
```
#### File: jonthierry/Tkinter/Temp_Conv.py
```python
import tkinter as tk
def fahrenheit_to_celsius():
"""Convert the value for Fahrenheit to Celsius and insert the
result into lbl_result.
"""
fahrenheit = ent_temperature.get()
celsius = (5 / 9) * (float(fahrenheit) - 32)
lbl_result["text"] = f"{round(celsius, 2)} \N{DEGREE CELSIUS}"
def celsius_to_fahrenheit():
"""Convert the value for Celsius to Fahrenheit to and insert the
result into lbl_result.
"""
celsius = ent_temperature.get()
fahrenheit = (9 / 5) * (float(celsius)) + 32
lbl_result["text"] = f"{round(fahrenheit, 2)} \N{DEGREE FAHRENHEIT}"
# Set-up the window
window = tk.Tk()
window.title("Temperature Converter")
window.resizable(width=False, height=False)
# Create the Fahrenheit entry frame with an Entry
# widget and label in it
frm_entry = tk.Frame(master=window)
ent_temperature = tk.Entry(master=frm_entry, width=10)
# lbl_temp = tk.Label(master=frm_entry, text="\N{DEGREE FAHRENHEIT}")
# Layout the temperature Entry and Label in frm_entry
# using the .grid() geometry manager
ent_temperature.grid(row=0, column=0, sticky="e")
# lbl_temp.grid(row=0, column=1, sticky="w")
# Create the conversion Button and result display Label
btn_convert_to_c = tk.Button(
master=window,
text="\N{DEGREE FAHRENHEIT}",
command=fahrenheit_to_celsius
)
btn_convert_to_f = tk.Button(
master=window,
text="\N{DEGREE CELSIUS}",
command=celsius_to_fahrenheit
)
lbl_result = tk.Label(master=window, text="")
# Set-up the layout using the .grid() geometry manager
frm_entry.grid(row=0, column=0, padx=10)
btn_convert_to_c.grid(row=0, column=1, pady=10)
btn_convert_to_f.grid(row=0, column=2, pady=10)
lbl_result.grid(row=0, column=3, padx=10)
# Run the application
window.mainloop()
```
#### File: jonthierry/Tkinter/Tkinter_GUI.py
```python
import tkinter as tk
from tkinter import *
from tkinter.filedialog import *
from tkinter.ttk import *
import time
models = [] # To store model paths
imgs = [] # To append image paths
# Defining functions to make our interface alive
def model_upload_prgrss():
"""Monitor the model upload progress"""
def upload_model():
"""Upload the model"""
"""for widget in frm_out.winfo_children():
widget.destroy() # To destroy previous list and update with another
"""
model_path = askopenfilename(
# initialdir="/", # Toggle to start from the initial directory, C:
title="Select a Model",
filetypes=[("Model Files", "*.net"), ("All Files", "*.*")]
)
if not model_path:
return
# lbl_model["text"] = f"{filepath}"
models.append(model_path) # To append model paths in model library
print(model_path) # To display the model path on the screen/ console
for model_name in models: # To loop over stored model paths
lbl_model = tk.Label(frm_out, text=model_name, bg="black", fg="white", pady=5, width=30
) # To set the model name as the new label name
# Progress bar (Not connected to the action of uploading yet)
prgrss_model = Progressbar(frm_out, orient=HORIZONTAL, mode="determinate", length=300)
prgrss_model.grid(row=1, column=0, pady=5, sticky='ew') # To pack the progress bar in the frame
for x in range(100):
prgrss_model['value'] += 20
frm_out.update_idletasks()
time.sleep(0.05)
# print(prgrss_model['value'])
if prgrss_model['value'] == 80:
lbl_model.grid(row=0, column=0, sticky="ew", pady=5) # To upload explicitly one model
def upload_img():
img_path = askopenfilenames(
title="Select an image or image folder",
filetypes=[("Image Files", "*.png"), ("All Files", "*.*")]
)
if not img_path:
return
# lbl_img["text"] = f"{img_path}"
# lbl_img.grid(row=3, column=0)
imgs.extend(img_path) # To append model paths in model library
# print(img_path) # To display the model path on the screen/ console
for img_name in imgs: # To loop over stored model paths
lbl_img = tk.Label(frm_out, text=img_name, bg="black", fg="white", pady=5, width=70)
# lbl_img.pack() # To upload more than one model
listbox_images = tk.Listbox(frm_out, bd=2, width=100)
# listbox_images.insert(END, "Image list") # To give the list a title
scr_imgs = Scrollbar(frm_out, orient="vertical")
scr_imgs.config(command=listbox_images.yview)
for item in imgs:
listbox_images.insert(END, item)
listbox_images.grid(row=3, column=0, sticky="nsew", padx=5, pady=10)
scr_imgs.grid(row=3, column=1, sticky="ns")
listbox_images.config(yscrollcommand=scr_imgs.set)
# Create a new window
window = tk.Tk()
window.title("One Class Segmentation")
window.rowconfigure(0, minsize=400, weight=1)
window.columnconfigure(1, minsize=800, weight=1)
# Create necessary widgets
"""
1. Frame with one row and two columns as frm_input
2. A radio button to choose between Model and Images as radio_choose
3. If Model, a dropdown list/ combobox widget of models as comb_choose
4. Else, a load images button as btn_load
5. An evaluation button as btn_eval
6. A display panel (?)
7. A label for uploaded models and images
"""
frm_input = tk.Frame(master=window, relief=tk.RAISED, bd=2)
choose = tk.IntVar() # Creating the choice variable
# choose.set(1) # Initializing the choice (i.e. Model upload)
"""# Combobox
comb_upload = Combobox(frm_input)
items = ("Choose a Model", "Upload Images")
comb_upload["values"] = items
comb_upload.grid(row=2, column=0, pady=5)"""
radio_model = tk.Radiobutton(frm_input,
text="Choose a Model",
variable=choose, value="1",
padx=5, pady=5,
command=upload_model
)
radio_images = tk.Radiobutton(frm_input,
text="Upload Images",
variable=choose, value="2",
padx=5,
command=upload_img
)
# Pack the widgets into the frame, and the frame into the window
radio_model.grid(row=0, column=0, sticky="w")
radio_images.grid(row=1, column=0, sticky="w")
frm_input.grid(row=0, column=0, pady=5, sticky="ew", padx=5)
# Frame of output
frm_out = tk.Frame(window, relief=tk.RAISED, bd=2)
btn_eval = tk.Button(window, text="EVALUATE")
# Pack
btn_eval.grid(row=1, column=1, sticky="ew")
frm_out.grid(row=0, column=1, pady=5, sticky="nsew")
# lbl_img = tk.Label(frm_out, text="")
# Run the application
window.mainloop()
``` |
{
"source": "jonthn/dotfiles",
"score": 2
} |
#### File: config/lldb/lldbinit.py
```python
if __name__ == "__main__":
print("Run only as script from LLDB... Not as standalone program!")
try:
import lldb
except:
pass
import sys
import re
import os
import time
import struct
import argparse
import subprocess
import tempfile
try:
from keystone import *
CONFIG_KEYSTONE_AVAILABLE = 1
except:
CONFIG_KEYSTONE_AVAILABLE = 0
pass
VERSION = "2.0"
#
# User configurable options
#
CONFIG_ENABLE_COLOR = 1
CONFIG_DISPLAY_DISASSEMBLY_BYTES = 1
CONFIG_DISASSEMBLY_LINE_COUNT = 8
CONFIG_USE_CUSTOM_DISASSEMBLY_FORMAT = 1
CONFIG_DISPLAY_STACK_WINDOW = 0
CONFIG_DISPLAY_FLOW_WINDOW = 0
CONFIG_ENABLE_REGISTER_SHORTCUTS = 1
CONFIG_DISPLAY_DATA_WINDOW = 0
# setup the logging level, which is a bitmask of any of the following possible values (don't use spaces, doesn't seem to work)
#
# LOG_VERBOSE LOG_PROCESS LOG_THREAD LOG_EXCEPTIONS LOG_SHLIB LOG_MEMORY LOG_MEMORY_DATA_SHORT LOG_MEMORY_DATA_LONG LOG_MEMORY_PROTECTIONS LOG_BREAKPOINTS LOG_EVENTS LOG_WATCHPOINTS
# LOG_STEP LOG_TASK LOG_ALL LOG_DEFAULT LOG_NONE LOG_RNB_MINIMAL LOG_RNB_MEDIUM LOG_RNB_MAX LOG_RNB_COMM LOG_RNB_REMOTE LOG_RNB_EVENTS LOG_RNB_PROC LOG_RNB_PACKETS LOG_RNB_ALL LOG_RNB_DEFAULT
# LOG_DARWIN_LOG LOG_RNB_NONE
#
# to see log (at least in macOS)
# $ log stream --process debugserver --style compact
# (or whatever style you like)
CONFIG_LOG_LEVEL = "LOG_NONE"
# removes the offsets and modifies the module name position
# reference: https://lldb.llvm.org/formats.html
CUSTOM_DISASSEMBLY_FORMAT = "\"{${function.initial-function}{${function.name-without-args}} @ {${module.file.basename}}:\n}{${function.changed}\n{${function.name-without-args}} @ {${module.file.basename}}:\n}{${current-pc-arrow} }${addr-file-or-load}: \""
# default colors - modify as you wish
COLOR_REGVAL = "BLACK"
COLOR_REGNAME = "GREEN"
COLOR_CPUFLAGS = "RED"
COLOR_SEPARATOR = "BLUE"
COLOR_HIGHLIGHT_LINE = "RED"
COLOR_REGVAL_MODIFIED = "RED"
COLOR_SYMBOL_NAME = "BLUE"
COLOR_CURRENT_PC = "RED"
#
# Don't mess after here unless you know what you are doing!
#
COLORS = {
"BLACK": "\033[30m",
"RED": "\033[31m",
"GREEN": "\033[32m",
"YELLOW": "\033[33m",
"BLUE": "\033[34m",
"MAGENTA": "\033[35m",
"CYAN": "\033[36m",
"WHITE": "\033[37m",
"RESET": "\033[0m",
"BOLD": "\033[1m",
"UNDERLINE": "\033[4m"
}
DATA_WINDOW_ADDRESS = 0
old_x86 = { "eax": 0, "ecx": 0, "edx": 0, "ebx": 0, "esp": 0, "ebp": 0, "esi": 0, "edi": 0, "eip": 0, "eflags": 0,
"cs": 0, "ds": 0, "fs": 0, "gs": 0, "ss": 0, "es": 0, }
old_x64 = { "rax": 0, "rcx": 0, "rdx": 0, "rbx": 0, "rsp": 0, "rbp": 0, "rsi": 0, "rdi": 0, "rip": 0, "rflags": 0,
"cs": 0, "fs": 0, "gs": 0, "r8": 0, "r9": 0, "r10": 0, "r11": 0, "r12": 0,
"r13": 0, "r14": 0, "r15": 0 }
old_arm = { "r0": 0, "r1": 0, "r2": 0, "r3": 0, "r4": 0, "r5": 0, "r6": 0, "r7": 0, "r8": 0, "r9": 0, "r10": 0,
"r11": 0, "r12": 0, "sp": 0, "lr": 0, "pc": 0, "cpsr": 0 }
arm_type = "thumbv7-apple-ios"
GlobalListOutput = []
Int3Dictionary = {}
crack_cmds = []
crack_cmds_noret = []
All_Registers = [ "rip", "rax", "rbx", "rbp", "rsp", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "eip", "eax", "ebx", "ebp", "esp", "edi", "esi", "edx", "ecx" ]
def __lldb_init_module(debugger, internal_dict):
    ''' We can execute commands using debugger.HandleCommand, which sends all output to the default
        lldb console. With GetCommandInterpreter().HandleCommand() we can instead capture all output
        in an SBCommandReturnObject and parse/modify the data before we send it to the output.
'''
# don't load if we are in Xcode since it is not compatible and will block Xcode
if os.getenv('PATH').startswith('/Applications/Xcode'):
return
'''
If I'm running from $HOME where .lldbinit is located, seems like lldb will load
    .lldbinit 2 times, thus this dirty hack is here to prevent double loading...
if somebody knows better way, would be great to know :)
'''
var = debugger.GetInternalVariableValue("stop-disassembly-count", debugger.GetInstanceName())
if var.IsValid():
var = var.GetStringAtIndex(0)
if var == "0":
return
res = lldb.SBCommandReturnObject()
ci = debugger.GetCommandInterpreter()
# settings
ci.HandleCommand("settings set target.x86-disassembly-flavor intel", res)
ci.HandleCommand("settings set prompt \"(lldbinit) \"", res)
#lldb.debugger.GetCommandInterpreter().HandleCommand("settings set prompt \"\033[01;31m(lldb) \033[0m\"", res);
ci.HandleCommand("settings set stop-disassembly-count 0", res)
# set the log level - must be done on startup?
ci.HandleCommand("settings set target.process.extra-startup-command QSetLogging:bitmask=" + CONFIG_LOG_LEVEL + ";", res)
if CONFIG_USE_CUSTOM_DISASSEMBLY_FORMAT == 1:
ci.HandleCommand("settings set disassembly-format " + CUSTOM_DISASSEMBLY_FORMAT, res)
# the hook that makes everything possible :-)
ci.HandleCommand("command script add -f lldbinit.HandleHookStopOnTarget HandleHookStopOnTarget", res)
ci.HandleCommand("command script add -f lldbinit.HandleHookStopOnTarget ctx", res)
ci.HandleCommand("command script add -f lldbinit.HandleHookStopOnTarget context", res)
# commands
ci.HandleCommand("command script add -f lldbinit.cmd_lldbinitcmds lldbinitcmds", res)
ci.HandleCommand("command script add -f lldbinit.cmd_IphoneConnect iphone", res)
#
# dump memory commands
#
ci.HandleCommand("command script add -f lldbinit.cmd_db db", res)
ci.HandleCommand("command script add -f lldbinit.cmd_dw dw", res)
ci.HandleCommand("command script add -f lldbinit.cmd_dd dd", res)
ci.HandleCommand("command script add -f lldbinit.cmd_dq dq", res)
ci.HandleCommand("command script add -f lldbinit.cmd_DumpInstructions u", res)
ci.HandleCommand("command script add -f lldbinit.cmd_findmem findmem", res)
#
# Settings related commands
#
ci.HandleCommand("command script add -f lldbinit.cmd_enable enable", res)
ci.HandleCommand("command script add -f lldbinit.cmd_disable disable", res)
ci.HandleCommand("command script add -f lldbinit.cmd_contextcodesize contextcodesize", res)
# a few settings aliases
ci.HandleCommand("command alias enablesolib enable solib", res)
ci.HandleCommand("command alias disablesolib disable solib", res)
ci.HandleCommand("command alias enableaslr enable aslr", res)
ci.HandleCommand("command alias disableaslr disable aslr", res)
#
# Breakpoint related commands
#
ci.HandleCommand("command script add -f lldbinit.cmd_bhb bhb", res)
ci.HandleCommand("command script add -f lldbinit.cmd_bht bht", res)
ci.HandleCommand("command script add -f lldbinit.cmd_bpt bpt", res)
ci.HandleCommand("command script add -f lldbinit.cmd_bpn bpn", res)
# disable a breakpoint or all
ci.HandleCommand("command script add -f lldbinit.cmd_bpd bpd", res)
ci.HandleCommand("command script add -f lldbinit.cmd_bpda bpda", res)
# clear a breakpoint or all
ci.HandleCommand("command script add -f lldbinit.cmd_bpc bpc", res)
ci.HandleCommand("command alias bpca breakpoint delete", res)
# enable a breakpoint or all
ci.HandleCommand("command script add -f lldbinit.cmd_bpe bpe", res)
ci.HandleCommand("command script add -f lldbinit.cmd_bpea bpea", res)
# commands to set temporary int3 patches and restore original bytes
ci.HandleCommand("command script add -f lldbinit.cmd_int3 int3", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rint3 rint3", res)
ci.HandleCommand("command script add -f lldbinit.cmd_listint3 listint3", res)
ci.HandleCommand("command script add -f lldbinit.cmd_nop nop", res)
ci.HandleCommand("command script add -f lldbinit.cmd_null null", res)
# change eflags commands
ci.HandleCommand("command script add -f lldbinit.cmd_cfa cfa", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfc cfc", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfd cfd", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfi cfi", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfo cfo", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfp cfp", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfs cfs", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cft cft", res)
ci.HandleCommand("command script add -f lldbinit.cmd_cfz cfz", res)
# skip/step current instruction commands
ci.HandleCommand("command script add -f lldbinit.cmd_skip skip", res)
ci.HandleCommand("command script add -f lldbinit.cmd_stepo stepo", res)
ci.HandleCommand("command script add -f lldbinit.cmd_si si", res)
# load breakpoints from file
ci.HandleCommand("command script add -f lldbinit.cmd_LoadBreakPoints lb", res)
ci.HandleCommand("command script add -f lldbinit.cmd_LoadBreakPointsRva lbrva", res)
# cracking friends
ci.HandleCommand("command script add -f lldbinit.cmd_crack crack", res)
ci.HandleCommand("command script add -f lldbinit.cmd_crackcmd crackcmd", res)
ci.HandleCommand("command script add -f lldbinit.cmd_crackcmd_noret crackcmd_noret", res)
# alias for existing breakpoint commands
# list all breakpoints
ci.HandleCommand("command alias bpl breakpoint list", res)
# alias "bp" command that exists in gdbinit - lldb also has alias for "b"
ci.HandleCommand("command alias bp _regexp-break", res)
# to set breakpoint commands - I hate typing too much
ci.HandleCommand("command alias bcmd breakpoint command add", res)
# launch process and stop at entrypoint (not exactly as gdb command that just inserts breakpoint)
# usually it will be inside dyld and not the target main()
ci.HandleCommand("command alias break_entrypoint process launch --stop-at-entry", res)
ci.HandleCommand("command script add -f lldbinit.cmd_show_loadcmds show_loadcmds", res)
ci.HandleCommand("command script add -f lldbinit.cmd_show_header show_header", res)
ci.HandleCommand("command script add -f lldbinit.cmd_tester tester", res)
ci.HandleCommand("command script add -f lldbinit.cmd_datawin datawin", res)
# shortcut command to modify registers content
if CONFIG_ENABLE_REGISTER_SHORTCUTS == 1:
# x64
ci.HandleCommand("command script add -f lldbinit.cmd_rip rip", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rax rax", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rbx rbx", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rbp rbp", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rsp rsp", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rdi rdi", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rsi rsi", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rdx rdx", res)
ci.HandleCommand("command script add -f lldbinit.cmd_rcx rcx", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r8 r8", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r9 r9", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r10 r10", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r11 r11", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r12 r12", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r13 r13", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r14 r14", res)
ci.HandleCommand("command script add -f lldbinit.cmd_r15 r15", res)
# x86
ci.HandleCommand("command script add -f lldbinit.cmd_eip eip", res)
ci.HandleCommand("command script add -f lldbinit.cmd_eax eax", res)
ci.HandleCommand("command script add -f lldbinit.cmd_ebx ebx", res)
ci.HandleCommand("command script add -f lldbinit.cmd_ebp ebp", res)
ci.HandleCommand("command script add -f lldbinit.cmd_esp esp", res)
ci.HandleCommand("command script add -f lldbinit.cmd_edi edi", res)
ci.HandleCommand("command script add -f lldbinit.cmd_esi esi", res)
ci.HandleCommand("command script add -f lldbinit.cmd_edx edx", res)
ci.HandleCommand("command script add -f lldbinit.cmd_ecx ecx", res)
if CONFIG_KEYSTONE_AVAILABLE == 1:
ci.HandleCommand("command script add -f lldbinit.cmd_asm32 asm32", res)
ci.HandleCommand("command script add -f lldbinit.cmd_asm64 asm64", res)
ci.HandleCommand("command script add -f lldbinit.cmd_arm32 arm32", res)
ci.HandleCommand("command script add -f lldbinit.cmd_arm64 arm64", res)
ci.HandleCommand("command script add -f lldbinit.cmd_armthumb armthumb", res)
# add the hook - we don't need to wait for a target to be loaded
ci.HandleCommand("target stop-hook add -o \"HandleHookStopOnTarget\"", res)
ci.HandleCommand("command script add --function lldbinit.cmd_banner banner", res)
debugger.HandleCommand("banner")
return
def cmd_banner(debugger,command,result,dict):
print(COLORS["RED"] + "[+] Loaded lldbinit version: " + VERSION + COLORS["RESET"])
def cmd_lldbinitcmds(debugger, command, result, dict):
'''Display all available lldbinit commands.'''
help_table = [
[ "lldbinitcmds", "this command" ],
[ "enable", "configure lldb and lldbinit options" ],
[ "disable", "configure lldb and lldbinit options" ],
[ "contextcodesize", "set number of instruction lines in code window" ],
[ "b", "breakpoint address" ],
[ "bpt", "set a temporary software breakpoint" ],
[ "bhb", "set a hardware breakpoint" ],
[ "bpc", "clear breakpoint" ],
[ "bpca", "clear all breakpoints" ],
[ "bpd", "disable breakpoint" ],
[ "bpda", "disable all breakpoints" ],
[ "bpe", "enable a breakpoint" ],
[ "bpea", "enable all breakpoints" ],
[ "bcmd", "alias to breakpoint command add"],
[ "bpl", "list all breakpoints"],
[ "bpn", "temporarily breakpoint next instruction" ],
[ "break_entrypoint", "launch target and stop at entrypoint" ],
[ "skip", "skip current instruction" ],
[ "int3", "patch memory address with INT3" ],
[ "rint3", "restore original byte at address patched with INT3" ],
[ "listint3", "list all INT3 patched addresses" ],
[ "nop", "patch memory address with NOP" ],
[ "null", "patch memory address with NULL" ],
[ "stepo", "step over calls and loop instructions" ],
[ "lb", "load breakpoints from file and apply them (currently only func names are applied)" ],
[ "lbrva", "load breakpoints from file and apply to main executable, only RVA in this case" ],
[ "db/dw/dd/dq", "memory hex dump in different formats" ],
[ "findmem", "search memory" ],
[ "cfa/cfc/cfd/cfi/cfo/cfp/cfs/cft/cfz", "change CPU flags" ],
[ "u", "dump instructions" ],
[ "iphone", "connect to debugserver running on iPhone" ],
[ "ctx/context", "show current instruction pointer CPU context" ],
[ "show_loadcmds", "show otool output of Mach-O load commands" ],
[ "show_header", "show otool output of Mach-O header" ],
[ "enablesolib/disablesolib", "enable/disable the stop on library load events" ],
[ "enableaslr/disableaslr", "enable/disable process ASLR" ],
[ "crack", "return from current function" ],
[ "crackcmd", "set a breakpoint and return from that function" ],
[ "crackcmd_noret", "set a breakpoint and set a register value. doesn't return from function" ],
[ "datawin", "set start address to display on data window" ],
[ "rip/rax/rbx/etc", "shortcuts to modify x64 registers" ],
[ "eip/eax/ebx/etc", "shortcuts to modify x86 registers" ],
[ "asm32/asm64", "x86/x64 assembler using keystone" ],
[ "arm32/arm64/armthumb", "ARM assembler using keystone" ]
]
print("lldbinit available commands:")
for row in help_table:
print(" {: <20} - {: <30}".format(*row))
print("\nUse \'cmdname help\' for extended command help.")
# placeholder command used for testing
def cmd_tester(debugger, command, result, dict):
print("test")
#frame = get_frame()
# the SBValue to ReturnFromFrame must be eValueTypeRegister type
# if we do a lldb.SBValue() we can't set to that type
# so we need to make a copy
# can we use FindRegister() from frame?
#return_value = frame.reg["rax"]
#return_value.value = "1"
#thread.ReturnFromFrame(frame, return_value)
# -------------------------
# Settings related commands
# -------------------------
def cmd_enable(debugger, command, result, dict):
'''Enable certain lldb and lldbinit options. Use \'enable help\' for more information.'''
help = """
Enable certain lldb and lldbinit configuration options.
Syntax: enable <setting>
Available settings:
color: enable color mode.
solib: enable stop on library events trick.
aslr: enable process aslr.
stackwin: enable stack window in context display.
datawin: enable data window in context display, configure address with datawin.
flow: enable indirect control flow window (call targets and objective-c class/methods).
"""
global CONFIG_ENABLE_COLOR
global CONFIG_DISPLAY_STACK_WINDOW
global CONFIG_DISPLAY_FLOW_WINDOW
global CONFIG_DISPLAY_DATA_WINDOW
cmd = command.split()
if len(cmd) == 0:
print("[-] error: command requires arguments.")
print("")
print(help)
return
if cmd[0] == "color":
CONFIG_ENABLE_COLOR = 1
print("[+] Enabled color mode.")
elif cmd[0] == "solib":
debugger.HandleCommand("settings set target.process.stop-on-sharedlibrary-events true")
print("[+] Enabled stop on library events trick.")
elif cmd[0] == "aslr":
debugger.HandleCommand("settings set target.disable-aslr false")
print("[+] Enabled ASLR.")
elif cmd[0] == "stackwin":
CONFIG_DISPLAY_STACK_WINDOW = 1
print("[+] Enabled stack window in context display.")
elif cmd[0] == "flow":
CONFIG_DISPLAY_FLOW_WINDOW = 1
print("[+] Enabled indirect control flow window in context display.")
elif cmd[0] == "datawin":
CONFIG_DISPLAY_DATA_WINDOW = 1
print("[+] Enabled data window in context display. Configure address with \'datawin\' cmd.")
elif cmd[0] == "help":
print(help)
else:
print("[-] error: unrecognized command.")
print(help)
return
def cmd_disable(debugger, command, result, dict):
'''Disable certain lldb and lldbinit options. Use \'disable help\' for more information.'''
help = """
Disable certain lldb and lldbinit configuration options.
Syntax: disable <setting>
Available settings:
color: disable color mode.
solib: disable stop on library events trick.
aslr: disable process aslr.
stackwin: disable stack window in context display.
datawin: disable data window in context display.
flow: disable indirect control flow window (call targets and objective-c class/methods).
"""
global CONFIG_ENABLE_COLOR
global CONFIG_DISPLAY_STACK_WINDOW
global CONFIG_DISPLAY_FLOW_WINDOW
global CONFIG_DISPLAY_DATA_WINDOW
cmd = command.split()
if len(cmd) == 0:
print("[-] error: command requires arguments.")
print("")
print(help)
return
if cmd[0] == "color":
CONFIG_ENABLE_COLOR = 0
print("[+] Disabled color mode.")
elif cmd[0] == "solib":
debugger.HandleCommand("settings set target.process.stop-on-sharedlibrary-events false")
print("[+] Disabled stop on library events trick.")
elif cmd[0] == "aslr":
debugger.HandleCommand("settings set target.disable-aslr true")
print("[+] Disabled ASLR.")
elif cmd[0] == "stackwin":
CONFIG_DISPLAY_STACK_WINDOW = 0
print("[+] Disabled stack window in context display.")
elif cmd[0] == "flow":
CONFIG_DISPLAY_FLOW_WINDOW = 0
print("[+] Disabled indirect control flow window in context display.")
elif cmd[0] == "datawin":
CONFIG_DISPLAY_DATA_WINDOW = 0
print("[+] Disabled data window in context display.")
elif cmd[0] == "help":
print(help)
else:
print("[-] error: unrecognized command.")
print(help)
return
def cmd_contextcodesize(debugger, command, result, dict):
'''Set the number of disassembly lines in code window. Use \'contextcodesize help\' for more information.'''
help = """
Configures the number of disassembly lines displayed in code window.
Syntax: contextcodesize <line_count>
Note: expressions supported, do not use spaces between operators.
"""
global CONFIG_DISASSEMBLY_LINE_COUNT
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert the number of disassembly lines to display.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
print("\nCurrent configuration value is: {:d}".format(CONFIG_DISASSEMBLY_LINE_COUNT))
return
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value.")
print("")
print(help)
return
CONFIG_DISASSEMBLY_LINE_COUNT = value
return
# ---------------------------------
# Color and output related commands
# ---------------------------------
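# append the ANSI escape sequence for the given color name to the output buffer
# when color mode is disabled an empty string is appended instead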
def color(x):
out_col = ""
if CONFIG_ENABLE_COLOR == 0:
output(out_col)
return
output(COLORS[x])
# append data to the output that we display at the end of the hook-stop
def output(x):
global GlobalListOutput
GlobalListOutput.append(x)
# ---------------------------
# Breakpoint related commands
# ---------------------------
# temporary software breakpoint
def cmd_bpt(debugger, command, result, dict):
'''Set a temporary software breakpoint. Use \'bpt help\' for more information.'''
help = """
Set a temporary software breakpoint.
Syntax: bpt <address>
Note: expressions supported, do not use spaces between operators.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a breakpoint address.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value.")
print("")
print(help)
return
target = get_target()
breakpoint = target.BreakpointCreateByAddress(value)
breakpoint.SetOneShot(True)
breakpoint.SetThreadID(get_frame().GetThread().GetThreadID())
print("[+] Set temporary breakpoint at 0x{:x}".format(value))
# hardware breakpoint
def cmd_bhb(debugger, command, result, dict):
'''Set a hardware breakpoint'''
help = """
Set a hardware breakpoint.
Syntax: bhb <address>
Note: expressions supported, do not use spaces between operators.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a breakpoint address.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value.")
print("")
print(help)
return
# the python API doesn't seem to support hardware breakpoints
# so we set it via command line interpreter
res = lldb.SBCommandReturnObject()
lldb.debugger.GetCommandInterpreter().HandleCommand("breakpoint set -H -a " + hex(value), res)
print("[+] Set hardware breakpoint at 0x{:x}".format(value))
return
# temporary hardware breakpoint
def cmd_bht(debugger, command, result, dict):
'''Set a temporary hardware breakpoint'''
print("[-] error: lldb has no x86/x64 temporary hardware breakpoints implementation.")
return
# clear breakpoint number
def cmd_bpc(debugger, command, result, dict):
'''Clear a breakpoint. Use \'bpc help\' for more information.'''
help = """
Clear a breakpoint.
Syntax: bpc <breakpoint_number>
Note: only breakpoint numbers are valid, not addresses. Use \'bpl\' to list breakpoints.
Note: expressions supported, do not use spaces between operators.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a breakpoint number.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
# breakpoint disable only accepts breakpoint numbers not addresses
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value - only a breakpoint number is valid.")
print("")
print(help)
return
target = get_target()
for bpt in target.breakpoint_iter():
if bpt.id == value:
if target.BreakpointDelete(bpt.id) == False:
print("[-] error: failed to delete breakpoint #{:d}".format(value))
return
print("[+] Deleted breakpoint #{:d}".format(value))
return
print("[-] error: breakpoint #{:d} not found".format(value))
return
# disable breakpoint number
# XXX: we could support addresses, not sure it's worth the trouble
def cmd_bpd(debugger, command, result, dict):
'''Disable a breakpoint. Use \'bpd help\' for more information.'''
help = """
Disable a breakpoint.
Syntax: bpd <breakpoint_number>
Note: only breakpoint numbers are valid, not addresses. Use \'bpl\' to list breakpoints.
Note: expressions supported, do not use spaces between operators.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a breakpoint number.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
# breakpoint disable only accepts breakpoint numbers not addresses
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value - only a breakpoint number is valid.")
print("")
print(help)
return
target = get_target()
for bpt in target.breakpoint_iter():
if bpt.id == value and bpt.IsEnabled() == True:
bpt.SetEnabled(False)
print("[+] Disabled breakpoint #{:d}".format(value))
# disable all breakpoints
def cmd_bpda(debugger, command, result, dict):
'''Disable all breakpoints. Use \'bpda help\' for more information.'''
help = """
Disable all breakpoints.
Syntax: bpda
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
target = get_target()
if target.DisableAllBreakpoints() == False:
print("[-] error: failed to disable all breakpoints.")
return
print("[+] Disabled all breakpoints.")
# enable breakpoint number
def cmd_bpe(debugger, command, result, dict):
'''Enable a breakpoint. Use \'bpe help\' for more information.'''
help = """
Enable a breakpoint.
Syntax: bpe <breakpoint_number>
Note: only breakpoint numbers are valid, not addresses. Use \'bpl\' to list breakpoints.
Note: expressions supported, do not use spaces between operators.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a breakpoint number.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
# breakpoint enable only accepts breakpoint numbers not addresses
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid input value - only a breakpoint number is valid.")
print("")
print(help)
return
target = get_target()
for bpt in target.breakpoint_iter():
if bpt.id == value and bpt.IsEnabled() == False:
bpt.SetEnabled(True)
print("[+] Enabled breakpoint #{:d}".format(value))
# enable all breakpoints
def cmd_bpea(debugger, command, result, dict):
'''Enable all breakpoints. Use \'bpea help\' for more information.'''
help = """
Enable all breakpoints.
Syntax: bpea
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
target = get_target()
if target.EnableAllBreakpoints() == False:
print("[-] error: failed to enable all breakpoints.")
return
print("[+] Enabled all breakpoints.")
# skip current instruction - just advances PC to next instruction but doesn't execute it
def cmd_skip(debugger, command, result, dict):
'''Advance PC to instruction at next address. Use \'skip help\' for more information.'''
help = """
Advance current instruction pointer to next instruction.
Syntax: skip
Note: control flow is not respected, it advances to next instruction in memory.
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
start_addr = get_current_pc()
next_addr = start_addr + get_inst_size(start_addr)
if is_x64():
get_frame().reg["rip"].value = format(next_addr, '#x')
elif is_i386():
get_frame().reg["eip"].value = format(next_addr, '#x')
# show the updated context
lldb.debugger.HandleCommand("context")
# XXX: ARM breakpoint
def cmd_int3(debugger, command, result, dict):
'''Patch byte at address to an INT3 (0xCC) instruction. Use \'int3 help\' for more information.'''
help = """
Patch process memory with an INT3 byte at given address.
Syntax: int3 [<address>]
Note: useful in cases where the debugger breakpoints aren't respected but an INT3 will always trigger the debugger.
Note: ARM not yet supported.
Note: expressions supported, do not use spaces between operators.
"""
global Int3Dictionary
error = lldb.SBError()
target = get_target()
cmd = command.split()
# if empty insert a int3 at current PC
if len(cmd) == 0:
int3_addr = get_current_pc()
if int3_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
int3_addr = evaluate(cmd[0])
if int3_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a breakpoint address.")
print("")
print(help)
return
bytes_string = target.GetProcess().ReadMemory(int3_addr, 1, error)
if error.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(int3_addr))
return
bytes_read = bytearray(bytes_string)
patch_bytes = str('\xCC')
result = target.GetProcess().WriteMemory(int3_addr, patch_bytes, error)
if error.Success() == False:
print("[-] error: Failed to write memory at 0x{:x}.".format(int3_addr))
return
# save original bytes for later restore
Int3Dictionary[str(int3_addr)] = bytes_read[0]
print("[+] Patched INT3 at 0x{:x}".format(int3_addr))
return
def cmd_rint3(debugger, command, result, dict):
'''Restore byte at address from a previously patched INT3 (0xCC) instruction. Use \'rint3 help\' for more information.'''
help = """
Restore the original byte at a previously patched address using \'int3\' command.
Syntax: rint3 [<address>]
Note: expressions supported, do not use spaces between operators.
"""
global Int3Dictionary
error = lldb.SBError()
target = get_target()
cmd = command.split()
# if empty insert a int3 at current PC
if len(cmd) == 0:
int3_addr = get_current_pc()
if int3_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
int3_addr = evaluate(cmd[0])
if int3_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a INT3 patched address.")
print("")
print(help)
return
if len(Int3Dictionary) == 0:
print("[-] error: No INT3 patched addresses to restore available.")
return
bytes_string = target.GetProcess().ReadMemory(int3_addr, 1, error)
if error.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(int3_addr))
return
bytes_read = bytearray(bytes_string)
if bytes_read[0] == 0xCC:
#print("Found byte patched byte at 0x{:x}".format(int3_addr))
try:
original_byte = Int3Dictionary[str(int3_addr)]
except:
print("[-] error: Original byte for address 0x{:x} not found.".format(int3_addr))
return
patch_bytes = chr(original_byte)
result = target.GetProcess().WriteMemory(int3_addr, patch_bytes, error)
if error.Success() == False:
print("[-] error: Failed to write memory at 0x{:x}.".format(int3_addr))
return
# remove element from original bytes list
del Int3Dictionary[str(int3_addr)]
else:
print("[-] error: No INT3 patch found at 0x{:x}.".format(int3_addr))
return
def cmd_listint3(debugger, command, result, dict):
'''List all patched INT3 (0xCC) instructions. Use \'listint3 help\' for more information.'''
help = """
List all addresses patched with \'int3\' command.
Syntax: listint3
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
if len(Int3Dictionary) == 0:
print("[-] No INT3 patched addresses available.")
return
print("Current INT3 patched addresses:")
for address, byte in Int3Dictionary.items():
print("[*] {:s}".format(hex(int(address, 10))))
return
# XXX: ARM NOPs
def cmd_nop(debugger, command, result, dict):
'''NOP byte(s) at address. Use \'nop help\' for more information.'''
help = """
Patch process memory with NOP (0x90) byte(s) at given address.
Syntax: nop <address> [<size>]
Note: default size is one byte if size not specified.
Note: ARM not yet supported.
Note: expressions supported, do not use spaces between operators.
"""
error = lldb.SBError()
target = get_target()
cmd = command.split()
if len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
nop_addr = evaluate(cmd[0])
patch_size = 1
if nop_addr == None:
print("[-] error: invalid address value.")
print("")
print(help)
return
elif len(cmd) == 2:
nop_addr = evaluate(cmd[0])
if nop_addr == None:
print("[-] error: invalid address value.")
print("")
print(help)
return
patch_size = evaluate(cmd[1])
if patch_size == None:
print("[-] error: invalid size value.")
print("")
print(help)
return
else:
print("[-] error: please insert a breakpoint address.")
print("")
print(help)
return
current_patch_addr = nop_addr
# format for WriteMemory()
patch_bytes = str('\x90')
# can we do better here? WriteMemory takes an input string... weird
for i in xrange(patch_size):
result = target.GetProcess().WriteMemory(current_patch_addr, patch_bytes, error)
if error.Success() == False:
print("[-] error: Failed to write memory at 0x{:x}.".format(current_patch_addr))
return
current_patch_addr = current_patch_addr + 1
return
def cmd_null(debugger, command, result, dict):
'''Patch byte(s) at address to NULL (0x00). Use \'null help\' for more information.'''
help = """
Patch process memory with NULL (0x00) byte(s) at given address.
Syntax: null <address> [<size>]
Note: default size is one byte if size not specified.
Note: expressions supported, do not use spaces between operators.
"""
error = lldb.SBError()
target = get_target()
cmd = command.split()
if len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
null_addr = evaluate(cmd[0])
patch_size = 1
if null_addr == None:
print("[-] error: invalid address value.")
print("")
print(help)
return
elif len(cmd) == 2:
null_addr = evaluate(cmd[0])
if null_addr == None:
print("[-] error: invalid address value.")
print("")
print(help)
return
patch_size = evaluate(cmd[1])
if patch_size == None:
print("[-] error: invalid size value.")
print("")
print(help)
return
else:
print("[-] error: please insert a breakpoint address.")
print("")
print(help)
return
current_patch_addr = null_addr
# format for WriteMemory()
patch_bytes = str('\x00')
# can we do better here? WriteMemory takes an input string... weird
for i in xrange(patch_size):
result = target.GetProcess().WriteMemory(current_patch_addr, patch_bytes, error)
if error.Success() == False:
print("[-] error: Failed to write memory at 0x{:x}.".format(current_patch_addr))
return
current_patch_addr = current_patch_addr + 1
return
'''
Implements stepover instruction.
'''
def cmd_stepo(debugger, command, result, dict):
'''Step over calls and some other instructions so we don't need to step into them. Use \'stepo help\' for more information.'''
help = """
Step over calls and loops that we want executed but not stepped into.
Affected instructions: call, movs, stos, cmps, loop.
Syntax: stepo
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
global arm_type
debugger.SetAsync(True)
arch = get_arch()
target = get_target()
if is_arm():
cpsr = get_gp_register("cpsr")
t = (cpsr >> 5) & 1
if t:
#it's thumb
arm_type = "thumbv7-apple-ios"
else:
arm_type = "armv7-apple-ios"
# compute the next address where to breakpoint
pc_addr = get_current_pc()
if pc_addr == 0:
print("[-] error: invalid current address.")
return
next_addr = pc_addr + get_inst_size(pc_addr)
# much easier to use the mnemonic output instead of disassembling via cmd line and parse
mnemonic = get_mnemonic(pc_addr)
if is_arm():
if "blx" == mnemonic or "bl" == mnemonic:
breakpoint = target.BreakpointCreateByAddress(next_addr)
breakpoint.SetOneShot(True)
breakpoint.SetThreadID(get_frame().GetThread().GetThreadID())
target.GetProcess().Continue()
return
else:
get_process().selected_thread.StepInstruction(False)
return
# XXX: make the other instructions besides call user configurable?
# calls can be call, callq, so use wider matching for those
if mnemonic == "call" or mnemonic == "callq" or "movs" == mnemonic or "stos" == mnemonic or "loop" == mnemonic or "cmps" == mnemonic:
breakpoint = target.BreakpointCreateByAddress(next_addr)
breakpoint.SetOneShot(True)
breakpoint.SetThreadID(get_frame().GetThread().GetThreadID())
target.GetProcess().Continue()
else:
get_process().selected_thread.StepInstruction(False)
# XXX: help
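# load breakpoints from a file for the 'lbrva' command: each line is a hex RVA that is
# added to the main executable load address and a breakpoint is set at the result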
def cmd_LoadBreakPointsRva(debugger, command, result, dict):
global GlobalListOutput
GlobalListOutput = []
'''
frame = get_frame();
target = lldb.debugger.GetSelectedTarget();
nummods = target.GetNumModules();
#for x in range (0, nummods):
# mod = target.GetModuleAtIndex(x);
# #print(dir(mod));
# print(target.GetModuleAtIndex(x));
# for sec in mod.section_iter():
# addr = sec.GetLoadAddress(target);
# name = sec.GetName();
# print(hex(addr));
#1st module is executable
mod = target.GetModuleAtIndex(0);
sec = mod.GetSectionAtIndex(0);
loadaddr = sec.GetLoadAddress(target);
if loadaddr == lldb.LLDB_INVALID_ADDRESS:
sec = mod.GetSectionAtIndex(1);
loadaddr = sec.GetLoadAddress(target);
print(hex(loadaddr));
'''
target = get_target()
mod = target.GetModuleAtIndex(0)
sec = mod.GetSectionAtIndex(0)
loadaddr = sec.GetLoadAddress(target)
if loadaddr == lldb.LLDB_INVALID_ADDRESS:
sec = mod.GetSectionAtIndex(1)
loadaddr = sec.GetLoadAddress(target)
try:
f = open(command, "r")
except:
output("[-] Failed to load file : " + command)
result.PutCString("".join(GlobalListOutput))
return
while True:
line = f.readline()
if not line:
break
line = line.rstrip()
if not line:
break
debugger.HandleCommand("breakpoint set -a " + hex(loadaddr + long(line, 16)))
f.close()
# XXX: help
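# load breakpoints from a file for the 'lb' command: each line is a symbol name and a
# breakpoint is set on every name read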
def cmd_LoadBreakPoints(debugger, command, result, dict):
global GlobalListOutput
GlobalListOutput = []
try:
f = open(command, "r")
except:
output("[-] Failed to load file : " + command)
result.PutCString("".join(GlobalListOutput))
return
while True:
line = f.readline()
if not line:
break
line = line.rstrip()
if not line:
break
debugger.HandleCommand("breakpoint set --name " + line)
f.close()
# Temporarily breakpoint the next instruction - useful to skip loops where we don't want to use stepo for this purpose
def cmd_bpn(debugger, command, result, dict):
'''Temporarily breakpoint instruction at next address. Use \'bpn help\' for more information.'''
help = """
Temporarily breakpoint instruction at next address
Syntax: bpn
Note: control flow is not respected, it breakpoints next instruction in memory.
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
target = get_target()
start_addr = get_current_pc()
next_addr = start_addr + get_inst_size(start_addr)
breakpoint = target.BreakpointCreateByAddress(next_addr)
breakpoint.SetOneShot(True)
breakpoint.SetThreadID(get_frame().GetThread().GetThreadID())
print("[+] Set temporary breakpoint at 0x{:x}".format(next_addr))
# command that sets rax to the given return value and returns right away from the current function
# technically just a shortcut to "thread return"
def cmd_crack(debugger, command, result, dict):
'''Return from current function and set return value. Use \'crack help\' for more information.'''
help = """
Return from current function and set return value
Syntax: crack <return value>
Sets rax to return value and returns immediately from current function.
You probably want to use this at the top of the function you want to return from.
"""
cmd = command.split()
if len(cmd) != 1:
print("[-] error: please insert a return value.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
# evaluate the requested return value
value = evaluate(cmd[0])
if value == None:
print("[-] error: invalid return value.")
print("")
print(help)
return
frame = get_frame()
# if we copy the SBValue from any register and use that copy as the
# return value, we get both that register and rax/eax set on return
# the SBValue to ReturnFromFrame must be eValueTypeRegister type
# if we do a lldb.SBValue() we can't set to that type
# so we need to make a copy
# can we use FindRegister() from frame?
return_value = frame.reg["rax"]
return_value.value = str(value)
get_thread().ReturnFromFrame(frame, return_value)
# set a breakpoint with return command associated when hit
def cmd_crackcmd(debugger, command, result, dict):
'''Breakpoint an address, when breakpoint is hit return from function and set return value. Use \'crackcmd help\' for more information.'''
help = """
Breakpoint an address, when breakpoint is hit return from function and set return value.
Syntax: crackcmd <address> <return value>
Sets rax/eax to return value and returns immediately from current function where breakpoint was set.
"""
global crack_cmds
cmd = command.split()
if len(cmd) == 0:
print("[-] error: please check required arguments.")
print("")
print(help)
return
elif len(cmd) > 0 and cmd[0] == "help":
print(help)
return
elif len(cmd) < 2:
print("[-] error: please check required arguments.")
print("")
print(help)
return
# XXX: is there a way to verify if address is valid? or just let lldb error when setting the breakpoint
address = evaluate(cmd[0])
if address == None:
print("[-] error: invalid address value.")
print("")
print(help)
return
return_value = evaluate(cmd[1])
if return_value == None:
print("[-] error: invalid return value.")
print("")
print(help)
return
for tmp_entry in crack_cmds:
if tmp_entry['address'] == address:
print("[-] error: address already contains a crack command.")
return
# set a new entry so we can deal with it in the callback
new_crack_entry = {}
new_crack_entry['address'] = address
new_crack_entry['return_value'] = return_value
crack_cmds.append(new_crack_entry)
target = get_target()
# we want a global breakpoint
breakpoint = target.BreakpointCreateByAddress(address)
# when the breakpoint is hit we get this callback executed
breakpoint.SetScriptCallbackFunction('lldbinit.crackcmd_callback')
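# callback executed when a 'crackcmd' breakpoint is hit: set rax/eax to the stored return
# value, return from the current frame, and continue execution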
def crackcmd_callback(frame, bp_loc, internal_dict):
global crack_cmds
# retrieve address we just hit
current_bp = bp_loc.GetLoadAddress()
print("[+] warning: hit crack command breakpoint at 0x{:x}".format(current_bp))
crack_entry = None
for tmp_entry in crack_cmds:
if tmp_entry['address'] == current_bp:
crack_entry = tmp_entry
break
if crack_entry == None:
print("[-] error: current breakpoint not found in list.")
return
# we can just set the register in the frame and return empty SBValue
if is_x64() == True:
frame.reg["rax"].value = str(crack_entry['return_value']).rstrip('L')
elif is_i386() == True:
frame.reg["eax"].value = str(crack_entry['return_value']).rstrip('L')
else:
print("[-] error: unsupported architecture.")
return
get_thread().ReturnFromFrame(frame, lldb.SBValue())
get_process().Continue()
# set a breakpoint with a command that doesn't return, just sets the specified register to a value
def cmd_crackcmd_noret(debugger, command, result, dict):
'''Set a breakpoint and a register to a value when hit. Use \'crackcmd_noret help\' for more information.'''
help = """
Set a breakpoint and a register to a value when hit.
Syntax: crackcmd_noret <address> <register> <value>
Sets the specified register to a value when the breakpoint at specified address is hit, and resumes execution.
"""
global crack_cmds_noret
cmd = command.split()
if len(cmd) == 0:
print("[-] error: please check required arguments.")
print("")
print(help)
return
if len(cmd) > 0 and cmd[0] == "help":
print(help)
return
if len(cmd) < 3:
print("[-] error: please check required arguments.")
print("")
print(help)
return
address = evaluate(cmd[0])
if address == None:
print("[-] error: invalid address.")
print("")
print(help)
return
# check if register is set and valid
if (cmd[1] in All_Registers) == False:
print("[-] error: invalid register.")
print("")
print(help)
return
value = evaluate(cmd[2])
if value == None:
print("[-] error: invalid value.")
print("")
print(help)
return
register = cmd[1]
for tmp_entry in crack_cmds_noret:
if tmp_entry['address'] == address:
print("[-] error: address already contains a crack command.")
return
# set a new entry so we can deal with it in the callback
new_crack_entry = {}
new_crack_entry['address'] = address
new_crack_entry['register'] = register
new_crack_entry['value'] = value
crack_cmds_noret.append(new_crack_entry)
target = get_target()
# we want a global breakpoint
breakpoint = target.BreakpointCreateByAddress(address)
# when the breakpoint is hit we get this callback executed
breakpoint.SetScriptCallbackFunction('lldbinit.crackcmd_noret_callback')
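# callback executed when a 'crackcmd_noret' breakpoint is hit: set the stored register to
# the stored value and continue execution without returning from the function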
def crackcmd_noret_callback(frame, bp_loc, internal_dict):
global crack_cmds_noret
# retrieve address we just hit
current_bp = bp_loc.GetLoadAddress()
print("[+] warning: hit crack command no ret breakpoint at 0x{:x}".format(current_bp))
crack_entry = None
for tmp_entry in crack_cmds_noret:
if tmp_entry['address'] == current_bp:
crack_entry = tmp_entry
break
if crack_entry == None:
print("[-] error: current breakpoint not found in list.")
return
# must be a string!
frame.reg[crack_entry['register']].value = str(crack_entry['value']).rstrip('L')
get_process().Continue()
# -----------------------
# Memory related commands
# -----------------------
'''
Output nice memory hexdumps...
'''
# display byte values and ASCII characters
def cmd_db(debugger, command, result, dict):
'''Display hex dump in byte values and ASCII characters. Use \'db help\' for more information.'''
help = """
Display memory hex dump in byte length and ASCII representation.
Syntax: db [<address>]
Note: if no address specified it will dump current instruction pointer address.
Note: expressions supported, do not use spaces between operators.
"""
global GlobalListOutput
GlobalListOutput = []
cmd = command.split()
if len(cmd) == 0:
dump_addr = get_current_pc()
if dump_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
dump_addr = evaluate(cmd[0])
if dump_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a start address.")
print("")
print(help)
return
err = lldb.SBError()
size = 0x100
while size != 0:
membuff = get_process().ReadMemory(dump_addr, size, err)
if err.Success() == False and size == 0:
output(str(err))
result.PutCString("".join(GlobalListOutput))
return
if err.Success() == True:
break
size = size - 1
membuff = membuff + "\x00" * (0x100-size)
color("BLUE")
if get_pointer_size() == 4:
output("[0x0000:0x%.08X]" % dump_addr)
output("------------------------------------------------------")
else:
output("[0x0000:0x%.016lX]" % dump_addr)
output("------------------------------------------------------")
color("BOLD")
output("[data]")
color("RESET")
output("\n")
#output(hexdump(dump_addr, membuff, " ", 16));
index = 0
while index < 0x100:
data = struct.unpack("B"*16, membuff[index:index+0x10])
if get_pointer_size() == 4:
szaddr = "0x%.08X" % dump_addr
else:
szaddr = "0x%.016lX" % dump_addr
fmtnice = "%.02X %.02X %.02X %.02X %.02X %.02X %.02X %.02X"
fmtnice = fmtnice + " - " + fmtnice
output("\033[1m%s :\033[0m %.02X %.02X %.02X %.02X %.02X %.02X %.02X %.02X - %.02X %.02X %.02X %.02X %.02X %.02X %.02X %.02X \033[1m%s\033[0m" %
(szaddr,
data[0],
data[1],
data[2],
data[3],
data[4],
data[5],
data[6],
data[7],
data[8],
data[9],
data[10],
data[11],
data[12],
data[13],
data[14],
data[15],
quotechars(membuff[index:index+0x10])));
if index + 0x10 != 0x100:
output("\n")
index += 0x10
dump_addr += 0x10
color("RESET")
#last element of the list has all data output...
#so we remove last \n
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
# display word values and ASCII characters
def cmd_dw(debugger, command, result, dict):
''' Display hex dump in word values and ASCII characters. Use \'dw help\' for more information.'''
help = """
Display memory hex dump in word length and ASCII representation.
Syntax: dw [<address>]
Note: if no address specified it will dump current instruction pointer address.
Note: expressions supported, do not use spaces between operators.
"""
global GlobalListOutput
GlobalListOutput = []
cmd = command.split()
if len(cmd) == 0:
dump_addr = get_current_pc()
if dump_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
dump_addr = evaluate(cmd[0])
if dump_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a start address.")
print("")
print(help)
return
err = lldb.SBError()
size = 0x100
while size != 0:
membuff = get_process().ReadMemory(dump_addr, size, err)
if err.Success() == False and size == 0:
output(str(err))
result.PutCString("".join(GlobalListOutput))
return
if err.Success() == True:
break
size = size - 2
membuff = membuff + "\x00" * (0x100-size)
color("BLUE")
if get_pointer_size() == 4: #is_i386() or is_arm():
output("[0x0000:0x%.08X]" % dump_addr)
output("--------------------------------------------")
else: #is_x64():
output("[0x0000:0x%.016lX]" % dump_addr)
output("--------------------------------------------")
color("BOLD")
output("[data]")
color("RESET")
output("\n")
index = 0
while index < 0x100:
data = struct.unpack("HHHHHHHH", membuff[index:index+0x10])
if get_pointer_size() == 4:
szaddr = "0x%.08X" % dump_addr
else:
szaddr = "0x%.016lX" % dump_addr
output("\033[1m%s :\033[0m %.04X %.04X %.04X %.04X %.04X %.04X %.04X %.04X \033[1m%s\033[0m" % (szaddr,
data[0],
data[1],
data[2],
data[3],
data[4],
data[5],
data[6],
data[7],
quotechars(membuff[index:index+0x10])));
if index + 0x10 != 0x100:
output("\n")
index += 0x10
dump_addr += 0x10
color("RESET")
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
# display dword values and ASCII characters
def cmd_dd(debugger, command, result, dict):
''' Display hex dump in double word values and ASCII characters. Use \'dd help\' for more information.'''
help = """
Display memory hex dump in double word length and ASCII representation.
Syntax: dd [<address>]
Note: if no address specified it will dump current instruction pointer address.
Note: expressions supported, do not use spaces between operators.
"""
global GlobalListOutput
GlobalListOutput = []
cmd = command.split()
if len(cmd) == 0:
dump_addr = get_current_pc()
if dump_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
dump_addr = evaluate(cmd[0])
if dump_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a start address.")
print("")
print(help)
return
err = lldb.SBError()
size = 0x100
while size != 0:
membuff = get_process().ReadMemory(dump_addr, size, err)
if err.Success() == False and size == 0:
output(str(err))
result.PutCString("".join(GlobalListOutput))
return
if err.Success() == True:
break
size = size - 4
membuff = membuff + "\x00" * (0x100-size)
color("BLUE")
if get_pointer_size() == 4: #is_i386() or is_arm():
output("[0x0000:0x%.08X]" % dump_addr)
output("----------------------------------------")
else: #is_x64():
output("[0x0000:0x%.016lX]" % dump_addr)
output("----------------------------------------")
color("BOLD")
output("[data]")
color("RESET")
output("\n")
index = 0
while index < 0x100:
(mem0, mem1, mem2, mem3) = struct.unpack("IIII", membuff[index:index+0x10])
if get_pointer_size() == 4: #is_i386() or is_arm():
szaddr = "0x%.08X" % dump_addr
else: #is_x64():
szaddr = "0x%.016lX" % dump_addr
output("\033[1m%s :\033[0m %.08X %.08X %.08X %.08X \033[1m%s\033[0m" % (szaddr,
mem0,
mem1,
mem2,
mem3,
quotechars(membuff[index:index+0x10])));
if index + 0x10 != 0x100:
output("\n")
index += 0x10
dump_addr += 0x10
color("RESET")
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
# display quad values
def cmd_dq(debugger, command, result, dict):
''' Display hex dump in quad values. Use \'dq help\' for more information.'''
help = """
Display memory hex dump in quad word length.
Syntax: dq [<address>]
Note: if no address specified it will dump current instruction pointer address.
Note: expressions supported, do not use spaces between operators.
"""
global GlobalListOutput
GlobalListOutput = []
cmd = command.split()
if len(cmd) == 0:
dump_addr = get_current_pc()
if dump_addr == 0:
print("[-] error: invalid current address.")
return
elif len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
dump_addr = evaluate(cmd[0])
if dump_addr == None:
print("[-] error: invalid input address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a start address.")
print("")
print(help)
return
err = lldb.SBError()
size = 0x100
while size != 0:
membuff = get_process().ReadMemory(dump_addr, size, err)
if err.Success() == False and size == 0:
output(str(err))
result.PutCString("".join(GlobalListOutput))
return
if err.Success() == True:
break
size = size - 8
membuff = membuff + "\x00" * (0x100-size)
if err.Success() == False:
output(str(err))
result.PutCString("".join(GlobalListOutput))
return
color("BLUE")
if get_pointer_size() == 4:
output("[0x0000:0x%.08X]" % dump_addr)
output("-------------------------------------------------------")
else:
output("[0x0000:0x%.016lX]" % dump_addr)
output("-------------------------------------------------------")
color("BOLD")
output("[data]")
color("RESET")
output("\n")
index = 0
while index < 0x100:
(mem0, mem1, mem2, mem3) = struct.unpack("QQQQ", membuff[index:index+0x20])
if get_pointer_size() == 4:
szaddr = "0x%.08X" % dump_addr
else:
szaddr = "0x%.016lX" % dump_addr
output("\033[1m%s :\033[0m %.016lX %.016lX %.016lX %.016lX" % (szaddr, mem0, mem1, mem2, mem3))
if index + 0x20 != 0x100:
output("\n")
index += 0x20
dump_addr += 0x20
color("RESET")
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
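# helper to build a hex dump string from 'chars' starting at 'addr': 'width' bytes per
# line separated by 'sep', limited to 'lines' output lines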
def hexdump(addr, chars, sep, width, lines=5):
l = []
line_count = 0
while chars:
if line_count >= lines:
break
line = chars[:width]
chars = chars[width:]
line = line.ljust( width, '\000' )
arch = get_arch()
if get_pointer_size() == 4:
szaddr = "0x%.08X" % addr
else:
szaddr = "0x%.016lX" % addr
l.append("\033[1m%s :\033[0m %s%s \033[1m%s\033[0m" % (szaddr, sep.join( "%02X" % ord(c) for c in line ), sep, quotechars( line )))
addr += 0x10
line_count = line_count + 1
return "\n".join(l)
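# return the printable ASCII representation of a byte buffer (non-printable bytes are displayed as '.')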
def quotechars( chars ):
data = ""
for x in bytearray(chars):
if x >= 0x20 and x <= 126:
data += chr(x)
else:
data += "."
return data
# XXX: help
def cmd_findmem(debugger, command, result, dict):
'''Search memory'''
help = """
[options]
-s searches for specified string
-u searches for specified unicode string
-b searches binary (eg. -b 4142434445 will find ABCDE anywhere in mem)
-d searches dword (eg. -d 0x41414141)
-q searches qword (eg. -d 0x4141414141414141)
-f loads pattern from file if it's too big to fit into any of the specified options
-c specify if you want to find N occurrences (default is all)
"""
global GlobalListOutput
GlobalListOutput = []
arg = str(command)
parser = argparse.ArgumentParser(prog="lldb")
parser.add_argument("-s", "--string", help="Search string")
parser.add_argument("-u", "--unicode", help="Search unicode string")
parser.add_argument("-b", "--binary", help="Search binary string")
parser.add_argument("-d", "--dword", help="Find dword (native packing)")
parser.add_argument("-q", "--qword", help="Find qword (native packing)")
parser.add_argument("-f", "--file" , help="Load find pattern from file")
parser.add_argument("-c", "--count", help="How many occurrences to find, default is all")
parser = parser.parse_args(arg.split())
if parser.string != None:
search_string = parser.string
elif parser.unicode != None:
search_string = unicode(parser.unicode)
elif parser.binary != None:
search_string = parser.binary.decode("hex")
elif parser.dword != None:
dword = evaluate(parser.dword)
if dword == None:
print("[-] Error evaluating : " + parser.dword)
return
search_string = struct.pack("I", dword & 0xffffffff)
elif parser.qword != None:
qword = evaluate(parser.qword)
if qword == None:
print("[-] Error evaluating : " + parser.qword)
return
search_string = struct.pack("Q", qword & 0xffffffffffffffff)
elif parser.file != None:
f = 0
try:
f = open(parser.file, "rb")
except:
print("[-] Failed to open file : " + parser.file)
return
search_string = f.read()
f.close()
else:
print("[-] Wrong option... use findmem --help")
return
count = -1
if parser.count != None:
count = evaluate(parser.count)
if count == None:
print("[-] Error evaluating count : " + parser.count)
return
process = get_process()
pid = process.GetProcessID()
output_data = subprocess.check_output(["/usr/bin/vmmap", "%d" % pid])
lines = output_data.split("\n")
#print(lines);
# this relies on the output of /usr/bin/vmmap so the code is dependent on that format
# the only reason it's used is the better description of the memory regions, which is
# nice to have. If vmmap output changes in the future, I'll switch to my own version
# since that output is much easier to parse...
newlines = []
for x in lines:
p = re.compile("([\S\s]+)\s([\da-fA-F]{16}-[\da-fA-F]{16}|[\da-fA-F]{8}-[\da-fA-F]{8})")
m = p.search(x)
if not m: continue
tmp = []
mem_name = m.group(1)
mem_range = m.group(2)
#0x000000-0x000000
mem_start = long(mem_range.split("-")[0], 16)
mem_end = long(mem_range.split("-")[1], 16)
tmp.append(mem_name)
tmp.append(mem_start)
tmp.append(mem_end)
newlines.append(tmp)
lines = sorted(newlines, key=lambda sortnewlines: sortnewlines[1])
# move line extraction a bit up so we can sort it later, since vmmap lists
# readable pages first and then writable pages, which looks a bit ugly :)
newlines = []
for x in lines:
mem_name = x[0]
mem_start= x[1]
mem_end = x[2]
mem_size = mem_end - mem_start
err = lldb.SBError()
membuff = process.ReadMemory(mem_start, mem_size, err)
if err.Success() == False:
#output(str(err));
#result.PutCString("".join(GlobalListOutput));
continue
off = 0
base_displayed = 0
while True:
if count == 0:
return
idx = membuff.find(search_string)
if idx == -1:
break
if count != -1:
count = count - 1
off += idx
GlobalListOutput = []
if get_pointer_size() == 4:
ptrformat = "%.08X"
else:
ptrformat = "%.016lX"
color("RESET")
output("Found at : ")
color("GREEN")
output(ptrformat % (mem_start + off))
color("RESET")
if base_displayed == 0:
output(" base : ")
color("YELLOW")
output(ptrformat % mem_start)
color("RESET")
base_displayed = 1
else:
output(" ")
if get_pointer_size() == 4:
output(" " * 8)
else:
output(" " * 16)
# well, if somebody allocated 4GB the offset will of course be too small to fit here
# but who cares...
output(" off : %.08X %s" % (off, mem_name))
print("".join(GlobalListOutput))
membuff = membuff[idx+len(search_string):]
off += len(search_string)
return
def cmd_datawin(debugger, command, result, dict):
'''Configure address to display in data window. Use \'datawin help\' for more information.'''
help = """
Configure address to display in data window.
Syntax: datawin <address>
The data window display will be fixed to the address you set. Useful to observe strings being decrypted, etc.
Note: expressions supported, do not use spaces between operators.
"""
global DATA_WINDOW_ADDRESS
cmd = command.split()
if len(cmd) == 0:
print("[-] error: please insert an address.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
dump_addr = evaluate(cmd[0])
if dump_addr == None:
print("[-] error: invalid address value.")
print("")
print(help)
DATA_WINDOW_ADDRESS = 0
return
DATA_WINDOW_ADDRESS = dump_addr
# ----------------------------------------------------------
# Functions to extract internal and process lldb information
# ----------------------------------------------------------
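# return the architecture portion of the selected target triple (for example x86_64, i386, armv7)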
def get_arch():
return lldb.debugger.GetSelectedTarget().triple.split('-')[0]
#return frame for stopped thread... there should be one at least...
def get_frame():
ret = None
# SBProcess supports thread iteration -> SBThread
for thread in get_process():
if thread.GetStopReason() != lldb.eStopReasonNone and thread.GetStopReason() != lldb.eStopReasonInvalid:
ret = thread.GetFrameAtIndex(0)
break
# this will generate a false positive when we start the target the first time because there's no context yet.
if ret == None:
print("[-] warning: get_frame() failed. Is the target binary started?")
return ret
def get_thread():
ret = None
# SBProcess supports thread iteration -> SBThread
for thread in get_process():
if thread.GetStopReason() != lldb.eStopReasonNone and thread.GetStopReason() != lldb.eStopReasonInvalid:
ret = thread
if ret == None:
print("[-] warning: get_thread() failed. Is the target binary started?")
return ret
def get_target():
target = lldb.debugger.GetSelectedTarget()
if not target:
print("[-] error: no target available. please add a target to lldb.")
return
return target
def get_process():
# process
# A read only property that returns an lldb object that represents the process (lldb.SBProcess) that this target owns.
return lldb.debugger.GetSelectedTarget().process
# evaluate an expression and return the value it represents
def evaluate(command):
frame = get_frame()
if frame != None:
value = frame.EvaluateExpression(command)
if value.IsValid() == False:
return None
try:
value = int(value.GetValue(), base=10)
return value
except Exception as e:
print("Exception on evaluate: " + str(e))
return None
# use the target version - if no target exists we can't do anything about it
else:
target = get_target()
if target == None:
return None
value = target.EvaluateExpression(command)
if value.IsValid() == False:
return None
try:
value = int(value.GetValue(), base=10)
return value
except:
return None
def is_i386():
arch = get_arch()
if arch[0:1] == "i":
return True
return False
def is_x64():
arch = get_arch()
if arch == "x86_64" or arch == "x86_64h":
return True
return False
def is_arm():
arch = get_arch()
if "arm" in arch:
return True
return False
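# return the target pointer size in bytes by evaluating sizeof(long): 4 for 32-bit and 8 for 64-bit targets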
def get_pointer_size():
poisz = evaluate("sizeof(long)")
return poisz
# from https://github.com/facebook/chisel/blob/master/fblldbobjcruntimehelpers.py
def get_instance_object():
instanceObject = None
if is_i386():
instanceObject = '*(id*)($esp+4)'
elif is_x64():
instanceObject = '(id)$rdi'
# not supported yet
elif is_arm():
instanceObject = None
return instanceObject
# -------------------------
# Register related commands
# -------------------------
# return the int value of a general purpose register
def get_gp_register(reg_name):
regs = get_registers("general purpose")
if regs == None:
return 0
for reg in regs:
if reg_name == reg.GetName():
#return int(reg.GetValue(), 16)
return reg.unsigned
return 0
def get_gp_registers():
regs = get_registers("general purpose")
if regs == None:
return 0
registers = {}
for reg in regs:
reg_name = reg.GetName()
registers[reg_name] = reg.unsigned
return registers
def get_register(reg_name):
regs = get_registers("general purpose")
if regs == None:
return "0"
for reg in regs:
if reg_name == reg.GetName():
return reg.GetValue()
return "0"
def get_registers(kind):
"""Returns the registers given the frame and the kind of registers desired.
Returns None if there's no such kind.
"""
frame = get_frame()
if frame == None:
return None
registerSet = frame.GetRegisters() # Return type of SBValueList.
for value in registerSet:
if kind.lower() in value.GetName().lower():
return value
return None
# retrieve current instruction pointer via platform independent $pc register
def get_current_pc():
frame = get_frame()
if frame == None:
return 0
pc = frame.FindRegister("pc")
return int(pc.GetValue(), 16)
# retrieve current stack pointer via registers information
# XXX: add ARM
def get_current_sp():
if is_i386():
sp_addr = get_gp_register("esp")
elif is_x64():
sp_addr = get_gp_register("rsp")
else:
print("[-] error: wrong architecture.")
return 0
return sp_addr
# helper function that updates given register
def update_register(register, command):
help = """
Update given register with a new value.
Syntax: register_name <value>
Where value can be a single value or an expression.
"""
cmd = command.split()
if len(cmd) == 0:
print("[-] error: command requires arguments.")
print("")
print(help)
return
if cmd[0] == "help":
print(help)
return
value = evaluate(command)
if value == None:
print("[-] error: invalid input value.")
print("")
print(help)
return
# we need to format because hex() will return string with an L and that will fail to update register
get_frame().reg[register].value = format(value, '#x')
# shortcut functions to modify each register
def cmd_rip(debugger, command, result, dict):
update_register("rip", command)
def cmd_rax(debugger, command, result, dict):
update_register("rax", command)
def cmd_rbx(debugger, command, result, dict):
update_register("rbx", command)
def cmd_rbp(debugger, command, result, dict):
update_register("rbp", command)
def cmd_rsp(debugger, command, result, dict):
update_register("rsp", command)
def cmd_rdi(debugger, command, result, dict):
update_register("rdi", command)
def cmd_rsi(debugger, command, result, dict):
update_register("rsi", command)
def cmd_rdx(debugger, command, result, dict):
update_register("rdx", command)
def cmd_rcx(debugger, command, result, dict):
update_register("rcx", command)
def cmd_r8(debugger, command, result, dict):
update_register("r8", command)
def cmd_r9(debugger, command, result, dict):
update_register("r9", command)
def cmd_r10(debugger, command, result, dict):
update_register("r10", command)
def cmd_r11(debugger, command, result, dict):
update_register("r11", command)
def cmd_r12(debugger, command, result, dict):
update_register("r12", command)
def cmd_r13(debugger, command, result, dict):
update_register("r13", command)
def cmd_r14(debugger, command, result, dict):
update_register("r14", command)
def cmd_r15(debugger, command, result, dict):
update_register("r15", command)
def cmd_eip(debugger, command, result, dict):
update_register("eip", command)
def cmd_eax(debugger, command, result, dict):
update_register("eax", command)
def cmd_ebx(debugger, command, result, dict):
update_register("ebx", command)
def cmd_ebp(debugger, command, result, dict):
update_register("ebp", command)
def cmd_esp(debugger, command, result, dict):
update_register("esp", command)
def cmd_edi(debugger, command, result, dict):
update_register("edi", command)
def cmd_esi(debugger, command, result, dict):
update_register("esi", command)
def cmd_edx(debugger, command, result, dict):
update_register("edx", command)
def cmd_ecx(debugger, command, result, dict):
update_register("ecx", command)
# -----------------------------
# modify eflags/rflags commands
# -----------------------------
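# flip the given flag bit (CF, PF, AF, ZF, SF, TF, IF, DF, OF) in the EFLAGS/RFLAGS register of the current frame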
def modify_eflags(flag):
# read the current value so we can modify it
if is_x64():
eflags = get_gp_register("rflags")
elif is_i386():
eflags = get_gp_register("eflags")
else:
print("[-] error: unsupported architecture.")
return
masks = { "CF":0, "PF":2, "AF":4, "ZF":6, "SF":7, "TF":8, "IF":9, "DF":10, "OF":11 }
if flag not in masks.keys():
print("[-] error: requested flag not available")
return
# we invert whatever value is set
if bool(eflags & (1 << masks[flag])) == True:
eflags = eflags & ~(1 << masks[flag])
else:
eflags = eflags | (1 << masks[flag])
# finally update the value
if is_x64():
get_frame().reg["rflags"].value = format(eflags, '#x')
elif is_i386():
get_frame().reg["eflags"].value = format(eflags, '#x')
def cmd_cfa(debugger, command, result, dict):
'''Change adjust flag. Use \'cfa help\' for more information.'''
help = """
Flip current adjust flag.
Syntax: cfa
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("AF")
def cmd_cfc(debugger, command, result, dict):
'''Change carry flag. Use \'cfc help\' for more information.'''
help = """
Flip current carry flag.
Syntax: cfc
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("CF")
def cmd_cfd(debugger, command, result, dict):
'''Change direction flag. Use \'cfd help\' for more information.'''
help = """
Flip current direction flag.
Syntax: cfd
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("DF")
def cmd_cfi(debugger, command, result, dict):
'''Change interrupt flag. Use \'cfi help\' for more information.'''
help = """
Flip current interrupt flag.
Syntax: cfi
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("IF")
def cmd_cfo(debugger, command, result, dict):
'''Change overflow flag. Use \'cfo help\' for more information.'''
help = """
Flip current overflow flag.
Syntax: cfo
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("OF")
def cmd_cfp(debugger, command, result, dict):
'''Change parity flag. Use \'cfp help\' for more information.'''
help = """
Flip current parity flag.
Syntax: cfp
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("PF")
def cmd_cfs(debugger, command, result, dict):
'''Change sign flag. Use \'cfs help\' for more information.'''
help = """
Flip current sign flag.
Syntax: cfs
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("SF")
def cmd_cft(debugger, command, result, dict):
'''Change trap flag. Use \'cft help\' for more information.'''
help = """
Flip current trap flag.
Syntax: cft
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("TF")
def cmd_cfz(debugger, command, result, dict):
'''Change zero flag. Use \'cfz help\' for more information.'''
help = """
Flip current zero flag.
Syntax: cfz
"""
cmd = command.split()
if len(cmd) != 0:
if cmd[0] == "help":
print(help)
return
print("[-] error: command doesn't take any arguments.")
print("")
print(help)
return
modify_eflags("ZF")
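# output a compact flags representation: one letter per flag, uppercase if the flag is set, lowercase if clear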
def dump_eflags(eflags):
# the registers are printed by inverse order of bit field
# no idea where this comes from :-]
# masks = { "CF":0, "PF":2, "AF":4, "ZF":6, "SF":7, "TF":8, "IF":9, "DF":10, "OF":11 }
# printTuples = sorted(masks.items() , reverse=True, key=lambda x: x[1])
eflagsTuples = [('OF', 11), ('DF', 10), ('IF', 9), ('TF', 8), ('SF', 7), ('ZF', 6), ('AF', 4), ('PF', 2), ('CF', 0)]
# use the first character of each register key to output, lowercase if bit not set
for flag, bitfield in eflagsTuples :
if bool(eflags & (1 << bitfield)) == True:
output(flag[0] + " ")
else:
output(flag[0].lower() + " ")
# function to dump the conditional jumps results
def dump_jumpx86(eflags):
# masks and flags from https://github.com/ant4g0nist/lisa.py
masks = { "CF":0, "PF":2, "AF":4, "ZF":6, "SF":7, "TF":8, "IF":9, "DF":10, "OF":11 }
flags = { key: bool(eflags & (1 << value)) for key, value in masks.items() }
error = lldb.SBError()
target = get_target()
if is_i386():
pc_addr = get_gp_register("eip")
elif is_x64():
pc_addr = get_gp_register("rip")
else:
print("[-] error: wrong architecture.")
return
mnemonic = get_mnemonic(pc_addr)
color("RED")
output_string=""
## opcode 0x77: JA, JNBE (jump if CF=0 and ZF=0)
## opcode 0x0F87: JNBE, JA
if "ja" == mnemonic or "jnbe" == mnemonic:
if flags["CF"] == False and flags["ZF"] == False:
output_string="Jump is taken (c = 0 and z = 0)"
else:
output_string="Jump is NOT taken (c = 0 and z = 0)"
## opcode 0x73: JAE, JNB, JNC (jump if CF=0)
## opcode 0x0F83: JNC, JNB, JAE (jump if CF=0)
if "jae" == mnemonic or "jnb" == mnemonic or "jnc" == mnemonic:
if flags["CF"] == False:
output_string="Jump is taken (c = 0)"
else:
output_string="Jump is NOT taken (c != 0)"
## opcode 0x72: JB, JC, JNAE (jump if CF=1)
## opcode 0x0F82: JNAE, JB, JC
if "jb" == mnemonic or "jc" == mnemonic or "jnae" == mnemonic:
if flags["CF"] == True:
output_string="Jump is taken (c = 1)"
else:
output_string="Jump is NOT taken (c != 1)"
## opcode 0x76: JBE, JNA (jump if CF=1 or ZF=1)
## opcode 0x0F86: JBE, JNA
if "jbe" == mnemonic or "jna" == mnemonic:
if flags["CF"] == True or flags["ZF"] == 1:
output_string="Jump is taken (c = 1 or z = 1)"
else:
output_string="Jump is NOT taken (c != 1 or z != 1)"
## opcode 0xE3: JCXZ, JECXZ, JRCXZ (jump if CX=0 or ECX=0 or RCX=0)
# XXX: we just need cx output...
if "jcxz" == mnemonic or "jecxz" == mnemonic or "jrcxz" == mnemonic:
rcx = get_gp_register("rcx")
ecx = get_gp_register("ecx")
cx = get_gp_register("cx")
if ecx == 0 or cx == 0 or rcx == 0:
output_string="Jump is taken (cx = 0 or ecx = 0 or rcx = 0)"
else:
output_string="Jump is NOT taken (cx != 0 or ecx != 0 or rcx != 0)"
## opcode 0x74: JE, JZ (jump if ZF=1)
    ## opcode 0x0F84: JE, JZ (jump if ZF=1)
if "je" == mnemonic or "jz" == mnemonic:
if flags["ZF"] == 1:
output_string="Jump is taken (z = 1)"
else:
output_string="Jump is NOT taken (z != 1)"
## opcode 0x7F: JG, JNLE (jump if ZF=0 and SF=OF)
## opcode 0x0F8F: JNLE, JG (jump if ZF=0 and SF=OF)
if "jg" == mnemonic or "jnle" == mnemonic:
if flags["ZF"] == 0 and flags["SF"] == flags["OF"]:
output_string="Jump is taken (z = 0 and s = o)"
else:
output_string="Jump is NOT taken (z != 0 or s != o)"
## opcode 0x7D: JGE, JNL (jump if SF=OF)
## opcode 0x0F8D: JNL, JGE (jump if SF=OF)
if "jge" == mnemonic or "jnl" == mnemonic:
if flags["SF"] == flags["OF"]:
output_string="Jump is taken (s = o)"
else:
output_string="Jump is NOT taken (s != o)"
## opcode: 0x7C: JL, JNGE (jump if SF != OF)
## opcode: 0x0F8C: JNGE, JL (jump if SF != OF)
if "jl" == mnemonic or "jnge" == mnemonic:
if flags["SF"] != flags["OF"]:
output_string="Jump is taken (s != o)"
else:
output_string="Jump is NOT taken (s = o)"
## opcode 0x7E: JLE, JNG (jump if ZF = 1 or SF != OF)
## opcode 0x0F8E: JNG, JLE (jump if ZF = 1 or SF != OF)
if "jle" == mnemonic or "jng" == mnemonic:
if flags["ZF"] == 1 or flags["SF"] != flags["OF"]:
output_string="Jump is taken (z = 1 or s != o)"
else:
output_string="Jump is NOT taken (z != 1 or s = o)"
## opcode 0x75: JNE, JNZ (jump if ZF = 0)
## opcode 0x0F85: JNE, JNZ (jump if ZF = 0)
if "jne" == mnemonic or "jnz" == mnemonic:
if flags["ZF"] == 0:
output_string="Jump is taken (z = 0)"
else:
output_string="Jump is NOT taken (z != 0)"
## opcode 0x71: JNO (OF = 0)
## opcode 0x0F81: JNO (OF = 0)
if "jno" == mnemonic:
if flags["OF"] == 0:
output_string="Jump is taken (o = 0)"
else:
output_string="Jump is NOT taken (o != 0)"
## opcode 0x7B: JNP, JPO (jump if PF = 0)
## opcode 0x0F8B: JPO (jump if PF = 0)
if "jnp" == mnemonic or "jpo" == mnemonic:
if flags["PF"] == 0:
output_string="Jump is NOT taken (p = 0)"
else:
output_string="Jump is taken (p != 0)"
## opcode 0x79: JNS (jump if SF = 0)
## opcode 0x0F89: JNS (jump if SF = 0)
if "jns" == mnemonic:
if flags["SF"] == 0:
output_string="Jump is taken (s = 0)"
else:
output_string="Jump is NOT taken (s != 0)"
## opcode 0x70: JO (jump if OF=1)
## opcode 0x0F80: JO (jump if OF=1)
if "jo" == mnemonic:
if flags["OF"] == 1:
output_string="Jump is taken (o = 1)"
else:
output_string="Jump is NOT taken (o != 1)"
## opcode 0x7A: JP, JPE (jump if PF=1)
## opcode 0x0F8A: JP, JPE (jump if PF=1)
if "jp" == mnemonic or "jpe" == mnemonic:
if flags["PF"] == 1:
output_string="Jump is taken (p = 1)"
else:
output_string="Jump is NOT taken (p != 1)"
## opcode 0x78: JS (jump if SF=1)
## opcode 0x0F88: JS (jump if SF=1)
if "js" == mnemonic:
if flags["SF"] == 1:
output_string="Jump is taken (s = 1)"
else:
output_string="Jump is NOT taken (s != 1)"
if is_i386():
output(" " + output_string)
elif is_x64():
output(" " + output_string)
else:
output(output_string)
color("RESET")
def reg64():
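    # dump the x86_64 general purpose and segment registers, highlighting values that changed since the last stop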
registers = get_gp_registers()
rax = registers["rax"]
rcx = registers["rcx"]
rdx = registers["rdx"]
rbx = registers["rbx"]
rsp = registers["rsp"]
rbp = registers["rbp"]
rsi = registers["rsi"]
rdi = registers["rdi"]
r8 = registers["r8"]
r9 = registers["r9"]
r10 = registers["r10"]
r11 = registers["r11"]
r12 = registers["r12"]
r13 = registers["r13"]
r14 = registers["r14"]
r15 = registers["r15"]
rip = registers["rip"]
rflags = registers["rflags"]
cs = registers["cs"]
gs = registers["gs"]
fs = registers["fs"]
color(COLOR_REGNAME)
output(" RAX: ")
if rax == old_x64["rax"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rax))
old_x64["rax"] = rax
color(COLOR_REGNAME)
output(" RBX: ")
if rbx == old_x64["rbx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rbx))
old_x64["rbx"] = rbx
color(COLOR_REGNAME)
output(" RBP: ")
if rbp == old_x64["rbp"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rbp))
old_x64["rbp"] = rbp
color(COLOR_REGNAME)
output(" RSP: ")
if rsp == old_x64["rsp"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rsp))
old_x64["rsp"] = rsp
output(" ")
color("BOLD")
color("UNDERLINE")
color(COLOR_CPUFLAGS)
dump_eflags(rflags)
color("RESET")
output("\n")
color(COLOR_REGNAME)
output(" RDI: ")
if rdi == old_x64["rdi"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rdi))
old_x64["rdi"] = rdi
color(COLOR_REGNAME)
output(" RSI: ")
if rsi == old_x64["rsi"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rsi))
old_x64["rsi"] = rsi
color(COLOR_REGNAME)
output(" RDX: ")
if rdx == old_x64["rdx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rdx))
old_x64["rdx"] = rdx
color(COLOR_REGNAME)
output(" RCX: ")
if rcx == old_x64["rcx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rcx))
old_x64["rcx"] = rcx
color(COLOR_REGNAME)
output(" RIP: ")
if rip == old_x64["rip"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (rip))
old_x64["rip"] = rip
output("\n")
color(COLOR_REGNAME)
output(" R8: ")
if r8 == old_x64["r8"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r8))
old_x64["r8"] = r8
color(COLOR_REGNAME)
output(" R9: ")
if r9 == old_x64["r9"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r9))
old_x64["r9"] = r9
color(COLOR_REGNAME)
output(" R10: ")
if r10 == old_x64["r10"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r10))
old_x64["r10"] = r10
color(COLOR_REGNAME)
output(" R11: ")
if r11 == old_x64["r11"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r11))
old_x64["r11"] = r11
color(COLOR_REGNAME)
output(" R12: ")
if r12 == old_x64["r12"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r12))
old_x64["r12"] = r12
output("\n")
color(COLOR_REGNAME)
output(" R13: ")
if r13 == old_x64["r13"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r13))
old_x64["r13"] = r13
color(COLOR_REGNAME)
output(" R14: ")
if r14 == old_x64["r14"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r14))
old_x64["r14"] = r14
color(COLOR_REGNAME)
output(" R15: ")
if r15 == old_x64["r15"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.016lX" % (r15))
old_x64["r15"] = r15
output("\n")
color(COLOR_REGNAME)
output(" CS: ")
if cs == old_x64["cs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (cs))
old_x64["cs"] = cs
color(COLOR_REGNAME)
output(" FS: ")
if fs == old_x64["fs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (fs))
old_x64["fs"] = fs
color(COLOR_REGNAME)
output(" GS: ")
if gs == old_x64["gs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (gs))
old_x64["gs"] = gs
dump_jumpx86(rflags)
output("\n")
def reg32():
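    # dump the i386 general purpose and segment registers, highlighting values that changed since the last stop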
registers = get_gp_registers()
eax = registers["eax"]
ecx = registers["ecx"]
edx = registers["edx"]
ebx = registers["ebx"]
esp = registers["esp"]
ebp = registers["ebp"]
esi = registers["esi"]
edi = registers["edi"]
eflags = registers["eflags"]
cs = registers["cs"]
ds = registers["ds"]
es = registers["es"]
gs = registers["gs"]
fs = registers["fs"]
ss = registers["ss"]
color(COLOR_REGNAME)
output(" EAX: ")
if eax == old_x86["eax"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (eax))
old_x86["eax"] = eax
color(COLOR_REGNAME)
output(" EBX: ")
if ebx == old_x86["ebx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (ebx))
old_x86["ebx"] = ebx
color(COLOR_REGNAME)
output(" ECX: ")
if ecx == old_x86["ecx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (ecx))
old_x86["ecx"] = ecx
color(COLOR_REGNAME)
output(" EDX: ")
if edx == old_x86["edx"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (edx))
old_x86["edx"] = edx
output(" ")
color("BOLD")
color("UNDERLINE")
color(COLOR_CPUFLAGS)
dump_eflags(eflags)
color("RESET")
output("\n")
color(COLOR_REGNAME)
output(" ESI: ")
if esi == old_x86["esi"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (esi))
old_x86["esi"] = esi
color(COLOR_REGNAME)
output(" EDI: ")
if edi == old_x86["edi"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (edi))
old_x86["edi"] = edi
color(COLOR_REGNAME)
output(" EBP: ")
if ebp == old_x86["ebp"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (ebp))
old_x86["ebp"] = ebp
color(COLOR_REGNAME)
output(" ESP: ")
if esp == old_x86["esp"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (esp))
old_x86["esp"] = esp
color(COLOR_REGNAME)
output(" EIP: ")
if eip == old_x86["eip"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (eip))
old_x86["eip"] = eip
output("\n")
color(COLOR_REGNAME)
output(" CS: ")
if cs == old_x86["cs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (cs))
old_x86["cs"] = cs
color(COLOR_REGNAME)
output(" DS: ")
if ds == old_x86["ds"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (ds))
old_x86["ds"] = ds
color(COLOR_REGNAME)
output(" ES: ")
if es == old_x86["es"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (es))
old_x86["es"] = es
color(COLOR_REGNAME)
output(" FS: ")
if fs == old_x86["fs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (fs))
old_x86["fs"] = fs
color(COLOR_REGNAME)
output(" GS: ")
if gs == old_x86["gs"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (gs))
old_x86["gs"] = gs
color(COLOR_REGNAME)
output(" SS: ")
if ss == old_x86["ss"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("%.04X" % (ss))
old_x86["ss"] = ss
dump_jumpx86(eflags)
output("\n")
def dump_cpsr(cpsr):
# XXX: some fields reserved in recent ARM specs so we should revise and set to latest?
cpsrTuples = [ ('N', 31), ('Z', 30), ('C', 29), ('V', 28), ('Q', 27), ('J', 24),
('E', 9), ('A', 8), ('I', 7), ('F', 6), ('T', 5) ]
# use the first character of each register key to output, lowercase if bit not set
for flag, bitfield in cpsrTuples :
if bool(cpsr & (1 << bitfield)) == True:
output(flag + " ")
else:
output(flag.lower() + " ")
def regarm():
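    # dump the 32-bit ARM registers and CPSR flags, highlighting values that changed since the last stop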
color(COLOR_REGNAME)
output(" R0: ")
r0 = get_gp_register("r0")
if r0 == old_arm["r0"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r0))
old_arm["r0"] = r0
color(COLOR_REGNAME)
output(" R1: ")
r1 = get_gp_register("r1")
if r1 == old_arm["r1"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r1))
old_arm["r1"] = r1
color(COLOR_REGNAME)
output(" R2: ")
r2 = get_gp_register("r2")
if r2 == old_arm["r2"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r2))
old_arm["r2"] = r2
color(COLOR_REGNAME)
output(" R3: ")
r3 = get_gp_register("r3")
if r3 == old_arm["r3"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r3))
old_arm["r3"] = r3
output(" ")
color("BOLD")
color("UNDERLINE")
color(COLOR_CPUFLAGS)
cpsr = get_gp_register("cpsr")
dump_cpsr(cpsr)
color("RESET")
output("\n")
color(COLOR_REGNAME)
output(" R4: ")
r4 = get_gp_register("r4")
if r4 == old_arm["r4"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r4))
old_arm["r4"] = r4
color(COLOR_REGNAME)
output(" R5: ")
r5 = get_gp_register("r5")
if r5 == old_arm["r5"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r5))
old_arm["r5"] = r5
color(COLOR_REGNAME)
output(" R6: ")
r6 = get_gp_register("r6")
if r6 == old_arm["r6"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r6))
old_arm["r6"] = r6
color(COLOR_REGNAME)
output(" R7: ")
r7 = get_gp_register("r7")
if r7 == old_arm["r7"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r7))
old_arm["r7"] = r7
output("\n")
color(COLOR_REGNAME)
output(" R8: ")
r8 = get_gp_register("r8")
if r8 == old_arm["r8"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r8))
old_arm["r8"] = r8
color(COLOR_REGNAME)
output(" R9: ")
r9 = get_gp_register("r9")
if r9 == old_arm["r9"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r9))
old_arm["r9"] = r9
color(COLOR_REGNAME)
output(" R10: ")
r10 = get_gp_register("r10")
if r10 == old_arm["r10"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r10))
old_arm["r10"] = r10
color(COLOR_REGNAME)
output(" R11: ")
r11 = get_gp_register("r11")
if r11 == old_arm["r11"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r11))
old_arm["r11"] = r11
output("\n")
color(COLOR_REGNAME)
output(" R12: ")
r12 = get_gp_register("r12")
if r12 == old_arm["r12"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (r12))
old_arm["r12"] = r12
color(COLOR_REGNAME)
output(" SP: ")
sp = get_gp_register("sp")
if sp == old_arm["sp"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (sp))
old_arm["sp"] = sp
color(COLOR_REGNAME)
output(" LR: ")
lr = get_gp_register("lr")
if lr == old_arm["lr"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (lr))
old_arm["lr"] = lr
color(COLOR_REGNAME)
output(" PC: ")
pc = get_gp_register("pc")
if pc == old_arm["pc"]:
color(COLOR_REGVAL)
else:
color(COLOR_REGVAL_MODIFIED)
output("0x%.08X" % (pc))
old_arm["pc"] = pc
output("\n")
def print_registers():
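    # dispatch the register dump to the architecture specific routine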
arch = get_arch()
if is_i386():
reg32()
elif is_x64():
reg64()
elif is_arm():
regarm()
'''
The si, c and r commands override the default ones to consume their output.
For example:
si is thread step-in, which by default dumps thread and frame info
after every step. Consuming the output of this instruction allows us
to nicely display information in our hook-stop.
The same goes for c and r (continue and run).
'''
def cmd_si(debugger, command, result, dict):
debugger.SetAsync(True)
res = lldb.SBCommandReturnObject()
lldb.debugger.GetSelectedTarget().process.selected_thread.StepInstruction(False)
result.SetStatus(lldb.eReturnStatusSuccessFinishNoResult)
def c(debugger, command, result, dict):
debugger.SetAsync(True)
res = lldb.SBCommandReturnObject()
lldb.debugger.GetSelectedTarget().GetProcess().Continue()
result.SetStatus(lldb.eReturnStatusSuccessFinishNoResult)
# ------------------------------
# Disassembler related functions
# ------------------------------
'''
Handles the 'u' command, which displays instructions. Also handles the output of
the 'disassemble' command ...
'''
# XXX: help
def cmd_DumpInstructions(debugger, command, result, dict):
'''Dump instructions at certain address (SoftICE like u command style)'''
help = """ """
global GlobalListOutput
GlobalListOutput = []
target = get_target()
cmd = command.split()
if len(cmd) == 0 or len(cmd) > 2:
disassemble(get_current_pc(), CONFIG_DISASSEMBLY_LINE_COUNT)
elif len(cmd) == 1:
address = evaluate(cmd[0])
if address == None:
return
disassemble(address, CONFIG_DISASSEMBLY_LINE_COUNT)
else:
address = evaluate(cmd[0])
if address == None:
return
count = evaluate(cmd[1])
if count == None:
return
disassemble(address, count)
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
# return the instruction mnemonic at input address
def get_mnemonic(target_addr):
err = lldb.SBError()
target = get_target()
instruction_list = target.ReadInstructions(lldb.SBAddress(target_addr, target), 1, 'intel')
if instruction_list.GetSize() == 0:
print("[-] error: not enough instructions disassembled.")
return ""
cur_instruction = instruction_list.GetInstructionAtIndex(0)
# much easier to use the mnemonic output instead of disassembling via cmd line and parse
mnemonic = cur_instruction.GetMnemonic(target)
return mnemonic
# returns the instruction operands
def get_operands(source_address):
err = lldb.SBError()
target = get_target()
# use current memory address
# needs to be this way to workaround SBAddress init bug
src_sbaddr = lldb.SBAddress()
src_sbaddr.load_addr = source_address
instruction_list = target.ReadInstructions(src_sbaddr, 1, 'intel')
if instruction_list.GetSize() == 0:
print("[-] error: not enough instructions disassembled.")
return ""
cur_instruction = instruction_list[0]
return cur_instruction.operands
# find out the size of an instruction using internal disassembler
def get_inst_size(target_addr):
err = lldb.SBError()
target = get_target()
instruction_list = target.ReadInstructions(lldb.SBAddress(target_addr, target), 1, 'intel')
if instruction_list.GetSize() == 0:
print("[-] error: not enough instructions disassembled.")
return 0
cur_instruction = instruction_list.GetInstructionAtIndex(0)
return cur_instruction.size
# the disassembler we use on stop context
# we can customize output here instead of using the cmdline as before and grabbing its output
def disassemble(start_address, count):
target = get_target()
if target == None:
return
# this init will set a file_addr instead of expected load_addr
# and so the disassembler output will be referenced to the file address
# instead of the current loaded memory address
# this is annoying because all RIP references will be related to file addresses
file_sbaddr = lldb.SBAddress(start_address, target)
# create a SBAddress object with the load_addr set so we can disassemble with
# current memory addresses and what is happening right now
# we use the empty init and then set the property which is read/write for load_addr
# this whole thing seems like a bug?
mem_sbaddr = lldb.SBAddress()
mem_sbaddr.load_addr = start_address
# disassemble to get the file and memory version
# we could compute this by finding sections etc but this way it seems
# much simpler and faster
# this seems to be a bug or missing feature because there is no way
# to distinguish between the load and file addresses in the disassembler
# the reason might be because we can't create a SBAddress that has
# load_addr and file_addr set so that the disassembler can distinguish them
# somehow when we use file_sbaddr object the SBAddress GetLoadAddress()
# retrieves the correct memory address for the instruction while the
    # SBAddress GetFileAddress() retrieves the correct file address
# but the branch instructions addresses are the file addresses
# bug on SBAddress init implementation???
# this also has problems with symbols - the memory version doesn't have them
instructions_mem = target.ReadInstructions(mem_sbaddr, count, "intel")
instructions_file = target.ReadInstructions(file_sbaddr, count, "intel")
if instructions_mem.GetSize() != instructions_file.GetSize():
print("[-] error: instructions arrays sizes are different.")
return
    # find out the biggest instruction length and mnemonic length
# so we can have a uniform output
max_size = 0
max_mnem_size = 0
for i in instructions_mem:
if i.size > max_size:
max_size = i.size
mnem_len = len(i.mnemonic)
if mnem_len > max_mnem_size:
max_mnem_size = mnem_len
current_pc = get_current_pc()
# get info about module if there is a symbol
module = file_sbaddr.module
#module_name = module.file.GetFilename()
module_name = module.file.fullpath
count = 0
blockstart_sbaddr = None
blockend_sbaddr = None
for mem_inst in instructions_mem:
# get the same instruction but from the file version because we need some info from it
file_inst = instructions_file[count]
# try to extract the symbol name from this location if it exists
        # needs to be referenced to the file address because it doesn't work with memory addresses
symbol_name = instructions_file[count].addr.GetSymbol().GetName()
# if there is no symbol just display module where current instruction is
# also get rid of unnamed symbols since they are useless
if symbol_name == None or "___lldb_unnamed_symbol" in symbol_name:
if count == 0:
if CONFIG_ENABLE_COLOR == 1:
color(COLOR_SYMBOL_NAME)
output("@ {}:".format(module_name) + "\n")
color("RESET")
else:
output("@ {}:".format(module_name) + "\n")
elif symbol_name != None:
# print the first time there is a symbol name and save its interval
# so we don't print again until there is a different symbol
if blockstart_sbaddr == None or (int(file_inst.addr) < int(blockstart_sbaddr)) or (int(file_inst.addr) >= int(blockend_sbaddr)):
if CONFIG_ENABLE_COLOR == 1:
color(COLOR_SYMBOL_NAME)
output("{} @ {}:".format(symbol_name, module_name) + "\n")
color("RESET")
else:
output("{} @ {}:".format(symbol_name, module_name) + "\n")
blockstart_sbaddr = file_inst.addr.GetSymbol().GetStartAddress()
blockend_sbaddr = file_inst.addr.GetSymbol().GetEndAddress()
# get the instruction bytes formatted as uint8
inst_data = mem_inst.GetData(target).uint8
mnem = mem_inst.mnemonic
operands = mem_inst.operands
bytes_string = ""
total_fill = max_size - mem_inst.size
total_spaces = mem_inst.size - 1
for x in inst_data:
bytes_string += "{:02x}".format(x)
if total_spaces > 0:
bytes_string += " "
total_spaces -= 1
if total_fill > 0:
# we need one more space because the last byte doesn't have space
# and if we are smaller than max size we are one space short
bytes_string += " " * total_fill
bytes_string += " " * total_fill
mnem_len = len(mem_inst.mnemonic)
if mnem_len < max_mnem_size:
missing_spaces = max_mnem_size - mnem_len
mnem += " " * missing_spaces
# the address the current instruction is loaded at
# we need to extract the address of the instruction and then find its loaded address
memory_addr = mem_inst.addr.GetLoadAddress(target)
# the address of the instruction in the current module
# for main exe it will be the address before ASLR if enabled, otherwise the same as current
# for modules it will be the address in the module code, not the address it's loaded at
        # so we can use this address to quickly locate the current instruction when the module is loaded in a disassembler
# without having to rebase everything etc
#file_addr = mem_inst.addr.GetFileAddress()
file_addr = file_inst.addr.GetFileAddress()
comment = ""
if file_inst.comment != "":
comment = " ; " + file_inst.comment
if current_pc == memory_addr:
# try to retrieve extra information if it's a branch instruction
# used to resolve indirect branches and try to extract Objective-C selectors
if mem_inst.DoesBranch():
flow_addr = get_indirect_flow_address(int(mem_inst.addr))
if flow_addr > 0:
flow_module_name = get_module_name(flow_addr)
symbol_info = ""
# try to solve the symbol for the target address
target_symbol_name = lldb.SBAddress(flow_addr,target).GetSymbol().GetName()
# if there is a symbol append to the string otherwise
# it will be empty and have no impact in output
if target_symbol_name != None:
symbol_info = target_symbol_name + " @ "
if comment == "":
# remove space for instructions without operands
if mem_inst.operands == "":
comment = "; " + symbol_info + hex(flow_addr) + " @ " + flow_module_name
else:
comment = " ; " + symbol_info + hex(flow_addr) + " @ " + flow_module_name
else:
comment = comment + " " + hex(flow_addr) + " @ " + flow_module_name
objc = get_objectivec_selector(current_pc)
if objc != "":
comment = comment + " -> " + objc
if CONFIG_ENABLE_COLOR == 1:
color("BOLD")
color(COLOR_CURRENT_PC)
output("-> 0x{:x} (0x{:x}): {} {} {}{}".format(memory_addr, file_addr, bytes_string, mnem, operands, comment) + "\n")
color("RESET")
else:
output("-> 0x{:x} (0x{:x}): {} {} {}{}".format(memory_addr, file_addr, bytes_string, mnem, operands, comment) + "\n")
else:
output(" 0x{:x} (0x{:x}): {} {} {}{}".format(memory_addr, file_addr, bytes_string, mnem, operands, comment) + "\n")
count += 1
return
# ------------------------------------
# Commands that use external utilities
# ------------------------------------
def cmd_show_loadcmds(debugger, command, result, dict):
'''Show otool output of Mach-O load commands. Use \'show_loadcmds\' for more information.'''
help = """
Show otool output of Mach-O load commands.
Syntax: show_loadcmds <address>
Where address is start of Mach-O header in memory.
Note: expressions supported, do not use spaces between operators.
"""
error = lldb.SBError()
cmd = command.split()
if len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
header_addr = evaluate(cmd[0])
if header_addr == None:
print("[-] error: invalid header address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a valid Mach-O header address.")
print("")
print(help)
return
if os.path.isfile("/usr/bin/otool") == False:
print("/usr/bin/otool not found. Please install Xcode or Xcode command line tools.")
return
bytes_string = get_process().ReadMemory(header_addr, 4096*10, error)
if error.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(header_addr))
return
# open a temporary filename and set it to delete on close
f = tempfile.NamedTemporaryFile(delete=True)
    f.write(bytes_string)
    # flush buffered data so otool reads the complete file
    f.flush()
# pass output to otool
output_data = subprocess.check_output(["/usr/bin/otool", "-l", f.name])
# show the data
print(output_data)
# close file - it will be automatically deleted
f.close()
return
def cmd_show_header(debugger, command, result, dict):
'''Show otool output of Mach-O header. Use \'show_header\' for more information.'''
help = """
Show otool output of Mach-O header.
Syntax: show_header <address>
Where address is start of Mach-O header in memory.
Note: expressions supported, do not use spaces between operators.
"""
error = lldb.SBError()
cmd = command.split()
if len(cmd) == 1:
if cmd[0] == "help":
print(help)
return
header_addr = evaluate(cmd[0])
if header_addr == None:
print("[-] error: invalid header address value.")
print("")
print(help)
return
else:
print("[-] error: please insert a valid Mach-O header address.")
print("")
print(help)
return
if os.path.isfile("/usr/bin/otool") == False:
print("/usr/bin/otool not found. Please install Xcode or Xcode command line tools.")
return
# recent otool versions will fail so we need to read a reasonable amount of memory
# even just for the mach-o header
bytes_string = get_process().ReadMemory(header_addr, 4096*10, error)
if error.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(header_addr))
return
# open a temporary filename and set it to delete on close
f = tempfile.NamedTemporaryFile(delete=True)
    f.write(bytes_string)
    # flush buffered data so otool reads the complete file
    f.flush()
# pass output to otool
output_data = subprocess.check_output(["/usr/bin/otool", "-hv", f.name])
# show the data
print(output_data)
# close file - it will be automatically deleted
f.close()
return
# use keystone-engine.org to assemble
def assemble_keystone(arch, mode, code, syntax=0):
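    # example usage: assemble_keystone(KS_ARCH_X86, KS_MODE_64, ["nop", "ret"]) prints the encoding of each instruction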
ks = Ks(arch, mode)
if syntax != 0:
ks.syntax = syntax
print("\nKeystone output:\n----------")
for inst in code:
try:
encoding, count = ks.asm(inst)
except KsError as e:
print("[-] error: keystone failed to assemble: {:s}".format(e))
return
        out = []
        out.append(inst)
        out.append('->')
        for i in encoding:
            out.append("{:02x}".format(i))
        print(" ".join(out))
def cmd_asm32(debugger, command, result, dict):
'''32 bit x86 interactive Keystone based assembler. Use \'asm32 help\' for more information.'''
help = """
32 bit x86 interactive Keystone based assembler.
Syntax: asm32
Type one instruction per line. Finish with \'end\' or \'stop\'.
Keystone set to KS_ARCH_X86 and KS_MODE_32.
Requires Keystone and Python bindings from www.keystone-engine.org.
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
if CONFIG_KEYSTONE_AVAILABLE == 0:
print("[-] error: keystone python bindings not available. please install from www.keystone-engine.org.")
return
inst_list = []
while True:
line = raw_input('Assemble ("stop" or "end" to finish): ')
if line == 'stop' or line == 'end':
break
inst_list.append(line)
assemble_keystone(KS_ARCH_X86, KS_MODE_32, inst_list)
def cmd_asm64(debugger, command, result, dict):
'''64 bit x86 interactive Keystone based assembler. Use \'asm64 help\' for more information.'''
help = """
64 bit x86 interactive Keystone based assembler
Syntax: asm64
Type one instruction per line. Finish with \'end\' or \'stop\'.
Keystone set to KS_ARCH_X86 and KS_MODE_64.
Requires Keystone and Python bindings from www.keystone-engine.org.
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
if CONFIG_KEYSTONE_AVAILABLE == 0:
print("[-] error: keystone python bindings not available. please install from www.keystone-engine.org.")
return
inst_list = []
while True:
line = raw_input('Assemble ("stop" or "end" to finish): ')
if line == 'stop' or line == 'end':
break
inst_list.append(line)
assemble_keystone(KS_ARCH_X86, KS_MODE_64, inst_list)
def cmd_arm32(debugger, command, result, dict):
'''32 bit ARM interactive Keystone based assembler. Use \'arm32 help\' for more information.'''
help = """
32 bit ARM interactive Keystone based assembler
Syntax: arm32
Type one instruction per line. Finish with \'end\' or \'stop\'.
Keystone set to KS_ARCH_ARM and KS_MODE_ARM.
Requires Keystone and Python bindings from www.keystone-engine.org.
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
if CONFIG_KEYSTONE_AVAILABLE == 0:
print("[-] error: keystone python bindings not available. please install from www.keystone-engine.org.")
return
inst_list = []
while True:
line = raw_input('Assemble ("stop" or "end" to finish): ')
if line == 'stop' or line == 'end':
break
inst_list.append(line)
assemble_keystone(KS_ARCH_ARM, KS_MODE_ARM, inst_list)
def cmd_armthumb(debugger, command, result, dict):
'''32 bit ARM Thumb interactive Keystone based assembler. Use \'armthumb help\' for more information.'''
help = """
32 bit ARM Thumb interactive Keystone based assembler
Syntax: armthumb
Type one instruction per line. Finish with \'end\' or \'stop\'.
Keystone set to KS_ARCH_ARM and KS_MODE_THUMB.
Requires Keystone and Python bindings from www.keystone-engine.org.
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
if CONFIG_KEYSTONE_AVAILABLE == 0:
print("[-] error: keystone python bindings not available. please install from www.keystone-engine.org.")
return
inst_list = []
while True:
line = raw_input('Assemble ("stop" or "end" to finish): ')
if line == 'stop' or line == 'end':
break
inst_list.append(line)
assemble_keystone(KS_ARCH_ARM, KS_MODE_THUMB, inst_list)
def cmd_arm64(debugger, command, result, dict):
'''64 bit ARM interactive Keystone based assembler. Use \'arm64 help\' for more information.'''
help = """
64 bit ARM interactive Keystone based assembler
Syntax: arm64
Type one instruction per line. Finish with \'end\' or \'stop\'.
Keystone set to KS_ARCH_ARM64 and KS_MODE_ARM.
Requires Keystone and Python bindings from www.keystone-engine.org.
"""
cmd = command.split()
if len(cmd) != 0 and cmd[0] == "help":
print(help)
return
if CONFIG_KEYSTONE_AVAILABLE == 0:
print("[-] error: keystone python bindings not available. please install from www.keystone-engine.org.")
return
inst_list = []
while True:
line = raw_input('Assemble ("stop" or "end" to finish): ')
if line == 'stop' or line == 'end':
break
inst_list.append(line)
assemble_keystone(KS_ARCH_ARM64, KS_MODE_ARM, inst_list)
# XXX: help
def cmd_IphoneConnect(debugger, command, result, dict):
'''Connect to debugserver running on iPhone'''
help = """ """
global GlobalListOutput
GlobalListOutput = []
if len(command) == 0 or ":" not in command:
output("Connect to remote iPhone debug server")
output("\n")
output("iphone <ipaddress:port>")
output("\n")
output("iphone 192.168.0.2:5555")
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
return
res = lldb.SBCommandReturnObject()
lldb.debugger.GetCommandInterpreter().HandleCommand("platform select remote-ios", res)
if res.Succeeded() == True:
output(res.GetOutput())
else:
output("[-] Error running platform select remote-ios")
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
return
lldb.debugger.GetCommandInterpreter().HandleCommand("process connect connect://" + command, res)
if res.Succeeded() == True:
output("[+] Connected to iphone at : " + command)
else:
output(res.GetOutput())
result.PutCString("".join(GlobalListOutput))
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
def display_stack():
'''Hex dump current stack pointer'''
stack_addr = get_current_sp()
if stack_addr == 0:
return
err = lldb.SBError()
target = get_target()
membuff = get_process().ReadMemory(stack_addr, 0x100, err)
if err.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(stack_addr))
return
if len(membuff) == 0:
print("[-] error: not enough bytes read.")
return
output(hexdump(stack_addr, membuff, " ", 16, 4))
def display_data():
'''Hex dump current data window pointer'''
data_addr = DATA_WINDOW_ADDRESS
print(data_addr)
if data_addr == 0:
return
err = lldb.SBError()
target = get_target()
membuff = get_process().ReadMemory(data_addr, 0x100, err)
if err.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(stack_addr))
return
if len(membuff) == 0:
print("[-] error: not enough bytes read.")
return
output(hexdump(data_addr, membuff, " ", 16, 4))
# workaround for lldb bug regarding RIP addressing outside main executable
def get_rip_relative_addr(source_address):
err = lldb.SBError()
target = get_target()
inst_size = get_inst_size(source_address)
if inst_size <= 1:
print("[-] error: instruction size too small.")
return 0
# XXX: problem because it's not just 2 and 5 bytes
# 0x7fff53fa2180 (0x1180): 0f 85 84 01 00 00 jne 0x7fff53fa230a ; stack_not_16_byte_aligned_error
offset_bytes = get_process().ReadMemory(source_address+1, inst_size-1, err)
if err.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(source_address))
return 0
    if inst_size == 2:
        data = struct.unpack("b", offset_bytes)
    elif inst_size == 5:
        data = struct.unpack("i", offset_bytes)
    else:
        # other encodings not handled yet (see XXX note above) - bail out instead of raising a NameError
        return 0
    rip_call_addr = source_address + inst_size + data[0]
#output("source {:x} rip call offset {:x} {:x}\n".format(source_address, data[0], rip_call_addr))
return rip_call_addr
# XXX: instead of reading memory we can dereference right away in the evaluation
def get_indirect_flow_target(source_address):
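    # resolve the target of an indirect call/jmp: dereferenced memory operand, register operand, or direct 0x address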
err = lldb.SBError()
operand = get_operands(source_address)
#output("Operand: {}\n".format(operand))
# calls into a deferenced memory address
if "qword" in operand:
#output("dereferenced call\n")
deref_addr = 0
# first we need to find the address to dereference
if '+' in operand:
            x = re.search(r'\[([a-z0-9]{2,3} \+ 0x[0-9a-z]+)\]', operand)
if x == None:
return 0
value = get_frame().EvaluateExpression("$" + x.group(1))
if value.IsValid() == False:
return 0
deref_addr = int(value.GetValue(), 10)
if "rip" in operand:
deref_addr = deref_addr + get_inst_size(source_address)
else:
            x = re.search(r'\[([a-z0-9]{2,3})\]', operand)
if x == None:
return 0
value = get_frame().EvaluateExpression("$" + x.group(1))
if value.IsValid() == False:
return 0
deref_addr = int(value.GetValue(), 10)
# now we can dereference and find the call target
if get_pointer_size() == 4:
call_target_addr = get_process().ReadUnsignedFromMemory(deref_addr, 4, err)
return call_target_addr
elif get_pointer_size() == 8:
call_target_addr = get_process().ReadUnsignedFromMemory(deref_addr, 8, err)
return call_target_addr
if err.Success() == False:
return 0
return 0
# calls into a register
elif operand.startswith('r') or operand.startswith('e'):
#output("register call\n")
x = re.search('([a-z0-9]{2,3})', operand)
if x == None:
return 0
#output("Result {}\n".format(x.group(1)))
value = get_frame().EvaluateExpression("$" + x.group(1))
if value.IsValid() == False:
return 0
return int(value.GetValue(), 10)
# RIP relative calls
elif operand.startswith('0x'):
#output("direct call\n")
# the disassembler already did the dirty work for us
# so we just extract the address
x = re.search('(0x[0-9a-z]+)', operand)
if x != None:
#output("Result {}\n".format(x.group(0)))
return int(x.group(1), 16)
return 0
def get_ret_address():
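    # read the saved return address from the top of the stack (used when stopped at a ret instruction)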
err = lldb.SBError()
stack_addr = get_current_sp()
if stack_addr == 0:
return -1
ret_addr = get_process().ReadPointerFromMemory(stack_addr, err)
if err.Success() == False:
print("[-] error: Failed to read memory at 0x{:x}.".format(stack_addr))
return -1
return ret_addr
def is_sending_objc_msg():
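    # true if the branch target at the current pc resolves to the objc_msgSend symbol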
err = lldb.SBError()
target = get_target()
call_addr = get_indirect_flow_target(get_current_pc())
sym_addr = lldb.SBAddress(call_addr, target)
symbol = sym_addr.GetSymbol()
# XXX: add others?
if symbol.name != "objc_msgSend":
return False
return True
# XXX: x64 only
def display_objc():
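    # print the Objective-C class of the receiver and the selector C string pointed to by RSI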
pc_addr = get_current_pc()
err = lldb.SBError()
target = get_target()
options = lldb.SBExpressionOptions()
options.SetLanguage(lldb.eLanguageTypeObjC)
options.SetTrapExceptions(False)
# command = '(void*)object_getClass({})'.format(get_instance_object())
# value = get_frame().EvaluateExpression(command, options).GetObjectDescription()
classname_command = '(const char *)object_getClassName((id){})'.format(get_instance_object())
classname_value = get_frame().EvaluateExpression(classname_command)
if classname_value.IsValid() == False:
return
className = classname_value.GetSummary().strip('"')
selector_addr = get_gp_register("rsi")
membuff = get_process().ReadMemory(selector_addr, 0x100, err)
strings = membuff.split('\00')
if len(strings) != 0:
color("RED")
output('Class: ')
color("RESET")
output(className)
color("RED")
output(' Selector: ')
color("RESET")
output(strings[0])
def display_indirect_flow():
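    # for ret/call/jmp at the current pc, print the resolved target address and its symbol (plus the selector when calling objc_msgSend)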
target = get_target()
pc_addr = get_current_pc()
mnemonic = get_mnemonic(pc_addr)
if ("ret" in mnemonic) == True:
indirect_addr = get_ret_address()
output("0x%x -> %s" % (indirect_addr, lldb.SBAddress(indirect_addr, target).GetSymbol().name))
output("\n")
return
if "call" == mnemonic or "callq" == mnemonic or ("jmp" in mnemonic) == True:
# we need to identify the indirect target address
indirect_addr = get_indirect_flow_target(pc_addr)
output("0x%x -> %s" % (indirect_addr, lldb.SBAddress(indirect_addr, target).GetSymbol().name))
if is_sending_objc_msg() == True:
output("\n")
display_objc()
output("\n")
return
# find out the target address of ret, and indirect call and jmp
def get_indirect_flow_address(src_addr):
target = get_target()
instruction_list = target.ReadInstructions(lldb.SBAddress(src_addr, target), 1, 'intel')
if instruction_list.GetSize() == 0:
print("[-] error: not enough instructions disassembled.")
return -1
cur_instruction = instruction_list.GetInstructionAtIndex(0)
if cur_instruction.DoesBranch() == False:
return -1
if "ret" in cur_instruction.mnemonic:
ret_addr = get_ret_address()
return ret_addr
if ("call" in cur_instruction.mnemonic) or ("jmp" in cur_instruction.mnemonic):
# don't care about RIP relative jumps
if cur_instruction.operands.startswith('0x'):
return -1
indirect_addr = get_indirect_flow_target(src_addr)
return indirect_addr
# all other branches just return -1
return -1
# retrieve the module full path name an address belongs to
def get_module_name(src_addr):
target = get_target()
src_module = lldb.SBAddress(src_addr, target).module
module_name = src_module.file.fullpath
if module_name == None:
return ""
else:
return module_name
def get_objectivec_selector(src_addr):
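    # if the call target is objc_msgSend, return "[ClassName selector]" built from the receiver's class and the selector in RSI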
err = lldb.SBError()
target = get_target()
call_addr = get_indirect_flow_target(src_addr)
if call_addr == 0:
return ""
sym_addr = lldb.SBAddress(call_addr, target)
symbol = sym_addr.GetSymbol()
# XXX: add others?
if symbol.name != "objc_msgSend":
return ""
options = lldb.SBExpressionOptions()
options.SetLanguage(lldb.eLanguageTypeObjC)
options.SetTrapExceptions(False)
classname_command = '(const char *)object_getClassName((id){})'.format(get_instance_object())
classname_value = get_frame().EvaluateExpression(classname_command)
if classname_value.IsValid() == False:
return ""
className = classname_value.GetSummary().strip('"')
selector_addr = get_gp_register("rsi")
membuf = get_process().ReadMemory(selector_addr, 0x100, err)
strings = membuf.split('\00')
if len(strings) != 0:
return "[" + className + " " + strings[0] + "]"
else:
return "[" + className + "]"
return ""
# ------------------------------------------------------------
# The heart of lldbinit - when lldb stop this is where we land
# ------------------------------------------------------------
def HandleHookStopOnTarget(debugger, command, result, dict):
'''Display current code context.'''
# Don't display anything if we're inside Xcode
if os.getenv('PATH').startswith('/Applications/Xcode.app'):
return
global GlobalListOutput
global CONFIG_DISPLAY_STACK_WINDOW
global CONFIG_DISPLAY_FLOW_WINDOW
debugger.SetAsync(True)
# when we start the thread is still not valid and get_frame() will always generate a warning
# this way we avoid displaying it in this particular case
if get_process().GetNumThreads() == 1:
thread = get_process().GetThreadAtIndex(0)
if thread.IsValid() == False:
return
frame = get_frame()
if not frame:
return
    thread = frame.GetThread()
while True:
frame = get_frame()
thread = frame.GetThread()
if thread.GetStopReason() == lldb.eStopReasonNone or thread.GetStopReason() == lldb.eStopReasonInvalid:
time.sleep(0.001)
else:
break
GlobalListOutput = []
arch = get_arch()
if not is_i386() and not is_x64() and not is_arm():
        # unknown architecture - nothing we can display here
print("[-] error: Unknown architecture : " + arch)
return
color(COLOR_SEPARATOR)
if is_i386() or is_arm():
output("---------------------------------------------------------------------------------")
elif is_x64():
output("-----------------------------------------------------------------------------------------------------------------------")
color("BOLD")
output("[regs]\n")
color("RESET")
print_registers()
if CONFIG_DISPLAY_STACK_WINDOW == 1:
color(COLOR_SEPARATOR)
if is_i386() or is_arm():
output("--------------------------------------------------------------------------------")
elif is_x64():
output("----------------------------------------------------------------------------------------------------------------------")
color("BOLD")
output("[stack]\n")
color("RESET")
display_stack()
output("\n")
if CONFIG_DISPLAY_DATA_WINDOW == 1:
color(COLOR_SEPARATOR)
if is_i386() or is_arm():
output("---------------------------------------------------------------------------------")
elif is_x64():
output("-----------------------------------------------------------------------------------------------------------------------")
color("BOLD")
output("[data]\n")
color("RESET")
display_data()
output("\n")
if CONFIG_DISPLAY_FLOW_WINDOW == 1 and is_x64():
color(COLOR_SEPARATOR)
if is_i386() or is_arm():
output("---------------------------------------------------------------------------------")
elif is_x64():
output("-----------------------------------------------------------------------------------------------------------------------")
color("BOLD")
output("[flow]\n")
color("RESET")
display_indirect_flow()
color(COLOR_SEPARATOR)
if is_i386() or is_arm():
output("---------------------------------------------------------------------------------")
elif is_x64():
output("-----------------------------------------------------------------------------------------------------------------------")
color("BOLD")
output("[code]\n")
color("RESET")
# disassemble and add its contents to output inside
disassemble(get_current_pc(), CONFIG_DISASSEMBLY_LINE_COUNT)
color(COLOR_SEPARATOR)
if get_pointer_size() == 4: #is_i386() or is_arm():
output("---------------------------------------------------------------------------------------")
elif get_pointer_size() == 8: #is_x64():
output("-----------------------------------------------------------------------------------------------------------------------------")
color("RESET")
# XXX: do we really need to output all data into the array and then print it in a single go? faster to just print directly?
# was it done this way because previously disassembly was capturing the output and modifying it?
data = "".join(GlobalListOutput)
result.PutCString(data)
result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
return 0
``` |
{
"source": "jonti09/postgresql-manager",
"score": 2
} |
#### File: postgresql-manager/services/helpers.py
```python
from configparser import ConfigParser
import boto3
class S3Client:
@staticmethod
def get_client(config: ConfigParser):
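        # build the boto3 S3 client from the AWS profile named in the config, falling back to the default profile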
return boto3.Session(
profile_name=config.get('S3', 'profile', fallback='default')
).client('s3')
``` |
{
"source": "jontkoh/python-image-gallery",
"score": 2
} |
#### File: gallery/ui/app.py
```python
from flask import Flask
from flask import request
from flask import render_template
from flask import redirect
from flask import session
from werkzeug.utils import secure_filename
import json
import psycopg2
from ..data.db import *
from ..data.user import User
from ..data.postgres_user_dao import PostgresUserDAO
from ..aws.secrets import get_secret_flask_session
app = Flask(__name__)
UPLOAD_FOLDER = '/images'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = b'<KEY>'
connect()
def check_admin():
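    # only the hard-coded 'admin' account may use the admin pages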
return 'username' in session and session['username'] == 'admin'
def get_user_dao():
return PostgresUserDAO()
@app.route('/')
def hello_world():
return render_template('main.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
user = get_user_dao().get_user_by_username(request.form["username"])
if user is None or user.password != request.form["password"]:
return redirect('/invalidLogin')
else:
session['username'] = request.form["username"]
return redirect("/debugSession")
else:
return render_template('login.html')
@app.route('/invalidLogin')
def invalid_login():
return "Invalid"
@app.route('/debugSession')
def debugSession():
result = ""
for key, value in session.items():
result += key + "->" + str(value) + "<br />"
return result
@app.route('/upload')
def upload_file():
return render_template('upload.html')
@app.route('/uploader', methods = ['GET', 'POST'])
def uploaded_file():
if request.method == 'POST':
f = request.files['file']
f.save(secure_filename(f.filename))
return 'file uploaded successfully'
@app.route('/viewImages')
def view_images():
return 'images'
@app.route('/admin')
def hello_admin():
if not check_admin():
return redirect('/login')
connect()
row = listUsers()
return render_template('admin.html', rows=row)
@app.route('/admin/createUser')
def create_user():
if not check_admin():
return redirect('/login')
return render_template('createUser.html')
@app.route('/admin/userCreated', methods=['POST'])
def user_created():
if not check_admin():
return redirect('/login')
user = request.form['username']
password = request.form['password']
fullName = request.form['fullName']
connect()
createUser(user, password, fullName)
return render_template('userCreated.html', username=user, password=password, fullname=fullName)
@app.route('/admin/user')
def hello_user():
if not check_admin():
return redirect('/login')
return 'Hello, user'
@app.route('/admin/user/edit')
def edit():
if not check_admin():
return redirect('/login')
return render_template('edit.html')
@app.route('/admin/user/editUser', methods=['POST'])
def edit_user():
if not check_admin():
return redirect('/login')
return render_template('editUser.html')
@app.route('/admin/user/editConfirmed', methods=['POST'])
def edit_confirmed():
if not check_admin():
return redirect('/login')
user = request.form['username']
password = request.form['password']
fullName = request.form['fullName']
connect()
editUsers(user, password, fullName)
return render_template('editConfirmed.html', username=user,password=password, fullname=fullName)
@app.route('/admin/user/delete')
def delete_confirmation():
if not check_admin():
return redirect('/login')
return render_template('delete.html')
@app.route('/admin/user/deleteConfirmed', methods=['POST'])
def delete_confirmed():
if not check_admin():
return redirect('/login')
userToDelete = request.form['username']
connect()
deleteUser(userToDelete)
return render_template('deleteConfirmed.html', user=userToDelete)
``` |
{
"source": "jontlu/ECE303-Comm-Nets",
"score": 3
} |
#### File: ECE303-Comm-Nets/Project 2/sender.py
```python
import logging
import socket
import sys
import channelsimulator
import utils
import array
import hashlib
class Sender(object):
def __init__(self, inbound_port=50006, outbound_port=50005, timeout=1, debug_level=logging.INFO):
self.logger = utils.Logger(self.__class__.__name__, debug_level)
self.inbound_port = inbound_port
self.outbound_port = outbound_port
self.simulator = channelsimulator.ChannelSimulator(inbound_port=inbound_port, outbound_port=outbound_port,
debug_level=debug_level)
self.simulator.sndr_setup(timeout)
self.simulator.rcvr_setup(timeout)
class RDTSender(Sender):
senderRDTBit = 0
timeout = 0.1
packetStart = 0
packetEnd = MaxSegSize = 991
currentSegment = []
def __init__(self):
super(RDTSender, self).__init__()
def send(self, payload):
while self.packetStart < len(payload):
try:
if self.packetEnd > len(payload):
data = payload[self.packetStart:len(payload)]
else:
data = payload[self.packetStart:self.packetEnd]
self.currentSegment = str(self.senderRDTBit) + str(checksumGet(data)) + data
self.simulator.u_send(self.currentSegment)
self.simulator.sndr_socket.settimeout(self.timeout)
while 1:
returnSegment = self.simulator.u_receive()
returnRDTBit = returnSegment[0:1]
returnChecksum = returnSegment[1:33]
if (str(self.senderRDTBit) == returnRDTBit) and (str(checksumGet(data)) == returnChecksum):
self.simulator.sndr_socket.settimeout(None)
self.packetStart += self.MaxSegSize
self.packetEnd += self.MaxSegSize
self.senderRDTBit = 1 - self.senderRDTBit
break
except socket.timeout:
self.simulator.u_send(self.currentSegment)
print("DONE")
sys.exit()
def checksumGet(data):
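    # MD5 hex digest of the payload, used as the segment checksum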
return hashlib.md5(data).hexdigest()
if __name__ == "__main__":
sender = RDTSender()
sender.send(sys.stdin.read())
```
#### File: ECE303-Comm-Nets/Project 2/utils.py
```python
import datetime
import logging
class Logger(object):
def __init__(self, name, debug_level):
now = datetime.datetime.now()
logging.basicConfig(filename='{}_{}.log'.format(name, datetime.datetime.strftime(now, "%Y_%m_%dT%H%M%S")),
level=debug_level)
@staticmethod
def info(message):
logging.info(message)
@staticmethod
def debug(message):
logging.debug(message)
``` |
{
"source": "Jontom01/Data-Structures",
"score": 4
} |
#### File: Python/GraphAlgorithms/AdjacencyListGraph.py
```python
class Node:
def __init__(self, data, next=None):
self.data = data
self.next = next
def getData(self):
return self.data
#Adjanceny List graph implementation
class ALGraph:
#initialize list
def __init__(self):
self.table = []
#add a vertex to the graph
def insertVertex(self, data):
temp = Node(data)
self.table.append(temp)
#search the graph for a certain vertex, returns the List index of the target
def search(self, target):
counter = 0
for i in self.table:
if i.data == target:
return counter
counter += 1
return None
    #append a neighbor node to the given vertex's adjacency list
def adjacentTo(self, vertex, neighbor):
tableIndex = self.search(vertex)
neighborNode = Node(neighbor)
curr = self.table[tableIndex]
while curr.next != None:
curr = curr.next
curr.next = neighborNode
def pointUndirected(self, vertex, neighbor):
self.adjacentTo(vertex, neighbor)
self.adjacentTo(neighbor, vertex)
def pointDirected(self, vertex, neighbor):
self.adjacentTo(vertex, neighbor)
def printGraph(self):
for node in self.table:
curr = node
while(curr != None):
print(curr.data, end="->")
curr = curr.next
print()
test = ALGraph()
test.insertVertex('A')
test.insertVertex('B')
test.insertVertex('C')
test.insertVertex('D')
test.insertVertex('E')
test.pointUndirected('A', 'B')
test.pointUndirected('A', 'C')
test.pointUndirected('B', 'D')
test.pointUndirected('D', 'E')
test.pointUndirected('C', 'E')
test.pointUndirected('B', 'E')
test.printGraph()
```
#### File: Data-Structures/Python/MaxHeap.py
```python
class MaxHeap:
def __init__(self, arr):
self.arr = []
self.arr.append(len(arr))
for data in arr:
self.arr.append(data)
self._createMaxHeap()
def _createMaxHeap(self):
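        # build the heap by sifting down every internal node, from the last parent back to the root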
start = int(self.arr[0] / 2)
while(start > 0):
self._swap(start)
start -= 1
def _swap(self, current):
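        # sift the value at index current down until both of its children are smaller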
while True:
greaterChild = self._greaterChild(current)
if(greaterChild == -1):
break
if self.arr[greaterChild] > self.arr[current]:
tmp = self.arr[greaterChild]
self.arr[greaterChild] = self.arr[current]
self.arr[current] = tmp
current = greaterChild
else:
break
def _greaterChild(self, objIndex):
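        # return the index of the larger existing child of objIndex, or -1 if it is a leaf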
objLeftChild = int(objIndex * 2)
objRightChild = objLeftChild + 1
if objLeftChild < len(self.arr) and objRightChild < len(self.arr):
greaterChild = max(self.arr[objLeftChild], self.arr[objRightChild])
if greaterChild == self.arr[objLeftChild]:
return objLeftChild
else:
return objRightChild
        elif objLeftChild >= len(self.arr) and objRightChild >= len(self.arr):
            return -1
        elif objLeftChild >= len(self.arr):
return objRightChild
else:
return objLeftChild
def insert(self, obj):
self.arr[0] += 1
self.arr.append(obj)
self._createMaxHeap()
def remove(self):
self.arr[0] -= 1
out = self.arr[1]
self.arr.pop(1)
self._createMaxHeap()
return out
def print(self):
print(self.arr)
x = [2,3,34,4,5,6,7,3]
test = MaxHeap(x)
test.insert(5)
test.print()
print(test.remove())
``` |
{
"source": "jontonsoup4/ascii_art",
"score": 4
} |
#### File: ascii_art/ascii_art/ascii.py
```python
from PIL import Image
class ASCIIArt:
GRAYSCALE = " .,:'`\";~-_|/=\<+>?)*^(!}{v[I&]wrcVisJmYejoWn%Xtzux17lCFLT3fSZ2a@y4GOKMU#APk605Ed8Qb9NhBDHRqg$p"
FULL_RANGE = GRAYSCALE
HALF_RANGE = GRAYSCALE[::2]
QUARTER_RANGE = GRAYSCALE[::4]
EIGHTH_RANGE = GRAYSCALE[::8]
BLOCK = "#"
def __init__(self, picture_path, scale=1):
"""
Converts an image to ASCII Art
:param picture_path: path to ascii_picture name including extension
:param scale: 1 is roughly the original grey_image size. Can be larger (3), can be smaller (1/4)
"""
if '.' not in picture_path:
picture_path += '.jpg'
self.picture_path = picture_path
self.color_image = Image.open(picture_path)
self.scale = 7 / scale
self.width = int(self.color_image.size[0] / self.scale)
self.height = int(self.color_image.size[1] / (self.scale * 1.8))
self.color_image = self.color_image.resize((self.width, self.height), Image.BILINEAR)
self.grey_image = self.color_image.convert("L")
self.ascii_picture = []
def draw_ascii(self, char_list=' .:-=+*#%@', high_pass=0, low_pass=0, curve=1.2):
"""
Draws ASCII art using the given parameters. Using an alternate char_set allows for custom ASCII. Use
sort() to return the correct order if you aren't sure what the greyscale order of the character set is.
:param char_list: sets the characters used in ASCII rendering
:param high_pass: 1 to 100 removes darker areas
        :param low_pass: 1 to 100 removes lighter areas
        :param curve: brightness curve; 1 is linear, curve > 1 is lighter, 0 < curve < 1 is darker, curve < 0 is grey
:return: list of all characters in the ASCII piece. Use .join() to display the text
"""
for y in range(0, self.grey_image.size[1]):
for x in range(0, self.grey_image.size[0]):
brightness = 255 - self.grey_image.getpixel((x, y))
if curve < 1:
choice = int(brightness * (len(char_list) / 255) ** curve)
else:
choice = int(len(char_list) * (brightness / 255) ** curve)
if choice >= len(char_list):
choice = len(char_list) - 1
self.ascii_picture.append(char_list[choice])
self.ascii_picture.append("\n")
if low_pass > 0:
low_pass = int(low_pass * len(char_list) / 100)
for i in range(low_pass):
self.ascii_picture = [char.replace((char_list[i]), ' ') for char in self.ascii_picture]
if high_pass > 0:
high_pass = int(high_pass * len(char_list) / 100)
            # Walk the darkest characters from the end of char_list
            # (char_list[-1], char_list[-2], ...) and blank them out.
            for i in range(high_pass):
                self.ascii_picture = [char.replace(char_list[-(i + 1)], ' ') for char in self.ascii_picture]
return self.ascii_picture
def draw_color_ascii(self, char_list=' .:-=+*#%@', high_pass=0, low_pass=0, curve=1.2):
"""
Color adaptation of draw_ascii.
:param char_list: sets the characters used in ASCII rendering
:param high_pass: 1 to 100 removes darker areas
        :param low_pass: 1 to 100 removes lighter areas
        :param curve: brightness curve; 1 is linear, curve > 1 is lighter, 0 < curve < 1 is darker, curve < 0 is grey
:return: list of lists. list[0] contains ASCII character, list[1] contains color value
"""
ascii_picture = self.draw_ascii(char_list, high_pass, low_pass, curve)
color_code = self.get_color_codes()
ascii_color = []
count = 0
for index, char in enumerate(ascii_picture):
if char != '\n':
ascii_color.append([char, color_code[count]])
count += 1
else:
ascii_color.append([char, '#000000'])
return ascii_color
def draw_html(self, char_list=' .:-=+*#%@', high_pass=0, low_pass=0, curve=1.2, background_color='white'):
"""
HTML adaptation of draw_ascii. Formats colored ASCII to HTML
:param char_list: sets the characters used in ASCII rendering
:param high_pass: 1 to 100 removes darker areas
        :param low_pass: 1 to 100 removes lighter areas
        :param curve: brightness curve; 1 is linear, curve > 1 is lighter, 0 < curve < 1 is darker, curve < 0 is grey
        :param background_color: background color of the generated HTML page (defaults to 'white')
        :return: string of HTML markup for the colored ASCII art
"""
init_draw = self.draw_ascii(char_list, high_pass, low_pass, curve)
ascii_picture = [char for char in init_draw if char != '\n']
num_breaks = len(init_draw) - len(ascii_picture)
hex_codes = self.get_color_codes()
html = ['<body bgcolor={}><pre>'.format(background_color)]
for index, char in enumerate(ascii_picture):
if index % (len(ascii_picture) / num_breaks) == 0 and index > 0:
html.append('<br>')
html.append('<span style="color: {0};">{1}</span>'.format(hex_codes[index], char))
html.append('</pre></body>')
return ''.join(html)
def get_color_codes(self, mode='hex'):
"""
Gets the color value of every pixel in the transformed picture
:param mode: hex or rgb
:return: list of color codes in the picture
"""
color_codes = []
for y in range(0, self.color_image.size[1]):
for x in range(0, self.color_image.size[0]):
r, g, b, = self.color_image.getpixel((x, y))
if mode.lower() == 'hex':
color_codes.append('#{:02x}{:02x}{:02x}'.format(r, g, b))
elif mode.lower() == 'rgb':
color_codes.append((r, g, b))
else:
print('Please choose hex or rgb')
return color_codes
@staticmethod
def sort(char_list):
"""
Sorts a string of characters from lightest to darkest when represented in ASCII art
:param char_list: string of characters
:return: sorted string of characters from lightest to darkest
"""
output = []
for i in char_list:
output.append([i, ASCIIArt.GRAYSCALE.index(i)])
output = [x for (x, y) in sorted(output, key=lambda x: x[1])]
return ''.join(output)
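# Usage sketch (not part of the original module; 'photo.jpg' and 'photo.html'
# are placeholder file names):
#
#   art = ASCIIArt('photo.jpg', scale=0.5)
#   print(''.join(art.draw_ascii(curve=1.0)))        # plain-text rendering
#   html = art.draw_html(background_color='black')   # colored HTML rendering
#   with open('photo.html', 'w') as f:
#       f.write(html)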
``` |
{
"source": "jon-torodash/hirefire",
"score": 2
} |
#### File: hirefire/procs/celery.py
```python
from __future__ import absolute_import
from collections import Counter
from itertools import chain
from logging import getLogger
from celery.app import app_or_default
try:
from librabbitmq import ChannelError
except ImportError:
try:
from amqp.exceptions import ChannelError
except ImportError:
# No RabbitMQ API wrapper installed, different celery broker used
ChannelError = Exception
from ..utils import KeyDefaultDict
from . import Proc
logger = getLogger('hirefire')
class CeleryInspector(KeyDefaultDict):
"""
A defaultdict that manages the celery inspector cache.
"""
def __init__(self, app, simple_queues=False):
super(CeleryInspector, self).__init__(self.get_status_task_counts)
self.app = app
self.simple_queues = simple_queues
self.route_queues = None
@classmethod
def simple_queues(cls, *args, **kwargs):
return cls(*args, simple_queues=True, **kwargs)
def get_route_queues(self):
"""Find the queue to each active routing pair.
Cache to avoid additional calls to inspect().
Returns a mapping from (exchange, routing_key) to queue_name.
"""
if self.route_queues is not None:
return self.route_queues
worker_queues = self.inspect['active_queues']
active_queues = chain.from_iterable(worker_queues.values())
self.route_queues = {
(queue['exchange']['name'], queue['routing_key']): queue['name']
for queue in active_queues
}
return self.route_queues
@property
def inspect(self):
"""Proxy the inspector.
Make it easy to get the return value from an inspect method.
Use it like a dictionary, with the desired method as the key.
"""
allowed_methods = ['active_queues', 'active', 'reserved', 'scheduled']
inspect = self.app.control.inspect()
def get_inspect_value(method):
if method not in allowed_methods:
raise KeyError('Method not allowed: {}'.format(method))
return getattr(inspect, method)() or {}
return KeyDefaultDict(get_inspect_value)
def get_queue_fn(self, status):
"""Get a queue identifier function for the given status.
scheduled tasks have a different layout from reserved and
active tasks, so we need to look up the queue differently.
Additionally, if the ``simple_queues`` flag is True, then
we can shortcut the lookup process and avoid getting
the route queues.
"""
if not self.simple_queues:
route_queues = self.get_route_queues()
def identify_queue(delivery_info):
exchange = delivery_info['exchange']
routing_key = delivery_info['routing_key']
if self.simple_queues:
# If the exchange is '', use the routing_key instead
return exchange or routing_key
try:
return route_queues[exchange, routing_key]
except KeyError:
msg = 'exchange, routing_key pair not found: {}'.format(
(exchange, routing_key))
logger.warning(msg)
return None # Special queue name, not expected to be used
def get_queue(task):
if status == 'scheduled':
return identify_queue(task['request']['delivery_info'])
return identify_queue(task['delivery_info'])
return get_queue
def get_status_task_counts(self, status):
"""Get the tasks on all queues for the given status.
This is called lazily to avoid running long methods when not needed.
"""
if status not in ['active', 'reserved', 'scheduled']:
raise KeyError('Invalid task status: {}'.format(status))
tasks = chain.from_iterable(self.inspect[status].values())
queues = map(self.get_queue_fn(status), tasks)
if status == 'scheduled':
queues = set(queues) # Only count each queue once
return Counter(queues)
class CeleryProc(Proc):
"""
A proc class for the `Celery <http://celeryproject.org>`_ library.
:param name: the name of the proc (required)
:param queues: list of queue names to check (required)
:param app: the Celery app to check for the queues (optional)
:type name: str
:type queues: str or list
:type app: :class:`~celery.Celery`
Declarative example::
from celery import Celery
from hirefire.procs.celery import CeleryProc
celery = Celery('myproject', broker='amqp://guest@localhost//')
class WorkerProc(CeleryProc):
name = 'worker'
queues = ['celery']
app = celery
Or a simpler variant::
worker_proc = CeleryProc('worker', queues=['celery'], app=celery)
In case you use one of the non-standard Celery clients (e.g.
django-celery) you can leave the ``app`` attribute empty because
Celery will automatically find the correct Celery app::
from hirefire.procs.celery import CeleryProc
class WorkerProc(CeleryProc):
name = 'worker'
queues = ['celery']
Querying the tasks that are on the workers is a more expensive
process, and if you're sure that you don't need them, then you
can improve the response time by not looking for some statuses.
The default statuses that are looked for are ``active``,
``reserved``, and ``scheduled``. You can configure to *not*
look for those by overriding the ``inspect_statuses`` property.
For example, this proc would not look at any tasks held by
the workers.
::
class WorkerProc(CeleryProc):
name = 'worker'
queues = ['celery']
inspect_statuses = []
``scheduled`` tasks are tasks that have been triggered with an
``eta``, the most common example of which is using ``retry``
on tasks. If you're sure you aren't using these tasks, you can
skip querying for these tasks.
``reserved`` tasks are tasks that have been taken from the queue
by the main process (coordinator) on the worker dyno, but have
    not yet been given to a worker to run. If you've configured Celery
to only fetch the tasks that it is currently running, then you
may be able to skip querying for these tasks. See
http://docs.celeryproject.org/en/latest/userguide/optimizing.html#prefetch-limits
    for more information.
``active`` tasks are currently running tasks. If your tasks are
short-lived enough, then you may not need to look for these tasks.
If you choose to not look at active tasks, look out for
``WorkerLostError`` exceptions.
See https://github.com/celery/celery/issues/2839 for more information.
If you have a particular simple case, you can use a shortcut to
eliminate one inspect call when inspecting statuses. The
``active_queues`` inspect call is needed to map ``exchange`` and
``routing_key`` back to the celery ``queue`` that it is for. If all
of your ``queue``, ``exchange``, and ``routing_key`` are the same
(which is the default in Celery), then you can use the
``simple_queues = True`` flag to note that all the queues in the
proc use the same name for their ``exchange`` and ``routing_key``.
This defaults to ``False`` for backward compatibility, but if
your queues are using this simple setup, you're encouraged to use
it like so:
::
class WorkerProc(CeleryProc):
name = 'worker'
queues = ['celery']
simple_queues = True
Because of how this is implemented, you will almost certainly
wish to use this feature on all of your procs, or on none of
them. This is because both variants have separate caches that
make separate calls to the inspect methods, so having both
kinds present will mean that the inspect calls will be run twice.
"""
#: The name of the proc (required).
name = None
#: The list of queues to check (required).
queues = ['celery']
#: The Celery app to check for the queues (optional).
app = None
#: The Celery task status to check for on workers (optional).
#: Valid options are 'active', 'reserved', and 'scheduled'.
inspect_statuses = ['active', 'reserved', 'scheduled']
#: Whether or not the exchange and routing_key are the same
#: as the queue name for the queues in this proc.
#: Default: False.
simple_queues = False
def __init__(self, app=None, *args, **kwargs):
super(CeleryProc, self).__init__(*args, **kwargs)
if app is not None:
self.app = app
self.app = app_or_default(self.app)
@staticmethod
def _get_redis_task_count(channel, queue):
return channel.client.llen(queue)
@staticmethod
def _get_rabbitmq_task_count(channel, queue):
try:
return channel.queue_declare(queue=queue, passive=True).message_count
except ChannelError:
logger.warning("The requested queue %s has not been created yet", queue)
return 0
def quantity(self, cache=None, **kwargs):
"""
Returns the aggregated number of tasks of the proc queues.
"""
with self.app.connection_or_acquire() as connection:
channel = connection.channel()
# Redis
if hasattr(channel, '_size'):
return sum(self._get_redis_task_count(channel, queue) for queue in self.queues)
# RabbitMQ
count = sum(self._get_rabbitmq_task_count(channel, queue) for queue in self.queues)
if cache is not None and self.inspect_statuses:
count += self.inspect_count(cache)
return count
def inspect_count(self, cache):
"""Use Celery's inspect() methods to see tasks on workers."""
cache.setdefault('celery_inspect', {
True: KeyDefaultDict(CeleryInspector.simple_queues),
False: KeyDefaultDict(CeleryInspector),
})
celery_inspect = cache['celery_inspect'][self.simple_queues][self.app]
return sum(
celery_inspect[status][queue]
for status in self.inspect_statuses
for queue in self.queues
)
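# Usage sketch (not from the original module): a proc like the WorkerProc
# examples in the docstring above is typically wired up through the package's
# Django settings, e.g. (assuming the HIREFIRE_TOKEN/HIREFIRE_PROCS settings
# used by this package's Django integration):
#
#   HIREFIRE_TOKEN = 'your-hirefire-token'
#   HIREFIRE_PROCS = ['myproject.procs.WorkerProc']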
```
#### File: contrib/django/test_middleware.py
```python
class TestHireFireMiddleware:
def test_test_page(self, client):
response = client.get('/hirefire/test')
assert response.status_code == 200
def test_token(self, client, settings):
response = client.get('/hirefire/%s/info' % settings.HIREFIRE_TOKEN)
assert response.status_code == 200
response = client.get('/hirefire/not-the-token-%s/info' % settings.HIREFIRE_TOKEN)
assert response.status_code == 404
``` |
{
"source": "jontrulson/mraa",
"score": 2
} |
#### File: mraa/tests/check_clean.py
```python
import unittest as u
import re, fnmatch, os
rootDir = 'src/java'
swigtypeStr = 'SWIGTYPE'
class Clean(u.TestCase):
def test_existing_swigtype(self):
unclean = []
for fileName in os.listdir(rootDir):
if swigtypeStr in fileName:
unclean.append(fileName)
self.assertEqual( len(unclean), 0,
"\nmraa contains unclean Java bindings:\n" + \
"\n".join(unclean) + "\n\n")
if __name__ == '__main__':
u.main()
```
#### File: tests/mock/gpio_checks_basic.py
```python
import mraa as m
import unittest as u
MRAA_TEST_PIN = 0
class GpioChecksBasic(u.TestCase):
def setUp(self):
self.pin = m.Gpio(MRAA_TEST_PIN)
def tearDown(self):
del self.pin
def test_gpio_state_after_init(self):
# After GPIO init it should be in INPUT and LOW state
self.assertEqual(self.pin.read(), 0, "GPIO is in a wrong state after init")
def test_gpio_dir_after_init(self):
        # After GPIO init it should be in INPUT direction
self.assertEqual(self.pin.readDir(), m.DIR_IN, "GPIO has wrong direction after init")
def test_get_pin_num(self):
self.assertEqual(self.pin.getPin(), MRAA_TEST_PIN, "Returned GPIO pin number is incorrect")
if __name__ == '__main__':
u.main()
```
#### File: tests/mock/i2c_checks_addr.py
```python
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksAddr(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_address(self):
self.assertEqual(self.i2c.address(0x10),
m.SUCCESS,
"Setting address to 0x10 did not return success")
def test_i2c_address_invalid_bigger_than_max(self):
# For standard 7-bit addressing 0x7F is max address
self.assertEqual(self.i2c.address(0xFF),
m.ERROR_INVALID_PARAMETER,
"Setting address to 0xFF did not return INVALID_PARAMETER")
def test_i2c_address_invalid_smaller_than_min(self):
self.assertRaises(OverflowError, self.i2c.address, -100)
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/i2c_checks_read.py
```python
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksRead(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_read_full_reg_range(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
expected_res = bytearray([MRAA_MOCK_I2C_DATA_INIT_BYTE for i in range(MRAA_MOCK_I2C_DATA_LEN)])
res = self.i2c.read(MRAA_MOCK_I2C_DATA_LEN)
self.assertEqual(res, expected_res, "I2C read() of full register range returned unexpected data")
def test_i2c_read_part_reg_range(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
expected_res = bytearray([MRAA_MOCK_I2C_DATA_INIT_BYTE for i in range(MRAA_MOCK_I2C_DATA_LEN - 1)])
res = self.i2c.read(MRAA_MOCK_I2C_DATA_LEN - 1)
self.assertEqual(res, expected_res, "I2C read() of partial register range returned unexpected data")
def test_i2c_read_invalid_addr(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
self.assertRaises(IOError, self.i2c.read, MRAA_MOCK_I2C_DATA_LEN)
def test_i2c_read_invalid_len_bigger_than_max(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
self.assertRaises(IOError, self.i2c.read, MRAA_MOCK_I2C_DATA_LEN + 1)
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/i2c_checks_write_byte.py
```python
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksWriteByte(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_write_byte(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
test_byte = 0xEE
self.assertEqual(self.i2c.writeByte(test_byte),
m.SUCCESS,
"I2C writeByte() did not return success")
self.assertEqual(self.i2c.readByte(),
test_byte,
"I2C readByte() after writeByte() returned unexpected data")
def test_i2c_write_byte_invalid_addr(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
self.assertEqual(self.i2c.writeByte(0xEE),
m.ERROR_UNSPECIFIED,
"I2C writeByte() to invalid address did not return error")
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/spi_checks_bit_per_word.py
```python
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksBitPerWord(u.TestCase):
def setUp(self):
self.spi = m.Spi(MRAA_SPI_BUS_NUM)
def tearDown(self):
del self.spi
def test_spi_bit_per_word(self):
TEST_BIT_PER_WORD = 16
self.assertEqual(self.spi.bitPerWord(TEST_BIT_PER_WORD),
m.SUCCESS,
"Setting bit per word to %d did not return success" %TEST_BIT_PER_WORD)
    def test_spi_bit_per_word_invalid_smaller_than_min(self):
TEST_BIT_PER_WORD = -100
self.assertRaises(OverflowError, self.spi.bitPerWord, TEST_BIT_PER_WORD)
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/spi_checks_write_byte.py
```python
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksWriteByte(u.TestCase):
def setUp(self):
self.spi = m.Spi(MRAA_SPI_BUS_NUM)
def tearDown(self):
del self.spi
def test_spi_write_byte(self):
TEST_BYTE = 0xEE
self.assertEqual(self.spi.writeByte(TEST_BYTE),
TEST_BYTE ^ MOCK_SPI_REPLY_DATA_MODIFIER_BYTE,
"SPI writeByte() returned unexpected data")
def test_spi_write_byte_invalid_bigger_than_max(self):
TEST_VALUE = 0xEEFF
self.assertRaises(OverflowError, self.spi.writeByte, TEST_VALUE)
def test_spi_write_byte_invalid_smaller_than_min(self):
TEST_VALUE = -1
self.assertRaises(OverflowError, self.spi.writeByte, TEST_VALUE)
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/spi_checks_write.py
```python
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksWrite(u.TestCase):
def setUp(self):
self.spi = m.Spi(MRAA_SPI_BUS_NUM)
def tearDown(self):
del self.spi
def test_spi_write(self):
DATA_TO_WRITE = bytearray([0xEE for i in range(MOCK_SPI_TEST_DATA_LEN)])
DATA_TO_EXPECT = bytearray([0xEE ^ MOCK_SPI_REPLY_DATA_MODIFIER_BYTE for i in range(MOCK_SPI_TEST_DATA_LEN)])
self.assertEqual(self.spi.write(DATA_TO_WRITE),
DATA_TO_EXPECT,
"SPI write() returned unexpected data")
if __name__ == "__main__":
u.main()
```
#### File: tests/mock/uart_checks_read.py
```python
import mraa as m
import unittest as u
from uart_checks_shared import *
class UartChecksRead(u.TestCase):
def setUp(self):
self.uart = m.Uart(MRAA_UART_DEV_NUM)
def tearDown(self):
del self.uart
def test_uart_read(self):
TEST_DATA_LEN = 10
EXPECTED_RESULT = bytearray([MOCK_UART_DATA_BYTE for x in range(TEST_DATA_LEN)])
self.assertEqual(self.uart.read(TEST_DATA_LEN),
EXPECTED_RESULT,
"Running UART read(%d) did not return %s" % (TEST_DATA_LEN, repr(EXPECTED_RESULT)))
def test_uart_readStr(self):
TEST_DATA_LEN = 10
EXPECTED_RESULT = chr(MOCK_UART_DATA_BYTE) * TEST_DATA_LEN
self.assertEqual(self.uart.readStr(TEST_DATA_LEN),
EXPECTED_RESULT,
"Running UART readStr(%d) did not return %s" % (TEST_DATA_LEN, EXPECTED_RESULT))
if __name__ == "__main__":
u.main()
``` |
{
"source": "jontsai/hacktoolkit",
"score": 3
} |
#### File: yahoo/weather/weather.py
```python
import getopt
import json
import requests
import os
import sys
import urllib
YAHOO_WEATHER_DIR = os.path.dirname(__file__)
YAHOO_DIR = os.path.realpath(os.path.join(YAHOO_WEATHER_DIR, '..').replace('\\', '/'))
sys.path.append(YAHOO_DIR)
from geo.geoplanet.geoplanet import get_woeid
WEATHER_API_BASE_URL = 'http://weather.yahooapis.com/forecastrss?w=%(woeid)s'
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv = None):
OPT_STR = 'hfc'
OPT_LIST = [
'help',
'fahrenheit',
'celsius',
]
is_celsius = False
if argv is None:
argv = sys.argv
try:
try:
progname = argv[0]
opts, args = getopt.getopt(argv[1:],
OPT_STR,
OPT_LIST)
except getopt.error, msg:
raise Usage(msg)
# process options
for o, a in opts:
if o in ('-h', '--help'):
print __doc__
sys.exit(0)
elif o in ('-f', '--fahrenheit'):
is_celsius = False
elif o in ('-c', '--celsius'):
is_celsius = True
# process arguments
if len(args) == 1:
weather = get_weather(args[0], is_celsius=is_celsius)
print weather
else:
raise Usage('Incorrect arguments')
except Usage, err:
print >> sys.stderr, err.msg
print >> sys.stderr, "for help use --help"
return 3.14159
def get_weather(location, woeid=None, is_celsius=False):
if woeid is None:
woeid = get_woeid(location)
values = {
'woeid': woeid,
}
url = WEATHER_API_BASE_URL % values
response = requests.get(url)
return response.text
if __name__ == '__main__':
main()
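# Example invocations (illustrative only; this is legacy Python 2 code and the
# Yahoo Weather endpoint it calls has since been retired):
#
#   python weather.py "Sunnyvale, CA"
#   python weather.py --help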
``` |
{
"source": "jontsnz/dummy-sensor",
"score": 3
} |
#### File: jontsnz/dummy-sensor/dummy-sensor.py
```python
import os
import sys
import argparse
import yaml
import random
import time
from datetime import datetime
from datetime import timedelta
from abc import ABC, abstractmethod
import csv
import json
import paho.mqtt.client as mqtt
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class ReadingsOutputter(ABC):
"""Abstract readings output class
"""
@abstractmethod
def output(self, readings):
""" Implement this method to write readings somewhere
"""
pass
class ScreenJsonOutputter(ReadingsOutputter):
"""Class for sending readings to the screen in JSON format
"""
def __init__(self):
super().__init__()
def output(self, readings):
logger.info(json.dumps(dict(readings)))
class CSVOutputter(ReadingsOutputter):
""" Class for sending readings to a CSV file
"""
def __init__(self, filename):
"""Initialise class
Parameters
----------
filename : str
The name of the CSV file to create
"""
super().__init__()
self._csvfile = csv.writer(open(filename, 'w'), quoting = csv.QUOTE_NONNUMERIC)
self._first_line = True
def output(self, readings):
logger.info('Writing line to CSV file...%s' % (readings[0][1]))
if self._first_line:
self._csvfile.writerow(t[0] for t in readings)
self._first_line = False
self._csvfile.writerow(t[1] for t in readings)
class MqttOutputter(ReadingsOutputter):
""" Class for sending readings to an MQTT topic
"""
def __init__(self, host, port, topic):
"""Initialise class
Parameters
----------
host : str
MQTT host name
port : int
MQTT port
topic : str
MQTT topic where readings will be pushed to
"""
super().__init__()
self._client = mqtt.Client()
self._topic = topic
keepalive = 60
self._client.connect(host,port,keepalive)
self.silent = False
def output(self, readings):
if not self.silent:
logger.info('Pushing readings to MQTT...%s' % (readings[0][1]))
self._client.publish(self._topic,json.dumps(dict(readings)))
def __del__(self):
"""Class destructor. Clean up MQTT connection.
"""
self._client.disconnect()
class Station:
""" This class defines a sensor station that has a
name and may contain 1 or more sensors.
"""
def __init__(self, station_config):
"""Class initialise.
Parameters
----------
station_config : dict
Dictionary containing station configuration (including sensors)
"""
self.station_name = station_config['station_name']
self.sensors = []
for sensor_config in station_config['sensors']:
self.sensors.append(Sensor(sensor_config))
class Sensor:
"""This class defines a given sensor: its name, possible values and precision.
"""
def __init__(self, sensor_config):
"""Class initialise.
Parameters
----------
sensor_config : dict
Configuration for this sensor
"""
self.name = list(sensor_config.keys())[0]
self._min = sensor_config[self.name]['min']
self._max = sensor_config[self.name]['max']
self.reading = sensor_config[self.name]['start']
self._last_direction = 1
self._dp = sensor_config[self.name]['dp']
self._max_step = sensor_config[self.name]['max_step']
def generate_reading(self):
"""Generate a single reading for this sensor
Returns
-------
float
            A randomly generated reading for this sensor
"""
step = self._max_step * random.random()
if random.random() < 0.9:
direction = self._last_direction
else:
direction = -1 * self._last_direction
if (self.reading + (step * direction) > self._max) or (self.reading + (step * direction) < self._min):
direction = -1 * direction
reading = round(self.reading + (step * direction),self._dp)
reading = min(max(reading, self._min), self._max)
self.reading = reading
self._last_direction = direction
return reading
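# Configuration sketch (inferred from the keys used above, not an official
# schema): the YAML config file is expected to look roughly like
#
#   settings:
#     timestamp_format: "%Y-%m-%d %H:%M:%S"
#   station:
#     station_name: "Demo Station"
#     sensors:
#       - Temperature:
#           min: 0
#           max: 40
#           start: 20
#           dp: 1
#           max_step: 0.5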
def generate_readings(config, intervals_secs, max_iterations, outputter):
"""Continuosly generate readings for a station and send them to the outputter.
This method will run forever if no interatio limit is provided.
Parameters
----------
config : dict
Dictionary containing station configuration (including sensors)
    intervals_secs : int
How many seconds to wait between each generated reading
max_iterations : int
How many readings to generate before stopping. Never stops if -1.
outputter : obj
The ReadingsOutputter to send sensor readings to
"""
station = Station(config['station'])
logger.info('Generating %d readings for station: %s' % (max_iterations, station.station_name))
cnt = 0
while (cnt < max_iterations) or (max_iterations == -1):
cnt += 1
timestamp = datetime.now().strftime(config['settings']['timestamp_format'])
readings = [('TIMESTAMP', timestamp),('RECORD', cnt),('Station', station.station_name)]
readings.extend([(s.name, s.generate_reading()) for s in station.sensors])
outputter.output(readings)
time.sleep(intervals_secs)
def generate_backfill_readings(config, intervals_secs, outputter, from_date):
"""Generate backdated sensor readings for a station from a given date until now
Parameters
----------
config : dict
Dictionary containing station configuration (including sensors)
    intervals_secs : int
How many seconds between each generated reading
outputter : obj
The ReadingsOutputter to send sensor readings to
from_date : date
The starting date to generate readings from
"""
station = Station(config['station'])
logger.info('Generating backfill readings for station: %s since %s' % (station.station_name, from_date))
cnt = 0
next_time = from_date
while (next_time < datetime.now()):
cnt += 1
if cnt % 1000 == 0:
logger.info('Date %s, count=%d' % (next_time, cnt))
timestamp = next_time.strftime(config['settings']['timestamp_format'])
readings = [('TIMESTAMP', timestamp),('RECORD', cnt),('Station', station.station_name)]
readings.extend([(s.name, s.generate_reading()) for s in station.sensors])
outputter.output(readings)
next_time = next_time + timedelta(seconds=intervals_secs)
def main(arguments):
"""Main method"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-c', '--configfile', help="Config file")
parser.add_argument('-o', '--outputfile', help="Output file", required=False)
parser.add_argument('--interval', help="Intervals (seconds)", required=False, type=float ,default=0.5)
parser.add_argument('--count', help="Number of readings (-1 = infinite)", required=False, type=float, default=5)
parser.add_argument('--mqtt_topic', help="The MQTT topic to publish", required=False)
parser.add_argument('--mqtt_hostname', help="The MQTT hostname", required=False, default='localhost')
parser.add_argument('--mqtt_port', help="The MQTT port", required=False, type=int, default=1883)
parser.add_argument('--backfill_from', help="Backfill readings starting from this date eg. 2020-01-31", required=False)
args = parser.parse_args(arguments)
config = None
if args.configfile:
with open(args.configfile) as config_file:
try:
config = yaml.safe_load(config_file)
except yaml.YAMLError as exc:
logger.exception(exc)
else:
logger.error('Config file must be provided')
if not config:
sys.exit(1)
if args.mqtt_topic:
host = args.mqtt_hostname
port = args.mqtt_port
topic = args.mqtt_topic
logger.info('Sending output to MQTT %s:%s on %s' % (host, port, topic))
outputter = MqttOutputter(host, port, topic)
elif args.outputfile:
logger.info('Sending output to file %s' % args.outputfile)
outputter = CSVOutputter(args.outputfile)
else:
logger.info('Sending output to screen')
outputter = ScreenJsonOutputter()
if config:
if args.backfill_from:
from_date = datetime.strptime(args.backfill_from, '%Y-%m-%d')
logger.info('Back-filling data from %s' % from_date)
outputter.silent = True
generate_backfill_readings(config, args.interval, outputter, from_date)
logger.info('Back-fill completed')
else:
# Kick off sensor data generation loop
generate_readings(config, args.interval, args.count, outputter)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
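# Example invocations (illustrative; config.yaml is a placeholder file name):
#
#   python dummy-sensor.py -c config.yaml                              # 5 readings to the screen
#   python dummy-sensor.py -c config.yaml -o readings.csv --count -1   # endless CSV output
#   python dummy-sensor.py -c config.yaml --mqtt_topic sensors/demo --mqtt_hostname localhost
#   python dummy-sensor.py -c config.yaml --mqtt_topic sensors/demo --backfill_from 2020-01-31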
``` |
{
"source": "jontstaz/BinanceWatch",
"score": 2
} |
#### File: BinanceWatch/BinanceWatch/BinanceManager.py
```python
import datetime
import math
import time
from typing import Optional, Dict
import dateparser
from binance.client import Client
from binance.exceptions import BinanceAPIException
from tqdm import tqdm
from BinanceWatch.storage import tables
from BinanceWatch.utils.LoggerGenerator import LoggerGenerator
from BinanceWatch.utils.time_utils import datetime_to_millistamp
from BinanceWatch.storage.BinanceDataBase import BinanceDataBase
class BinanceManager:
"""
This class is in charge of filling the database by calling the binance API
"""
API_MAX_RETRY = 3
def __init__(self, api_key: str, api_secret: str, account_name: str = 'default'):
"""
initialise the binance manager.
:param api_key: key for the Binance api
:type api_key: str
:param api_secret: secret for the Binance api
:type api_secret: str
:param account_name: if you have several accounts to monitor, you need to give them different names or the
database will collide
:type account_name: str
"""
self.account_name = account_name
self.db = BinanceDataBase(name=f"{self.account_name}_db")
self.client = Client(api_key=api_key, api_secret=api_secret)
self.logger = LoggerGenerator.get_logger(f"BinanceManager_{self.account_name}")
def update_spot(self):
"""
call all update methods related to the spot account
:return: None
:rtype: None
"""
self.update_all_spot_trades()
self.update_spot_deposits()
self.update_spot_withdraws()
self.update_spot_dusts()
self.update_spot_dividends()
self.update_universal_transfers(transfer_filter='MAIN')
def update_cross_margin(self):
"""
call all update methods related to cross margin spot account
:return: None
:rtype: None
"""
self.update_all_cross_margin_trades()
self.update_cross_margin_loans()
self.update_cross_margin_interests()
self.update_cross_margin_repays()
self.update_universal_transfers(transfer_filter='MARGIN')
def update_lending(self):
"""
call all update methods related to lending activities
:return: None
:rtype: None
"""
self.update_lending_interests()
self.update_lending_purchases()
self.update_lending_redemptions()
def update_universal_transfers(self, transfer_filter: Optional[str] = None):
"""
update the universal transfers database.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.query_universal_transfer_history
https://binance-docs.github.io/apidocs/spot/en/#query-user-universal-transfer-history
:param transfer_filter: if not None, only the transfers containing this filter will be updated (ex: 'MAIN')
:type transfer_filter: Optional[str]
:return: None
:rtype: None
"""
all_types = ['MAIN_C2C', 'MAIN_UMFUTURE', 'MAIN_CMFUTURE', 'MAIN_MARGIN', 'MAIN_MINING', 'C2C_MAIN',
'C2C_UMFUTURE', 'C2C_MINING', 'C2C_MARGIN', 'UMFUTURE_MAIN', 'UMFUTURE_C2C',
'UMFUTURE_MARGIN', 'CMFUTURE_MAIN', 'CMFUTURE_MARGIN', 'MARGIN_MAIN', 'MARGIN_UMFUTURE',
'MARGIN_CMFUTURE', 'MARGIN_MINING', 'MARGIN_C2C', 'MINING_MAIN', 'MINING_UMFUTURE',
'MINING_C2C', 'MINING_MARGIN']
if transfer_filter is not None:
transfers_types = list(filter(lambda x: transfer_filter in x, all_types))
else:
transfers_types = all_types
pbar = tqdm(total=len(transfers_types))
for transfer_type in transfers_types:
pbar.set_description(f"fetching transfer type {transfer_type}")
latest_time = self.db.get_last_universal_transfer_time(transfer_type=transfer_type) + 1
current = 1
while True:
client_params = {
'type': transfer_type,
'startTime': latest_time,
'current': current,
'size': 100
}
universal_transfers = self._call_binance_client('query_universal_transfer_history', client_params)
try:
universal_transfers = universal_transfers['rows']
except KeyError:
break
for transfer in universal_transfers:
self.db.add_universal_transfer(transfer_id=transfer['tranId'],
transfer_type=transfer['type'],
transfer_time=transfer['timestamp'],
asset=transfer['asset'],
amount=float(transfer['amount'])
)
if len(universal_transfers):
current += 1 # next page
self.db.commit()
else:
break
pbar.update()
pbar.close()
def update_cross_margin_interests(self):
"""
update the interests for all cross margin assets
sources:
https://binance-docs.github.io/apidocs/spot/en/#query-repay-record-user_data
        :return: None
        :rtype: None
"""
margin_type = 'cross'
latest_time = self.db.get_last_margin_interest_time(margin_type)
archived = 1000 * time.time() - latest_time > 1000 * 3600 * 24 * 30 * 3
current = 1
pbar = tqdm()
pbar.set_description("fetching cross margin interests")
while True:
params = {
'current': current,
'startTime': latest_time + 1000,
'size': 100,
'archived': archived
}
# no built-in method yet in python-binance for margin/interestHistory
client_params = {
'method': 'get',
'path': 'margin/interestHistory',
'signed': True,
'data': params
}
interests = self._call_binance_client('_request_margin_api', client_params)
for interest in interests['rows']:
self.db.add_margin_interest(margin_type=margin_type,
interest_time=interest['interestAccuredTime'],
asset=interest['asset'],
interest=interest['interest'],
interest_type=interest['type'],
auto_commit=False)
if len(interests['rows']):
current += 1 # next page
self.db.commit()
elif archived: # switching to non archived interests
current = 1
archived = False
latest_time = self.db.get_last_margin_interest_time(margin_type)
else:
break
pbar.update()
pbar.close()
def update_cross_margin_repays(self):
"""
update the repays for all cross margin assets
:return: None
:rtype: None
"""
client_params = {
'method': 'get',
'path': 'margin/allPairs',
'data': {}
}
symbols_info = self._call_binance_client('_request_margin_api', client_params) # not built-in yet
assets = set()
for symbol_info in symbols_info:
assets.add(symbol_info['base'])
assets.add(symbol_info['quote'])
pbar = tqdm(total=len(assets))
for asset in assets:
pbar.set_description(f"fetching {asset} cross margin repays")
self.update_margin_asset_repay(asset=asset)
pbar.update()
pbar.close()
def update_margin_asset_repay(self, asset: str, isolated_symbol=''):
"""
update the repays database for a specified asset.
sources:
https://binance-docs.github.io/apidocs/spot/en/#query-repay-record-user_data
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_margin_repay_details
:param asset: asset for the repays
:type asset: str
        :param isolated_symbol: the symbol must be specified for isolated margin, otherwise cross margin data is returned
:type isolated_symbol: str
:return: None
:rtype: None
"""
margin_type = 'cross' if isolated_symbol == '' else 'isolated'
latest_time = self.db.get_last_repay_time(asset=asset, margin_type=margin_type)
archived = 1000 * time.time() - latest_time > 1000 * 3600 * 24 * 30 * 3
current = 1
while True:
client_params = {
'asset': asset,
'current':current,
'startTime': latest_time + 1000,
'archived': archived,
'isolatedSymbol': isolated_symbol,
'size': 100
}
repays = self._call_binance_client('get_margin_repay_details', client_params)
for repay in repays['rows']:
if repay['status'] == 'CONFIRMED':
self.db.add_repay(margin_type=margin_type,
tx_id=repay['txId'],
repay_time=repay['timestamp'],
asset=repay['asset'],
principal=repay['principal'],
interest=repay['interest'],
auto_commit=False)
if len(repays['rows']):
current += 1 # next page
self.db.commit()
elif archived: # switching to non archived repays
current = 1
archived = False
latest_time = self.db.get_last_repay_time(asset=asset, margin_type=margin_type)
else:
break
def update_cross_margin_loans(self):
"""
update the loans for all cross margin assets
:return: None
:rtype: None
"""
client_params = {
'method': 'get',
'path': 'margin/allPairs',
'data': {}
}
symbols_info = self._call_binance_client('_request_margin_api', client_params) # not built-in yet
assets = set()
for symbol_info in symbols_info:
assets.add(symbol_info['base'])
assets.add(symbol_info['quote'])
pbar = tqdm(total=len(assets))
for asset in assets:
pbar.set_description(f"fetching {asset} cross margin loans")
self.update_margin_asset_loans(asset=asset)
pbar.update()
pbar.close()
def update_margin_asset_loans(self, asset: str, isolated_symbol=''):
"""
update the loans database for a specified asset.
sources:
https://binance-docs.github.io/apidocs/spot/en/#query-loan-record-user_data
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_margin_loan_details
:param asset: asset for the loans
:type asset: str
        :param isolated_symbol: the symbol must be specified for isolated margin, otherwise cross margin data is returned
:type isolated_symbol: str
:return: None
:rtype: None
"""
margin_type = 'cross' if isolated_symbol == '' else 'isolated'
latest_time = self.db.get_last_loan_time(asset=asset, margin_type=margin_type)
archived = 1000 * time.time() - latest_time > 1000 * 3600 * 24 * 30 * 3
current = 1
while True:
client_params = {
'asset': asset,
'current': current,
'startTime': latest_time + 1000,
'archived': archived,
'isolatedSymbol': isolated_symbol,
'size': 100
}
loans = self._call_binance_client('get_margin_loan_details', client_params)
for loan in loans['rows']:
if loan['status'] == 'CONFIRMED':
self.db.add_loan(margin_type=margin_type,
tx_id=loan['txId'],
loan_time=loan['timestamp'],
asset=loan['asset'],
principal=loan['principal'],
auto_commit=False)
if len(loans['rows']):
current += 1 # next page
self.db.commit()
elif archived: # switching to non archived loans
current = 1
archived = False
latest_time = self.db.get_last_loan_time(asset=asset, margin_type=margin_type)
else:
break
def update_cross_margin_symbol_trades(self, asset: str, ref_asset: str, limit: int = 1000):
"""
        This updates the cross margin trades in the database for a single trading pair.
        It checks the last stored trade id and requests all trades that came after it.
sources:
https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-trade-list-user_data
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_margin_trades
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param limit: max size of each trade requests
:type limit: int
:return: None
:rtype: None
"""
limit = min(1000, limit)
symbol = asset + ref_asset
last_trade_id = self.db.get_max_trade_id(asset, ref_asset, 'cross_margin')
while True:
client_params = {
'symbol': symbol,
'fromId': last_trade_id + 1,
'limit': limit
}
new_trades = self._call_binance_client('get_margin_trades', client_params)
for trade in new_trades:
self.db.add_trade(trade_type='cross_margin',
trade_id=int(trade['id']),
trade_time=int(trade['time']),
asset=asset,
ref_asset=ref_asset,
qty=float(trade['qty']),
price=float(trade['price']),
fee=float(trade['commission']),
fee_asset=trade['commissionAsset'],
is_buyer=trade['isBuyer'],
auto_commit=False
)
last_trade_id = max(last_trade_id, int(trade['id']))
if len(new_trades):
self.db.commit()
if len(new_trades) < limit:
break
def update_all_cross_margin_trades(self, limit: int = 1000):
"""
        This updates the cross margin trades in the database for every trading pair
:param limit: max size of each trade requests
:type limit: int
:return: None
:rtype: None
"""
client_params = {
'method': 'get',
'path': 'margin/allPairs',
'data': {}
}
symbols_info = self._call_binance_client('_request_margin_api', client_params) # not built-in yet
pbar = tqdm(total=len(symbols_info))
for symbol_info in symbols_info:
pbar.set_description(f"fetching {symbol_info['symbol']} cross margin trades")
self.update_cross_margin_symbol_trades(asset=symbol_info['base'],
ref_asset=symbol_info['quote'],
limit=limit)
pbar.update()
pbar.close()
def update_lending_redemptions(self):
"""
update the lending redemptions database.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_lending_redemption_history
https://binance-docs.github.io/apidocs/spot/en/#get-redemption-record-user_data
:return: None
:rtype: None
"""
lending_types = ['DAILY', 'ACTIVITY', 'CUSTOMIZED_FIXED']
pbar = tqdm(total=3)
for lending_type in lending_types:
pbar.set_description(f"fetching lending redemptions of type {lending_type}")
latest_time = self.db.get_last_lending_redemption_time(lending_type=lending_type) + 1
current = 1
while True:
client_params = {
'lendingType': lending_type,
'startTime': latest_time,
'current': current,
'size': 100
}
lending_redemptions = self._call_binance_client('get_lending_redemption_history', client_params)
for li in lending_redemptions:
if li['status'] == 'PAID':
self.db.add_lending_redemption(redemption_time=li['createTime'],
lending_type=lending_type,
asset=li['asset'],
amount=li['amount']
)
if len(lending_redemptions):
current += 1 # next page
self.db.commit()
else:
break
pbar.update()
pbar.close()
def update_lending_purchases(self):
"""
update the lending purchases database.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_lending_purchase_history
https://binance-docs.github.io/apidocs/spot/en/#get-purchase-record-user_data
:return: None
:rtype: None
"""
lending_types = ['DAILY', 'ACTIVITY', 'CUSTOMIZED_FIXED']
pbar = tqdm(total=3)
for lending_type in lending_types:
pbar.set_description(f"fetching lending purchases of type {lending_type}")
latest_time = self.db.get_last_lending_purchase_time(lending_type=lending_type) + 1
current = 1
while True:
client_params = {
'lendingType': lending_type,
'startTime': latest_time,
'current': current,
'size': 100
}
lending_purchases = self._call_binance_client('get_lending_purchase_history', client_params)
for li in lending_purchases:
if li['status'] == 'SUCCESS':
self.db.add_lending_purchase(purchase_id=li['purchaseId'],
purchase_time=li['createTime'],
lending_type=li['lendingType'],
asset=li['asset'],
amount=li['amount']
)
if len(lending_purchases):
current += 1 # next page
self.db.commit()
else:
break
pbar.update()
pbar.close()
def update_lending_interests(self):
"""
update the lending interests database.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_lending_interest_history
https://binance-docs.github.io/apidocs/spot/en/#get-interest-history-user_data-2
:return: None
:rtype: None
"""
lending_types = ['DAILY', 'ACTIVITY', 'CUSTOMIZED_FIXED']
pbar = tqdm(total=3)
for lending_type in lending_types:
pbar.set_description(f"fetching lending interests of type {lending_type}")
latest_time = self.db.get_last_lending_interest_time(lending_type=lending_type) + 3600 * 1000 # add 1 hour
current = 1
while True:
client_params = {
'lendingType': lending_type,
'startTime': latest_time,
'current': current,
'size': 100
}
lending_interests = self._call_binance_client('get_lending_interest_history', client_params)
for li in lending_interests:
self.db.add_lending_interest(time=li['time'],
lending_type=li['lendingType'],
asset=li['asset'],
amount=li['interest']
)
if len(lending_interests):
current += 1 # next page
self.db.commit()
else:
break
pbar.update()
pbar.close()
def update_spot_dusts(self):
"""
update the dust database. As there is no way to get the dust by id or timeframe, the table is cleared
for each update
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_dust_log
https://binance-docs.github.io/apidocs/spot/en/#dustlog-user_data
:return: None
:rtype: None
"""
self.db.drop_table(tables.SPOT_DUST_TABLE)
result = self._call_binance_client('get_dust_log')
dusts = result['results']
pbar = tqdm(total=dusts['total'])
pbar.set_description("fetching spot dusts")
for d in dusts['rows']:
for sub_dust in d['logs']:
date_time = dateparser.parse(sub_dust['operateTime'] + 'Z')
self.db.add_dust(tran_id=sub_dust['tranId'],
time=datetime_to_millistamp(date_time),
asset=sub_dust['fromAsset'],
asset_amount=sub_dust['amount'],
bnb_amount=sub_dust['transferedAmount'],
bnb_fee=sub_dust['serviceChargeAmount'],
auto_commit=False
)
pbar.update()
self.db.commit()
pbar.close()
def update_spot_dividends(self, day_jump: float = 90, limit: int = 500):
"""
update the dividends database (earnings distributed by Binance)
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_asset_dividend_history
https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data
:param day_jump: length of the time window in days, max is 90
:type day_jump: float
:param limit: max number of dividends to retrieve per call, max is 500
:type limit: int
:return: None
:rtype: None
"""
limit = min(500, limit)
delta_jump = min(day_jump, 90) * 24 * 3600 * 1000
start_time = self.db.get_last_spot_dividend_time() + 1
now_millistamp = datetime_to_millistamp(datetime.datetime.now(tz=datetime.timezone.utc))
pbar = tqdm(total=math.ceil((now_millistamp - start_time) / delta_jump))
pbar.set_description("fetching spot dividends")
while start_time < now_millistamp:
# the stable working version of client.get_asset_dividend_history is not released yet,
# for now it has a post error, so this protected member is used in the meantime
params = {
'startTime': start_time,
'endTime': start_time + delta_jump,
'limit': limit
}
client_params = {
'method': 'get',
'path': 'asset/assetDividend',
'signed': True,
'data': params
}
result = self._call_binance_client('_request_margin_api', client_params)
dividends = result['rows']
for div in dividends:
self.db.add_dividend(div_id=int(div['tranId']),
div_time=int(div['divTime']),
asset=div['asset'],
amount=float(div['amount']),
auto_commit=False
)
pbar.update()
if len(dividends) < limit:
start_time += delta_jump
else: # limit was reached before the end of the time windows
start_time = int(dividends[0]['divTime']) + 1
if len(dividends):
self.db.commit()
pbar.close()
def update_spot_withdraws(self, day_jump: float = 90):
"""
        This fetches the crypto withdraws made on the spot account from the last withdraw time in the database to now.
        It is done with multiple calls, each covering a time window of day_jump days.
The withdraws are then saved in the database.
Only successful withdraws are fetched.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_withdraw_history
https://binance-docs.github.io/apidocs/spot/en/#withdraw-history-user_data
:param day_jump: length of the time window for each call (max 90)
:type day_jump: float
:return: None
:rtype: None
"""
delta_jump = min(day_jump, 90) * 24 * 3600 * 1000
start_time = self.db.get_last_spot_withdraw_time() + 1
now_millistamp = datetime_to_millistamp(datetime.datetime.now(tz=datetime.timezone.utc))
pbar = tqdm(total=math.ceil((now_millistamp - start_time) / delta_jump))
pbar.set_description("fetching spot withdraws")
while start_time < now_millistamp:
client_params = {
'startTime': start_time,
'endTime': start_time + delta_jump,
'status': 6
}
result = self._call_binance_client('get_withdraw_history', client_params)
withdraws = result['withdrawList']
for withdraw in withdraws:
self.db.add_withdraw(withdraw_id=withdraw['id'],
tx_id=withdraw['txId'],
apply_time=int(withdraw['applyTime']),
asset=withdraw['asset'],
amount=float(withdraw['amount']),
fee=float(withdraw['transactionFee']),
auto_commit=False
)
pbar.update()
start_time += delta_jump
if len(withdraws):
self.db.commit()
pbar.close()
def update_spot_deposits(self, day_jump: float = 90):
"""
        This fetches the crypto deposits made on the spot account from the last deposit time in the database to now.
        It is done with multiple calls, each covering a time window of day_jump days.
The deposits are then saved in the database.
Only successful deposits are fetched.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_deposit_history
https://binance-docs.github.io/apidocs/spot/en/#deposit-history-user_data
:param day_jump: length of the time window for each call (max 90)
:type day_jump: float
:return: None
:rtype: None
"""
delta_jump = min(day_jump, 90) * 24 * 3600 * 1000
start_time = self.db.get_last_spot_deposit_time() + 1
now_millistamp = datetime_to_millistamp(datetime.datetime.now(tz=datetime.timezone.utc))
pbar = tqdm(total=math.ceil((now_millistamp - start_time) / delta_jump))
pbar.set_description("fetching spot deposits")
while start_time < now_millistamp:
client_params = {
'startTime': start_time,
'endTime': start_time + delta_jump,
'status': 1
}
result = self._call_binance_client('get_deposit_history', client_params)
deposits = result['depositList']
for deposit in deposits:
self.db.add_deposit(tx_id=deposit['txId'],
asset=deposit['asset'],
insert_time=int(deposit['insertTime']),
amount=float(deposit['amount']),
auto_commit=False)
pbar.update()
start_time += delta_jump
if len(deposits):
self.db.commit()
pbar.close()
def update_spot_symbol_trades(self, asset: str, ref_asset: str, limit: int = 1000):
"""
        This updates the spot trades in the database for a single trading pair. It checks the last stored
        trade id and requests all trades that came after it.
sources:
https://python-binance.readthedocs.io/en/latest/binance.html#binance.client.Client.get_my_trades
https://binance-docs.github.io/apidocs/spot/en/#account-trade-list-user_data
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param limit: max size of each trade requests
:type limit: int
:return: None
:rtype: None
"""
limit = min(1000, limit)
symbol = asset + ref_asset
last_trade_id = self.db.get_max_trade_id(asset, ref_asset, 'spot')
while True:
client_params = {
'symbol': symbol,
'fromId': last_trade_id + 1,
'limit': limit
}
new_trades = self._call_binance_client('get_my_trades', client_params)
for trade in new_trades:
self.db.add_trade(trade_type='spot',
trade_id=int(trade['id']),
trade_time=int(trade['time']),
asset=asset,
ref_asset=ref_asset,
qty=float(trade['qty']),
price=float(trade['price']),
fee=float(trade['commission']),
fee_asset=trade['commissionAsset'],
is_buyer=trade['isBuyer'],
auto_commit=False
)
last_trade_id = max(last_trade_id, int(trade['id']))
if len(new_trades):
self.db.commit()
if len(new_trades) < limit:
break
def update_all_spot_trades(self, limit: int = 1000):
"""
        This updates the spot trades in the database for every trading pair
:param limit: max size of each trade requests
:type limit: int
:return: None
:rtype: None
"""
symbols_info = self.client.get_exchange_info()['symbols']
pbar = tqdm(total=len(symbols_info))
for symbol_info in symbols_info:
pbar.set_description(f"fetching {symbol_info['symbol']} spot trades")
self.update_spot_symbol_trades(asset=symbol_info['baseAsset'],
ref_asset=symbol_info['quoteAsset'],
limit=limit)
pbar.update()
pbar.close()
def _call_binance_client(self, method_name: str, params: Optional[Dict] = None, retry_count: int = 0):
"""
        This method is used to handle rate limits: if a rate limit is breached, it will wait the necessary time
        before calling the API again.
:param method_name: name of the method binance.Client to call
:type method_name: str
:param params: parameters to pass to the above method
:type params: Dict
:param retry_count: internal use only to count the number of retry if rate limits are breached
:type retry_count: int
:return: response of binance.Client method
:rtype: Dict
"""
if params is None:
params = dict()
if retry_count >= BinanceManager.API_MAX_RETRY:
raise RuntimeError(f"The API rate limits has been breached {retry_count} times")
try:
return getattr(self.client, method_name)(**params)
except BinanceAPIException as err:
if err.code == -1003: # API rate Limits
wait_time = float(err.response.headers['Retry-After'])
if err.response.status_code == 418: # ban
self.logger.error(f"API calls resulted in a ban, retry in {wait_time} seconds")
raise err
self.logger.info(f"API calls resulted in a breach of rate limits, will retry after {wait_time} seconds")
time.sleep(wait_time + 1)
return self._call_binance_client(method_name, params, retry_count + 1)
raise err
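# Usage sketch (not part of the original module; the key/secret values are
# placeholders):
#
#   manager = BinanceManager(api_key='...', api_secret='...', account_name='main')
#   manager.update_spot()          # spot trades, deposits, withdraws, dusts, dividends
#   manager.update_cross_margin()  # cross margin trades, loans, interests, repays
#   manager.update_lending()       # lending interests, purchases, redemptions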
```
#### File: BinanceWatch/storage/tables.py
```python
from dataclasses import dataclass
from typing import List, Optional
class Table:
"""
@DynamicAttrs
"""
def __init__(self, name: str, columns_names: List[str], columns_sql_types: List[str],
primary_key: Optional[str] = None, primary_key_sql_type: Optional[str] = None):
self.name = name
self.columns_names = columns_names
self.columns_sql_types = columns_sql_types
self.primary_key = primary_key
self.primary_key_sql_type = primary_key_sql_type
for column_name in self.columns_names:
try:
value = getattr(self, column_name)
raise ValueError(f"the name {column_name} conflicts with an existing attribute of value {value}")
except AttributeError:
setattr(self, column_name, column_name)
if self.primary_key is not None:
setattr(self, self.primary_key, self.primary_key)
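# Illustration (not in the original source): column and primary-key names are
# exposed as attributes whose value is the name itself, so callers can write
# e.g. SPOT_TRADE_TABLE.tradeId (== 'tradeId') or SPOT_DEPOSIT_TABLE.txId
# (== 'txId') instead of hard-coding column name strings.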
SPOT_TRADE_TABLE = Table(
'spot_trade',
[
'tradeId',
'tdTime',
'asset',
'refAsset',
'qty',
'price',
'fee',
'feeAsset',
'isBuyer'
],
[
'INTEGER',
'INTEGER',
'TEXT',
'TEXT',
'REAL',
'REAL',
'REAL',
'TEXT',
'INTEGER'
]
)
SPOT_DEPOSIT_TABLE = Table(
'spot_deposit',
[
'insertTime',
'asset',
'amount',
],
[
'INTEGER',
'TEXT',
'REAL'
],
primary_key='txId',
primary_key_sql_type='TEXT'
)
SPOT_WITHDRAW_TABLE = Table(
'spot_withdraw',
[
'txId',
'applyTime',
'asset',
'amount',
'fee'
],
[
'TEXT',
'INTEGER',
'TEXT',
'REAL',
'REAL'
],
primary_key='withdrawId',
primary_key_sql_type='TEXT'
)
SPOT_DIVIDEND_TABLE = Table(
'spot_dividend_table',
[
'divTime',
'asset',
'amount'
],
[
'INTEGER',
'TEXT',
'REAL'
],
primary_key='divId',
primary_key_sql_type='INTEGER'
)
SPOT_DUST_TABLE = Table(
'spot_dust_table',
[
'tranId',
'dustTime',
'asset',
'assetAmount',
'bnbAmount',
'bnbFee',
],
[
'INTEGER',
'INTEGER',
'TEXT',
'REAL',
'REAL',
'REAL'
]
)
LENDING_INTEREST_TABLE = Table(
'lending_interest_table',
[
'interestTime',
'lendingType',
'asset',
'amount',
],
[
'INTEGER',
'TEXT',
'TEXT',
'REAL',
]
)
LENDING_PURCHASE_TABLE = Table(
'lending_purchase_history',
[
'purchaseTime',
'lendingType',
'asset',
'amount'
],
[
'INTEGER',
'TEXT',
'TEXT',
'INTEGER'
],
primary_key='purchaseId',
primary_key_sql_type='INTEGER'
)
LENDING_REDEMPTION_TABLE = Table(
'lending_redemption_history',
[
'redemptionTime',
'lendingType',
'asset',
'amount'
],
[
'INTEGER',
'TEXT',
'TEXT',
'INTEGER'
]
)
CROSS_MARGIN_TRADE_TABLE = Table(
'cross_margin_trade',
[
'tradeId',
'tdTime',
'asset',
'refAsset',
'qty',
'price',
'fee',
'feeAsset',
'isBuyer'
],
[
'INTEGER',
'INTEGER',
'TEXT',
'TEXT',
'REAL',
'REAL',
'REAL',
'TEXT',
'INTEGER'
]
)
CROSS_MARGIN_LOAN_TABLE = Table(
"cross_margin_loan_table",
[
'loanTime',
'asset',
'principal',
],
[
'INTEGER',
'TEXT',
'REAL'
],
primary_key='txId',
primary_key_sql_type='INTEGER'
)
CROSS_MARGIN_REPAY_TABLE = Table(
"cross_margin_repay_table",
[
'repayTime',
'asset',
'principal',
'interest',
],
[
'INTEGER',
'TEXT',
'REAL',
'REAL'
],
primary_key='txId',
primary_key_sql_type='INTEGER'
)
CROSS_MARGIN_INTEREST_TABLE = Table(
"cross_margin_interest_table",
[
'interestTime',
'asset',
'interest',
'interestType'
],
[
'INTEGER',
'TEXT',
'REAL',
'TEXT'
]
)
UNIVERSAL_TRANSFER_TABLE = Table(
"universal_transfer_table",
[
'trfType',
'trfTime',
'asset',
'amount'
],
[
'TEXT',
'INTEGER',
'TEXT',
'REAL'
],
primary_key='tranId',
primary_key_sql_type='INTEGER'
)
``` |
{
"source": "jon-turney/calm",
"score": 2
} |
#### File: calm/calm/calm.py
```python
from contextlib import ExitStack
import argparse
import logging
import lzma
import os
import shutil
import signal
import sys
import tempfile
import time
from .abeyance_handler import AbeyanceHandler
from .buffering_smtp_handler import BufferingSMTPHandler
from .movelist import MoveList
from . import common_constants
from . import irk
from . import maintainers
from . import package
from . import pkg2html
from . import setup_exe
from . import uploads
from . import utils
#
#
#
class CalmState(object):
def __init__(self):
self.subject = ''
self.packages = {}
#
#
#
def process_relarea(args):
packages = {}
error = False
# for each arch
for arch in common_constants.ARCHES:
logging.debug("reading existing packages for arch %s" % (arch))
# build package list
packages[arch] = package.read_packages(args.rel_area, arch)
# validate the package set
if not package.validate_packages(args, packages[arch]):
logging.error("existing %s package set has errors" % (arch))
error = True
if error:
return None
# packages can be stale due to changes made directly in the release
# area, so first check here if there are any stale packages to vault
if args.stale:
stale_to_vault = remove_stale_packages(args, packages)
if stale_to_vault:
for arch in common_constants.ARCHES + ['noarch', 'src']:
logging.info("vaulting %d old package(s) for arch %s" % (len(stale_to_vault[arch]), arch))
stale_to_vault[arch].move_to_vault(args)
else:
logging.error("error while evaluating stale packages")
return None
return packages
#
#
#
def process_uploads(args, state):
# read maintainer list
mlist = maintainers.read(args, getattr(args, 'orphanmaint', None))
# make the list of all packages
all_packages = maintainers.all_packages(mlist)
# for each maintainer
for name in sorted(mlist.keys()):
m = mlist[name]
# also send a mail to each maintainer about their packages
threshold = logging.WARNING if m.quiet else logging.INFO
with mail_logs(args.email, toaddrs=m.email, subject='%s for %s' % (state.subject, name), thresholdLevel=threshold, retainLevel=logging.INFO) as maint_email: # noqa: F841
# for each arch and noarch
scan_result = {}
skip_maintainer = False
for arch in common_constants.ARCHES + ['noarch', 'src']:
logging.debug("reading uploaded arch %s packages from maintainer %s" % (arch, name))
# read uploads
scan_result[arch] = uploads.scan(m, all_packages, arch, args)
# remove triggers
uploads.remove(args, scan_result[arch].remove_always)
if scan_result[arch].error:
logging.error("error while reading uploaded arch %s packages from maintainer %s" % (arch, name))
skip_maintainer = True
continue
# if there are no added or removed files for this maintainer, we
# don't have anything to do
if not any([scan_result[a].to_relarea or scan_result[a].to_vault for a in scan_result]):
logging.debug("nothing to do for maintainer %s" % (name))
skip_maintainer = True
if skip_maintainer:
continue
# for each arch
merged_packages = {}
valid = True
for arch in common_constants.ARCHES:
logging.debug("merging %s package set with uploads from maintainer %s" % (arch, name))
# merge package sets
merged_packages[arch] = package.merge(state.packages[arch], scan_result[arch].packages, scan_result['noarch'].packages, scan_result['src'].packages)
if not merged_packages[arch]:
logging.error("error while merging uploaded %s packages for %s" % (arch, name))
valid = False
break
# remove files which are to be removed
scan_result[arch].to_vault.map(lambda p, f: package.delete(merged_packages[arch], p, f))
# validate the package set
logging.debug("validating merged %s package set for maintainer %s" % (arch, name))
if not package.validate_packages(args, merged_packages[arch]):
logging.error("error while validating merged %s packages for %s" % (arch, name))
valid = False
# if an error occurred ...
if not valid:
# ... discard move list and merged_packages
continue
# check for packages which are stale as a result of this upload,
# which we will want in the same report
if args.stale:
stale_to_vault = remove_stale_packages(args, merged_packages)
# if an error occurred ...
if not stale_to_vault:
# ... discard move list and merged_packages
logging.error("error while evaluating stale packages for %s" % (name))
continue
# check for conflicting movelists
conflicts = False
for arch in common_constants.ARCHES + ['noarch', 'src']:
conflicts = conflicts or report_movelist_conflicts(scan_result[arch].to_relarea, scan_result[arch].to_vault, "manually")
if args.stale:
conflicts = conflicts or report_movelist_conflicts(scan_result[arch].to_relarea, stale_to_vault[arch], "automatically")
# if an error occurred ...
if conflicts:
# ... discard move list and merged_packages
logging.error("error while validating movelists for %s" % (name))
continue
# for each arch and noarch
for arch in common_constants.ARCHES + ['noarch', 'src']:
logging.debug("moving %s packages for maintainer %s" % (arch, name))
# process the move lists
if scan_result[arch].to_vault:
logging.info("vaulting %d package(s) for arch %s, by request" % (len(scan_result[arch].to_vault), arch))
scan_result[arch].to_vault.move_to_vault(args)
uploads.remove(args, scan_result[arch].remove_success)
if scan_result[arch].to_relarea:
logging.info("adding %d package(s) for arch %s" % (len(scan_result[arch].to_relarea), arch))
scan_result[arch].to_relarea.move_to_relarea(m, args)
# XXX: Note that there seems to be a separate process, not run
# from cygwin-admin's crontab, which changes the ownership of
# files in the release area to cyguser:cygwin
# for each arch
if args.stale:
for arch in common_constants.ARCHES + ['noarch', 'src']:
if stale_to_vault[arch]:
logging.info("vaulting %d old package(s) for arch %s" % (len(stale_to_vault[arch]), arch))
stale_to_vault[arch].move_to_vault(args)
# for each arch
for arch in common_constants.ARCHES:
# use merged package list
state.packages[arch] = merged_packages[arch]
# report what we've done
added = []
for arch in common_constants.ARCHES + ['noarch', 'src']:
added.append('%d (%s)' % (len(scan_result[arch].packages), arch))
msg = "added %s packages from maintainer %s" % (' + '.join(added), name)
logging.debug(msg)
irk.irk("calm %s" % msg)
# record updated reminder times for maintainers
maintainers.update_reminder_times(mlist)
return state.packages
#
#
#
def process(args, state):
# send one email per run to leads, if any errors occurred
with mail_logs(args.email, toaddrs=args.email, subject='%s' % (state.subject), thresholdLevel=logging.ERROR) as leads_email: # noqa: F841
if args.dryrun:
logging.warning("--dry-run is in effect, nothing will really be done")
state.packages = process_relarea(args)
if not state.packages:
return None
state.packages = process_uploads(args, state)
return state.packages
#
# remove stale packages
#
def remove_stale_packages(args, packages):
to_vault = {}
to_vault['noarch'] = MoveList()
to_vault['src'] = MoveList()
for arch in common_constants.ARCHES:
logging.debug("checking for stale packages for arch %s" % (arch))
# find stale packages
to_vault[arch] = package.stale_packages(packages[arch])
# remove stale packages from package set
to_vault[arch].map(lambda p, f: package.delete(packages[arch], p, f))
# if there are no stale packages, we don't have anything to do
if not any([to_vault[a] for a in to_vault]):
logging.debug("nothing is stale")
return to_vault
# re-validate package sets
    # (this shouldn't fail, but we check just to be sure...)
error = False
for arch in common_constants.ARCHES:
if not package.validate_packages(args, packages[arch]):
logging.error("%s package set has errors after removing stale packages" % arch)
error = True
if error:
return None
# since noarch and src packages are included in the package set for both
# arch, we will build (hopefully) identical move lists for those packages
# for each arch.
#
    # de-duplicate these package moves, as a rather awkward workaround for that
moved_list = set()
def dedup(path, f):
for prefix in ['noarch', 'src']:
if path.startswith(prefix):
to_vault[prefix].add(path, f)
moved_list.add(path)
to_vault[common_constants.ARCHES[0]].map(dedup)
for path in moved_list:
for arch in common_constants.ARCHES:
to_vault[arch].remove(path)
return to_vault
#
# report movelist conflicts
#
def report_movelist_conflicts(a, b, reason):
conflicts = False
n = MoveList.intersect(a, b)
if n:
def report_conflict(p, f):
logging.error("%s/%s is both uploaded and %s vaulted" % (p, f, reason))
n.map(report_conflict)
conflicts = True
return conflicts
#
#
#
def do_main(args, state):
# read package set and process uploads
packages = process(args, state)
if not packages:
logging.error("not processing uploads or writing setup.ini")
return 1
state.packages = packages
do_output(args, state)
return 0
#
#
#
def do_output(args, state):
# update packages listings
# XXX: perhaps we need a --[no]listing command line option to disable this from being run?
pkg2html.update_package_listings(args, state.packages)
# if we are daemonized, allow force regeneration of static content in htdocs
# initially (in case the generation code has changed), but update that
# static content only as needed on subsequent loops
args.force = 0
update_json = False
# for each arch
for arch in common_constants.ARCHES:
logging.debug("writing setup.ini for arch %s" % (arch))
args.arch = arch
args.setup_version = setup_exe.extract_version(os.path.join(args.setupdir, 'setup-' + args.arch + '.exe'))
logging.debug("setup version is '%s'" % (args.setup_version))
basedir = os.path.join(args.rel_area, args.arch)
inifile = os.path.join(basedir, 'setup.ini')
# write setup.ini to a temporary file
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
args.inifile = tmpfile.name
changed = False
# write setup.ini
package.write_setup_ini(args, state.packages[arch], arch)
# make it world-readable, if we can
try:
os.chmod(args.inifile, 0o644)
except (OSError):
pass
if not os.path.exists(inifile):
# if the setup.ini file doesn't exist yet
logging.warning('no existing %s' % (inifile))
changed = True
else:
# or, if it's changed in more than timestamp and comments
status = os.system('/usr/bin/diff -I^setup-timestamp -I^# -w -B -q %s %s >/dev/null' % (inifile, tmpfile.name))
logging.debug('diff exit status %d' % (status))
if (status >> 8) == 1:
changed = True
# then update setup.ini
if changed:
update_json = True
if args.dryrun:
logging.warning("not moving %s to %s, due to --dry-run" % (tmpfile.name, inifile))
os.remove(tmpfile.name)
else:
# make a backup of the current setup.ini
if os.path.exists(inifile):
shutil.copy2(inifile, inifile + '.bak')
# replace setup.ini
logging.info("moving %s to %s" % (tmpfile.name, inifile))
shutil.move(tmpfile.name, inifile)
irk.irk("calm updated setup.ini for arch '%s'" % (arch))
# compress and re-sign
for ext in ['.ini', '.bz2', '.xz', '.zst']:
extfile = os.path.join(basedir, 'setup' + ext)
try:
os.remove(extfile + '.sig')
except FileNotFoundError:
pass
if ext == '.bz2':
utils.system('/usr/bin/bzip2 <%s >%s' % (inifile, extfile))
elif ext == '.xz':
utils.system('/usr/bin/xz -6e <%s >%s' % (inifile, extfile))
elif ext == '.zst':
utils.system('/usr/bin/zstd -q -f --ultra -20 %s -o %s' % (inifile, extfile))
keys = ' '.join(['-u' + k for k in args.keys])
utils.system('/usr/bin/gpg ' + keys + ' --batch --yes -b ' + extfile)
# arrange for checksums to be recomputed
for sumfile in ['md5.sum', 'sha512.sum']:
try:
os.remove(os.path.join(basedir, sumfile))
except FileNotFoundError:
pass
else:
logging.debug("removing %s, unchanged %s" % (tmpfile.name, inifile))
os.remove(tmpfile.name)
# write packages.json
jsonfile = os.path.join(args.htdocs, 'packages.json.xz')
if update_json or not os.path.exists(jsonfile):
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as tmpfile:
logging.debug('writing %s' % (tmpfile.name))
with lzma.open(tmpfile, 'wt') as lzf:
package.write_repo_json(args, state.packages, lzf)
logging.info("moving %s to %s" % (tmpfile.name, jsonfile))
shutil.move(tmpfile.name, jsonfile)
# make it world-readable, if we can
try:
os.chmod(jsonfile, 0o644)
except (OSError):
pass
#
# daemonization loop
#
def do_daemon(args, state):
import daemon
import lockfile.pidlockfile
context = daemon.DaemonContext(
stdout=sys.stdout,
stderr=sys.stderr,
umask=0o002,
pidfile=lockfile.pidlockfile.PIDLockFile(args.daemon))
running = True
read_relarea = True
read_uploads = True
last_signal = None
# signals! the first, and best, interprocess communications mechanism! :)
def sigusr1(signum, frame):
logging.debug("SIGUSR1")
nonlocal last_signal
last_signal = signum
nonlocal read_uploads
read_uploads = True
def sigusr2(signum, frame):
logging.debug("SIGUSR2")
nonlocal last_signal
last_signal = signum
nonlocal read_relarea
read_relarea = True
def sigalrm(signum, frame):
logging.debug("SIGALRM")
nonlocal last_signal
last_signal = signum
nonlocal read_relarea
read_relarea = True
nonlocal read_uploads
read_uploads = True
def sigterm(signum, frame):
logging.debug("SIGTERM")
nonlocal running
running = False
context.signal_map = {
signal.SIGUSR1: sigusr1,
signal.SIGUSR2: sigusr2,
signal.SIGALRM: sigalrm,
signal.SIGTERM: sigterm,
}
with context:
logging_setup(args)
logging.info("calm daemon started, pid %d" % (os.getpid()))
irk.irk("calm daemon started")
state.packages = {}
try:
while running:
with mail_logs(args.email, toaddrs=args.email, subject='%s' % (state.subject), thresholdLevel=logging.ERROR) as leads_email:
# re-read relarea on SIGALRM or SIGUSR2
if read_relarea:
if last_signal != signal.SIGALRM:
irk.irk("calm processing release area")
read_relarea = False
state.packages = process_relarea(args)
if not state.packages:
logging.error("errors in relarea, not processing uploads or writing setup.ini")
else:
if read_uploads:
if last_signal != signal.SIGALRM:
irk.irk("calm processing uploads")
# read uploads on SIGUSR1
read_uploads = False
state.packages = process_uploads(args, state)
do_output(args, state)
# if there is more work to do, but don't spin if we
# can't do anything because relarea is bad
if read_uploads:
continue
# if there is more work to do
if read_relarea:
continue
# we wake at a 10 minute offset from the next 240 minute boundary
# (i.e. at :10 past every fourth hour) to check the state of the
# release area, in case someone has ninja-ed in a change there...
interval = 240 * 60
offset = 10 * 60
delay = interval - ((time.time() - offset) % interval)
signal.alarm(int(delay))
# wait until interrupted by a signal
if last_signal != signal.SIGALRM:
irk.irk("calm processing done")
logging.info("sleeping for %d seconds" % (delay))
signal.pause()
logging.info("woken")
# cancel any pending alarm
signal.alarm(0)
except Exception as e:
with mail_logs(args.email, toaddrs=args.email, subject='calm stopping due to unhandled exception', thresholdLevel=logging.ERROR) as leads_email: # noqa: F841
logging.error("exception %s" % (type(e).__name__), exc_info=True)
irk.irk("calm daemon stopped due to unhandled exception")
else:
irk.irk("calm daemon stopped")
logging.info("calm daemon stopped")
#
# we only want to mail the logs if the email option was used
# (otherwise use ExitStack() as a 'do nothing' context)
#
def mail_logs(enabled, toaddrs, subject, thresholdLevel, retainLevel=None):
if enabled:
return AbeyanceHandler(BufferingSMTPHandler(toaddrs, subject), thresholdLevel, retainLevel)
return ExitStack()
#
# setup logging configuration
#
def logging_setup(args):
# set up logging to a file
utils.makedirs(args.logdir)
rfh = logging.handlers.TimedRotatingFileHandler(os.path.join(args.logdir, 'calm.log'), backupCount=48, when='midnight')
rfh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)-8s - %(message)s'))
rfh.setLevel(logging.DEBUG)
logging.getLogger().addHandler(rfh)
# setup logging to stdout, of WARNING messages or higher (INFO if verbose)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter(os.path.basename(sys.argv[0]) + ': %(message)s'))
if args.verbose:
ch.setLevel(logging.INFO)
else:
ch.setLevel(logging.WARNING)
logging.getLogger().addHandler(ch)
# change root logger level from the default of WARNING to NOTSET so it
# doesn't filter out any log messages due to level
logging.getLogger().setLevel(logging.NOTSET)
#
#
#
def main():
htdocs_default = os.path.join(common_constants.HTDOCS, 'packages')
homedir_default = common_constants.HOMEDIR
orphanmaint_default = common_constants.ORPHANMAINT
pidfile_default = '/sourceware/cygwin-staging/calm.pid'
pkglist_default = common_constants.PKGMAINT
relarea_default = common_constants.FTP
setupdir_default = common_constants.HTDOCS
vault_default = common_constants.VAULT
logdir_default = '/sourceware/cygwin-staging/logs'
parser = argparse.ArgumentParser(description='Upset replacement')
parser.add_argument('-d', '--daemon', action='store', nargs='?', const=pidfile_default, help="daemonize (PIDFILE defaults to " + pidfile_default + ")", metavar='PIDFILE')
parser.add_argument('--email', action='store', dest='email', nargs='?', const=common_constants.EMAILS, help="email output to maintainer and ADDRS (ADDRS defaults to '" + common_constants.EMAILS + "')", metavar='ADDRS')
parser.add_argument('--force', action='count', help="force regeneration of static htdocs content", default=0)
parser.add_argument('--homedir', action='store', metavar='DIR', help="maintainer home directory (default: " + homedir_default + ")", default=homedir_default)
parser.add_argument('--htdocs', action='store', metavar='DIR', help="htdocs output directory (default: " + htdocs_default + ")", default=htdocs_default)
parser.add_argument('--key', action='append', metavar='KEYID', help="key to use to sign setup.ini", default=[], dest='keys')
parser.add_argument('--logdir', action='store', metavar='DIR', help="log directory (default: '" + logdir_default + "')", default=logdir_default)
parser.add_argument('--orphanmaint', action='store', metavar='NAMES', help="orphan package maintainers (default: '" + orphanmaint_default + "')", default=orphanmaint_default)
parser.add_argument('--pkglist', action='store', metavar='FILE', help="package maintainer list (default: " + pkglist_default + ")", default=pkglist_default)
parser.add_argument('--release', action='store', help='value for setup-release key (default: cygwin)', default='cygwin')
parser.add_argument('--releasearea', action='store', metavar='DIR', help="release directory (default: " + relarea_default + ")", default=relarea_default, dest='rel_area')
parser.add_argument('--setupdir', action='store', metavar='DIR', help="setup executable directory (default: " + setupdir_default + ")", default=setupdir_default)
parser.add_argument('--no-stale', action='store_false', dest='stale', help="don't vault stale packages")
parser.set_defaults(stale=True)
parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun', help="don't do anything")
parser.add_argument('--vault', action='store', metavar='DIR', help="vault directory (default: " + vault_default + ")", default=vault_default, dest='vault')
parser.add_argument('-v', '--verbose', action='count', dest='verbose', help='verbose output')
(args) = parser.parse_args()
if args.email:
args.email = args.email.split(',')
state = CalmState()
host = os.uname()[1]
if 'sourceware.org' not in host:
host = ' from ' + host
else:
host = ''
state.subject = 'calm%s: cygwin package upload report%s' % (' [dry-run]' if args.dryrun else '', host)
status = 0
if args.daemon:
do_daemon(args, state)
else:
logging_setup(args)
status = do_main(args, state)
return status
#
#
#
if __name__ == "__main__":
sys.exit(main())
```
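One detail worth calling out from `calm.py`: `mail_logs()` returns either a real log-capturing handler or a bare `ExitStack()`, so callers can always use `with`. Below is a standalone sketch of that conditional-context pattern, using only `contextlib` stand-ins rather than calm's own handlers:
```python
# Standalone sketch of the pattern used by mail_logs(): return a real context
# manager when reporting is enabled, otherwise an ExitStack() that does nothing.
import contextlib
import io

def maybe_capture(enabled, sink):
    if enabled:
        return contextlib.redirect_stdout(sink)  # stand-in for AbeyanceHandler/BufferingSMTPHandler
    return contextlib.ExitStack()                # a valid "do nothing" context manager

buf = io.StringIO()
with maybe_capture(True, buf):
    print("captured only when enabled")
assert buf.getvalue().startswith("captured")
```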
#### File: calm/calm/hint-migrate.py
```python
import argparse
import re
import os
import shutil
import sys
from . import common_constants
from . import hint
#
# migrate setup.hint to pvr.hint
#
# (just copy setup.hint to any missing pvr.hint. we don't need to bother
# cleaning up setup.hint which are no longer needed, as calm can do that)
#
def hint_migrate(args):
for arch in common_constants.ARCHES + ['noarch']:
basedir = os.path.join(args.rel_area, arch, 'release')
for (dirpath, _subdirs, files) in os.walk(basedir):
if 'setup.hint' not in files:
continue
setup_hint_fn = os.path.join(dirpath, 'setup.hint')
migrate = set()
vr = set()
for f in files:
match = re.match(r'^(.*?)(-src|)\.tar' + common_constants.PACKAGE_COMPRESSIONS_RE + r'$', f)
# not an archive?
if not match:
continue
pvr = match.group(1)
vr.add(pvr)
# pvr.hint already exists?
if os.path.exists(os.path.join(dirpath, pvr + '.hint')):
continue
migrate.add(pvr)
# nothing to migrate
if not migrate:
# that's ok if all vr already have a pvr.hint, but if we didn't
# find any vr, something is wrong
if not vr:
print("can't migrate %s as it has no versions" % (setup_hint_fn))
continue
# does the setup.hint parse as a pvr.hint?
hints = hint.hint_file_parse(setup_hint_fn, hint.pvr)
if 'parse-errors' in hints:
reason = "is invalid as a pvr.hint"
# specifically mention if it doesn't parse as a pvr.hint because
# it contains version keys
for e in hints['parse-errors']:
if (e.startswith('unknown key prev') or
e.startswith('unknown key curr') or
e.startswith('test has non-empty value')):
reason = "contains version keys"
print("can't migrate %s as it %s" % (setup_hint_fn, reason))
continue
for pvr in migrate:
pvr_hint_fn = os.path.join(dirpath, pvr + '.hint')
print('copy %s -> %s' % (setup_hint_fn, pvr_hint_fn))
shutil.copy2(setup_hint_fn, pvr_hint_fn)
#
#
#
def main():
relarea_default = common_constants.FTP
parser = argparse.ArgumentParser(description='setup.hint migrator')
parser.add_argument('--releasearea', action='store', metavar='DIR', help="release directory (default: " + relarea_default + ")", default=relarea_default, dest='rel_area')
(args) = parser.parse_args()
return hint_migrate(args)
#
#
#
if __name__ == "__main__":
sys.exit(main())
```
#### File: calm/calm/irk.py
```python
import json
import socket
import sys
DEFAULT_SERVER = ("localhost", 6659)
DEFAULT_TARGET = ['cygwin-bots', 'irc://irc.libera.chat/cygwin-bots']
def connect(server=DEFAULT_SERVER):
return socket.create_connection(server)
def send(s, target, message):
data = {"to": target, "privmsg": message}
# print(json.dumps(data))
s.sendall(bytes(json.dumps(data), "ascii"))
def irk(message, target=DEFAULT_TARGET, server=DEFAULT_SERVER):
if not isinstance(target, list):
target = [target]
for t in target:
try:
s = connect(server)
if "irc:" not in t and "ircs:" not in t:
t = "irc://chat.freenode.net/{0}".format(t)
send(s, t, message)
s.close()
except OSError:
pass
def main():
message = " ".join(sys.argv[1:])
try:
irk(message)
except socket.error as e:
sys.stderr.write("irk: write to server failed: %r\n" % e)
sys.exit(1)
if __name__ == '__main__':
main()
```
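Hypothetical usage of the client above, assuming a local notification relay (such as irkerd) is listening on the default port:
```python
# Hypothetical calls; bare channel names are expanded to an irc:// URL,
# and connection errors are silently swallowed by irk().
irk("calm: build finished")                          # default target list
irk("deploy done", target="#mychannel")              # bare name gets an irc:// prefix
irk("custom relay", server=("relay.example", 6659))  # placeholder host
```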
#### File: calm/calm/mksetupini.py
```python
import argparse
import logging
import os
import sys
from . import common_constants
from . import hint
from . import package
try:
import spelling
except ImportError:
    spelling = None
#
#
#
def do_main(args):
# build package list
packages = package.read_packages(args.rel_area, args.arch)
# spellcheck text hints
if args.spell:
if spelling:
spelling.spellcheck_hints(args, packages)
else:
logging.error("spell-checking support not available")
# validate the package set
if not package.validate_packages(args, packages):
logging.error("package set has errors, not writing setup.ini")
return 1
# write setup.ini
package.write_setup_ini(args, packages, args.arch)
if args.stats:
stats(packages)
return 0
#
#
#
def stats(packages):
# make a histogram of categories
histogram = {}
for c in hint.categories:
histogram[c.lower()] = 0
for p in packages.values():
if 'category' in p.hints:
for c in p.hints['category'].split():
histogram.setdefault(c.lower(), 0)
histogram[c.lower()] += 1
for c in sorted(histogram, key=histogram.get, reverse=True):
print('%16s: %4d' % (c, histogram[c]))
#
# argparse helpers for an option which can take a comma separated list of
# choices, or can be repeated (e.g.: --option a --option b,c ->
# option:[a,b,c])
#
class flatten_append(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
curr = getattr(namespace, self.dest, self.default)
curr.extend(values)
setattr(namespace, self.dest, curr)
class choiceList(object):
def __init__(self, choices):
self.choices = choices
def __call__(self, csv):
args = csv.split(',')
remainder = sorted(set(args) - set(self.choices))
if remainder:
msg = "invalid choices: %r (choose from %r)" % (remainder, self.choices)
raise argparse.ArgumentTypeError(msg)
return args
def help(self):
return '{%s}' % (','.join(self.choices))
#
#
#
def main():
pkglist_default = common_constants.PKGMAINT
relarea_default = common_constants.FTP
disable_check_choices = choiceList(['missing-curr', 'missing-depended-package', 'missing-obsoleted-package', 'missing-required-package', 'curr-most-recent'])
parser = argparse.ArgumentParser(description='Make setup.ini')
parser.add_argument('--arch', action='store', required=True, choices=common_constants.ARCHES)
parser.add_argument('--disable-check', action=flatten_append, help='checks to disable', type=disable_check_choices, default=[], metavar=disable_check_choices.help())
parser.add_argument('--inifile', '-u', action='store', help='output filename', required=True)
parser.add_argument('--okmissing', action='append', help='superseded by --disable-check', choices=['curr', 'depended-package', 'obsoleted-package', 'required-package'])
parser.add_argument('--pkglist', action='store', nargs='?', metavar='FILE', help="package maintainer list (default: " + pkglist_default + ")", const=pkglist_default)
parser.add_argument('--release', action='store', help='value for setup-release key (default: cygwin)', default='cygwin')
parser.add_argument('--releasearea', action='store', metavar='DIR', help="release directory (default: " + relarea_default + ")", default=relarea_default, dest='rel_area')
parser.add_argument('--spell', action='store_true', help='spellcheck text hints')
parser.add_argument('--stats', action='store_true', help='show additional package statistics')
parser.add_argument('--setup-version', action='store', metavar='VERSION', help='value for setup-version key')
parser.add_argument('-v', '--verbose', action='count', dest='verbose', help='verbose output')
(args) = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format=os.path.basename(sys.argv[0]) + ': %(message)s')
# The option name 'okmissing' was inherited from genini. The more general
# option 'disable-check' is intended to supersede that, eventually.
#
    # For the moment '--okmissing=foo' is silently transformed into its
# equivalent '--disable-check=missing-foo'
if args.okmissing:
args.disable_check.extend(['missing-' + m for m in args.okmissing])
# disabling either of these checks, implies both of these are disabled
# (since depends: is generated from requires:, and vice versa, if not
# present)
implied = ['missing-depended-package', 'missing-required-package']
for p in implied:
if p in args.disable_check:
for c in implied:
if c not in args.disable_check:
args.disable_check.append(c)
return do_main(args)
#
#
#
if __name__ == "__main__":
sys.exit(main())
```
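The `flatten_append`/`choiceList` pair above implements an option that accepts a comma-separated list of choices and may also be repeated. A small demo of the resulting behaviour, assuming those two classes are in scope:
```python
# Demo of the repeatable, comma-separated option pattern
# (assumes flatten_append and choiceList from the file above are in scope).
import argparse

checks = choiceList(['missing-curr', 'curr-most-recent'])
parser = argparse.ArgumentParser()
parser.add_argument('--disable-check', action=flatten_append, type=checks,
                    default=[], metavar=checks.help())

args = parser.parse_args(['--disable-check', 'missing-curr',
                          '--disable-check', 'curr-most-recent,missing-curr'])
print(args.disable_check)  # ['missing-curr', 'curr-most-recent', 'missing-curr'] (no de-duplication)
```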
#### File: calm/calm/untest.py
```python
import argparse
import logging
import os
import re
import sys
from . import common_constants
from . import maintainers
def untest(pvr):
# split name and vr
match = re.match(r'^(.+?)-(\d.*)', pvr)
if not match:
logging.error("unable to extract package and version from '%s'" % (pvr))
return
p = match.group(1)
vr = match.group(2)
# check CYGNAME is a maintainer for package
cygname = os.environ['CYGNAME']
mlist = {}
mlist = maintainers.add_packages(mlist, common_constants.PKGMAINT, orphanMaint=common_constants.ORPHANMAINT)
if cygname not in mlist:
logging.error("'%s' is not a package maintainer" % (cygname))
return
if p not in mlist[cygname].pkgs:
logging.error("package '%s' is not in the package list for maintainer '%s'" % (p, cygname))
return
# remove '^test:' lines from any package and subpackage hints
removed = 0
total = 0
for arch in common_constants.ARCHES + ['noarch']:
for (dirpath, _subdirs, files) in os.walk(os.path.join(common_constants.FTP, arch, 'release', p)):
for f in files:
if re.match(r'.*-' + re.escape(vr) + '(|-src).hint$', f):
total = total + 1
fn = os.path.join(dirpath, f)
with open(fn) as fh:
content = fh.read()
if re.search(r'^test:', content, re.MULTILINE):
content = re.sub(r'^test:\s*$', '', content, 0, re.MULTILINE)
with open(fn, 'w') as fh:
fh.write(content)
logging.info("Removed test: label from %s" % os.path.relpath(fn, common_constants.FTP))
removed = removed + 1
if removed == 0:
logging.error("'%s' is not marked test" % pvr)
else:
logging.info("%d out of %d hints for '%s' version '%s' modified" % (removed, total, p, vr))
def main():
parser = argparse.ArgumentParser(description='test hint remover')
parser.add_argument('package', nargs='*', metavar='PACKAGE')
(args) = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='untest: %(message)s')
for p in args.package:
untest(p)
if __name__ == "__main__":
sys.exit(main())
```
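The heart of `untest()` is the `re.sub` call that blanks out `test:` lines. A tiny illustration of its effect (the hint content below is made up):
```python
# Illustration of the substitution used in untest(); the hint content is made up.
import re

content = "category: Utils\ntest:\nrequires: cygwin\n"
if re.search(r'^test:', content, re.MULTILINE):
    content = re.sub(r'^test:\s*$', '', content, 0, re.MULTILINE)
print(repr(content))  # 'category: Utils\n\nrequires: cygwin\n' -- the label is gone, a blank line remains
```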
#### File: calm/test/test_calm.py
```python
import collections
import contextlib
import filecmp
import io
import json
import logging
import os
import pprint
import re
import shutil
import tempfile
import types
import unittest
from calm.version import SetupVersion
import calm.calm
import calm.common_constants as common_constants
import calm.hint as hint
import calm.maintainers as maintainers
import calm.package as package
import calm.pkg2html as pkg2html
import calm.uploads as uploads
#
# helper functions
#
# write results to the file 'results'
# read expected from the file 'expected'
# compare them
#
def compare_with_expected_file(test, dirpath, results, basename=None):
results_str = pprint.pformat(results, width=120)
if basename:
results_fn = basename + '.results'
expected_fn = basename + '.expected'
else:
results_fn = 'results'
expected_fn = 'expected'
# save results in a file
with open(os.path.join(dirpath, results_fn), 'w') as f:
print(results_str, file=f)
# read expected from a file
with open(os.path.join(dirpath, expected_fn)) as f:
expected = f.read().rstrip()
test.assertMultiLineEqual(expected, results_str)
#
# capture a directory tree as a dict 'tree', where each key is a directory path
# and the value is a sorted list of filenames
#
def capture_dirtree(basedir):
tree = {}
for dirpath, _dirnames, filenames in os.walk(basedir):
tree[os.path.relpath(dirpath, basedir)] = sorted(filenames)
return tree
#
# a context to monkey-patch pprint so OrderedDict appears as with python <3.5
# (a dict, with lines ordered, rather than OrderedDict repr)
#
def patched_pprint_ordered_dict(self, obj, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(obj)
if length:
items = list(obj.items())
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
@contextlib.contextmanager
def pprint_patch():
if isinstance(getattr(pprint.PrettyPrinter, '_dispatch', None), dict):
orig = pprint.PrettyPrinter._dispatch[collections.OrderedDict.__repr__]
pprint.PrettyPrinter._dispatch[collections.OrderedDict.__repr__] = patched_pprint_ordered_dict
try:
yield
finally:
pprint.PrettyPrinter._dispatch[collections.OrderedDict.__repr__] = orig
else:
yield
#
#
#
class CalmTest(unittest.TestCase):
def test_hint_parser(self):
self.maxDiff = None
basedir = 'testdata/relarea'
for (dirpath, _subdirs, files) in os.walk(basedir):
relpath = os.path.relpath(dirpath, basedir)
for f in files:
expected = os.path.join('testdata/hints', relpath)
if f.endswith('.hint'):
if f == 'override.hint':
kind = hint.override
name = 'override'
elif f.endswith('-src.hint'):
kind = hint.spvr
name = f[:-5]
else:
kind = hint.pvr
name = f[:-5]
with self.subTest(package=os.path.basename(dirpath)):
logging.info('Reading %s' % os.path.join(dirpath, f))
results = hint.hint_file_parse(os.path.join(dirpath, f), kind)
with pprint_patch():
compare_with_expected_file(self, expected, results, name)
#
# something like "find -name results -exec sh -c 'cd `dirname {}` ; cp results
# expected' \;" can be used to update the expected output (after you have
# checked it to make sure it is really correct, of course :) )
#
def test_html_writer(self):
self.maxDiff = None
htdocs = 'testdata/htdocs'
args = types.SimpleNamespace()
args.arch = 'x86'
args.htdocs = htdocs
args.rel_area = 'testdata/relarea'
args.homedir = 'testdata/homes'
args.dryrun = False
args.force = True
args.pkglist = 'testdata/pkglist/cygwin-pkg-maint'
try:
shutil.rmtree(htdocs)
except FileNotFoundError:
pass
packages = {}
for arch in common_constants.ARCHES:
packages[arch] = {}
packages[args.arch] = package.read_packages(args.rel_area, args.arch)
package.validate_packages(args, packages[args.arch])
pkg2html.update_package_listings(args, packages)
# compare the output dirtree with expected
with self.subTest('dirtree'):
dirlist = capture_dirtree(htdocs)
compare_with_expected_file(self, 'testdata/htdocs.expected', dirlist, 'dirtree')
# compare the output files with expected
for (dirpath, _subdirs, files) in os.walk(htdocs):
relpath = os.path.relpath(dirpath, htdocs)
for f in files:
with self.subTest(file=os.path.join(relpath, f)):
results = os.path.join(htdocs, relpath, f)
expected = os.path.join('testdata/htdocs.expected', relpath, f)
if not filecmp.cmp(results, expected, shallow=False):
logging.info("%s different", os.path.join(relpath, f))
with open(results) as r, open(expected) as e:
self.assertMultiLineEqual(e.read(), r.read())
else:
logging.info("%s identical", os.path.join(relpath, f))
def test_version_sort(self):
test_data = [
["1.0.0", "2.0.0", -1],
[".0.0", "2.0.0", -1],
["alpha", "beta", -1],
["1.0", "1.0.0", -1],
["2.456", "2.1000", -1],
["2.1000", "3.111", -1],
["2.001", "2.1", 0],
["2.34", "2.34", 0],
["6.1.2-4", "6.3.8-1", -1],
["1.7.3.0-2", "2.0.0-b8-1", -1],
["1.3.30c-2", "1.3.30c-10", -1],
["2.24.51-1", "2.25-1", -1],
["2.1.5+20120813+gitdcbe778-1", "2.1.5-3", 1],
["3.4.1-1", "3.4b1-1", 1],
["041206-1", "200090325-1", -1],
["0.6.2+git20130413-2", "0.6.2-1", 1],
["2.6.0+bzr6602-1", "2.6.0-2", 1],
["2.6.0-2", "2.6b2-1", 1],
["2.6.0+bzr6602-1", "2.6b2-1", 1],
["0.6.7+20150214+git3a710f9-1", "0.6.7-1", 1],
["15.8b-1", "15.8.0.1-2", -1],
["1.2rc1-1", "1.2.0-2", -1],
["20090325-1", "1:5.6.0-1", -1],
["0:20090325-1", "1:5.6.0-1", -1],
["2:20090325-1", "1:5.6.0-1", 1],
["2:1.0-1", "1:5.6.0-1", 1],
["1.0-1", "0:1.0-1", 0],
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
["1.0010", "1.9", 1],
["1.05", "1.5", 0],
["1.0", "1", 1],
["2.50", "2.5", 1],
["fc4", "fc.4", 0],
["FC5", "fc4", -1],
["2a", "2.0", -1],
["1.0", "1.fc4", 1],
["3.0.0_fc", "3.0.0.fc", 0],
# from RPM tests
["1.0", "1.0", 0],
["1.0", "2.0", -1],
["2.0", "1.0", 1],
["2.0.1", "2.0.1", 0],
["2.0", "2.0.1", -1],
["2.0.1", "2.0", 1],
["2.0.1a", "2.0.1a", 0],
["2.0.1a", "2.0.1", 1],
["2.0.1", "2.0.1a", -1],
["5.5p1", "5.5p1", 0],
["5.5p1", "5.5p2", -1],
["5.5p2", "5.5p1", 1],
["5.5p10", "5.5p10", 0],
["5.5p1", "5.5p10", -1],
["5.5p10", "5.5p1", 1],
["10xyz", "10.1xyz", -1],
["10.1xyz", "10xyz", 1],
["xyz10", "xyz10", 0],
["xyz10", "xyz10.1", -1],
["xyz10.1", "xyz10", 1],
["xyz.4", "xyz.4", 0],
["xyz.4", "8", -1],
["8", "xyz.4", 1],
["xyz.4", "2", -1],
["2", "xyz.4", 1],
["5.5p2", "5.6p1", -1],
["5.6p1", "5.5p2", 1],
["5.6p1", "6.5p1", -1],
["6.5p1", "5.6p1", 1],
["6.0.rc1", "6.0", 1],
["6.0", "6.0.rc1", -1],
["10b2", "10a1", 1],
["10a2", "10b2", -1],
["1.0aa", "1.0aa", 0],
["1.0a", "1.0aa", -1],
["1.0aa", "1.0a", 1],
["10.0001", "10.0001", 0],
["10.0001", "10.1", 0],
["10.1", "10.0001", 0],
["10.0001", "10.0039", -1],
["10.0039", "10.0001", 1],
["4.999.9", "5.0", -1],
["5.0", "4.999.9", 1],
["20101121", "20101121", 0],
["20101121", "20101122", -1],
["20101122", "20101121", 1],
["2_0", "2_0", 0],
["2.0", "2_0", 0],
["2_0", "2.0", 0],
["a", "a", 0],
["a+", "a+", 0],
["a+", "a_", 0],
["a_", "a+", 0],
["+a", "+a", 0],
["+a", "_a", 0],
["_a", "+a", 0],
["+_", "+_", 0],
["_+", "+_", 0],
["_+", "_+", 0],
["+", "_", 0],
["_", "+", 0],
]
for d in test_data:
a = SetupVersion(d[0])
b = SetupVersion(d[1])
e = d[2]
# logging.info("%s %s %d" % (a, b, e))
self.assertEqual(SetupVersion.__cmp__(a, b), e, msg='%s %s %d' % (a, b, e))
self.assertEqual(SetupVersion.__cmp__(b, a), -e, msg='%s %s %d' % (a, b, -e))
def test_maint_pkglist(self):
self.maxDiff = None
mlist = {}
mlist = maintainers.add_directories(mlist, 'testdata/homes')
mlist = maintainers.add_packages(mlist, 'testdata/pkglist/cygwin-pkg-maint', None)
compare_with_expected_file(self, 'testdata/pkglist', mlist)
def test_scan_uploads(self):
self.maxDiff = None
test_root = tempfile.mktemp()
logging.info('test_root = %s', test_root)
args = types.SimpleNamespace()
args.arch = 'x86'
args.rel_area = 'testdata/relarea'
args.dryrun = False
shutil.copytree('testdata/homes', os.path.join(test_root, 'testdata/homes'))
oldcwd = os.getcwd()
os.chdir(test_root)
pkglist = ['after-ready', 'not-ready', 'testpackage', 'testpackage2', 'testpackage-zstd']
mlist = {}
mlist = maintainers.add_directories(mlist, 'testdata/homes')
        m = mlist['Blooey McFooey']
m.pkgs.extend(pkglist + ['not-on-package-list'])
ready_fns = [(os.path.join(m.homedir(), 'x86', 'release', 'testpackage', '!ready'), ''),
(os.path.join(m.homedir(), 'x86', 'release', 'testpackage2', 'testpackage2-subpackage', '!ready'), ''),
(os.path.join(m.homedir(), 'x86', 'release', 'testpackage-zstd', '!ready'), ''),
(os.path.join(m.homedir(), 'x86', 'release', 'after-ready', '!ready'), '-t 198709011700'),
(os.path.join(m.homedir(), 'x86', 'release', 'corrupt', '!ready'), '')]
for (f, t) in ready_fns:
os.system('touch %s "%s"' % (t, f))
scan_result = uploads.scan(m, pkglist + ['not-on-maintainer-list'], args.arch, args)
os.chdir(oldcwd)
shutil.rmtree(test_root)
self.assertEqual(scan_result.error, False)
compare_with_expected_file(self, 'testdata/uploads', dict(scan_result.to_relarea.movelist), 'move')
self.assertCountEqual(scan_result.to_vault.movelist, {'x86/release/testpackage': ['x86/release/testpackage/testpackage-0.1-1.tar.bz2']})
self.assertCountEqual(scan_result.remove_always, [f for (f, t) in ready_fns])
self.assertEqual(scan_result.remove_success, ['testdata/homes/Blooey McFooey/x86/release/testpackage/-testpackage-0.1-1-src.tar.bz2', 'testdata/homes/Blooey McFooey/x86/release/testpackage/-testpackage-0.1-1.tar.bz2'])
with pprint_patch():
compare_with_expected_file(self, 'testdata/uploads', dict(scan_result.packages), 'pkglist')
def test_package_set(self):
self.maxDiff = None
args = types.SimpleNamespace()
args.arch = 'x86'
args.dryrun = False
args.force = True
args.inifile = 'testdata/inifile/setup.ini'
args.pkglist = 'testdata/pkglist/cygwin-pkg-maint'
args.rel_area = 'testdata/relarea'
args.release = 'testing'
args.setup_version = '4.321'
packages = package.read_packages(args.rel_area, args.arch)
package.delete(packages, 'x86/release/nonexistent', 'nosuchfile-1.0.0.tar.xz')
self.assertEqual(package.validate_packages(args, packages), True)
package.write_setup_ini(args, packages, args.arch)
with open(args.inifile) as inifile:
results = inifile.read()
# fix the timestamp to match expected
results = re.sub('setup-timestamp: .*', 'setup-timestamp: 1458221800', results, 1)
results = re.sub('generated at .*', 'generated at 2016-03-17 13:36:40 GMT', results, 1)
compare_with_expected_file(self, 'testdata/inifile', (results,), 'setup.ini')
# XXX: delete a needed package, and check validate fails
def test_process_uploads_conflict(self):
self.maxDiff = None
args = types.SimpleNamespace()
for d in ['rel_area', 'homedir', 'vault']:
setattr(args, d, tempfile.mktemp())
logging.info('%s = %s', d, getattr(args, d))
shutil.copytree('testdata/relarea', args.rel_area)
shutil.copytree('testdata/homes.conflict', args.homedir)
args.dryrun = False
args.email = None
args.force = False
args.pkglist = 'testdata/pkglist/cygwin-pkg-maint'
args.stale = True
# set appropriate !ready
        m_homedir = os.path.join(args.homedir, 'Blooey McFooey')
os.system('touch "%s"' % (os.path.join(m_homedir, 'x86', 'release', 'staleversion', '!ready')))
state = calm.calm.CalmState()
state.packages = calm.calm.process_relarea(args)
state.packages = calm.calm.process_uploads(args, state)
self.assertTrue(state.packages)
for d in ['rel_area', 'homedir', 'vault']:
with self.subTest(directory=d):
dirlist = capture_dirtree(getattr(args, d))
compare_with_expected_file(self, 'testdata/conflict', dirlist, d)
shutil.rmtree(getattr(args, d))
def test_process(self):
self.maxDiff = None
args = types.SimpleNamespace()
for d in ['rel_area', 'homedir', 'htdocs', 'vault']:
setattr(args, d, tempfile.mktemp())
logging.info('%s = %s', d, getattr(args, d))
args.dryrun = False
args.email = None
args.force = False
args.inifile = os.path.join(args.rel_area, 'setup.ini')
args.pkglist = 'testdata/pkglist/cygwin-pkg-maint'
args.release = 'trial'
args.setup_version = '3.1415'
args.stale = True
state = calm.calm.CalmState()
shutil.copytree('testdata/relarea', args.rel_area)
shutil.copytree('testdata/homes', args.homedir)
# set appropriate !readys
        m_homedir = os.path.join(args.homedir, 'Blooey McFooey')
ready_fns = [(os.path.join(m_homedir, 'x86', 'release', 'testpackage', '!ready'), ''),
(os.path.join(m_homedir, 'x86', 'release', 'testpackage2', 'testpackage2-subpackage', '!ready'), ''),
(os.path.join(m_homedir, 'x86', 'release', 'after-ready', '!ready'), '-t 198709011700'),
(os.path.join(m_homedir, 'noarch', 'release', 'perl-Net-SMTP-SSL', '!ready'), ''),
(os.path.join(m_homedir, 'x86', 'release', 'corrupt', '!ready'), ''),
(os.path.join(m_homedir, 'x86', 'release', 'per-version', '!ready'), ''),
(os.path.join(m_homedir, 'x86', 'release', 'per-version-replacement-hint-only', '!ready'), '')]
for (f, t) in ready_fns:
os.system('touch %s "%s"' % (t, f))
packages = calm.calm.process(args, state)
self.assertTrue(packages)
pkg2html.update_package_listings(args, packages)
package.write_setup_ini(args, packages['x86'], 'x86')
with open(os.path.join(args.rel_area, 'setup.ini')) as inifile:
results = inifile.read()
# fix the timestamp to match expected
results = re.sub('setup-timestamp: .*', 'setup-timestamp: 1473797080', results, 1)
results = re.sub('generated at .*', 'generated at 2016-09-13 21:04:40 BST', results, 1)
compare_with_expected_file(self, 'testdata/process_arch', (results,), 'setup.ini')
for d in ['rel_area', 'homedir', 'htdocs', 'vault']:
with self.subTest(directory=d):
dirlist = capture_dirtree(getattr(args, d))
compare_with_expected_file(self, 'testdata/process_arch', dirlist, d)
with io.StringIO() as jsonfile:
package.write_repo_json(args, packages, jsonfile)
j = json.loads(jsonfile.getvalue(), object_pairs_hook=collections.OrderedDict)
del j['timestamp']
compare_with_expected_file(self, 'testdata/process_arch', json.dumps(j, sort_keys=True, indent=4), 'packages.json')
for d in ['rel_area', 'homedir', 'htdocs', 'vault']:
shutil.rmtree(getattr(args, d))
@classmethod
def setUpClass(cls):
# testdata is located in the same directory as this file
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# ensure sha512.sum files exist
os.system("find testdata/relarea/x86 testdata/relarea/noarch -type d -exec sh -c 'cd {} ; sha512sum * >sha512.sum 2>/dev/null' \\;")
# should remove a sha512.sum file so that we test functioning when it's absent
os.unlink('testdata/relarea/x86/release/arc/sha512.sum')
# remove !ready files
os.system("find testdata/homes -name !ready -exec rm {} \\;")
# fix up package timestamps
# (git doesn't store timestamps, so they will all be dated the time of checkout)
# set all package timestamps to some arbitrary date
os.environ['TZ'] = 'UTC'
for dirpath, _dirnames, filenames in os.walk(os.path.join('testdata', 'relarea')):
for f in filenames:
os.system('touch "%s" -d %s' % (os.path.join(dirpath, f), '2018-03-02'))
# then adjust packages where we need highest version to also be latest
relarea_x86 = os.path.join('testdata', 'relarea', 'x86', 'release')
relarea_noarch = os.path.join('testdata', 'relarea', 'noarch', 'release')
        home_conflict = os.path.join('testdata', 'homes.conflict', 'Blooey McFooey', 'x86', 'release')
touches = [(os.path.join(relarea_x86, 'cygwin', 'cygwin-2.2.0-1.tar.xz'), '2016-11-01'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-2.2.0-1-src.tar.xz'), '2016-11-01'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-2.2.1-1.tar.xz'), '2016-11-02'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-2.2.1-1-src.tar.xz'), '2016-11-02'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-debuginfo', 'cygwin-debuginfo-2.2.0-1.tar.xz'), '2016-11-01'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-debuginfo', 'cygwin-debuginfo-2.2.1-1.tar.xz'), '2016-11-02'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-devel', 'cygwin-devel-2.2.0-1.tar.xz'), '2016-11-01'),
(os.path.join(relarea_x86, 'cygwin', 'cygwin-devel', 'cygwin-devel-2.2.1-1.tar.xz'), '2016-11-02'),
(os.path.join(relarea_x86, 'base-cygwin', 'base-cygwin-3.6-1.tar.xz'), '2016-11-02'),
(os.path.join(relarea_x86, 'per-version', 'per-version-4.0-1.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'per-version', 'per-version-4.0-1-src.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'rpm-doc', 'rpm-doc-4.1-2.tar.bz2'), '2016-11-02'),
(os.path.join(relarea_x86, 'rpm-doc', 'rpm-doc-4.1-2-src.tar.bz2'), '2016-11-02'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-240-1.tar.xz'), '2017-04-07'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-240-1-src.tar.xz'), '2017-04-07'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-242-0.tar.xz'), '2017-04-08'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-242-0-src.tar.xz'), '2017-04-08'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-243-0.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-243-0-src.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-250-0.tar.xz'), '2017-04-10'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-250-0-src.tar.xz'), '2017-04-10'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-251-0.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-251-0-src.tar.xz'), '2017-04-09'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-260-0.tar.xz'), '2017-04-12'),
(os.path.join(relarea_x86, 'staleversion', 'staleversion-260-0-src.tar.xz'), '2017-04-12'),
(os.path.join(relarea_x86, 'keychain', 'keychain-2.6.8-1.tar.bz2'), '2016-11-02'),
(os.path.join(relarea_x86, 'keychain', 'keychain-2.6.8-1-src.tar.bz2'), '2016-11-02'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.01-1.tar.xz'), '2016-09-01'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.01-1-src.tar.xz'), '2016-09-01'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.02-1.tar.xz'), '2016-10-01'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.02-1-src.tar.xz'), '2016-10-01'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.03-1.tar.xz'), '2016-11-01'),
(os.path.join(relarea_noarch, 'perl-Net-SMTP-SSL', 'perl-Net-SMTP-SSL-1.03-1-src.tar.xz'), '2016-11-01'),
(os.path.join(home_conflict, 'staleversion', 'staleversion-230-1.hint'), '2017-04-06'),
(os.path.join(home_conflict, 'staleversion', 'staleversion-230-1.tar.xz'), '2017-04-06'),
(os.path.join(home_conflict, 'staleversion', 'staleversion-230-1-src.tar.xz'), '2017-04-06')]
for (f, t) in touches:
os.system('touch "%s" -d %s' % (f, t))
# ensure !reminder-timestamp is created for uploads
        home = os.path.join('testdata', 'homes', 'Blooey McFooey')
os.system('find "%s" -type f -exec touch -d "12 hours ago" {} +' % (home))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(message)s')
unittest.main()
``` |
{
"source": "jontwo/bookmark-sync",
"score": 3
} |
#### File: bookmark-sync/bookmarksync/__main__.py
```python
import argparse
import os
from xmldiff.main import patch_tree
from bookmarksync.helpers import load_bookmarks, save_bookmarks, save_tree, diff_trees
def main():
"""Runs bookmark sync main method.
TODO
options
- create new merged.html
- compare existing merged.html with new bookmarks.html
- compare and save
- store all intermediate diffs (by datestamp? diff time or modified time?)
- apply each diff since last sync
"""
args = parse_args()
if args.bookmark_file:
new_bkm = load_bookmarks(args.bookmark_file)
sync_file = args.sync_file
if not os.path.dirname(sync_file):
sync_file = os.path.join(os.path.dirname(os.path.abspath(args.bookmark_file)),
sync_file)
try:
old_bkm = load_bookmarks(sync_file)
except IOError:
save_tree(new_bkm, sync_file)
return
diff = diff_trees(new_bkm, old_bkm)
output_file = None
if args.save_file:
output_file = args.save_file
elif args.overwrite_file:
output_file = args.bookmark_file
if output_file:
updated_bkm = patch_tree(diff, old_bkm)
save_bookmarks(updated_bkm, output_file)
else:
print(diff)
def parse_args():
"""Creates parser and reads args."""
parser = argparse.ArgumentParser(description='Utility to sync exported bookmark files')
parser.add_argument('--bookmark_file', '-b', type=os.path.expanduser,
help='Compare bookmark file to the current sync file.')
parser.add_argument('--save_file', '-s', type=os.path.expanduser,
help='Save the compared bookmarks to a new bookmark file.')
parser.add_argument('--overwrite_file', '-o', action='store_true',
help='Save the compared bookmarks to the input bookmark file.')
parser.add_argument('--sync_file', '-y', type=os.path.expanduser, default='merged.html',
help='Sync file to store current bookmarks. (default: %(default)s in '
'same directory as bookmark file). This will be created if it does not '
'exist.')
    return parser.parse_args()
if __name__ == '__main__':
    main()
``` |
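For reference, the happy path of `main()` condensed into a few lines, assuming the helpers in `bookmarksync.helpers` behave as they are used above (file names are placeholders):
```python
# Condensed happy path of main(); assumes bookmarksync.helpers behaves as used above.
from xmldiff.main import patch_tree
from bookmarksync.helpers import load_bookmarks, save_bookmarks, diff_trees

new_bkm = load_bookmarks('bookmarks.html')   # freshly exported bookmarks (placeholder path)
old_bkm = load_bookmarks('merged.html')      # previously synced tree
diff = diff_trees(new_bkm, old_bkm)          # changes between the two trees
save_bookmarks(patch_tree(diff, old_bkm), 'bookmarks.html')  # apply and write back
```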
{
"source": "jontxu/myweb",
"score": 2
} |
#### File: jontxu/myweb/app.py
```python
import os
import config
from flask import Flask, render_template, request, flash, send_from_directory
from flask.ext.assets import Environment, Bundle
from flask.ext.babel import Babel
from flask_wtf.csrf import CsrfProtect
from werkzeug.contrib.fixers import ProxyFix
from contact import ContactForm
from flask.ext.mail import Message, Mail
from flask_sslify import SSLify
mail = Mail()
csrf = CsrfProtect()
app = Flask(__name__)
sslify = SSLify(app)
app.secret_key = 'thisisakey'
if os.environ.get('HEROKU') is None:
app.config.from_object('config.Development')
else:
app.config.from_object('config.Production')
mail.init_app(app)
csrf.init_app(app)
assets = Environment(app)
assets.config['less_run_in_debug'] = False
assets.url = app.static_url_path
less = Bundle('stylesheets/main.less', 'stylesheets/img.less', filters='less,cssmin', output='stylesheets/style.min.css')
js = Bundle('javascripts/jquery.js', 'javascripts/html5shiv.js', filters='closure_js', output='javascripts/all.min.js')
assets.register('less', less)
assets.register('js', js)
@csrf.error_handler
def csrf_error(reason):
return render_template('error.html', error=reason)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static/ico'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/')
def index():
return render_template('index.html', title='Index')
@app.route('/resume/')
def resume():
return render_template('resume.html', title=u'R\xe9sum\xe9')
@app.route('/projects/')
def projects():
return render_template('projects.html', title='Projects')
@app.route('/tech/')
def tech():
return render_template('tech.html', title='Technologies')
@app.route('/contact/', methods=['GET', 'POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
        if not form.validate():
flash('All fields are required.')
return render_template('contact.html', form=form)
else:
msg = Message(subject=form.subject.data, sender=(form.name.data, form.email.data), body=form.message.data, recipients=[app.config["MAIL_USERNAME"]])
mail.send(msg)
return render_template('contact.html', title='Contact', success=True)
elif request.method == 'GET':
return render_template('contact.html', title='Contact', form=form)
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run()
``` |
{
"source": "Jonty16117/shopping_bot",
"score": 2
} |
#### File: Jonty16117/shopping_bot/bot.py
```python
from selenium import webdriver
from configparser import ConfigParser
import time
import os
CONFIG = ConfigParser()
CONFIG.read('config.ini')
EMAIL = CONFIG.get('LOGIN DETAILS', 'EMAIL')
PASSWORD = CONFIG.get('LOGIN DETAILS', 'PASSWORD')
PHONE = CONFIG.get('PAYMENT DETAILS', 'PHONE')
WEBSITE = CONFIG.get('URL', 'WEBSITE')
URL = CONFIG.get('URL', 'URL')
ADDRESS = CONFIG.get('ADDRESS', 'ADDRESS')
# For headless browser
# chrome_options = webdriver.ChromeOptions()
# chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
# chrome_options.add_argument("--headless")
# chrome_options.add_argument("--disable-dev-shm-usage")
# chrome_options.add_argument("--no-sandbox")
# chrome_options.add_argument('--ignore-certificate-errors')
# chrome_options.add_argument('--ignore-ssl-errors')
# driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"),
# chrome_options=chrome_options)
# For gui browser
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--ignore-ssl-errors')
driver = webdriver.Chrome(executable_path='C:/Program Files (x86)/chromedriver.exe', options=chrome_options)
#driver.maximize_window()
#driver.execute_script("window.scrollTo(0, 4500)")
# Login into Flipkart
def login_fk():
try:
login = driver.find_element_by_xpath('//*[@id="container"]/div/div[1]/div[1]/div[2]/div[3]/div/div/div/a')
login.click()
email_field_element_loc = '/html/body/div[3]/div/div/div/div/div[2]/div/form/div[1]/input'
password_field_element_loc = '/html/body/div[3]/div/div/div/div/div[2]/div/form/div[2]/input'
email_field = driver.find_element_by_xpath(email_field_element_loc)
password_field = driver.find_element_by_xpath(password_field_element_loc)
email_field.send_keys(EMAIL)
password_field.send_keys(PASSWORD)
print("✅Details entered✅")
enter = driver.find_element_by_xpath('/html/body/div[3]/div/div/div/div/div[2]/div/form/div[3]/button')
enter.click()
print('🔓Logging in as {}🔓'.format(EMAIL))
except:
print('Login Failed. Retrying.')
time.sleep(0.1)
login_fk()
# Adds item to cart for flipkart website
def add_to_cart_fk():
start_time = time.time()
add_to_cart_option = False
while add_to_cart_option is False:
try:
time.sleep(0.15)
driver.execute_script("window.scrollTo(0, 4200)")
add_to_cart = driver.find_element_by_xpath('//*[@id="container"]/div/div[3]/div[2]/div[1]/div[1]/div[2]/div/ul/li[1]/button')
print('🛒Add To Cart button appeared🛒')
add_to_cart.click()
add_to_cart_option = True
except:
add_to_cart_option = False
text = '🔍Add To Cart option is unavailable...retrying: 🔍 (Time Elapsed: ' + str(time.time()-start_time) + ')'
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(255,128,0, text))
driver.refresh()
if add_to_cart_option is True:
        text = '🎉Congratulations, item added to cart successfully. Please checkout as soon as possible!🎉'
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(0,255,0, text))
width = os.get_terminal_size().columns
text = ' ⏳(Took {} seconds)⌛'.format(time.time()-start_time)
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(124,252,0, text).center(width))
# Check whether the item was actually added to the cart
def check_cart_fk():
try:
item = driver.find_element_by_xpath('//*[@id="container"]/div/div[2]/div/div/div[1]/div/div[3]/div/form/button')
print("☑Your item is present in the cart.☑")
except:
print("❌Unable to find the item in the cart.❌")
if WEBSITE == "FLIPKART":
print("🔒Logging in...🔒")
add_to_cart_page_url = URL
driver.get(add_to_cart_page_url)
login_fk()
email_field_element_loc = '/html/body/div[3]/div/div/div/div/div[2]/div/form/div[1]/input'
while True:
try:
driver.find_element_by_xpath(email_field_element_loc)
except:
break
add_to_cart_fk()
while True:
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(0,0,255, '1. Refresh current page'))
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(255,255,0, '2. Check Cart'))
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(255,128,0, '3. Retry add to cart'))
print("\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(255,0,0, '4. Exit Script'))
ans = int(input())
if ans == 1:
driver.refresh()
elif ans == 2:
check_cart_fk()
elif ans == 3:
driver.get(add_to_cart_page_url)
add_to_cart_fk()
else:
break
print("🚗Exited Script🚗")
``` |
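The script expects a `config.ini` next to it with the sections and keys read at the top of the file. A sketch that generates a matching file with `configparser` (all values are placeholders):
```python
# Generate a config.ini matching the sections/keys read at the top of bot.py.
# All values below are placeholders.
from configparser import ConfigParser

cfg = ConfigParser()
cfg['LOGIN DETAILS'] = {'EMAIL': 'you@example.com', 'PASSWORD': 'change-me'}
cfg['PAYMENT DETAILS'] = {'PHONE': '9999999999'}
cfg['URL'] = {'WEBSITE': 'FLIPKART', 'URL': 'https://www.flipkart.com/<product-page>'}
cfg['ADDRESS'] = {'ADDRESS': '221B Example Street'}

# Option names are written lower-cased, but ConfigParser.get() is
# case-insensitive for options, so the upper-case lookups above still match.
with open('config.ini', 'w') as fh:
    cfg.write(fh)
```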