metadata | text
---|---|
{
"source": "joshuamarango/aws-mongo-cdc-kinesis",
"score": 2
} |
#### File: aws_mongo_cdc_kinesis/core/bookmark.py
```python
import boto3
from botocore.client import BaseClient
class StreamBookmark:
def __init__(self):
self.dynamo: BaseClient = boto3.client("dynamodb")
def save(self, resume_token: str) -> None:
"""Save resume_token to DynamoDB"""
try:
# put_item is the DynamoDB write call; the table and key names below are placeholders,
# since the real ones are not part of this excerpt
self.dynamo.put_item(
TableName="cdc_bookmarks",
Item={"stream_id": {"S": "mongo-cdc"}, "resume_token": {"S": resume_token}})
except Exception as exc:
print(f"error saving bookmark: {exc}")
```
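The class above only covers the write path; to restart the change stream after a crash, the pipeline also needs to read the bookmark back. A minimal read-side sketch, reusing the placeholder table and key names from the comment in `save()` (the real names are not part of this excerpt):
```python
from typing import Optional
import boto3

def load_resume_token(table_name: str = "cdc_bookmarks") -> Optional[str]:
    """Return the last saved MongoDB change-stream resume token, or None if none exists."""
    dynamo = boto3.client("dynamodb")
    resp = dynamo.get_item(
        TableName=table_name,  # placeholder name, matching the sketch in save()
        Key={"stream_id": {"S": "mongo-cdc"}},  # placeholder partition key
    )
    item = resp.get("Item")
    return item["resume_token"]["S"] if item else None
```
The returned value would typically feed pymongo's `collection.watch(resume_after=...)` so the stream picks up where it left off (pymongo expects the token in its original document form, so a JSON round-trip may be needed).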
#### File: aws_mongo_cdc_kinesis/test/test_aws_dynamodb.py
```python
import os
import boto3
import pytest
import moto
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = '<PASSWORD>'
os.environ['AWS_SESSION_TOKEN'] = '<PASSWORD>'
@pytest.fixture(scope='function')
def dynamodb(aws_credentials):
with moto.mock_dynamodb2():
yield boto3.client("dynamodb", region_name='eu-west-1')
def test_create_dynamodb_table(dynamodb):
"""Create Cloudwatch log group"""
pass
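# A sketch of what the stub above could grow into, assuming a simple single-key table
# (the real schema is not shown in this excerpt): create the table inside the moto mock
# and verify it is listed.
def test_create_bookmark_table(dynamodb):
    dynamodb.create_table(
        TableName="cdc_bookmarks",  # placeholder name
        KeySchema=[{"AttributeName": "stream_id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "stream_id", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )
    assert "cdc_bookmarks" in dynamodb.list_tables()["TableNames"]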
``` |
{
"source": "JoshuaMarksberry/JarvisVirtualAssistant",
"score": 3
} |
#### File: JoshuaMarksberry/JarvisVirtualAssistant/main.py
```python
import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime #pip install datetime
import wikipedia #pip install wikipedia
import webbrowser #pip install pipwin
import os #pipwin install pyaudio
import smtplib
MASTER = "Sir"
#print("Initiallizing Jarvis")
engine = pyttsx3.init("sapi5")
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# The index into voices[] selects one of the installed speech voices; pick a different index to change the voice
# Speak function will speak the string which is passed
def speak(text):
engine.say(text)
engine.runAndWait()
#This function will wish you as per the current time
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour <12:
speak("Good Morning " + MASTER)
elif hour>=12 and hour<18:
speak("Good Afternoon " + MASTER)
else:
speak("Good Evening " + MASTER)
#speak("I am Jarvis. How may I help you sir?")
#This function will take the command from microphone
def takeCommand():
r =sr.Recognizer()
with sr.Microphone() as source:
print("Listening....")
audio = r.listen(source)
try :
print("Recognizing...")
query = r.recognize_google(audio, language = 'en-us' )
print(f"user said: {query}\n")
except Exception:
print("Say that again sir")
query = None
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('<EMAIL>', '<PASSWORD>')
server.sendmail("<EMAIL>", to, content)
server.close()
#Main program starts here...
def main():
#speak("Initializing Jarvis....")
wishMe()
query = takeCommand()
if query is None: # Nothing was recognized; return instead of crashing on query.lower()
return
#Logic for executing tasks per the query
if 'wikipedia' in query.lower():
speak('Searching wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences = 2) #2
print(results)
speak(results)
#Make sure chrome.exe is in the correct directory
elif 'open youtube' in query.lower():
webbrowser.open('http://youtube.com')
elif 'open google' in query.lower():
webbrowser.open('http://google.com')
#your own directory below navigate to your own music
elif 'play music' in query.lower():
songs_dir = 'C:\\Users\\joshua\\Music\\Playlists\\'
songs = os.listdir(songs_dir)
os.startfile(os.path.join(songs_dir, songs[1]))
elif 'the time' in query.lower():
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"{MASTER} the time is {strTime}")
elif 'email to me' in query.lower():
try:
speak("What should I send")
content = takeCommand()
to = "<EMAIL>"
sendEmail(to,content)
speak("Email has been sent")
except Exception:
print()
main()
``` |
{
"source": "joshuaMarple/phase-vocoder",
"score": 4
} |
#### File: phase-vocoder/app/Duration.py
```python
import subprocess
import os
from sys import platform as _platform
from lib import pydub
def changeDuration(filename,percent):
"""
Input: filename, percent
filename (string): the path to the soundfile
percent (number): the tempo change to apply, in percent (negative values slow the sound down and lengthen it, positive values speed it up)
Outputs: duroutput.wav
Description: This function will change the duration (tempo) of a soundfile
"""
tempochange = "-tempo="+str(percent)
if _platform == "linux" or _platform == "linux2":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitch')
elif _platform == "darwin":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitchmac')
elif _platform == "win32":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitchwin32.exe')
subprocess.call([fn,filename, "duroutput.wav","-speech", tempochange])
return "duroutput.wav"
def changeGapDuration(filename,gaptime,gapduration,percentage):
"""
Input: filename, gaptime, gapduration, percentage
filename (string): the path to the soundfile
gaptime (float): the time (in seconds) at which to begin changing the duration
gapduration (float): the length of sound (in seconds) to be changed, starting from gaptime
percentage (number): the tempo change to apply to that segment, in percent
Outputs: the modified audio, written back to filename
Description: This function will change the duration of a section of a soundfile
"""
file = pydub.AudioSegment.from_wav(filename)
newdurationpart = file[int((gaptime* 1000)) : int(((gaptime+gapduration) * 1000))]
first = file[:int(gaptime * 1000)]
last = file[int((gaptime+gapduration) * 1000):]
newdurationpart.export("durinput.wav", format="wav")
changeDuration("durinput.wav",percentage)
newdurationpart = pydub.AudioSegment.from_wav("duroutput.wav")
newfile = first + newdurationpart + last
newfile.export(filename, format="wav")
os.remove("durinput.wav")
os.remove("duroutput.wav")
return newfile
```
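A minimal usage sketch for the two functions above. It assumes an `input.wav` exists in the working directory and that the bundled `soundpitch` binary for the current platform sits next to `Duration.py`; neither is included in this excerpt, and the import path is a guess based on the folder layout:
```python
from app.Duration import changeDuration, changeGapDuration

# Slow the whole file down by 25 percent; the result is written to duroutput.wav.
changeDuration("input.wav", -25)

# Slow down only the two seconds starting at 0:05; the result is written back to input.wav.
changeGapDuration("input.wav", gaptime=5.0, gapduration=2.0, percentage=-25)
```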
#### File: lib/tgt/agreement.py
```python
from __future__ import division, print_function
import itertools
import re
import numpy as np
from .core import IntervalTier, PointTier, Time
# --------------
# Fleiss's kappa
# --------------
def fleiss_observed_agreement(a):
'''Return the observed agreement for the input array.'''
def per_subject_agreement(row):
'''Return the observed agreement for the i-th subject.'''
number_of_objects = np.sum(row)
return (np.sum(np.square(row)) - number_of_objects) / (number_of_objects * (number_of_objects - 1))
row_probabilities = np.apply_along_axis(per_subject_agreement, axis=1, arr=a)
return np.mean(row_probabilities)
def fleiss_chance_agreement(a):
'''Returns the chance agreement for the input array.'''
def per_category_probabilities(a):
'''The proportion of all assignments which were to the j-th category.'''
cat_sums = np.sum(a, axis=0)
return cat_sums / np.sum(cat_sums)
return np.sum(np.square(per_category_probabilities(a)))
def fleiss_kappa(a):
'''Calculates Fleiss's kappa for the input array (with categories
in columns and items in rows).'''
p = fleiss_observed_agreement(a)
p_e = fleiss_chance_agreement(a)
return (p - p_e) / (1 - p_e)
# -------------
# Cohen's kappa
# -------------
def cohen_kappa(a):
'''Calculates Cohen's kappa for the input array.'''
totsum = np.sum(a)
colsums = np.sum(a, 0)
rowsums = np.sum(a, 1)
# Observed agreement.
p = np.sum(np.diagonal(a)) / totsum
# Chance agreement.
p_e = np.sum((colsums * rowsums) / totsum ** 2)
return (p - p_e) / (1 - p_e)
# ----------
# Scott's pi
# ----------
def scott_pi(a):
'''Calculates Scott's Pi for the input array.'''
totsum = np.sum(a)
colsums = np.sum(a, 0)
rowsums = np.sum(a, 1)
# Observed agreement.
p = np.sum(np.diagonal(a)) / totsum
# Chance agreement.
joint_marginal_props = (colsums + rowsums) / (2 * totsum)
p_e = np.sum(joint_marginal_props ** 2)
return (p - p_e) / (1 - p_e)
# ----------------------------------------------------------
# Functions producing contingency tables from lists of labels
# ----------------------------------------------------------
def align_labels(tiers_list, precision=None, regex=r'[^\s]+'):
'''Create a list of lists for all time-aligned Interval
or Point objects in tiers_list, whose text matches regex.
For example:
[[label_1-tier_1, label_1-tier_2, label_1-tier_3],
[label_2-tier_1, label_2-tier_2, label_2-tier_3],
...
[label_n-tier_1, label_n-tier_2, label_n-tier_3]]
The allowed mismatch between object's timestamps can be
controlled via the precision parameter.
'''
# If precision is supplied, temporarily change
# the value of Time._precision
if precision is not None:
precision_old = Time._precision
Time._precision = precision
if len(tiers_list) < 2:
raise Exception('At least two tiers need to be provided')
# Check if all elements of tiers_list are
# either PointTiers or IntervalTiers.
elif (not (all([isinstance(x, IntervalTier) for x in tiers_list])
or all([isinstance(x, PointTier) for x in tiers_list]))):
raise TypeError('Only objects of types IntervalTier or PointTier can be aligned.')
elif len(set([len(x) for x in tiers_list])) > 1:
raise Exception('Input tiers differ in the number of objects.')
labels_aligned = []
for intervals in zip(*[x for x in tiers_list]):
start_times = [x.start_time for x in intervals]
end_times = [x.end_time for x in intervals]
labels = [x.text.strip() for x in intervals]
if any([not re.search(regex, x) for x in labels]):
# Only go on if labels of all intervals match the regular expression.
continue
# Check if start and end times match.
elif start_times.count(start_times[0]) != len(start_times):
raise Exception('Objects\' time stamps do not match: {0}'.format(start_times))
elif end_times.count(end_times[0]) != len(end_times):
raise Exception('Objects\' time stamps do not match: {0}'.format(end_times))
else:
labels_aligned.append(labels)
# Reset Time._precision to its original value
if precision is not None:
Time._precision = precision_old
return labels_aligned
def cont_table(tiers_list, precision, regex):
'''Produce a contingency table from annotations in tiers_list
whose text matches regex, and whose time stamps are not
misaligned by more than precision.
'''
labels_aligned = align_labels(tiers_list, precision, regex)
# List of unique labels from both lists.
categories = list(set(itertools.chain(*labels_aligned)))
# A 2-by-2 array
if len(labels_aligned[0]) == 2:
categories_product = itertools.product(categories, categories)
table = np.array([labels_aligned.count(list(x)) for x in categories_product])
table.shape = (len(categories), len(categories))
# An n-by-m array
else:
table = np.array([x.count(y) for x in labels_aligned for y in categories])
table.shape = (len(labels_aligned), len(categories))
return table
def agreement(tiers_list, method, precision=None, regex=r'[^\s]+'):
_AGREEMENT_METHODS = {'cohen-kappa': cohen_kappa,
'fleiss-kappa': fleiss_kappa,
'scott-pi': scott_pi}
if method not in _AGREEMENT_METHODS:
available_methods = ', '.join(_AGREEMENT_METHODS.keys())
raise Exception('Unsupported method. Available options are {0}.'.format(available_methods))
elif len(tiers_list) < 2:
raise Exception('At least two tiers need to be provided')
elif len(tiers_list) == 2 and method in ['cohen-kappa', 'scott-pi']:
agr = _AGREEMENT_METHODS[method](cont_table(tiers_list, precision, regex))
return [x.name for x in tiers_list] + [agr]
elif len(tiers_list) > 2 and method == 'fleiss-kappa':
agr = _AGREEMENT_METHODS[method](cont_table(tiers_list, precision, regex))
return [x.name for x in tiers_list] + [agr]
else:
tier_combinations = itertools.combinations(tiers_list, 2)
agr = []
for tiers_pair in tier_combinations:
agr_pair = _AGREEMENT_METHODS[method](cont_table(tiers_pair, precision, regex))
agr.append([x.name for x in tiers_pair] + [agr_pair])
return agr
```
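As a quick sanity check of the formulas above, here is a small worked example (not part of the library) that runs `cohen_kappa` on a hand-built 2x2 contingency table; the import path assumes the bundled `tgt` package is importable, and the counts are made up for illustration:
```python
import numpy as np
from tgt.agreement import cohen_kappa  # assumes lib/tgt is importable as "tgt"

# Two annotators labelled 50 items with two categories.
# Rows: annotator 1's labels, columns: annotator 2's labels.
table = np.array([[20, 5],
                  [10, 15]])

# Observed agreement  p   = (20 + 15) / 50          = 0.7
# Chance agreement    p_e = (30*25 + 20*25) / 50**2 = 0.5
# Cohen's kappa           = (0.7 - 0.5) / (1 - 0.5) = 0.4
print(cohen_kappa(table))  # -> 0.4
```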
#### File: lib/tgt/io.py
```python
from __future__ import division, print_function
import copy
import codecs
import datetime
import collections
from .core import TextGrid, IntervalTier, Interval, PointTier, Point, Time
def read_textgrid(filename, encoding='utf-8', include_empty_intervals=False):
'''Read a Praat TextGrid file and return a TextGrid object.
If include_empty_intervals is False (the default), empty intervals
are excluded. If True, they are included. Empty intervals from specific
tiers can be also included by specifying tier names as a string (for one tier)
or as a list.'''
with codecs.open(filename, 'r', encoding) as f:
# Read whole file into memory ignoring empty lines and lines consisting
# solely of a single double quotes.
stg = [line.strip() for line in f.readlines()
if line.strip() not in ['', '"']]
if stg[0] != 'File type = "ooTextFile"':
raise Exception(filename)
if stg[1] != 'Object class = "TextGrid"':
raise Exception(filename)
# Determine the TextGrid format.
if stg[2].startswith('xmin'):
return read_long_textgrid(filename, stg, include_empty_intervals)
else:
return read_short_textgrid(filename, stg, include_empty_intervals)
def read_short_textgrid(filename, stg, include_empty_intervals=False):
'''Read a Praat short TextGrid file and return a TextGrid object.'''
def read_interval_tier(stg_extract):
'''Read and return an IntervalTier from a short TextGrid.'''
name = stg_extract[1].strip('"') # name w/o quotes
start_time = Time(stg_extract[2])
end_time = Time(stg_extract[3])
include_empty_intervals_this_tier = include_empty_intervals_in_tier(name, include_empty_intervals)
it = IntervalTier(start_time, end_time, name)
i = 5
while i < len(stg_extract):
text = stg_extract[i + 2].strip('"') # text w/o quotes
if text.strip() != '' or include_empty_intervals_this_tier:
it.add_annotation(Interval(
Time(stg_extract[i]), # left bound
Time(stg_extract[i + 1]), # right bound
text))
i += 3
return it
def read_point_tier(stg_extract):
'''Read and return a PointTier (called TextTier) from a short TextGrid.'''
name = stg_extract[1].strip('"') # name w/o quotes
start_time = stg_extract[2]
end_time = stg_extract[3]
pt = PointTier(start_time, end_time, name)
i = 5
while i < len(stg_extract):
text = stg_extract[i + 1].strip('"') # text w/o quotes
pt.add_annotation(Point(
stg_extract[i], # time
text))
i += 2
return pt
tg = TextGrid(filename)
read_start_time = stg[2]
read_end_time = stg[3]
if stg[4] != '<exists>':
raise Exception(filename)
read_no_of_tiers = stg[5]
index = 6
while index < len(stg):
num_obj = int(stg[index + 4])
if stg[index] == '"IntervalTier"':
tg.add_tier(read_interval_tier(stg[index:index + 5 + num_obj * 3]))
index += 5 + (num_obj * 3)
elif stg[index] == '"TextTier"':
tg.add_tier(read_point_tier(stg[index:index + 5 + num_obj * 2]))
index += 5 + (num_obj * 2)
else:
raise Exception('Unknown tier type: {0}'.format(stg[index]))
return tg
def read_long_textgrid(filename, stg, include_empty_intervals=False):
'''Read a Praat long TextGrid file and return a TextGrid object.'''
def get_attr_val(x):
'''Extract the attribute value from a long TextGrid line.'''
return x.split(' = ')[1]
def read_interval_tier(stg_extract):
'''Read and return an IntervalTier from a long TextGrid.'''
name = get_attr_val(stg_extract[2])[1:-1] # name w/o quotes
start_time = get_attr_val(stg_extract[3])
end_time = get_attr_val(stg_extract[4])
include_empty_intervals_this_tier = include_empty_intervals_in_tier(name, include_empty_intervals)
it = IntervalTier(start_time, end_time, name)
i = 7
while i < len(stg_extract):
text = get_attr_val(stg_extract[i + 2])[1:-1] # text w/o quotes
if text.strip() != '' or include_empty_intervals_this_tier:
it.add_annotation(Interval(
Time(get_attr_val(stg_extract[i])), # left bound
Time(get_attr_val(stg_extract[i + 1])), # right bound
text))
i += 4
return it
def read_point_tier(stg_extract):
'''Read and return a PointTier (called TextTier) from a long TextGrid.'''
name = get_attr_val(stg_extract[2])[1:-1] # name w/o quotes
start_time = get_attr_val(stg_extract[3])
end_time = get_attr_val(stg_extract[4])
pt = PointTier(start_time, end_time, name)
i = 7
while i < len(stg_extract):
text = get_attr_val(stg_extract[i + 1])[1:-1] # text w/o quotes
pt.add_annotation(Point(
Time(get_attr_val(stg_extract[i])), # time
text))
i += 3
return pt
tg = TextGrid(filename)
read_start_time = get_attr_val(stg[2])
read_end_time = get_attr_val(stg[3])
if stg[4].split()[1] != '<exists>':
raise Exception(filename)
read_no_of_tiers = get_attr_val(stg[5])
index = 7
while index < len(stg):
num_obj = int(get_attr_val(stg[index + 5]))
if get_attr_val(stg[index + 1]) == '"IntervalTier"':
tg.add_tier(read_interval_tier(stg[index:index + 6 + num_obj * 4]))
index += 6 + (num_obj * 4)
elif get_attr_val(stg[index + 1]) == '"TextTier"':
tg.add_tier(read_point_tier(stg[index:index + 6 + num_obj * 3]))
index += 6 + (num_obj * 3)
else:
raise Exception('Unknown tier type: {0}'.format(stg[index]))
return tg
def include_empty_intervals_in_tier(tier_name, include_empty_intervals):
"""Check whether to include empty intervals for a particular tier"""
if isinstance(include_empty_intervals, bool):
return include_empty_intervals
elif isinstance(include_empty_intervals, str):
return tier_name == include_empty_intervals
elif isinstance(include_empty_intervals, list):
return tier_name in include_empty_intervals
else:
raise TypeError('Invalid type of include_empty_intervals: {0}.'.format(type(include_empty_intervals)))
## Functions for writing TextGrid files
##----------------------------------------------------------------------------
def correct_start_end_times_and_fill_gaps(textgrid):
'''Correct the start/end times of all tiers and fill gaps.
Returns a copy of a textgrid, where empty gaps between intervals
are filled with empty intervals and where start and end times are
unified with the start and end times of the whole textgrid.
'''
textgrid_copy = copy.deepcopy(textgrid)
for tier in textgrid_copy:
if isinstance(tier, IntervalTier):
tier_corrected = tier.get_copy_with_gaps_filled(textgrid.start_time, textgrid.end_time)
position = textgrid_copy.tiers.index(tier)
textgrid_copy.tiers[position] = tier_corrected
return textgrid_copy
def export_to_short_textgrid(textgrid):
'''Convert a TextGrid object into a string of Praat short TextGrid format.'''
result = ['File type = "ooTextFile"',
'Object class = "TextGrid"',
'',
textgrid.start_time,
textgrid.end_time,
'<exists>',
len(textgrid)]
textgrid_corrected = correct_start_end_times_and_fill_gaps(textgrid)
for tier in textgrid_corrected:
result += ['"' + tier.tier_type() + '"',
'"' + tier.name + '"',
tier.start_time, tier.end_time, len(tier)]
if isinstance(tier, IntervalTier):
result += [u'{0}\n{1}\n"{2}"'.format(obj.start_time, obj.end_time, obj.text)
for obj in tier]
elif isinstance(tier, PointTier):
result += [u'{0}\n"{1}"'.format(obj.time, obj.text)
for obj in tier]
else:
raise Exception('Unknown tier type: {0}'.format(tier.name))
return '\n'.join([unicode(x) for x in result])
def export_to_long_textgrid(textgrid):
'''Convert a TextGrid object into a string of Praat long TextGrid format.'''
result = ['File type = "ooTextFile"',
'Object class = "TextGrid"',
'',
'xmin = ' + unicode(textgrid.start_time),
'xmax = ' + unicode(textgrid.end_time),
'tiers? <exists>',
'size = ' + unicode(len(textgrid)),
'item []:']
textgrid_corrected = correct_start_end_times_and_fill_gaps(textgrid)
for i, tier in enumerate(textgrid_corrected):
result += ['\titem [{0}]:'.format(i + 1),
'\t\tclass = "{0}"'.format(tier.tier_type()),
'\t\tname = "{0}"'.format(tier.name),
'\t\txmin = ' + unicode(tier.start_time),
'\t\txmax = ' + unicode(tier.end_time),
'\t\tintervals: size = ' + unicode(len(tier))]
if isinstance(tier, IntervalTier):
for j, obj in enumerate(tier):
result += ['\t\tintervals [{0}]:'.format(j + 1),
'\t\t\txmin = ' + unicode(obj.start_time),
'\t\t\txmax = ' + unicode(obj.end_time),
'\t\t\ttext = "' + obj.text + '"']
elif isinstance(tier, PointTier):
for j, obj in enumerate(tier):
result += ['\t\tpoints [{0}]:'.format(j + 1),
'\t\t\tnumber = ' + unicode(obj.time),
'\t\t\tmark = "' + obj.text + '"']
else:
raise Exception('Unknown tier type: {0}'.format(tier.name))
return '\n'.join([unicode(x) for x in result])
def export_to_elan(textgrid, encoding='utf-8', include_empty_intervals=False,
include_point_tiers=True, point_tier_annotation_duration=0.04):
'''Convert a TextGrid object into a string of ELAN eaf format.'''
time_slots = collections.OrderedDict()
def get_time_slot_id(time, ts_dict=time_slots):
'''Returns (and possibly creates) the time slot id of time.'''
time_in_ms = int(time * 1000)
if time_in_ms not in ts_dict:
ts_id = 'ts' + str(len(ts_dict) + 1)
ts_dict[time_in_ms] = ts_id
return ts_dict[time_in_ms]
# Create ELAN header
head = [
u'<?xml version="1.0" encoding="{0}"?>'.format(encoding.upper()),
u'<ANNOTATION_DOCUMENT AUTHOR="TextGridTools" DATE="{0}" FORMAT="2.7" VERSION="2.7" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://www.mpi.nl/tools/elan/EAFv2.7.xsd">'.format(datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S+00:00')),
u'<HEADER MEDIA_FILE="" TIME_UNITS="milliseconds">',
u'\t<PROPERTY NAME="lastUsedAnnotationId">0</PROPERTY>',
u'</HEADER>']
# Create annotations
annotation_id_count = 1
annotations = []
textgrid_copy = correct_start_end_times_and_fill_gaps(textgrid) if include_empty_intervals else textgrid
for tier in textgrid_copy:
annotations.append(u'<TIER DEFAULT_LOCALE="en" LINGUISTIC_TYPE_REF="default-lt" TIER_ID="{0}">'.format(tier.name))
if isinstance(tier, IntervalTier):
for interval in tier:
if interval.text == '' and not include_empty_intervals:
continue
annotations += [
u'<ANNOTATION>',
u'\t<ALIGNABLE_ANNOTATION ANNOTATION_ID="{0}" TIME_SLOT_REF1="{1}" TIME_SLOT_REF2="{2}">'.format('a' + str(annotation_id_count), get_time_slot_id(interval.start_time), get_time_slot_id(interval.end_time)),
u'\t\t<ANNOTATION_VALUE>{0}</ANNOTATION_VALUE>'.format(interval.text),
u'\t</ALIGNABLE_ANNOTATION>',
u'</ANNOTATION>']
annotation_id_count += 1
elif isinstance(tier, PointTier):
if include_point_tiers:
for point in tier.points:
annotations += [
u'<ANNOTATION>',
u'\t<ALIGNABLE_ANNOTATION ANNOTATION_ID="{0}" TIME_SLOT_REF1="{1}" TIME_SLOT_REF2="{2}">'.format('a' + str(annotation_id_count), get_time_slot_id(point.time), get_time_slot_id(point.time + point_tier_annotation_duration)),
u'\t\t<ANNOTATION_VALUE>{0}</ANNOTATION_VALUE>'.format(point.text),
u'\t</ALIGNABLE_ANNOTATION>',
u'</ANNOTATION>']
annotation_id_count += 1
else:
raise Exception('Unknown tier type: {0}'.format(tier.name))
annotations.append(u'</TIER>')
# Create time stamp information
time_info = [u'<TIME_ORDER>']
for time_value, time_slot_id in time_slots.items():
time_info.append(u'\t<TIME_SLOT TIME_SLOT_ID="{0}" TIME_VALUE="{1}"/>'.format(time_slot_id, str(time_value)))
time_info.append(u'</TIME_ORDER>')
# Create ELAN footer
foot = [u'<LINGUISTIC_TYPE GRAPHIC_REFERENCES="false" LINGUISTIC_TYPE_ID="default-lt" TIME_ALIGNABLE="true"/>',
u'<LOCALE COUNTRY_CODE="US" LANGUAGE_CODE="en"/>',
u'</ANNOTATION_DOCUMENT>']
eaf = head + time_info + annotations + foot
return '\n'.join([unicode(x) for x in eaf])
def export_to_table(textgrid, separator=','):
'''Convert a TextGrid object into a table with fields delimited
with the specified separator (comma by default).'''
result = [separator.join(['tier_name', 'tier_type', 'start_time', 'end_time', 'text'])]
for tier in textgrid:
if isinstance(tier, IntervalTier):
for obj in tier:
if obj.text:
result.append(separator.join([unicode(tier.name), unicode(tier.tier_type()),
unicode(obj.start_time), unicode(obj.end_time), obj.text]))
elif isinstance(tier, PointTier):
for obj in tier:
result.append(separator.join([unicode(tier.name), unicode(tier.tier_type()),
unicode(obj.time), unicode(obj.time), obj.text]))
else:
raise Exception('Unknown tier type: {0}'.format(tier.name))
return '\n'.join([unicode(x) for x in result])
# Listing of currently supported export formats.
_EXPORT_FORMATS = {
# Export to Praat TextGrid in short format
'short': export_to_short_textgrid,
# Export to Praat TextGrid in long (i.e., standard) format
'long': export_to_long_textgrid,
# Export to ELAN .eaf format
'eaf': export_to_elan,
# Export to a table
'table': export_to_table
}
def write_to_file(textgrid, filename, format='short', encoding='utf-8', **kwargs):
'''Write a TextGrid object to a file in the specified format.'''
with codecs.open(filename, 'w', encoding) as f:
if format in _EXPORT_FORMATS:
f.write(_EXPORT_FORMATS[format](textgrid, **kwargs))
else:
raise Exception('Unknown output format: {0}'.format(format))
```
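A minimal round-trip sketch for the reader and writers above. It assumes the bundled `tgt` package is importable, that `recording.TextGrid` exists, and that the interpreter provides `unicode` (the module is written in Python 2 idioms):
```python
from tgt.io import read_textgrid, write_to_file

tg = read_textgrid("recording.TextGrid", include_empty_intervals=False)

# Re-export to the long (standard) Praat format and to a delimiter-separated table.
write_to_file(tg, "recording_long.TextGrid", format="long")
write_to_file(tg, "recording_labels.csv", format="table", separator=",")
```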
#### File: lib/tgt/util.py
```python
from __future__ import division
import collections
import copy
import re
from .core import TextGrid, IntervalTier, Interval
## High-level functions
##----------------------------------------------------------------------------
def shift_boundaries(tier, left, right):
"""
Return a copy of the tier with boundaries shifted by the specified
amount of time (in seconds). Positive values expand the tier and negative values shrink
it, i.e.:
* positive value of left shifts the left boundary to the left
* negative value of left shifts the left boundary to the right
* positive value of right shifts the right boundary to the right
* negative value of right shifts the right boundary to the left.
"""
tier_end_shifted = tier.end_time + left + right
tier_shifted = IntervalTier(start_time=0,
end_time=tier_end_shifted,
name=tier.name)
for i, interval in enumerate(tier.intervals):
if interval.end_time <= left * -1:
continue
elif i > 0 and interval.start_time > left * -1:
interval_start_shifted = interval.start_time + left
else:
interval_start_shifted = 0
interval_end_shifted = interval.end_time + left
if (interval_start_shifted >= tier_end_shifted):
break
elif i == len(tier.intervals) - 1 or interval_end_shifted > tier_end_shifted:
interval_end_shifted = tier_end_shifted
tier_shifted.add_annotation(Interval(interval_start_shifted,
interval_end_shifted,
interval.text))
return tier_shifted
def get_overlapping_intervals(tier1, tier2, regex=r'[^\s]+', overlap_label=None):
'''Return a list of overlaps between intervals of tier1 and tier2.
If no overlap_label is specified, concatenated labels
of overlapping intervals are used as the resulting label.
All nonempty intervals are included in the search by default.
'''
if not isinstance(tier2, IntervalTier):
raise TypeError('Argument is not an IntervalTier')
intervals1 = tier1.intervals
intervals2 = tier2.intervals
overlaps = []
i, j = 0, 0
while i < len(tier1) and j < len(tier2):
lo = max(intervals1[i].start_time, intervals2[j].start_time)
hi = min(intervals1[i].end_time, intervals2[j].end_time)
if (lo < hi and re.search(regex, intervals1[i].text)
and re.search(regex, intervals2[j].text)):
if overlap_label is None:
text = '+'.join([intervals1[i].text, intervals2[j].text])
else:
text = overlap_label
overlaps.append(Interval(lo, hi, text))
if intervals1[i].end_time < intervals2[j].end_time:
i += 1
else:
j += 1
return overlaps
def concatenate_textgrids(textgrids, ignore_nonmatching_tiers=False):
'''Concatenate Tiers with matching names.
TextGrids are concatenated in the order they are specified. If
ignore_nonmatching_tiers is False, an exception is raised if the
number and the names of tiers differ between TextGrids.
'''
tier_names_intersection = set.intersection(
*[set(tg.get_tier_names()) for tg in textgrids])
# Check whether the TextGrids have the same number of tiers
# and whether tier names match. If they don't
# and if ignore_nonmatching_tiers is False, raise an exception.
if (not ignore_nonmatching_tiers
and not all([len(tier_names_intersection) == len(tg) for tg in textgrids])):
raise Exception('TextGrids have different numbers of tiers or tier names do not match.')
tot_duration = 0
tiers = {} # tier_name : tgt.Tier()
for textgrid in textgrids:
for tier in textgrid:
if tier.name not in tier_names_intersection:
continue
intervals = []
# If this is the first we see this tier, we just make a copy
# of it as it is.
if tier.name not in tiers.keys():
tiers[tier.name] = copy.deepcopy(tier)
# Otherwise we update the start and end times of intervals
# and append them to the first part.
else:
for interval in tier.intervals:
interval.start_time += tot_duration
interval.end_time += tot_duration
intervals.append(interval)
tiers[tier.name].add_annotations(intervals)
tot_duration += textgrid.end_time
# Create a new TextGrid and add the concatenated tiers
textgrid_concatenated = TextGrid()
# Add tiers in the order they're found in the first TextGrid.
textgrid_concatenated.add_tiers(
[tiers[x] for x in tier_names_intersection])
return textgrid_concatenated
def merge_textgrids(textgrids, ignore_duplicates=True):
'''Return a TextGrid object with tiers from all textgrids.
If ignore_duplicates is False, tiers with equal names are renamed
by appending the textgrid's filename or a unique number incremented
with each occurrence.
'''
tg_merged = TextGrid()
tier_duplicates_lookup = collections.defaultdict(int)
for tg in textgrids:
for tier in tg:
tier_copy = copy.deepcopy(tier)
if tg_merged.has_tier(tier.name):
if not ignore_duplicates:
if tg.filename.strip() != '':
tier_copy.name += '_' + tg.filename
else:
tier_duplicates_lookup[tier.name] += 1
tier_copy.name += '_' + str(tier_duplicates_lookup[tier.name])
else:
continue
tg_merged.add_tier(tier_copy)
return tg_merged
``` |
{
"source": "joshuamataaraya/auctionSystem",
"score": 3
} |
#### File: auctionSystem/pythonClient/sql.py
```python
import pymssql
from user import getPassword
class SQLConnection:
"""Connecto to Microsoft SQL DB """
def __init__(self, userType=None, userName=None):
self.server = "autionDB"
if userType == "admin":
self.user = userName
self.password = getPassword(userName)
elif userType == "agent":
self.user = userName
self.password = getPassword(userName)
elif userType == "participant":
self.user = "Participant"
self.password = "<PASSWORD>"
else:
self.user = "App"
self.password = "<PASSWORD>"
def connect(self):
return pymssql.connect(self.server,
self.user,self.password,'<PASSWORD>')
def close(self, con):
con.close()
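# A minimal usage sketch (not in the original file): open a participant connection,
# run a query through the standard pymssql cursor API, and close it.
# The table name below is a placeholder; the real schema is not shown in this excerpt.
if __name__ == "__main__":
    sql = SQLConnection(userType="participant")
    con = sql.connect()
    cur = con.cursor()
    cur.execute("SELECT TOP 5 * FROM Auctions")  # placeholder table name
    for row in cur.fetchall():
        print(row)
    sql.close(con)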
``` |
{
"source": "joshua-matt/Go-Variance",
"score": 4
} |
#### File: Go-Variance/src/board.py
```python
import numpy as np # Matrix representation
"""
Board
-----
Represents a Go board as a 19x19 matrix, with operations to be performed on it.
Members:
- board: the matrix representation of the board, where board[i,j]=1 indicates that row i, column j contains a black
stone. 0 indicates an empty space, and -1 indicates a white stone.
Methods:
- place_b(x,y): Places a black stone at coordinate (x,y) and checks for capture
- place_w(x,y): Places a white stone at coordinate (x,y) and checks for capture
- check_capture(x,y): Checks if any groups adjacent to (x,y) have 0 liberties. If so, removes them.
- get_neighbors(x,y): Returns the colors and coordinates of all squares adjacent to (x,y)
- get_group(x,y): Returns all coordinates part of the group at (x,y) and the number of liberties the group has
- remove_group(x,y): Sets all members of the group at (x,y) to empty
"""
class Board:
def __init__(self):
self.board = np.zeros((19,19))
def place_b(self,x,y):
self.board[x,y] = 1
self.check_capture(x,y)
def place_w(self,x,y):
self.board[x,y] = -1
self.check_capture(x,y)
def check_capture(self,x,y):
neigh = self.get_neighbors(x, y)
for n in neigh:
ng = self.get_group(*n[1])
if n[0] != 0: # Don't count adjacent empty squares as a group
if ng[1] == 0: # No liberties
self.remove_group(*n[1])
def get_neighbors(self,x,y):
neighbors = []
for n in [(x-1,y), (x+1,y), (x,y-1), (x,y+1)]:
if n[0] < 0 or n[0] > 18: # Account for cells on the edge / corner
continue
elif n[1] < 0 or n[1] > 18:
continue
neighbors.append(n)
return [(self.board[n[0],n[1]], n) for n in neighbors]
def _get_group(self,x,y,g,libs): # Recursive helper procedure for get_group
self_c = self.board[x, y] # Color of x,y
neigh = self.get_neighbors(x, y)
g.add((x, y))
for n in neigh:
if n[0] == 0 and n[1] not in libs: # Add empty neighbor to liberties
libs.add(n[1])
if n[0] == self_c and n[1] not in g: # Checks that nonempty neighbor is same color, not already counted
self._get_group(n[1][0], n[1][1], g, libs)
def get_group(self,x,y):
if self.board[x,y] == 0: # Don't count contiguous empty cells as a group
return set(), -1
group = set()
liberties = set()
self._get_group(x, y, group, liberties)
return group, len(list(liberties))
def remove_group(self,x,y):
group = list(self.get_group(x,y)[0])
for c in group:
self.board[c[0],c[1]] = 0
```
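A small sketch (not part of the repository) of the capture logic above: a lone white stone is surrounded on all four sides and removed. It assumes `board.py` is importable from the working directory:
```python
from board import Board

b = Board()
b.place_w(3, 3)                    # lone white stone
for x, y in [(2, 3), (4, 3), (3, 2)]:
    b.place_b(x, y)                # three of its four liberties filled
assert b.board[3, 3] == -1         # still on the board
b.place_b(3, 4)                    # last liberty filled -> group captured
assert b.board[3, 3] == 0
```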
#### File: Go-Variance/src/preprocess.py
```python
import pandas as pd # Export boards as CSV
import os # Path management
import re # Clean up sgf data
import time # Benchmarking
from board import * # Go engine
from string import ascii_lowercase # Convenience
from visualize import * # Visualize a Go board from its matrix
letter_ind = {ascii_lowercase[i]:i for i in range(26)} # For converting from letters to coordinates
"""
convert_all_sgf
---------------
Converts all .sgf files (containing the moves played in a Go game) in a folder to .csv files containing a matrix
which represents the final board state.
In a .csv file, a 1 indicates a black stone at that coordinate, a -1 indicates
a white stone at that coordinate, and a 0 indicates an empty point at the coordinate.
Parameters:
- `folder`: a string designating the data folder containing the .sgf files to convert to .csv
- `overwrite(=False)`: whether or not to overwrite existing .csv files that have the same name as an .sgf file
Preconditions:
- folder contains only .sgf files
"""
def convert_all_sgf(folder, overwrite=False):
dir = os.path.join(os.getcwd(), "..", "data", folder) # Get the whole file path to the data folder
if not os.path.exists(dir + "_csv"):
os.mkdir(dir + "_csv") # Make directory for new .csv files
i = 0 # Keep track of conversion progress
over = 0 # Files overwritten
t1 = time.time() # Start time
for fname in os.listdir(dir): # Loop through all files
#print("PROCESSING FILE: " + fname)
if i % 1000 == 0:
print("%d files converted." % i) # Update on progress
if os.path.exists(dir+"_csv\\"+fname[:-4]+".csv"):
i += 1
if not overwrite: # Skip files that already exist
continue
else:
over += 1
f = open(dir+"\\"+fname) # Open .sgf file
f_data = re.sub('^\(;[^;]*;([BW])\[', "\\1[", f.read())[:-1] # Extract move data
moves = [s[2:4] for s in f_data.split(";")] # Get individual moves
board = get_final_board(moves) # Play out game (accounting for captures) to get final board state
if np.array_equal(board, np.zeros((19,19))):
continue # Throw away empty games
board_frame = pd.DataFrame(board)
board_frame.to_csv(dir + "_csv\\" + fname[:-4] + ".csv") # Write final board to .csv under the same name as the .sgf
i += 1
print("Finished converting $d files in %f seconds. %d files overwritten." % (i, time.time()-t1, over))
"""
mean_board_csv
--------------
Calculates and writes the mean of a folder of .csv files. Used in calculating covariance between coordinates.
Parameters:
- `folder`: a string designating the data folder containing the .sgf files to convert to .csv
Preconditions:
- folder contains only 19x19 .csv files
"""
def mean_board_csv(folder):
avg = np.zeros((19, 19)) # Accumulator for the mean board
dir = os.path.join(os.getcwd(), "..", "data", folder)
i = 0
t1 = time.time()
for fname in os.listdir(dir):
#print("PROCESSING FILE: " + fname)
if i % 1000 == 0:
print(i)
board = pd.read_csv(dir + "\\" + fname).values[:,1:]
avg += board
i += 1
avg /= i # Divide by number of boards to get mean
avg_frame = pd.DataFrame(avg)
avg_frame.to_csv(dir + "\\avg.csv")
print("Mean board of %s written to %s in %f seconds." % (folder, dir+"\\avg.csv", time.time()-t1))
"""
get_final_board
---------------
Executes the moves in a given game of Go to determine the final board position.
Parameters:
- `moves`: an array of two-character lowercase strings with characters from a-s. Strings specify board coordinates,
with the origin 'aa' indicating the top-left of the board. The first character indicates the row, and the
second the column.
Preconditions:
- moves contains only strings of the above format
"""
def get_final_board(moves):
game_board = Board()
black = True # Black plays first
for move in moves: # Play each move
try:
x,y = letter_ind[move[0]],letter_ind[move[1]] # Coordinates of move
if black:
game_board.place_b(x, y)
else:
game_board.place_w(x, y)
black = not black # Switch to other player
except: # In event of error, return empty board
return np.zeros((19,19))
return game_board.board
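# Illustrative example (not in the original file): the opening moves "pd", "dp", "pq"
# place black at (15, 3), white at (3, 15) and black at (15, 16), since 'a' maps to 0.
# board = get_final_board(["pd", "dp", "pq"])
# assert board[15, 3] == 1 and board[3, 15] == -1 and board[15, 16] == 1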
"""
mean_normalize
--------------
Mean normalizes all games within each folder of a set of folders. A new folder `XXXX_norm` is created, containing the
mean normalized games for a folder XXXX.
Parameters:
- `folders`: an array of folder names within the 'data' folder, indicates the collections of games to mean normalize
Preconditions:
- Every folder in folders contains an `avg.csv`, which is the mean of all .csv files in the folder.
"""
def mean_normalize(folders):
for folder in folders:
print(folder+"\n")
dir = os.path.join(os.getcwd(), "..", "data", folder)
if not os.path.exists(dir + "_norm"):
os.mkdir(dir + "_norm")
avg = pd.read_csv(dir+"\\avg.csv").values[:,1:]
i = 1
t1 = time.time()
for fname in os.listdir(dir):
if i % 1000 == 0:
print(i)
if os.path.exists(dir+"_norm\\"+fname): # Skip pre-existing files
continue
board = pd.read_csv(dir+"\\"+fname).values[:,1:]
mean_norm_b = pd.DataFrame(board - avg)
mean_norm_b.to_csv(dir+"_norm\\"+fname)
i += 1
print("Mean normalized %s in %f seconds." % (folder, time.time()-t1))
print("Mean normalization of %d folders complete." % (len(folders)))
``` |
{
"source": "joshua-matt/RL-Quarto",
"score": 3
} |
#### File: RL-Quarto/src/Board.py
```python
import numpy as np
class Board:
def __init__(self):
self.b = np.zeros((4,4),dtype=np.uintc) # Initialize board as empty
self.open_squares = [(i,j) for i in range(4) for j in range(4)]
def clear(self):
self.b = np.zeros((4,4),dtype=np.uintc) # Keep the same dtype as __init__ so the bitwise checks still work
self.open_squares = [(i,j) for i in range(4) for j in range(4)]
def has_victory(self):
# Check if the binary representations of nums have the same bit in at least one place
def all_overlap(nums):
one_overlap = nums[0]
zero_overlap = ~nums[0]
for n in nums:
if n == 0:
return False
one_overlap = one_overlap & n
zero_overlap = zero_overlap & ~n
return one_overlap > 0 or zero_overlap > 0
return any([all_overlap(pcs) for pcs in [list(self.b[i,:]) for i in range(4)] # Rows
+ [list(self.b[:,i]) for i in range(4)] # Columns
+ [[self.b[i,i] for i in range(4)], [self.b[i,3-i] for i in range(4)]]]) # Diagonals
def place_piece(self, piece, coord):
self.b[coord] = piece
self.open_squares.remove(coord)
def print_open(self):
for i in range(4):
for j in range(4):
if self.b[i,j] == 0:
print(4*j + i + 1, end=" ")
else:
print("X", end=" ")
print()
def to_string(self):
st = ""
for i in range(4):
for j in range(4):
if self.b[i,j] != 0:
st += str(self.b[i,j]) + " "
else:
st += "_ "
return st
def print_board(self):
for i in range(4):
for j in range(4):
if self.b[i,j] != 0:
print(self.b[i,j], end=" ")
else:
print("_", end=" ")
print()
``` |
{
"source": "joshuamckenty/chaos-scimmia",
"score": 2
} |
#### File: scimmia/tests/redis_test.py
```python
import nose
import redis
import os
def test_redis_timestamps():
host = os.getenv('REDIS_HOST', "localhost")
port = int(os.getenv('REDIS_PORT', 6379))
redis_db = redis.StrictRedis(host=host, port=port, db=0)
# lrange returns bytes, so cast the two newest entries to int before comparing
(last, second) = [int(ts) for ts in redis_db.lrange('scimmia', 0, 1)]
assert (last - second) < 180 # Less than 3 minutes between timestamps
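# The test assumes some other process periodically pushes Unix timestamps onto the
# 'scimmia' list with LPUSH (newest first), which is why lrange(0, 1) yields the two
# most recent entries. A minimal sketch of such a writer, not part of this repository:
def record_heartbeat(redis_db):
    import time
    redis_db.lpush('scimmia', int(time.time()))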
``` |
{
"source": "joshuamckenty/yolo-octo-wookie",
"score": 2
} |
#### File: joshuamckenty/yolo-octo-wookie/exercise_rsapi.py
```python
import cloudservers
class IdFake:
def __init__(self, id):
self.id = id
# to get your access key:
# from nova.auth import users
# users.UserManger.instance().get_users()[0].access
rscloud = cloudservers.CloudServers(
'admin',
'6cca875e-5ab3-4c60-9852-abf5c5c60cc6'
)
rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0'
rv = rscloud.servers.list()
print "SERVERS: %s" % rv
if len(rv) == 0:
server = rscloud.servers.create(
"test-server",
IdFake("ami-tiny"),
IdFake("m1.tiny")
)
print "LAUNCH: %s" % server
else:
server = rv[0]
print "Server to kill: %s" % server
raw_input("press enter key to kill the server")
server.delete()
```
#### File: nova/auth/signer.py
```python
import logging
import hashlib
import hmac
import urllib
import base64
from nova.exception import Error
class Signer(object):
""" hacked up code from boto/connection.py """
def __init__(self, secret_key):
self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
if hashlib.sha256:
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
def generate(self, params, verb, server_string, path):
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
if params['SignatureVersion'] == '1':
return self._calc_signature_1(params)
if params['SignatureVersion'] == '2':
return self._calc_signature_2(params, verb, server_string, path)
raise Error('Unknown Signature Version: %s' % params['SignatureVersion'])
def _get_utf8_value(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def _calc_signature_0(self, params):
s = params['Action'] + params['Timestamp']
self.hmac.update(s)
keys = params.keys()
keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
pairs.append(key + '=' + urllib.quote(val))
return base64.b64encode(self.hmac.digest())
def _calc_signature_1(self, params):
keys = params.keys()
keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
self.hmac.update(key)
val = self._get_utf8_value(params[key])
self.hmac.update(val)
pairs.append(key + '=' + urllib.quote(val))
return base64.b64encode(self.hmac.digest())
def _calc_signature_2(self, params, verb, server_string, path):
logging.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
hmac = self.hmac_256
params['SignatureMethod'] = 'HmacSHA256'
else:
hmac = self.hmac
params['SignatureMethod'] = 'HmacSHA1'
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
qs = '&'.join(pairs)
logging.debug('query string: %s' % qs)
string_to_sign += qs
logging.debug('string_to_sign: %s' % string_to_sign)
hmac.update(string_to_sign)
b64 = base64.b64encode(hmac.digest())
logging.debug('len(b64)=%d' % len(b64))
logging.debug('base64 encoded digest: %s' % b64)
return b64
if __name__ == '__main__':
print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo")
```
#### File: yolo-octo-wookie/nova/crypto.py
```python
import base64
import hashlib
import logging
import os
import shutil
import struct
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
buffer = ""
if project_id:
with open(ca_path(project_id),"r") as cafile:
buffer += cafile.read()
if not chain:
return buffer
with open(ca_path(None),"r") as cafile:
buffer += cafile.read()
return buffer
def generate_key_pair(bits=1024):
# what is the magic 65537?
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
(out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
shutil.rmtree(tmpdir)
# code below returns public key in pem format
# key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
# private_key = key.as_pem(cipher=None)
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
# public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
e, n = rsa_key.pub()
key_type = 'ssh-rsa'
key_data = struct.pack('>I', len(key_type))
key_data += key_type
key_data += '%s%s' % (e,n)
b64_blob = base64.b64encode(key_data)
return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
def generate_x509_cert(subject, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
if not intermediate:
return _sign_csr(csr_text, FLAGS.ca_path)
user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
def mkreq(bits, subject="foo", ca=0):
pk = M2Crypto.EVP.PKey()
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
rsa = None # should not be freed here
req.set_pubkey(pk)
req.set_subject(subject)
req.sign(pk,'sha512')
assert req.verify(pk)
pk2 = req.get_pubkey()
assert req.verify(pk2)
return req, pk
def mkcacert(subject='nova', years=1):
req, pk = mkreq(2048, subject, ca=1)
pkey = req.get_pubkey()
sub = req.get_subject()
cert = M2Crypto.X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub) # FIXME subject is not set in mkreq yet
t = long(time.time()) + time.timezone
now = M2Crypto.ASN1.ASN1_UTCTIME()
now.set_time(t)
nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
cert.set_not_before(now)
cert.set_not_after(nowPlusYear)
issuer = M2Crypto.X509.X509_Name()
issuer.C = "US"
issuer.CN = subject
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
cert.add_ext(ext)
cert.sign(pk, 'sha512')
# print 'cert', dir(cert)
print cert.as_pem()
print pk.get_rsa().as_pem()
return cert, pk, pkey
# Copyright (c) 2006-2009 <NAME> http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""
@type fp: file
@param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
@rtype: str
@return: the hex digest of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
```
#### File: nova/endpoint/rackspace.py
```python
import base64
import json
import logging
import multiprocessing
import os
import time
from nova import vendor
import tornado.web
from twisted.internet import defer
from nova import datastore
from nova import flags
from nova import rpc
from nova import utils
from nova import exception
from nova.auth import users
from nova.compute import model
from nova.compute import network
from nova.endpoint import wsgi
from nova.endpoint import images
from nova.volume import storage
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
# TODO(todd): subclass Exception so we can bubble meaningful errors
class Api(object):
def __init__(self, rpc_mechanism):
self.controllers = {
"v1.0": RackspaceAuthenticationApi(),
"servers": RackspaceCloudServerApi()
}
self.rpc_mechanism = rpc_mechanism
def handler(self, environ, responder):
environ['nova.context'] = self.build_context(environ)
controller, path = wsgi.Util.route(
environ['PATH_INFO'],
self.controllers
)
if not controller:
# TODO(todd): Exception (404)
raise Exception("Missing Controller")
rv = controller.process(path, environ)
if type(rv) is tuple:
responder(rv[0], rv[1])
rv = rv[2]
else:
responder("200 OK", [])
return rv
def build_context(self, env):
rv = {}
if env.has_key("HTTP_X_AUTH_TOKEN"):
rv['user'] = users.UserManager.instance().get_user_from_access_key(
env['HTTP_X_AUTH_TOKEN']
)
if rv['user']:
rv['project'] = users.UserManager.instance().get_project(
rv['user'].name
)
return rv
class RackspaceApiEndpoint(object):
def process(self, path, env):
if not self.check_authentication(env):
# TODO(todd): Exception (Unauthorized)
raise Exception("Unable to authenticate")
if len(path) == 0:
return self.index(env)
action = path.pop(0)
if hasattr(self, action):
method = getattr(self, action)
return method(path, env)
else:
# TODO(todd): Exception (404)
raise Exception("Missing method %s" % path[0])
def check_authentication(self, env):
if hasattr(self, "process_without_authentication") \
and getattr(self, "process_without_authentication"):
return True
if not env['nova.context']['user']:
return False
return True
class RackspaceAuthenticationApi(RackspaceApiEndpoint):
def __init__(self):
self.process_without_authentication = True
# TODO(todd): make a actual session with a unique token
# just pass the auth key back through for now
def index(self, env):
response = '204 No Content'
headers = [
('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']),
('X-Storage-Url', 'http://%s' % env['HTTP_HOST']),
('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']),
('X-Auth-Token', env['HTTP_X_AUTH_KEY'])
]
body = ""
return (response, headers, body)
class RackspaceCloudServerApi(RackspaceApiEndpoint):
def __init__(self):
self.instdir = model.InstanceDirectory()
self.network = network.PublicNetworkController()
def index(self, env):
if env['REQUEST_METHOD'] == 'GET':
return self.detail(env)
elif env['REQUEST_METHOD'] == 'POST':
return self.launch_server(env)
def detail(self, env):
value = {
"servers":
[]
}
for inst in self.instdir.all:
value["servers"].append(self.instance_details(inst))
return json.dumps(value)
##
##
def launch_server(self, env):
data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH'])))
inst = self.build_server_instance(data, env['nova.context'])
self.schedule_launch_of_instance(inst)
return json.dumps({"server": self.instance_details(inst)})
def instance_details(self, inst):
return {
"id": inst.get("instance_id", None),
"imageId": inst.get("image_id", None),
"flavorId": inst.get("instacne_type", None),
"hostId": inst.get("node_name", None),
"status": inst.get("state", "pending"),
"addresses": {
"public": [self.network.get_public_ip_for_instance(
inst.get("instance_id", None)
)],
"private": [inst.get("private_dns_name", None)]
},
# implemented only by Rackspace, not AWS
"name": inst.get("name", "Not-Specified"),
# not supported
"progress": "Not-Supported",
"metadata": {
"Server Label": "Not-Supported",
"Image Version": "Not-Supported"
}
}
def build_server_instance(self, env, context):
reservation = utils.generate_uid('r')
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
inst = self.instdir.new()
inst['name'] = env['server']['name']
inst['image_id'] = env['server']['imageId']
inst['instance_type'] = env['server']['flavorId']
inst['user_id'] = context['user'].id
inst['project_id'] = context['project'].id
inst['reservation_id'] = reservation
inst['launch_time'] = ltime
inst['mac_address'] = utils.generate_mac()
address = network.allocate_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address']
)
inst['private_dns_name'] = str(address)
inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
inst['user_id'],
inst['project_id'],
'default' # security group
)['bridge_name']
# key_data, key_name, ami_launch_index
# TODO(todd): key data or root password
inst.save()
return inst
def schedule_launch_of_instance(self, inst):
rpc.cast(
FLAGS.compute_topic,
{
"method": "run_instance",
"args": {"instance_id": inst.instance_id}
}
)
```
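A purely illustrative client-side sketch of the two endpoints above, using the host, port and credentials that exercise_rsapi.py (earlier in this row) points at; it assumes the API service, datastore and message queue are actually running, which this excerpt does not show:
```python
import json
import requests

base = "http://localhost:8773"  # same endpoint exercise_rsapi.py uses

# The authentication controller simply echoes the access key back as X-Auth-Token.
auth = requests.get(base + "/v1.0",
                    headers={"X-Auth-User": "admin",
                             "X-Auth-Key": "6cca875e-5ab3-4c60-9852-abf5c5c60cc6"})
token = auth.headers["X-Auth-Token"]

# POST /servers goes through launch_server(), which expects the Rackspace-style body
# read in build_server_instance().
body = {"server": {"name": "test-server", "imageId": "ami-tiny", "flavorId": "m1.tiny"}}
resp = requests.post(base + "/servers",
                     headers={"X-Auth-Token": token},
                     data=json.dumps(body))
print(resp.json()["server"]["status"])
```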
#### File: yolo-octo-wookie/nova/rpc.py
```python
import json
import logging
import sys
import uuid
from nova import vendor
from carrot import connection
from carrot import messaging
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
from nova import exception
from nova import fakerabbit
from nova import flags
FLAGS = flags.FLAGS
_log = logging.getLogger('amqplib')
_log.setLevel(logging.WARN)
class Connection(connection.BrokerConnection):
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
userid=FLAGS.rabbit_userid,
password=<PASSWORD>,
virtual_host=FLAGS.rabbit_virtual_host)
if FLAGS.fake_rabbit:
params['backend_cls'] = fakerabbit.Backend
cls._instance = cls(**params)
return cls._instance
@classmethod
def recreate(cls):
del cls._instance
return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
# cleaning up after themselves
def attach_to_tornado(self, io_inst=None):
from tornado import ioloop
if io_inst is None:
io_inst = ioloop.IOLoop.instance()
injected = ioloop.PeriodicCallback(
lambda: self.fetch(enable_callbacks=True), 100, io_loop=io_inst)
injected.start()
return injected
attachToTornado = attach_to_tornado
def fetch(self, *args, **kwargs):
# TODO(vish): the logic for failed connections and logging should be
# refactored into some sort of connection manager object
try:
if getattr(self, 'failed_connection', False):
# attempt to reconnect
self.conn = Connection.recreate()
self.backend = self.conn.create_backend()
super(Consumer, self).fetch(*args, **kwargs)
if getattr(self, 'failed_connection', False):
logging.error("Reconnected to queue")
self.failed_connection = False
except Exception, ex:
if not getattr(self, 'failed_connection', False):
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
loop.start(interval=0.1)
class Publisher(messaging.Publisher):
pass
class TopicConsumer(Consumer):
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
super(TopicConsumer, self).__init__(connection=connection)
class AdapterConsumer(TopicConsumer):
def __init__(self, connection=None, topic="broadcast", proxy=None):
_log.debug('Initing the Adapter Consumer for %s' % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection, topic=topic)
@exception.wrap_exception
def receive(self, message_data, message):
_log.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
method = message_data.get('method')
args = message_data.get('args', {})
message.ack()
if not method:
# NOTE(vish): we may not want to ack here, but that means that bad
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
_log.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval))
d.addErrback(lambda e: msg_reply(msg_id, str(e)))
return
class TopicPublisher(Publisher):
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
super(TopicPublisher, self).__init__(connection=connection)
class DirectConsumer(Consumer):
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.queue = msg_id
self.routing_key = msg_id
self.exchange = msg_id
self.auto_delete = True
super(DirectConsumer, self).__init__(connection=connection)
class DirectPublisher(Publisher):
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
self.auto_delete = True
super(DirectPublisher, self).__init__(connection=connection)
def msg_reply(msg_id, reply):
conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
try:
publisher.send({'result': reply})
except TypeError:
publisher.send(
{'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems())
})
publisher.close()
def call(topic, msg):
_log.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
_log.debug("MSG_ID is %s" % (msg_id))
conn = Connection.instance()
d = defer.Deferred()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
consumer.register_callback(lambda data, message: d.callback(data))
injected = consumer.attach_to_tornado()
    # clean up after the injected listener and return x
d.addCallback(lambda x: injected.stop() and x or x)
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
publisher.close()
return d
def cast(topic, msg):
_log.debug("Making asynchronous cast...")
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
publisher.close()
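# Illustrative usage sketch (not part of the original module): a service makes a
# blocking-style call or a fire-and-forget cast on the "compute" topic. The method
# name and instance id below are hypothetical; the message dict shape matches the
# call()/cast() helpers above, and a consumer (e.g. an AdapterConsumer wrapping a
# service proxy) is assumed to be attached to an event loop elsewhere.
#
#     d = call("compute", {"method": "get_console_output",
#                          "args": {"instance_id": "i-00000001"}})
#     d.addCallback(lambda result: logging.info("reply: %s", result))
#     cast("compute", {"method": "run_instance",
#                      "args": {"instance_id": "i-00000001"}})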
def generic_response(message_data, message):
_log.debug('response %s', message_data)
message.ack()
sys.exit(0)
def send_message(topic, message, wait=True):
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
_log.debug('topic is %s', topic)
_log.debug('message %s', message)
if wait:
consumer = messaging.Consumer(connection=Connection.instance(),
queue=msg_id,
exchange=msg_id,
auto_delete=True,
exchange_type="direct",
routing_key=msg_id)
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange="nova",
exchange_type="topic",
routing_key=topic)
publisher.send(message)
publisher.close()
if wait:
consumer.wait()
# TODO: Replace with a docstring test
if __name__ == "__main__":
send_message(sys.argv[1], json.loads(sys.argv[2]))
``` |
{
"source": "JoshuaMcroberts/DeliveryDilemmaLite",
"score": 4
} |
#### File: JoshuaMcroberts/DeliveryDilemmaLite/locker_room.py
```python
from libraries import *
from mmap import *
from game import N_game
def locker_room(game = N_game()):
loop = True
while loop:
game.game_map.pre = game.game_map.player_enter((3,2),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- LOCKER ROOM --")+"\n")
print_tab("The room is narrow and bordering three walls are a collection of storage lockers. ")
print_tab("Ten full size " + pr_colour("l_blue","Lockers") + " are against the right-hand wall with a matching Ten opposite ")
print_tab("them against the left-hand wall. The wall towards the back of the room is roughly ")
print_tab("half the size of the other two walls and therefore has two rows of five half-sized ")
print_tab("lockers which had been stacked to give in total an additional Ten lockers. ")
print_tab("Each locker is sequentially numbered.")
var = san_input()
# Navigation IF
if var == "lockers":
print("")
lockers(game)
else:
hint = "Don't lick icy lamp posts"
loop = game.basic_game_func(var, hint)
def lockers(game = N_game()):
loop = True
while loop:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- LOCKERS --") + "\n")
print_tab("Enter the locker number you wish to search:" )
print_tab("You may need a " + pr_colour("l_blue","Hint") + " to start your search.")
var, num = item_input()
if str(type(num)) != "<class 'int'>":
var = san_text(var + str(num))
if var == "locker" and num > 0 and num < 31:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- LOCKER " + str(num) + " --") + "\n")
if num < 14:
print_tab("You open " + pr_colour("l_blue","Locker " + str(num)) + " and find that it is empty.")
elif num < 21:
print_tab("You go to open " + pr_colour("l_blue","Locker " + str(num)) + " but it appears to be locked.")
elif num == 21:
locker_21(game)
elif num < 24:
print_tab("You go to open " + pr_colour("l_blue","Locker " + str(num)) + " but it appears to be locked.")
elif num < 28:
if game.get_key == False:
print_tab("As you open " + pr_colour("l_blue","Locker " + str(num)) + " you hear someone else enter the room. You pretend to be retrieving ")
print_tab("something from your non-existent bag in the empty locker. The person passes behind you and goes ")
print_tab("further down your row. Out of the corner of your eye you see them take their pass from around ")
print_tab("their neck and put it inside the locker. They then take out a lunch box and lock their locker behind ")
print_tab("them. As they exit the room they slip their locker key into the pocket of their worn work coat. ")
game.get_key = True
game.set_new_ob("Follow the worker and find a way to get their locker key")
else:
print_tab("You open " + pr_colour("l_blue","Locker " + str(num)) + " and find that it is empty.")
elif num < 31:
print_tab("You go to open " + pr_colour("l_blue","Locker " + str(num)) + " but it appears to be locked.")
pause()
else:
hint = "Type: Locker 1"
loop = game.basic_game_func(var, hint)
def locker_21(game = N_game()):
bol = game.pc.check_inventory("Locker 21 - Key")
if bol and game.locker_21_empty == False:
print_tab("You slide the key for locker 21 into the lock and open it. Inside the locker is a black ")
print_tab("backpack which is open and a security pass with the words '<NAME> - Warehouse Opts' ")
print_tab("on it. You take the pass and relock the locker. ")
game.locker_21_empty = True
s_pause()
game.completed_spec_ob("Open Locker 21")
s_pause()
game.set_new_ob("Enter the Warehouse")
s_pause()
game.pc.add_inventory("Warehouse - ID Card")
elif bol and game.locker_21_empty:
print_tab("You slide the key for locker 21 into the lock and open it. Inside the locker is a black ")
print_tab("backpack which is open and seems to have had a box removed from it. Nothing useful here, ")
print_tab("you close and relock the locker.")
else:
print_tab("You go to open " + pr_colour("l_blue","Locker 21") + " but it appears to be locked.")
if __name__ == "__main__":
game = N_game()
game.pc.inventory = ["Locker 21 - Key"]
locker_room(game)
```
#### File: JoshuaMcroberts/DeliveryDilemmaLite/start.py
```python
from libraries import *
from text import *
from game import *
from reception import recep
# DISPLAY HELP TEXT
def help_text():
clear_screen()
print_tab("Help text will go here!")
# DISPLAY ABOUT TEXT
def cred_text():
clear_screen()
print_tab(pr_colour("l_green","-- CREDITS --"))
print_tab("Intro Story Reviewers - <NAME>, <NAME>, <NAME> ")
print_tab("Receptionsist Name - <NAME>")
print_tab("Alpha Testers - <NAME>, <NAME>, <NAME>")
print_tab("Beta Testers - <NAME>, <NAME>")
print_tab("User Testers - <NAME>, <NAME>")
# DISPLAY ASCII ART
def game_intro():
clear_screen()
# ascii_del_dil()
print(pr_colour("l_blue","\n\tWelcome to Delviery Dilemma"))
s_pause()
# DISPLAYS GAME OVER ASCII ART
def game_over():
ascii_game_over()
# GAME FUNCTION
def new_game():
clear_screen()
game = N_game()
game.enter_name()
game.set_courier()
game.create_char()
pc = game.get_character()
cour = game.get_courier()
pause()
act_1_intro(cour, pc)
recep(game)
game_over()
def menu():
ext = False
while not ext:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- MAIN MENU --") + "\n")
print_tab("[1] Start\n")
print_tab("[2] Help\n")
print_tab("[3] Credits\n")
print_tab("[4] Exit\n")
try:
main_op = int(input("\tEnter Option: "))
except:
main_op = 10
if main_op == 1:
new_game()
elif main_op == 2:
help_text()
pause()
elif main_op == 3:
cred_text()
pause()
elif main_op == 4:
print("")
print_tab(pr_colour("l_orange","Bye Bye\n"))
ext = True
else:
print_tab("Select a Number from 1-4")
pause()
# MAIN FUNCTION
def main():
game_intro()
menu()
if __name__ == "__main__":
main()
```
#### File: JoshuaMcroberts/DeliveryDilemmaLite/warehouse.py
```python
from libraries import *
from game import N_game
def warehouse(game = N_game()):
loop = True
while loop:
if(game.game_over == False):
game.game_map.pre = game.game_map.player_enter((2,1),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- WAREHOUSE --")+"\n")
print_tab("The warehouse is a cavernous open space with concrete floors painted a pale blue colour.")
print_tab("Red lines clearly mark out walk ways from fork lift drive paths. The warehouse appears to")
print_tab("have been broken down into sections. To the front of the warehouse there are two plastic ")
print_tab("sheeting, covered holes in the wall. The space behind them is clear, however after that on")
print_tab("the wall can be found the word " + pr_colour("l_blue", "Sorting Area") + ". Looking to the opposite side of the room")
print_tab("you can see six smaller gaps in the wall covered by the same plastic sheeting as the others.")
print_tab("The wall beside this area reads " + pr_colour("l_blue", "Loading Bay") + ". Next to you there is a desk that has been")
print_tab("labelled " + pr_colour("l_blue", "Parcel Repair") + ". This seems to be were damaged parcels go when they need fixing. ")
print_tab("The last feature of the warehouse is a window surrounded " + pr_colour("l_blue", "Office") + " in the near right hand corner. ")
var = san_input()
# Navigation IF
if var == "sortingarea":
shelves(game)
elif var == "parcelrepair":
damaged_parcel_area(game)
elif var == "loadingbay":
loading_bay(game)
elif var == "office":
office(game)
else:
hint = "Look around for Uncle Jock's Parcel"
loop = game.basic_game_func(var, hint)
else:
loop = False
def shelves(game = N_game()):
loop = True
while loop:
game.game_map.pre = game.game_map.player_enter((2,0),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- SORTING AREA --") + "\n")
print_tab("The sorting area is broken down into postcodes and forwarding piles. Some to be shipped to ")
print_tab("other distribution centres and others to be delivered to the local area. In the forwarding ")
print_tab("section there are a number of parcels to be sent however only four of them match the size of ")
print_tab("the parcel you are looking for. Have a look around at the parcels. You may need a " + pr_colour("l_blue","Hint") + " to ")
print_tab("start your search.")
var, num = item_input()
if str(type(num)) != "<class 'int'>":
var = san_text(var + str(num))
if var == "parcel" and num < 5 and num > 0:
boxes(1 ,num, game)
else:
hint = "Type: Parcel 1 "
loop = game.basic_game_func(var, hint)
def damaged_parcel_area(game = N_game()):
loop = True
while loop:
game.game_map.pre = game.game_map.player_enter((2,2),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL REPAIR STATION --") + "\n")
print_tab("On the desk sits two parcels that seem a little worst for wear. The " + pr_colour("l_blue", "Parcel 1") + " seems to have")
print_tab("been dropped as one of the corners has the characteristic signs of landing face first. ")
print_tab(pr_colour("l_blue", "Parcel 2") + " seems to have been crashed by another parcel significantly heavier then if could ")
print_tab("withstand. All around its side are the wrinkles in the cardboard formed when it buckled")
print_tab("under the weight which also seems to have caused the corners to spilt.")
var = san_input()
if var == "parcel1":
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL 1 --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ New Chester Road │")
print_tab("\t│ Ellesmere Port │")
print_tab("\t│ Cheshire │")
print_tab("\t│ CH66 1QW │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(1)
office_empty(game)
elif var == "parcel2":
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL 2 --") + "\n")
print_tab("The address on this label appears to be ripped:\n")
print_tab("\t ─────────┐")
print_tab("\t _\Roberts │")
print_tab("\t /raney Road │")
print_tab("\t __\derry │")
print_tab("\t /rn │")
print_tab("\t /JG │")
print_tab("\t /ern Ireland │")
print_tab("\t ─────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(2)
office_empty(game)
else:
hint = "Don't lick icy lamp posts"
loop = game.basic_game_func(var, hint)
def loading_bay(game = N_game()):
loop = True
while loop:
game.game_map.pre = game.game_map.player_enter((1,3),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- LOADING BAY --") + "\n")
print_tab("The loading bay has a fairly simple layout. A wheeled cage trolley can be easily wheeled from")
print_tab("the sorting area to the smaller entrances which then allows for easy loading of the delivery")
print_tab("vans when they are getting ready for their delivery runs. There is a single " + pr_colour("l_blue", "Roller Cage"))
print_tab("sitting off to the side of one of the loading areas.")
var = san_input()
if var == "rollercage":
rollercage(game)
else:
hint = "Don't lick icy lamp posts"
loop = game.basic_game_func(var, hint)
def rollercage(game = N_game()):
loop = True
while loop:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- ROLLER CAGE --") + "\n")
print_tab("Three parcel lie in an almost tower like structure in the bottom of the Roller Cage. Most of ")
print_tab("the labels are obscured. You can take a closer look at each parcel to see its shipping label.")
print_tab("You may need a " + pr_colour("l_blue","Hint") + " to start your search.")
var, num = item_input()
if str(type(num)) != "<class 'int'>":
var = san_text(var + str(num))
if var == "parcel" and num <4 and num > 0:
boxes( 2 ,num, game)
else:
hint = "Type: Parcel 1 "
loop = game.basic_game_func(var, hint)
def office(game = N_game()):
loop = True
while loop:
if(game.game_over == False):
game.game_map.pre = game.game_map.player_enter((0,3),game.game_map.pre)
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- WAREHOUSE OFFICE --") + "\n")
if game.worker == True:
print_tab("As you get closer to the office you see there is someone inside it. They would recognise ")
print_tab("instantly that you weren't supposed to be here. You best search elsewhere until they leave. ")
pause()
loop = False
else:
print_tab("You enter the office and find cluttered space. On a table in the back of the room semi-ordered ")
print_tab("stacks of paper climb the wall. Three of the four sides of the boxy room have glass windows ")
print_tab("that span the length of the side. The bottom edges of the window frames are coated with a thin")
print_tab("layer of dust which appears to have been disturbed in places where people have lent against it.")
print_tab("On a table that faces into the warehouse sits a " + pr_colour("l_blue","Computer") + " with it password and username handily")
print_tab("stored on a post-it note stuck to the top left-hand corner of the screen. ")
var = san_input()
if var == "computer":
computer(game)
else:
hint = "Don't lick icy lamp posts"
loop = game.basic_game_func(var, hint)
else:
loop = False
def computer(game = N_game()):
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- COMPUTER --")+"\n")
print_tab("You unlock the computer to find a parcel management system loaded on the screen. On the ")
print_tab("display different numbers show how many parcels will be shipped to each of the surrounding ")
print_tab("towns.")
s_pause()
print_tab("You select the search function and enter the tracking number of Uncle Jocks parcel.")
s_pause()
print_tab("An incorrect value error appears on the screen and then blinks out.")
s_pause()
print_tab("You try entering the parcel ID number and immediately an item record opens up.")
s_pause()
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL RECORD --") + "\n")
print_tab("┌──────────────────────────────────────────────────────────────┐")
print_tab("│ Parcel Number: B42 8472 3189 6439 10 │")
print_tab("│ │")
print_tab("│ Tracking Number: A2K6U9-2893-G2GU96 │")
print_tab("│ │")
print_tab("│ Delivery Address: Jock Thistlewaite Angus MacTavish III │")
print_tab("│ 3 Pennyworth Rd │")
print_tab("│ Aderfeldy │")
print_tab("│ Perthshire │")
print_tab("│ BXA2XW │")
print_tab("│ │")
print_tab("│ Delivery Date: Tomorrow - 24/12/2021 │")
print_tab("│ │")
print_tab("│ Current Location: In Vehicle for delivery │")
print_tab("└──────────────────────────────────────────────────────────────┘")
pause()
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- COMPUTER --")+"\n")
print_tab("After skimming over the details you realise that the parcel in no longer in the warehouse ")
print_tab("but instead in a vehicle waiting to be delivered.")
s_pause()
print_tab("You select the Current Location field and a vehicle record opens.")
s_pause()
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- VEHICLE RECORD --") + "\n")
print_tab("┌───────────────────────────────┐")
print_tab("│ Vehicle ID: 00001372 │")
# print_tab("│ │")
print_tab("│ Driver Name: Sidney │")
print_tab("│ Miles: 100,263 │")
print_tab("│ │")
print_tab("│ Serviced Last: 30/09/2021 │")
print_tab("│ MOT due: 22/01/2022 │")
print_tab("│ │")
print_tab("│ REG: " + game.unformated_plate + " │")
print_tab("└───────────────────────────────┘")
pause()
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- COMPUTER --")+"\n")
print_tab("You now have the vehicle information. "+ game.player_name +" it is up to you! ")
s_pause()
game.set_new_ob("Find Uncle Jock's Parcel in a Vehicle with REG: " + game.number_plate )
s_pause()
loop = True
while loop:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- COMPUTER --")+"\n")
print_tab("Did you find Uncle Jock's parcel in the delivery vehicle? Type YES to continue.")
var = san_input()
if var == "yes":
loop = False
elif var == "hint":
print("")
hint = "Call the game maker if you can't find the"
print("\tHint -", end="")
print_tab(hint)
pause()
else:
print("")
print_tab("Incorrect entry try again")
pause()
game.game_over = True
def boxes( opt , num, game = N_game() ):
if opt == 1:
if num == 1 :
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ 25 Terrace Rd │")
print_tab("\t│ Aberystwyth │")
print_tab("\t│ Dyfed │")
print_tab("\t│ SY23 1NP │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(3)
elif num == 2:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ 8 Lynwood Close │")
print_tab("\t│ Ashton-under-Lyne │")
print_tab("\t│ Tameside │")
print_tab("\t│ OL7 9SS │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(4)
elif num == 3:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ College Green │")
print_tab("\t│ Bristol │")
print_tab("\t│ City of Bristol │")
print_tab("\t│ BS1 5TA │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(5)
elif num == 4:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ Bethany Hunt │")
print_tab("\t│ 56 Hambro Hill │")
print_tab("\t│ Rayleigh │")
print_tab("\t│ Essex │")
print_tab("\t│ SS6 8BW │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(6)
office_empty(game)
else:
if num == 1 :
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ 27 Manor Way │")
print_tab("\t│ Borehamwood │")
print_tab("\t│ Hertfordshire │")
print_tab("\t│ WD6 1QJ │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(7)
elif num == 2:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ Yvonne Price │")
print_tab("\t│ 15-16 High St │")
print_tab("\t│ Swansea │")
print_tab("\t│ Glamorgan │")
print_tab("\t│ SA1 1LF │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(8)
elif num == 3:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- PARCEL "+ str(num) +" --") + "\n")
print_tab("The address label on the parcel reads:\n")
print_tab("\t┌────────────────────┐")
print_tab("\t│ <NAME> │")
print_tab("\t│ 14 St Thomas Rd │")
print_tab("\t│ Brentwood │")
print_tab("\t│ Essex │")
print_tab("\t│ CM14 4DB │")
print_tab("\t│ United Kingdom │")
print_tab("\t└────────────────────┘\n")
print_tab("Not Uncle Jock's Parcel, Lets keep looking")
pause()
game.set_boxes(9)
office_empty(game)
def office_empty(game = N_game()):
empty = game.check_boxes()
if empty == True:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- SEARCHING --") + "\n")
print_tab("As you set down the parcel you are looking at you glance across the warehouse to the office.")
print_tab("You notice the worker that was in the office has left it and is heading out the door to the ")
print_tab("main building. Now is your chance to have a look inside.")
game.set_new_ob("Search the Office")
game.worker = False
pause()
if __name__ == "__main__":
game = N_game()
game.set_num_plate(" KLZ 9890 ")
computer(game)
# warehouse(game)
``` |
{
"source": "joshuamegnauth54/aapor_scholars_2021",
"score": 3
} |
#### File: aapor_scholars_2021/steam_review_sentiments/cnn_model.py
```python
import numpy as np
import keras
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.exceptions import NotFittedError
from keras.models import Sequential
from keras.layers import BatchNormalization, Conv1D, Dense, Embedding
from keras.layers.pooling import GlobalMaxPooling1D
from keras.initializers import Constant
from utilities import null_preproc, transform_string,\
transform_all, tokenize_all
# This class is badly designed. I wanted to leverage spaCy, but I combined
# tools in a very poor way...
class PadCounts:
def __init__(self, nlp, pad_length=None):
"""Instantiate PadCounts.
Parameters
----------
nlp : spacy.lang.en.English
Trained spaCy language object.
pad_length : int, optional
Set a predefined length to pad data in transform(). Calculated
from X_train during fit() if None.
Returns
-------
None.
"""
# Language object for embeddings.
self.__nlp = nlp
# Word embeddings array.
self.__embeddings = None
# Sklearn model for word counts.
self.__vectorizer = None
# Vocabulary size based on X_train.
self.__vocab_size = None
# Length of the pre-trained word embeddings vector (300 most likely)
self.__vec_size = None
# Max length of a training document (or a predefined max for padding)
self.__pad_length = pad_length
def __to_docs(self, X):
# Convert X to a list of Doc if necessary
if isinstance(X[0], str):
return np.array([self.__nlp(text) for text in X])
else:
return X
def fit(self, X_train):
"""Fit PadCounts on X_train and transform into embeddings.
Parameters
----------
X_train : np.ndarray[spacy.tokens.Doc or str]
Array of spaCy Docs or strings (training).
Raises
------
ValueError
Raised if X_train isn't an array of spaCy Docs.
Returns
-------
None.
"""
if not isinstance(X_train, (np.ndarray, list)) or not len(X_train):
raise ValueError("X_train needs to be an array of strs or Docs.")
# Make sure X_train are Docs.
X_train = self.__to_docs(X_train)
# CountVectorizer counts each word/token, so I can use it to extract
# ONLY the vectors present in my data from spaCy's pretrained
# embeddings.
self.__vectorizer = CountVectorizer(strip_accents="unicode",
preprocessor=null_preproc,
tokenizer=transform_string,
token_pattern=None).fit(X_train)
# The vocabulary size only consists of the terms that appear after
# vectorizing. This is our first dimension.
# 0 will be used as an indicator for missing words, so let's shift the
# vocab by elements + 1.
self.__vocab_size = len(self.__vectorizer.get_feature_names()) + 1
# Word vectors length (second dimension).
self.__vec_size = self.__nlp.vocab.vectors_length
# Remove stop words, et cetera.
# And yeah, due to bad design I'm calling transform_string a lot.
X_transformed = transform_all(X_train)
if not self.__pad_length:
self.__pad_length = len(max(X_transformed, key=len))
def embeddings(self):
"""Return subsetted embeddings for X_train.
The returned vectors are a subset of the spaCy language object's
vectors that only include words present in X_train.
PadCounts should be fit() before calling embeddings().
Raises
------
NotFittedError
Raised if PadCounts() is unfit.
Returns
-------
embeddings : np.ndarray[np.float32]
Subsetted word embeddings.
"""
        if self.__embeddings is not None:
return self.__embeddings
elif not self.__vectorizer:
raise NotFittedError("Call PadCounts.fit() first.")
# Initialize a zero length ndarray with the vocab and vector sizes.
self.__embeddings = np.zeros((self.__vocab_size, self.__vec_size),
dtype=np.float32)
# CountVectorizer.vocabulary_ is a dictionary matching word to index.
# Thus:
# index = vectorizer.vocabulary_["meow"]
# value = vectorizer.get_feature_names()[index]
# value == "meow"
for word, i in self.__vectorizer.vocabulary_.items():
# Can't index with NumPy strings.
# Also, shift the embeddings by 1.
            self.__embeddings[i + 1] = self.__nlp.vocab[str(word)].vector
        return self.__embeddings
def transform(self, X, remove_junk=True):
"""Return tokenized X.
Parameters
----------
X : np.ndarray[Doc or str]
Array of Docs or str to tokenize.
remove_junk : bool, optional
Whether X needs to be transformed to remove stop words.
The default is True.
Raises
------
NotFittedError
            Raised if PadCounts() has not been fit yet.
ValueError
            Raised if X is not a non-empty array of strings or Docs.
Returns
-------
X_tokens : np.ndarray[np.int32]
            Padded token indices for X.
"""
if not self.__vectorizer or not self.__pad_length:
raise NotFittedError("Call PadCounts.fit() first.")
if not isinstance(X, (np.ndarray, list)) or not len(X):
raise ValueError("X_train needs to be an array of strs or Docs.")
# Make sure X is a list of Docs
X = self.__to_docs(X)
# Remove stop words et cetera if necessary.
if remove_junk:
X = transform_all(X)
# Tokenize the training and test sets. 0 is the magic NaN value.
return tokenize_all(X,
self.__vectorizer,
0,
True,
self.__pad_length)
def cnn_model(embeddings, max_length, ngrams=3, dropout_prob=.4):
# Base model. Convert to class later(?!?).
model = Sequential(name="cnn_steam_reviews_model")
# Embedding layer to use our pretrained vectors.
# https://keras.io/examples/nlp/pretrained_word_embeddings/
model.add(Embedding(embeddings.shape[0],
embeddings.shape[1],
embeddings_initializer=Constant(embeddings),
# mask_zero=True,
input_length=max_length,
trainable=False))
    # One-dimensional convolution layer
model.add(Conv1D(max_length,
ngrams,
padding="same"))
# Normalize inputs.
model.add(BatchNormalization())
# Max pooling
model.add(GlobalMaxPooling1D())
# Non-linearity and weight optimization
model.add(Dense(128, activation="relu"))
# Output
model.add(BatchNormalization())
model.add(Dense(1, activation="sigmoid"))
# Compile and return
model.compile("adam",
"binary_crossentropy",
["accuracy"])
return model
def model_def_fit(model, X_train, y_train, epochs):
return model.fit(X_train,
y_train,
batch_size=128,
epochs=epochs,
workers=6,
use_multiprocessing=True,
validation_split=.25)
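# Illustrative end-to-end sketch (assumes a spaCy model with word vectors, e.g.
# "en_core_web_md", and pre-existing X_train/y_train arrays; names below are
# hypothetical and this is not part of the original module):
#     nlp = spacy.load("en_core_web_md")
#     pad_counts = PadCounts(nlp)
#     pad_counts.fit(X_train)
#     pretrained = pad_counts.embeddings()
#     X_tokens = pad_counts.transform(X_train)
#     model = cnn_model(pretrained, X_tokens.shape[1])
#     history = model_def_fit(model, X_tokens, y_train, epochs=5)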
``` |
{
"source": "JoshuaMeyers/guacamol_baselines",
"score": 2
} |
#### File: frag_gt/src/io.py
```python
import logging
import joblib
from joblib import delayed, Parallel
from rdkit import Chem
from tqdm import tqdm
from typing import List, Optional
logger = logging.getLogger(__name__)
def load_smiles_from_file(smi_file: str) -> List[str]:
with open(smi_file) as f:
smiles = [s.strip() for _, s in enumerate(f)]
return smiles
def _smi2mol(smi: str) -> Optional[Chem.rdchem.Mol]:
return Chem.MolFromSmiles(smi)
def valid_mols_from_smiles(smiles_list: List[str], n_jobs: int = -1) -> List[Chem.rdchem.Mol]:
if n_jobs < 0:
n_jobs = joblib.cpu_count()
logger.info(f"found {n_jobs} cpus available")
if n_jobs == 1:
valid_mols = []
for s in tqdm(smiles_list):
m = _smi2mol(s)
if m is not None:
valid_mols.append(m)
else:
with Parallel(n_jobs=n_jobs) as pool:
parsed_mols = pool(delayed(_smi2mol)(s) for s in smiles_list)
valid_mols = [m for m in parsed_mols if m is not None]
logger.info(f"parsed {len(valid_mols)} valid mols from a possible {len(smiles_list)} smiles using rdkit")
return valid_mols
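# Illustrative usage sketch (the SMILES file name is hypothetical; not part of the
# original module):
#     smiles = load_smiles_from_file("molecules.smi")
#     mols = valid_mols_from_smiles(smiles, n_jobs=4)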
```
#### File: frag_gt/src/mapelites.py
```python
from abc import ABC, abstractmethod
from rdkit.Chem import Descriptors
from typing import List, Tuple, Union, Dict
from frag_gt.src.fragmentors import fragmentor_factory
from frag_gt.src.gene_type_utils import get_species
from frag_gt.src.population import Molecule
class MapElites(ABC):
"""
Place molecules in discretized map of the feature space, where only the fittest `self.n_elites`
molecules are kept per cell. This ensures diversity in the population.
"""
def __init__(self, n_elites: int = 1):
self.n_elites = n_elites
def place_in_map(self, molecule_list: List[Molecule]) -> Tuple[List[Molecule], List[str]]:
"""
1. Compute the feature descriptor of the solution to find the correct cell in the N-dimensional space
2. Check if the cell is empty or if the previous performance is worse, place new solution in the cell
Args:
molecule_list: list of molecule objects with fitness scores
        Returns:
            Tuple of (elite molecules retained in the map, list of occupied cell ids)
"""
map: Dict[str, List[Molecule]] = {}
for mol in molecule_list:
# compute features and output a discrete cell id (str)
f = self.compute_features(mol)
# get existing molecule in that cell
existing_m = map.get(f, [])
# place the current mol in the map if its fitter than others in the cell
if not len(existing_m) or (existing_m[-1].score < mol.score):
existing_m.append(mol)
existing_m = sorted(existing_m, key=lambda x: x.score, reverse=True)[:self.n_elites]
map[f] = existing_m
return [m for mollist in map.values() for m in mollist], list(map.keys())
@abstractmethod
def compute_features(self, m: Molecule):
pass
class MWLogPMapElites(MapElites):
""" map elites using two dimensions: molecular weight and log p """
def __init__(self, mw_step_size: float = 25., logp_step_size: float = 0.25, n_elites: int = 1):
self.mw_step_size = mw_step_size
self.logp_step_size = logp_step_size
super().__init__(n_elites)
def compute_features(self, m: Molecule) -> str:
mw = Descriptors.MolWt(m.mol)
log_p = Descriptors.MolLogP(m.mol)
# discretize
mw_cell_midpoint = round(mw / self.mw_step_size) * self.mw_step_size
log_p_cell_midpoint = round(log_p / self.logp_step_size) * self.logp_step_size
return f"mw-midpoint={mw_cell_midpoint},logp-midpoint{log_p_cell_midpoint}"
class SpeciesMapElites(MapElites):
""" map elites using a single dimension: species (constructed from the gene types of constituent fragment genes """
def __init__(self, fragmentor: str, n_elites: int = 1):
self.fragmentor = fragmentor_factory(fragmentor)
super().__init__(n_elites)
def compute_features(self, m: Molecule) -> str:
frags = self.fragmentor.get_frags(m.mol)
return get_species(frags)
def map_elites_factory(mapelites_str: str, fragmentation_scheme) -> Union[SpeciesMapElites, MWLogPMapElites]:
if mapelites_str == "mwlogp":
map_elites = MWLogPMapElites(mw_step_size=25, logp_step_size=0.5)
elif mapelites_str == "species":
map_elites = SpeciesMapElites(fragmentation_scheme)
else:
raise ValueError(f"unknown value for mapelites argument: {mapelites_str}")
return map_elites
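# Illustrative usage sketch (scored_molecules is a hypothetical list of Molecule
# objects with fitness scores; not part of the original module):
#     elites_map = map_elites_factory("mwlogp", fragmentation_scheme="brics")
#     survivors, occupied_cells = elites_map.place_in_map(scored_molecules)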
```
#### File: frag_gt/src/stereo.py
```python
from rdkit import Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
from rdkit.Chem.rdchem import StereoSpecified
from typing import List
STEREO_OPTIONS = StereoEnumerationOptions(tryEmbedding=True, unique=True, maxIsomers=8, rand=None)
# todo explore fragment on chiral https://sourceforge.net/p/rdkit/mailman/message/35420297/
def mol_contains_unspecified_stereo(m: Chem.rdchem.Mol) -> bool:
try:
si = Chem.FindPotentialStereo(m)
except ValueError as e:
print(e)
print(Chem.MolToSmiles(m))
return False
if any([element.specified == StereoSpecified.Unspecified for element in si]):
return True
else:
return False
def enumerate_unspecified_stereocenters(m: Chem.rdchem.Mol) -> List[Chem.rdchem.Mol]:
if mol_contains_unspecified_stereo(m):
isomers = list(EnumerateStereoisomers(m, options=STEREO_OPTIONS))
else:
isomers = [m]
return isomers
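# Illustrative usage sketch (not part of the original module): alanine drawn without
# stereochemistry has one unspecified centre, so both enantiomers are returned.
#     isomers = enumerate_unspecified_stereocenters(Chem.MolFromSmiles("CC(N)C(=O)O"))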
```
#### File: tests/fragstore_scripts/test_generate_fragstore.py
```python
import os
from rdkit import Chem
from frag_gt.fragstore_scripts.generate_fragstore import FragmentStoreCreator
from frag_gt.src.fragstore import fragstore_factory
from frag_gt.tests.utils import SAMPLE_SMILES_FILE
def test_create_gene_table(tmp_path):
# Given
sample_smiles_file = SAMPLE_SMILES_FILE
fragstore_output_dir = tmp_path / "output_dir"
fragstore_output_dir.mkdir()
fragstore_path = fragstore_output_dir / "temp_fragstore.pkl"
# When
db_creator = FragmentStoreCreator(frag_scheme="brics")
db_creator.create_gene_table(smiles_file=str(sample_smiles_file))
db_creator.create_gene_type_table()
db_creator.save_fragstore_to_disc(str(fragstore_path))
reloaded_db = fragstore_factory("in_memory", str(fragstore_path))
reloaded_db.load()
# Then
num_genes = db_creator.frag_db.get_records(query={}, collection='genes', return_count=True)
assert num_genes == 516
assert os.path.exists(fragstore_path)
assert len(reloaded_db.store["gene_types"])
def test_genes_from_parent_mol():
# Given
parent_mol = Chem.MolFromSmiles("CCSc1nnc(NC(=O)CCCOc2ccc(C)cc2)s1")
db_generator = FragmentStoreCreator(frag_scheme="brics")
# When
mol_genes = db_generator.genes_from_parent_mol(parent_mol, fragmentor=db_generator.fragmentor)
# Then
assert len(mol_genes) == 7
assert mol_genes[0] == {
"gene_frag_smiles": "[4*]CC",
"hap_frag_smiles": "CC",
"parent_smiles": "CCSc1nnc(NC(=O)CCCOc2ccc(C)cc2)s1",
"gene_type": "4"
}
assert len(set([x["parent_smiles"] for x in mol_genes])) == 1
def test_genes_from_parent_mol_multi():
# Given
parent_smiles = ["CCSc1nnc(NC(=O)CCCOc2ccc(C)cc2)s1", "CCCC(=O)NNC(=O)Nc1ccccc1"]
parent_mols = [Chem.MolFromSmiles(x) for x in parent_smiles]
db_generator = FragmentStoreCreator(frag_scheme="brics")
# When
all_genes = []
for mol in parent_mols:
mol_genes = db_generator.genes_from_parent_mol(mol, fragmentor=db_generator.fragmentor)
all_genes.extend(mol_genes)
# Then
assert len(all_genes) == 10
assert all_genes[0] == {
"gene_frag_smiles": "[4*]CC",
"hap_frag_smiles": "CC",
"parent_smiles": "CCSc1nnc(NC(=O)CCCOc2ccc(C)cc2)s1",
"gene_type": "4"
}
assert len(set([x["parent_smiles"] for x in all_genes])) == 2
```
#### File: tests/src/test_population.py
```python
import numpy as np
import random
from frag_gt.src.fragstore import fragstore_factory
from frag_gt.src.population import MolecularPopulationGenerator, Molecule
from frag_gt.tests.utils import SAMPLE_FRAGSTORE_PATH, SAMPLE_SMILES_FILE
from rdkit import Chem
SAMPLE_FRAGSTORE = fragstore_factory("in_memory", SAMPLE_FRAGSTORE_PATH)
SAMPLE_FRAGSTORE.load()
# seed random functions as operators have stochastic behaviour
np.random.seed(1337)
random.seed(1337)
def _scored_population():
""" read sample smiles and convert to mols as current_population """
with open(SAMPLE_SMILES_FILE, 'r') as f:
smiles = [x.strip() for x in f]
molecules = [Chem.MolFromSmiles(s) for s in smiles]
dummy_scores = list(range(len(molecules)))
current_pool = [Molecule(*m) for m in zip(dummy_scores, molecules)]
return current_pool
def test_population_generate():
# Given
n_molecules_to_generate = 10
mol_generator = MolecularPopulationGenerator(fragstore_path=SAMPLE_FRAGSTORE_PATH,
fragmentation_scheme="brics",
n_molecules=n_molecules_to_generate,
operators=None,
allow_unspecified_stereo=True,
selection_method="random")
current_pool = _scored_population()
# When
new_pool = mol_generator.generate(current_pool)
# Then
# since crossover adds multiple, and stereo adds multiple, we do not guarantee that population size is exact
# next tests shows a case where its possible to exactly generate a population by removing those factors
assert len(new_pool) >= n_molecules_to_generate
# this is now true, generate inputs population and outputs mol objects
# assert isinstance(current_pool[0], type(new_pool[0])), "inputs and outputs have different types"
def test_population_generate_custom_operators():
# Given
n_molecules_to_generate = 10
mol_generator = MolecularPopulationGenerator(fragstore_path=SAMPLE_FRAGSTORE_PATH,
fragmentation_scheme="brics",
n_molecules=n_molecules_to_generate,
operators=[("substitute_node_mutation", 1.)],
allow_unspecified_stereo=True,
selection_method='random')
current_pool = _scored_population()
# When
new_pool = mol_generator.generate(current_pool)
# Then
assert len(new_pool) == n_molecules_to_generate
def test_tournament_selection():
# Given
np.random.seed(1337)
random.seed(1337)
current_pool = _scored_population()
# When
fittest = MolecularPopulationGenerator.tournament_selection(current_pool, k=5)
# Then
assert int(fittest.score) == 92
def test_population_generate_tournament_selection():
# Given
n_molecules_to_generate = 10
mol_generator = MolecularPopulationGenerator(fragstore_path=SAMPLE_FRAGSTORE_PATH,
fragmentation_scheme="brics",
n_molecules=n_molecules_to_generate,
operators=None,
allow_unspecified_stereo=True,
selection_method="tournament")
current_pool = _scored_population()
# When
new_pool = mol_generator.generate(current_pool)
# Then
assert len(new_pool) >= n_molecules_to_generate
def test_population_generate_fixed_substructure_pyrazole():
# Given
baricitinib = "CCS(=O)(=O)N1CC(C1)(CC#N)N2C=C(C=N2)C3=C4C=CNC4=NC=N3"
pyrazole = "c1cn[nH]c1"
current_pool = [Molecule(1., Chem.MolFromSmiles(baricitinib))]
n_molecules_to_generate = 10
# When
mol_generator = MolecularPopulationGenerator(fragstore_path=SAMPLE_FRAGSTORE_PATH,
fragmentation_scheme="brics",
n_molecules=n_molecules_to_generate,
operators=None,
allow_unspecified_stereo=True,
selection_method="tournament",
fixed_substructure_smarts=pyrazole)
new_pool = mol_generator.generate(current_pool)
# Then
patt = Chem.MolFromSmarts(pyrazole)
assert all([m.HasSubstructMatch(patt) for m in new_pool])
def test_population_generate_fixed_substructure_impossible_pattern():
# Given
baricitinib = "CCS(=O)(=O)N1CC(C1)(CC#N)N2C=C(C=N2)C3=C4C=CNC4=NC=N3"
baricitinib_core_scaffold_smiles = "[N]1C=C(C=N1)C3=C2C=C[N]C2=NC=N3"
current_pool = [Molecule(1., Chem.MolFromSmiles(baricitinib))]
n_molecules_to_generate = 10
# When
mol_generator = MolecularPopulationGenerator(fragstore_path=SAMPLE_FRAGSTORE_PATH,
fragmentation_scheme="brics",
n_molecules=n_molecules_to_generate,
operators=None,
allow_unspecified_stereo=True,
selection_method="tournament",
fixed_substructure_smarts=baricitinib_core_scaffold_smiles,
patience=100)
new_pool = mol_generator.generate(current_pool)
# Then
# generator is unable to generate molecules for this fixed scaffold given the limited size of the sample fragstore
# this checks that when no molecules can be generated, we dont fall into an infinite loop
assert new_pool == []
``` |
{
"source": "JoshuaMeyers/ssbio",
"score": 3
} |
#### File: ssbio/core/complex.py
```python
import os.path as op
import logging
import ssbio.utils
from ssbio.core.object import Object
from ssbio.core.protein import Protein
from cobra.core import DictList
log = logging.getLogger(__name__)
class Complex(Object):
"""Store information about a protein complex, a generic representation of a 3D oligomeric complex composed of
individual protein subunits.
The main utilities of this class are to:
* Allow as input a name for the complex and a dictionary of its subunit composition
* Map each single subunit to its available experimental structures and homology models using methods in the
:class:`~ssbio.core.protein.Protein` class
* Map experimental structures and homology models to their available oliogmeric states
    * Select a single :attr:`~ssbio.core.complex.Complex.representative_complex` which best represents the 3D
oligomeric structure that best matches the defined subunit composition
* Calculate, store, and access properties related to this complex
* Provide summaries of available structures and selection details for the representative complex
Args:
        ident (str): Unique identifier for this complex
subunits (dict): Subunit composition defined as ``{protein_subunit_id: number_of_times_used_in_complex}``
description (str): Optional description for this complex
root_dir (str): Path to where the folder named by this complex's ID will be created.
Default is current working directory.
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, subunits, description=None, root_dir=None, pdb_file_type='mmtf'):
Object.__init__(self, id=ident, description=description)
self.subunit_dict = subunits
"""dict: Subunit composition defined as ``{protein_subunit_id: number_of_times_used_in_complex}``"""
self._subunits = None
self._oligomeric_state = None
self.pdb_file_type = pdb_file_type
"""str: ``pdb``, ``pdb.gz``, ``mmcif``, ``cif``, ``cif.gz``, ``xml.gz``, ``mmtf``, ``mmtf.gz`` - choose a file
type for files downloaded from the PDB"""
# Create directories
self._root_dir = None
if root_dir:
self.root_dir = root_dir
@property
def root_dir(self):
"""str: Path to where the folder named by this complex's ID will be created. Default is current working
directory."""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
raise ValueError('No path specified')
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.debug('Changing root directory of Complex "{}" from {} to {}'.format(self.id, self.root_dir, path))
if not op.exists(op.join(path, self.id)) and not op.exists(op.join(path, self.id) + '_complex'):
raise IOError('Complex "{}" does not exist in folder {}'.format(self.id, path))
self._root_dir = path
for d in [self.complex_dir]:
ssbio.utils.make_dir(d)
@property
def complex_dir(self):
"""str: Complex folder"""
if self.root_dir:
# Add a _complex suffix to the folder if it has the same name as its root folder
folder_name = self.id
if folder_name == op.basename(self.root_dir):
folder_name = self.id + '_complex'
return op.join(self.root_dir, folder_name)
else:
log.warning('Root directory not set')
return None
@property
def oligomeric_state(self):
"""Return the oligomeric state based on the contents of the :attr:`~ssbio.core.complex.Complex.subunit_dict`
dictionary.
Returns:
str: Oligomeric state of the complex, currently can be ``monomer``, ``homomer``, or ``heteromer``
"""
# TODO: [VizRecon]
        # Check the number of keys in the self.subunit_dict dictionary:
        # monomer if there is one subunit used once, homomer if one subunit
        # is used more than once, heteromer otherwise
        if len(self.subunit_dict) == 1:
            num_used = list(self.subunit_dict.values())[0]
            return 'monomer' if num_used == 1 else 'homomer'
        return 'heteromer'
@property
def subunits(self):
"""DictList: Subunits represented as a DictList of Protein objects"""
# TODO: [VizRecon]
# TODO: will need to adapt this to allow for input of previously created Protein objects
subunits = DictList()
for s in self.subunit_dict:
subunits.append(Protein(ident=s, description='Subunit of complex {}'.format(self.id),
root_dir=self.complex_dir, pdb_file_type=self.pdb_file_type))
return subunits
def map_subunit_to_sequence_and_structures(self, subunit_id):
"""Run the sequence and structure mapping code for a specified protein subunit.
This stores the mapping information directly inside the Protein subunit object itself
Args:
subunit_id (str): ID of protein subunit to run mapping code for
"""
# TODO: Nathan
def set_representative_complex(self):
"""Set the representative 3D structure for this complex based on coverage of subunits.
Args:
Returns:
"""
# TODO: [VizRecon]
if self.oligomeric_state == 'monomer':
pass
elif self.oligomeric_state == 'homomer':
pass
elif self.oligomeric_state == 'heteromer':
pass
def get_structure_stoichiometry(structure_file):
"""Parse a structure file and return a chain stoichiometry dictionary.
Args:
structure_file (str): Path to protein structure file (experimental or homology model
Returns:
dict: Dictionary of ``{chain_id: number_of_times_used}``
"""
# TODO: [VizRecon]
# TODO: most likely to be moved to another module, need to brainstorm
pass
```
#### File: ssbio/databases/patric.py
```python
import ftplib
import os.path as op
import logging
import os
log = logging.getLogger(__name__)
def download_coding_sequences(patric_id, seqtype, outdir='', outfile='', force_rerun=False):
"""Download the entire set of DNA or protein sequences from protein-encoding genes in a genome from NCBI.
Saves a FASTA file in the optional directory specified.
Args:
genome_accession_or_id (str): PATRIC ID
seqtype (str): "dna" or "protein" - if you want the coding sequences in DNA or amino acid formats.
outdir (str): optional output directory (default is the current directory)
outfile (str): optional custom name for file
force_rerun (bool): if you want to redownload existing files
Returns:
Path to downloaded FASTA file.
"""
if seqtype == 'dna':
extension = 'ffn'
elif seqtype == 'protein':
extension = 'faa'
else:
raise ValueError('seqtype must be "dna" or "protein"')
# TODO: use utils functions here
# path and filename parsing
if outfile:
outfile = op.join(outdir, '{}.{}'.format(outfile, extension))
else:
# if no outfile is specified, default is "$GI.PATRIC.faa"
outfile = op.join(outdir, '{}.PATRIC.{}'.format(patric_id, extension))
if not force_rerun:
# return the path to the file if it was already downloaded
if op.exists(outfile) and os.stat(outfile).st_size != 0:
log.debug('FASTA file already exists at {}'.format(outfile))
return outfile
try:
ftp = ftplib.FTP('ftp.patricbrc.org')
ftp.login()
ftp.cwd("/patric2/patric3/genomes/{0}/".format(patric_id))
with open(outfile, "wb") as gFile:
ftp.retrbinary('RETR {0}.PATRIC.{1}'.format(patric_id, extension), gFile.write)
ftp.quit()
# TODO: check exceptions
except:
return None
return outfile
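# Illustrative usage sketch (the PATRIC genome ID and directory are placeholders;
# not part of the original module):
#     faa_path = download_coding_sequences('83332.12', 'protein', outdir='/tmp')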
```
#### File: ssbio/databases/pdbflex.py
```python
import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# TODO: will running with chain ID always return a single item list?
assert len(infolist) == 1
newdict = {}
for k, v in infolist[0].items():
if k == 'avgRMSD' and v:
newdict[k] = float(v)
elif k == 'maxRMSD' and v:
newdict[k] = float(v)
else:
newdict[k] = v
return newdict
def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]
return infodict
def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# infolist = [str(x) for x in infolist.strip('[]').split(',')]
return infolist
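# Illustrative usage sketch reusing the example entry from the API notes above
# (the output directory is a placeholder; not part of the original module):
#     stats = get_pdbflex_info('1a50', 'A', outdir='/tmp')
#     profile = get_pdbflex_rmsd_profile('1a50', 'A', outdir='/tmp')
#     reps = get_pdbflex_representatives('1a50', 'A', outdir='/tmp')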
```
#### File: ssbio/databases/pdb_seq.py
```python
from os import path as op
import requests
from lxml import etree
import ssbio.utils
import logging
log = logging.getLogger(__name__)
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False):
"""Returns a list of BLAST hits of a sequence to available structures in the PDB.
Args:
seq (str): Your sequence, in string format
outfile (str): Name of output file
outdir (str, optional): Path to output directory. Default is the current directory.
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default).
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
link (bool, optional): Set to True if a link to the HTML results should be displayed
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
Returns:
list: Rank ordered list of BLAST hits in dictionaries.
"""
if len(seq) < 12:
raise ValueError('Sequence must be at least 12 residues long.')
if link:
page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue)
print(page)
parser = etree.XMLParser(ns_clean=True)
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(force_rerun, outfile):
# Load the BLAST XML results if force_rerun=True
page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format(
seq, evalue)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if outfile:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded BLAST results from REST server')
else:
log.error('BLAST request timed out')
return []
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing BLAST XML results'.format(outfile))
# Get length of original sequence to calculate percentages
len_orig = float(len(seq))
root = tree.getroot()
hit_list = []
for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
info = {}
hitdef = hit.find('Hit_def')
if hitdef is not None:
info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')
# One PDB can align to different parts of the sequence
# Will just choose the top hit for this single PDB
hsp = hit.findall('Hit_hsps/Hsp')[0]
# Number of identical residues
hspi = hsp.find('Hsp_identity')
if hspi is not None:
info['hit_num_ident'] = int(hspi.text)
info['hit_percent_ident'] = int(hspi.text)/len_orig
if int(hspi.text)/len_orig < seq_ident_cutoff:
log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0]))
continue
# Number of similar residues (positive hits)
hspp = hsp.find('Hsp_positive')
if hspp is not None:
info['hit_num_similar'] = int(hspp.text)
info['hit_percent_similar'] = int(hspp.text) / len_orig
# Total number of gaps (unable to align in either query or subject)
hspg = hsp.find('Hsp_gaps')
if hspg is not None:
info['hit_num_gaps'] = int(hspg.text)
info['hit_percent_gaps'] = int(hspg.text) / len_orig
# E-value of BLAST
hspe = hsp.find('Hsp_evalue')
if hspe is not None:
info['hit_evalue'] = float(hspe.text)
# Score of BLAST
hsps = hsp.find('Hsp_score')
if hsps is not None:
info['hit_score'] = float(hsps.text)
hit_list.append(info)
log.debug("{}: Number of BLAST hits".format(len(hit_list)))
return hit_list
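# Illustrative usage sketch (the sequence, file name, and directory shown are
# placeholders; not part of the original module):
#     hits = blast_pdb('MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDAT',
#                      outfile='my_seq_blast.xml', outdir='/tmp',
#                      evalue=0.0001, seq_ident_cutoff=0.5)
#     best_hit = hits[0] if hits else None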
```
#### File: ssbio/databases/pisa.py
```python
import requests
from collections import defaultdict
from copy import deepcopy
import ssbio.utils
import os
import os.path as op
from lxml import etree
import logging
import glob
log = logging.getLogger(__name__)
def download_pisa_multimers_xml(pdb_ids, save_single_xml_files=True, outdir=None, force_rerun=False):
"""Download the PISA XML file for multimers.
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pdb_ids (str, list): PDB ID or list of IDs
save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
provided, then a single, combined XML output file is downloaded
outdir (str): Directory to output PISA XML files
force_rerun (bool): Redownload files if they already exist
Returns:
list: of files downloaded
"""
if not outdir:
outdir = os.getcwd()
files = {}
pdb_ids = ssbio.utils.force_lower_list(sorted(pdb_ids))
# If we want to save single PISA XML files per PDB ID...
if save_single_xml_files:
# Check for existing PISA XML files
if not force_rerun:
existing_files = [op.basename(x) for x in glob.glob(op.join(outdir, '*_multimers.pisa.xml'))]
# Store the paths to these files to return
files = {v.split('_')[0]: op.join(outdir, v) for v in existing_files}
log.debug('Already downloaded PISA files for {}'.format(list(files.keys())))
else:
existing_files = []
# Filter PDB IDs based on existing file
pdb_ids = [x for x in pdb_ids if '{}_multimers.pisa.xml'.format(x) not in existing_files]
        # Split the list into chunks of 40 to limit the number of requests
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
# Download PISA files
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
r = requests.get(all_pisa_link)
# Parse PISA file and save individual XML files
parser = etree.XMLParser(ns_clean=True)
tree = etree.fromstring(r.text, parser)
for pdb in tree.findall('pdb_entry'):
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdb.find('pdb_code').text))
add_root = etree.Element('pisa_multimers')
add_root.append(pdb)
with open(filename, 'wb') as f:
f.write(etree.tostring(add_root))
files[pdb.find('pdb_code').text] = filename
                log.debug('{}: downloaded PISA results'.format(pdb.find('pdb_code').text))
else:
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdbs))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
r = requests.get(all_pisa_link)
with open(filename, 'w') as f:
f.write(r.text)
log.debug('Downloaded PISA results')
else:
log.debug('PISA results already downloaded')
for x in l:
files[x] = filename
return files
def parse_pisa_multimers_xml(pisa_multimers_xml, download_structures=False, outdir=None, force_rerun=False):
"""Retrieve PISA information from an XML results file
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pisa_multimers_xml (str): Path to PISA XML output file
download_structures (bool): If assembly files should be downloaded
outdir (str): Directory to output assembly files
force_rerun (bool): Redownload files if they already exist
Returns:
dict: of parsed PISA information
"""
if not outdir:
outdir = os.getcwd()
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(pisa_multimers_xml, parser)
root = tree.getroot()
pisa = defaultdict(dict)
for pdb in root.findall('pdb_entry'):
# Get the PDB ID
pdb_id = pdb.find('pdb_code').text
# Check the assembly status
status = pdb.find('status').text
errors = ['Entry not found', 'Overlapping structures', 'No symmetry operations']
if status in errors:
pisa[pdb_id]['status'] = status
continue
# Check monomer status
num_complexes = int(pdb.find('total_asm').text)
if num_complexes == 0:
pisa[pdb_id]['status'] = 'MONOMER'
continue
elif num_complexes > 0:
# All "assembly sets" (see PISA sets for more info)
sets = pdb.findall('asm_set')
for s in sets:
set_id = int(s.find('ser_no').text)
# All assemblies
assemblies = s.findall('assembly')
for cplx in assemblies:
############################################################################################
# This part tells you the actual composition of the predicted complex (chains and ligands)
parts = cplx.findall('molecule')
chains = defaultdict(int)
for part in parts:
part_id = part.find('chain_id').text
if part_id.startswith('['):
part_id = 'LIG_' + part_id.split(']')[0].strip('[')
chains[str(part_id)] += 1
ligands = {}
for key in deepcopy(chains).keys():
if key.startswith('LIG_'):
ligands[str(key.split('_')[1])] = chains.pop(key)
############################################################################################
adder = {}
cplx_id = int(cplx.find('id').text)
cplx_composition = str(cplx.find('composition').text)
d_g_diss = float(cplx.find('diss_energy').text)
d_g_int = float(cplx.find('int_energy').text)
pdb_biomol = int(cplx.find('R350').text)
if d_g_diss >= 0:
stable = True
else:
stable = False
adder['cplx_composition'] = cplx_composition.strip()
adder['cplx_chains'] = chains
adder['cplx_ligands'] = ligands
adder['stable'] = stable
adder['d_g_diss'] = d_g_diss
adder['d_g_int'] = d_g_int
adder['pdb_biomol'] = pdb_biomol
pisa[pdb_id][(set_id, cplx_id)] = adder
if download_structures:
ident = '{}:{},{}'.format(pdb_id, set_id, cplx_id)
filename = op.join(outdir, ident + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
download_structure_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimer.pdb?{}'.format(
ident)
r = requests.get(download_structure_link)
with open(filename, 'w') as f:
f.write(r.text)
log.debug('{}: downloaded structure file'.format(ident))
else:
log.debug('{}: structure file already downloaded'.format(ident))
pisa[pdb_id][(set_id, cplx_id)]['structure_file'] = filename
return pisa
def pdb_chain_stoichiometry_biomolone(pdbid):
"""Get the stoichiometry of the chains in biological assembly 1 as a dictionary.
Steps taken are:
1) Download PDB and parse header, make biomolecule if provided
2) Count how many times each chain appears in biomolecule #1
3) Convert chain id to uniprot id
4) Return final dictionary
Args:
pdbid (str): 4 character PDB ID
Returns:
dict: {(ChainID,UniProtID): # occurences}
"""
pass
```
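A short usage sketch for the two PISA helpers above (assumes the import path matches the file path, network access to the PDBe PISA service, and an existing, writable output directory; the PDB IDs and path are examples only):
```python
from ssbio.databases.pisa import download_pisa_multimers_xml, parse_pisa_multimers_xml

# Download one XML file per PDB ID, then parse each into a dict of assembly information
xml_files = download_pisa_multimers_xml(['4hhb', '1kf6'], outdir='/tmp/pisa')  # example IDs/path
for pdb_id, xml_path in xml_files.items():
    pisa_info = parse_pisa_multimers_xml(xml_path, download_structures=False)
    print(pdb_id, pisa_info[pdb_id])
```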
#### File: ssbio/pipeline/atlas3.py
```python
import json
import os.path as op
import pandas as pd
import ssbio.utils
import pickle
from collections import defaultdict
from tqdm import tqdm_notebook as tqdm
filter_proteome = [False, ]
filter_observations = ['A22.CCCP', 'A22.PYOCYANIN', 'CCCP.2', 'CHIR090.CCCP', 'DOXYCYCLINE.PARAQUAT', 'DOXYCYCLINE.PMS',
'DOXYCYCLINE.PYOCYANIN', 'FUMARATE.40C', 'FUMARATE.40MM', 'FUMARATE.A22', 'FUMARATE.CEFSULODIN',
'FUMARATE.PARAQUAT', 'FUMARATE.TOBRAMYCIN', 'NACL.PARAQUAT', 'NOSALT.FUMARATE', 'NOSALT.PARAQUAT',
'PARAQUAT.10UM', 'PH5.FUMARATE', 'PMS.PROCAINE', 'PYOCYANIN.0P2UM', 'PYOCYANIN.10', 'PYOCYANIN.1UM',
'TRIMETHOPRIM.PYOCYANIN', 'PYOCYANIN.10UM', 'pathotype_simple',
'ros_simulated', 'pathotype_simple2', 'pathotype_simple3'] #'isolation_source', 'pathotype', ]
filter_ismem = [('vizrecon','membrane'),
('vizrecon','inner_membrane'),
('vizrecon','outer_membrane'),]
#('tmhmm','membrane')]
filter_notmem = [#('vizrecon','non_membrane'),
('vizrecon','periplasm'),
('vizrecon','cytosol'), ]
#('vizrecon','extracellular'),]
#('tmhmm','non_membrane')]
filter_ismem_subseq = [['all', 'acc_3D', 'metal_2_5D', 'metal_3D', 'tm_2D', 'tm_3D',
'csa_2_5D', 'sites_2_5D'], ] # need the comma to treat it like one set
filter_notmem_subseq = [['all', 'disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'acc_2D',
'acc_3D', 'surface_3D', 'metal_2_5D', 'metal_3D', 'dna_2_5D', 'csa_2_5D', 'sites_2_5D'], ]
filter_all_subseq = [['all', 'disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'acc_2D',
'tm_2D', 'tm_3D',
'acc_3D', 'surface_3D', 'metal_2_5D', 'metal_3D', 'dna_2_5D', 'csa_2_5D', 'sites_2_5D'], ]
filter_subseq_suffixes = {'all': ['aa_%_ord', 'aa_%_dis', 'aa_%_M', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg', 'aa_%_Y'],
'disorder_2D': ['aa_%_ord', 'aa_%_dis'],
'disorder_3D': ['aa_%_ord', 'aa_%_dis'],
'ss_disorder_2D': ['aa_%_ord', 'aa_%_dis'],
'ss_disorder_3D': ['aa_%_ord', 'aa_%_dis'],
'acc_2D': ['aa_%_M', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg', 'aa_%_Y'],
'acc_3D': ['aa_%_M', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg', 'aa_%_Y'],
'surface_3D': ['aa_%_M', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg', 'aa_%_Y'],
'metal_2_5D': ['aa_%_M', 'aa_%_bulk', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg'],
'metal_3D': ['aa_%_M', 'aa_%_bulk', 'aa_%_C', 'aa_%_chrg', 'aa_%_carb', 'aa_%_poschrg', 'aa_%_negchrg'],
'tm_2D': ['aa_%_M', 'aa_%_tmstab', 'aa_%_tmunstab'],
'tm_3D': ['aa_%_M', 'aa_%_tmstab', 'aa_%_tmunstab'],
'dna_2_5D': ['aa_%_dis', 'aa_%_ord'],
'csa_2_5D': ['aa_%_chrg', 'aa_%_M', 'aa_%_C'],
'sites_2_5D': ['aa_%_chrg', 'aa_%_M', 'aa_%_C']}
# filter_subseq_3D_suffixes = {'disorder_3D': ['aa_count_ord', 'aa_count_dis'],
# 'ss_disorder_3D': ['aa_count_ord', 'aa_count_dis'],
# 'acc_3D': ['aa_count_M', 'aa_count_C', 'aa_count_chrg', 'aa_count_carb', 'aa_count_poschrg', 'aa_count_negchrg'],
# 'surface_3D': ['aa_count_M', 'aa_count_C', 'aa_count_chrg', 'aa_count_carb', 'aa_count_poschrg', 'aa_count_negchrg'],
# 'metal_3D': ['aa_count_M', 'aa_count_bulk', 'aa_count_C', 'aa_count_chrg', 'aa_count_carb', 'aa_count_poschrg', 'aa_count_negchrg'],
# 'tm_3D': ['aa_count_M', 'aa_count_tmstab', 'aa_count_tmunstab'],
# 'dna_2_5D': ['aa_count_dis', 'aa_count_ord'],
# 'csa_2_5D': ['aa_count_chrg', 'aa_count_M', 'aa_count_C'],
# 'sites_2_5D': ['aa_count_chrg', 'aa_count_M', 'aa_count_C']}
#
# filter_subseq_2D_suffixes = {'disorder_2D': ['aa_count_ord', 'aa_count_dis'],
# 'ss_disorder_2D': ['aa_count_ord', 'aa_count_dis'],
# 'acc_2D': ['aa_count_M', 'aa_count_C', 'aa_count_chrg', 'aa_count_carb', 'aa_count_poschrg', 'aa_count_negchrg'],
# 'metal_2_5D': ['aa_count_M', 'aa_count_bulk', 'aa_count_C', 'aa_count_chrg', 'aa_count_carb', 'aa_count_poschrg', 'aa_count_negchrg'],
# 'tm_2D': ['aa_count_M', 'aa_count_tmstab', 'aa_count_tmunstab'],
# 'dna_2_5D': ['aa_count_dis', 'aa_count_ord'],
# 'csa_2_5D': ['aa_count_chrg', 'aa_count_M', 'aa_count_C'],
# 'sites_2_5D': ['aa_count_chrg', 'aa_count_M', 'aa_count_C']}
def get_protein_feather_paths(protgroup, memornot, protgroup_dict, protein_feathers_dir, core_only_genes=None):
"""
protgroup example: ('subsystem', 'cog_primary', 'H')
memornot example: ('vizrecon', 'membrane')
protgroup_dict example: {'databases': {'redoxdb': {'experimental_sensitive_cys': ['b2518','b3352','b2195','b4016'], ...}}}
"""
prots_memornot = protgroup_dict['localization'][memornot[0]][memornot[1]]
if protgroup[0] == 'localization':
if protgroup[2] != 'all':
if memornot[1] in ['membrane', 'inner_membrane', 'outer_membrane'] and protgroup[2] not in ['membrane', 'inner_membrane', 'outer_membrane']:
return []
if memornot[1] not in ['membrane', 'inner_membrane', 'outer_membrane'] and protgroup[2] in ['membrane', 'inner_membrane', 'outer_membrane']:
return []
prots_group = protgroup_dict[protgroup[0]][protgroup[1]][protgroup[2]]
prots_filtered = list(set(prots_group).intersection(prots_memornot))
if core_only_genes:
prots_filtered = list(set(prots_filtered).intersection(core_only_genes))
return [op.join(protein_feathers_dir, '{}_protein_strain_properties.fthr'.format(x)) for x in prots_filtered if op.exists(op.join(protein_feathers_dir, '{}_protein_strain_properties.fthr'.format(x)))]
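# Example call (sketch reusing the docstring arguments above; the dict and directory are illustrative):
#   feathers = get_protein_feather_paths(protgroup=('subsystem', 'cog_primary', 'H'),
#                                        memornot=('vizrecon', 'membrane'),
#                                        protgroup_dict=my_protgroup_dict,
#                                        protein_feathers_dir='/path/to/feathers')
# Only feather files that actually exist on disk for the intersected gene set are returned.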
def get_observed_strains_and_df(observation, observation_dict):
"""
observation example: 'ros_simulated'
observation_dict example: {'ros_simulated': [['NT12204_755', 'wt'], ['NT12120_270', 'wt'], ...] ...}
"""
observed_df = pd.DataFrame.from_records(observation_dict[observation], columns=['strain','phenotype']).set_index('strain')
observed_strains = observed_df.index.tolist()
return observed_strains, observed_df
def get_interested_subsequences(subsequences):#, subseqdim):
"""
subsequences example: ['disorder_2D', 'ss_disorder_2D', ...]
filter_subseq_suffixes example: {'disorder_2D': ['aa_count_ord', 'aa_count_dis'], ... } -- defined above
"""
keep_indices = []
# if subseqdim == 'ALLD':
# filter_subseq_suffixes = filter_subseq_suffixes
# elif subseqdim == '3D':
# filter_subseq_suffixes = filter_subseq_3D_suffixes
# elif subseqdim == '2D':
# filter_subseq_suffixes = filter_subseq_2D_suffixes
# else:
# raise ValueError('ALLD, 3D, or 2D only!')
for subseq in subsequences:
if subseq == 'all':
keep_indices.extend([x for x in filter_subseq_suffixes[subseq]])
else:
keep_indices.extend([subseq + '_' + x for x in filter_subseq_suffixes[subseq]])
return keep_indices
def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
"""Load a feather of amino acid counts for a protein.
Args:
        protein_feather (str): path to feather file
        length_filter_pid (float): if set, keep only strains whose ``aa_count_total`` is at least
            this fraction of the K12 reference length (e.g. 0.8 keeps strains with >80% of the
            reference length)
        copynum_scale (bool): if counts should be multiplied by protein copy number
        copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
"""
protein_df = pd.read_feather(protein_feather).set_index('index')
# Combine counts for residue groups
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {
'aa_count_bulk' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],
'subseqs' : ['metal_2_5D', 'metal_3D']},
'aa_count_carb' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_chrg' : {'residues': _aa_property_dict_one['Charged'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',
'surface_3D']},
'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_tmstab' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_dis' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']},
'aa_count_ord' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']}}
# Do combination counts for all types of subsequences
for suffix, info in aggregators.items():
agg_residues = info['residues']
for prefix in info['subseqs']:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = prefix + '_aa_count_' + agg_res
if to_add_idx in protein_df.index:
to_add_idxes.append(to_add_idx)
subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() # Add each residue series
protein_df.loc[prefix + '_' + suffix] = subseq_agged_col # Append to df
    ## Remove strains with large deletions (e.g. length_filter_pid=0.8 keeps only strains with >80% of the reference length);
    ## alternative to atlas2.calculate_residue_counts_perstrain's wt_pid_cutoff param -- works a little differently, considering length only
if length_filter_pid:
keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
protein_df = protein_df[keep_cols]
# Multiply by proteomics copy number?
if copynum_scale:
if not isinstance(copynum_df, pd.DataFrame):
raise ValueError('Please supply copy numbers')
protein_id = op.basename(protein_feather).split('_protein')[0]
if protein_id in copynum_df.index:
copynum = copynum_df.at[protein_id, 'copynum']
if copynum > 0: # TODO: currently keeping one copy of proteins with 0, is that ok?
protein_df = protein_df * copynum
return protein_df
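# Usage sketch (illustrative path; mirrors how get_simple_sigdict calls this function below):
#   df = load_feather('/path/to/b0001_protein_strain_properties.fthr', length_filter_pid=0.8)
# returns a strains-as-columns DataFrame of residue counts with the aggregate rows
# (e.g. *_aa_count_chrg, *_aa_count_dis) appended.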
def get_proteome_counts_simple(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(index=loaded.index, columns=loaded.columns)
first = False
big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index')
# def get_proteome_counts_simple_sc(sc, prots_filtered_feathers, outpath, length_filter_pid=None,
# copynum_scale=False, copynum_df=None,
# force_rerun=False):
# import ssbio.utils
# if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
# protein_feathers_final_rdd = sc.parallelize(prots_filtered_feathers)
# mapper = protein_feathers_final_rdd.map(lambda x: load_feather(protein_feather=x, length_filter_pid=None,
# copynum_scale=copynum_scale,
# copynum_df=copynum_df))
# big_strain_counts_df = mapper.reduce(lambda df1, df2: df1.add(df2, fill_value=0))
# big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
# return big_strain_counts_df
# else:
# return pd.read_feather(outpath).set_index('index')
def get_proteome_counts_impute_missing(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts, uses the mean feature vector to fill in missing proteins for a strain"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(index=_all_counts, columns=loaded.columns)
first = False
new_columns = list(set(loaded.columns.tolist()).difference(big_strain_counts_df.columns))
if new_columns:
for col in new_columns:
big_strain_counts_df[col] = big_strain_counts_df.mean(axis=1)
not_in_loaded = list(set(big_strain_counts_df.columns).difference(loaded.columns.tolist()))
if not_in_loaded:
for col in not_in_loaded:
big_strain_counts_df[col] = big_strain_counts_df[col] + loaded.mean(axis=1)
big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index')
def get_proteome_percentages(counts_df, outpath, force_rerun=False):
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_percents_df = pd.DataFrame(columns=counts_df.columns)
for strain in counts_df.columns:
totals = list(filter(lambda x: x.endswith('total'), counts_df[strain].index))
for t in totals:
counts = t.rsplit('_', 1)[0]
aa_counts = list(filter(lambda x: (x.startswith(counts) and x not in totals), counts_df[strain].index))
for aa_count in aa_counts:
big_strain_percents_df.at[aa_count.replace('count', '%'), strain] = counts_df[strain][aa_count]/counts_df[strain][t]
big_strain_percents_df.astype(float).reset_index().to_feather(outpath)
else:
big_strain_percents_df = pd.read_feather(outpath).set_index('index')
big_strain_percents_df.index.name = None
return big_strain_percents_df
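# Typical chain (sketch; output paths are illustrative): sum per-protein feathers into a
# pseudo-proteome count matrix, then normalize each *_aa_count_* row by its *_count_total row:
#   counts = get_proteome_counts_simple(feathers, outpath='proteome_counts.fthr')
#   percents = get_proteome_percentages(counts, outpath='proteome_percents.fthr')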
def get_proteome_correct_percentages(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts and normalize by number of proteins, providing percentages"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
prot_tracker = defaultdict(int)
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(columns=loaded.columns)
first = False
tmp_df = pd.DataFrame(columns=loaded.columns)
for strain in loaded.columns:
prot_tracker[strain] += 1
totals = list(filter(lambda x: x.endswith('total'), loaded[strain].index))
for t in totals:
counts = t.rsplit('_', 1)[0]
aa_counts = list(
filter(lambda x: (x.startswith(counts) and x not in totals), loaded[strain].index))
for aa_count in aa_counts:
tmp_df.at[aa_count.replace('count', '%'), strain] = loaded[strain][aa_count] / \
loaded[strain][t]
big_strain_counts_df = big_strain_counts_df.add(tmp_df, fill_value=0)
for c, total in prot_tracker.items():
big_strain_counts_df.loc[:, c] /= total
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index')
def remove_correlated_feats(df):
    """Drop zero-variance features and collapse highly correlated features.

    Features (rows of ``df``) are clustered on 1 - |Spearman correlation| using average-linkage
    hierarchical clustering, and only the feature closest to each cluster centroid is kept.
    """
tmp = df.T
# Remove columns with no variation
nunique = tmp.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
tmp.drop(cols_to_drop, axis=1, inplace=True)
perc_spearman = scipy.stats.spearmanr(tmp)
abs_corr = np.subtract(np.ones(shape=perc_spearman.correlation.shape),
np.absolute(perc_spearman.correlation))
np.fill_diagonal(abs_corr, 0)
abs_corr_clean = np.maximum(abs_corr,
abs_corr.transpose()) # some floating point mismatches, just make symmetric
clustering = linkage(squareform(abs_corr_clean), method='average')
clusters = fcluster(clustering, .1, criterion='distance')
names = tmp.columns.tolist()
names_to_cluster = list(zip(names, clusters))
indices_to_keep = []
    ### Keep only the feature closest to each cluster centroid
for x in range(1, len(set(clusters)) + 1):
# Create mask from the list of assignments for extracting submatrix of the cluster
mask = np.array([1 if i == x else 0 for i in clusters], dtype=bool)
# Take the index of the column with the smallest sum of distances from the submatrix
idx = np.argmin(sum(abs_corr_clean[:, mask][mask, :]))
# Extract names of cluster elements from names_to_cluster
sublist = [name for (name, cluster) in names_to_cluster if cluster == x]
# Element closest to centroid
centroid = sublist[idx]
indices_to_keep.append(centroid)
return df.loc[df.index.isin(indices_to_keep)]
def get_simple_sigdict(prots_filtered_feathers, subsequences, observation, observation_dict,
                       remove_corr=True, force_rerun=False):
    """Per-protein, per-feature Mann-Whitney U tests between the two phenotype groups.

    Returns a dict of the form {'less'/'greater': {protein_id: [(feature, p-value), ...]}},
    keeping only features with a one-sided p-value < 0.01.
    """
sigdict = {'less': defaultdict(list),
'greater': defaultdict(list)}
for p in prots_filtered_feathers:
p_id = op.basename(p).split('_')[0]
outpath = op.join(op.dirname(p), '{}_protein_strain_properties_percfilt.fthr'.format(p_id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
p_df = load_feather(protein_feather=p, length_filter_pid=0.8)
p_perc_df = get_proteome_percentages(counts_df=p_df,
outpath=outpath,
force_rerun=force_rerun)
else:
p_perc_df = pd.read_feather(outpath).set_index('index')
# Clean data first
keep_subsequences = get_interested_subsequences(subsequences=subsequences)
p_perc_df2 = p_perc_df.loc[p_perc_df.index.isin(keep_subsequences)]
p_perc_df2 = p_perc_df2.astype(float).fillna(0)
p_perc_df2 = p_perc_df2.loc[(p_perc_df2 > 0).any(axis=1)]
if remove_corr:
try:
p_perc_df2 = remove_correlated_feats(p_perc_df2)
except:
continue
all_features = p_perc_df2.index.tolist()
# Add observations
keep_strains, observations_df = get_observed_strains_and_df(observation=observation,
observation_dict=observation_dict)
feat_obs_df = p_perc_df2.T.join(observations_df, how='inner')
# Split into 2 groups
if observation == 'isolation_source':
continue
elif observation == 'ros_simulated':
wt_df = feat_obs_df[feat_obs_df.phenotype == 'wt']
mut_df = feat_obs_df[feat_obs_df.phenotype != 'wt']
elif 'pathotype' in observation:
wt_df = feat_obs_df[feat_obs_df.phenotype == 'Other']
mut_df = feat_obs_df[feat_obs_df.phenotype != 'Other']
else:
wt_df = feat_obs_df[feat_obs_df.phenotype == 'No growth']
mut_df = feat_obs_df[feat_obs_df.phenotype == 'Growth']
if len(wt_df) == 0 or len(mut_df) == 0:
continue
# Mann-whitney test of each feature
for alt in ['less', 'greater']:
for feat in all_features:
try:
z_stat, p_val = mannwhitneyu(wt_df[feat], mut_df[feat], alternative=alt)
except ValueError:
continue
if p_val < 0.01:
# Store differentiating features for this protein in a dictionary
sigdict[alt][p_id].append((feat, p_val))
return sigdict
from scipy.cluster.hierarchy import linkage, fcluster
import numpy as np
import scipy.stats
from scipy.spatial.distance import squareform
from sklearn import preprocessing
from sklearn.decomposition import PCA
from scipy.stats import ks_2samp
import itertools
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
from scipy.spatial.distance import pdist # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
from itertools import combinations
from sklearn.metrics.pairwise import euclidean_distances # http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.euclidean_distances.html
import scipy.stats.stats as st
from scipy.stats import mannwhitneyu
from numpy.random import choice
plt.ioff() # Turn interactive plotting off
sns.set(rc={'figure.figsize': (16, 8.5)})
sns.set_context('talk')
sns.set_style('ticks')
sns.set_palette('Set2')
class PCAMultiROS():
def __init__(self, features_df, observations_df, plot_title, observation_colname='phenotype'):
self.features_df = features_df
self.observations_df = observations_df
self.observation_colname = observation_colname
self.plot_title = plot_title
self.num_components = None
self.pca = None
self.pc_names_list = None
self.pc_names_dict = None
self.principal_df = None
self.principal_observations_df = None
self.markers = None
def clean_data(self, keep_features=None, remove_correlated_feats=True):
self.features_df = self.features_df.astype(float).fillna(0)
self.features_df = self.features_df.loc[(self.features_df > 0).any(axis=1)]
if keep_features:
self.features_df = self.features_df.loc[self.features_df.index.isin(keep_features)]
if remove_correlated_feats:
tmp = self.features_df.T
# Remove columns with no variation
nunique = tmp.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
tmp.drop(cols_to_drop, axis=1, inplace=True)
perc_spearman = scipy.stats.spearmanr(tmp)
abs_corr = np.subtract(np.ones(shape=perc_spearman.correlation.shape),
np.absolute(perc_spearman.correlation))
np.fill_diagonal(abs_corr, 0)
abs_corr_clean = np.maximum(abs_corr,
abs_corr.transpose()) # some floating point mismatches, just make symmetric
clustering = linkage(squareform(abs_corr_clean), method='average')
clusters = fcluster(clustering, .1, criterion='distance')
names = tmp.columns.tolist()
names_to_cluster = list(zip(names, clusters))
indices_to_keep = []
                ### Keep only the feature closest to each cluster centroid
for x in range(1, len(set(clusters)) + 1):
# Create mask from the list of assignments for extracting submatrix of the cluster
mask = np.array([1 if i == x else 0 for i in clusters], dtype=bool)
# Take the index of the column with the smallest sum of distances from the submatrix
idx = np.argmin(sum(abs_corr_clean[:, mask][mask, :]))
# Extract names of cluster elements from names_to_cluster
sublist = [name for (name, cluster) in names_to_cluster if cluster == x]
# Element closest to centroid
centroid = sublist[idx]
indices_to_keep.append(centroid)
self.features_df = self.features_df.loc[self.features_df.index.isin(indices_to_keep)]
def run_pca(self, whiten=True):
# Normalize
for_pca_df = self.features_df.T
for_pca_df_scaled = pd.DataFrame(preprocessing.scale(for_pca_df), columns=for_pca_df.columns)
# Run PCA
self.num_components = min(len(for_pca_df.T.columns), len(for_pca_df.T.index))
pca = PCA(n_components=self.num_components, whiten=whiten)
pca_fit = pca.fit_transform(for_pca_df_scaled)
self.pc_names_list = ['PC{} ({:.0%})'.format(x + 1, pca.explained_variance_ratio_[x]) for x in
range(self.num_components)]
self.pc_names_dict = {k.split(' ')[0]: k for k in self.pc_names_list}
principal_df = pd.DataFrame(data=pca_fit, columns=self.pc_names_list, index=for_pca_df.index)
principal_df.index.name = 'strain'
self.principal_df = principal_df
self.pca = pca
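        # NOTE: the join with observations and the marker setup below are commented out, so
        # principal_observations_df / markers must be populated before using the plotting
        # methods or get_pca_ks_stats.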
# self.principal_observations_df = self.principal_df.join(self.observations_df, how='inner')
#
# # Make iterable list of markers
# mks = itertools.cycle(["<", "+", "o", 'D', 'x', '^', '*', '8', 's', 'p', 'v', 'X', '_', 'h'])
# self.markers = [next(mks) for i in range(len(self.principal_observations_df[self.observation_colname].unique()))]
def make_biplot(self, pc_x=1, pc_y=2, outpath=None, dpi=150, custom_markers=None, custom_order=None):
if not custom_order:
custom_order = sorted(self.observations_df[self.observation_colname].unique().tolist())
if not custom_markers:
custom_markers = self.markers
plot = sns.lmplot(data=self.principal_observations_df,
x=self.principal_observations_df.columns[pc_x - 1],
y=self.principal_observations_df.columns[pc_y - 1],
hue=self.observation_colname,
hue_order=custom_order,
fit_reg=False,
size=6,
markers=custom_markers,
scatter_kws={'alpha': 0.5})
plot = (plot.set(title='PC{} vs. PC{}'.format(pc_x, pc_y)))
if outpath:
plot.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
def make_pairplot(self, num_components_to_plot=4, outpath=None, dpi=150):
# Get columns
components_to_plot = [self.principal_observations_df.columns[x] for x in range(num_components_to_plot)]
# Plot
plot = sns.pairplot(data=self.principal_observations_df, hue=self.observation_colname,
vars=components_to_plot, markers=self.markers, size=4)
plt.subplots_adjust(top=.95)
plt.suptitle(self.plot_title)
if outpath:
plot.fig.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
def make_3Dplot(self, outpath=None, dpi=150):
import mpl_toolkits.mplot3d
fig = plt.figure(1, figsize=(8, 6))
ax = mpl_toolkits.mplot3d.Axes3D(fig)
for key, group in self.principal_observations_df.groupby(self.observation_colname):
ax.plot(group[self.principal_observations_df.columns[0]],
group[self.principal_observations_df.columns[1]],
group[self.principal_observations_df.columns[2]],
'o', alpha=0.5, label=key)
# Make simple, bare axis lines through space:
xAxisLine = ((min(self.principal_observations_df[self.principal_observations_df.columns[0]]),
max(self.principal_observations_df[self.principal_observations_df.columns[0]])), (0, 0), (0, 0))
ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r', alpha=0.4)
yAxisLine = ((0, 0), (min(self.principal_observations_df[self.principal_observations_df.columns[1]]),
max(self.principal_observations_df[self.principal_observations_df.columns[1]])), (0, 0))
ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r', alpha=0.4)
zAxisLine = ((0, 0), (0, 0), (min(self.principal_observations_df[self.principal_observations_df.columns[2]]),
max(self.principal_observations_df[self.principal_observations_df.columns[2]])))
ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r', alpha=0.4)
ax.set_title("PC1 vs. PC2 vs. PC3")
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
ax.set_zlabel("PC3")
ax.legend(loc='upper left', fontsize=15)
if outpath:
fig.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
def make_contribplot(self, pc_to_look_at=1, sigadder=0.01, outpath=None, dpi=150, return_top_contribs=False):
"""Make a plot showing contributions of properties to a PC"""
cont = pd.DataFrame(self.pca.components_, columns=self.features_df.index, index=self.pc_names_list)
tmp_df = pd.DataFrame(cont.iloc[pc_to_look_at - 1]).reset_index().rename(columns={'index': 'Property'})
tmp_df['Contribution'] = tmp_df.iloc[:, 1] ** 2
tmp_df = tmp_df[tmp_df['Contribution'] > 1 / len(
cont.iloc[0]) + sigadder] # Alter sigadder to just plot more/less significant contributors
tmp_df['Sign'] = np.where(tmp_df.iloc[:, 1] >= 0, 'Positive', 'Negative')
tmp_df = tmp_df.sort_values(by='Contribution', ascending=False)
fig, ax = plt.subplots(figsize=(30, 10))
sns.barplot(data=tmp_df, y='Property', x='Contribution', hue='Sign', dodge=False, ax=ax, hue_order=['Positive', 'Negative'],
palette=sns.color_palette("coolwarm", 2))
# Random formatting crap
self._change_height(ax, .6) # Make bars thinner
ax.set_title('{} contributors'.format(self.pc_names_list[pc_to_look_at - 1]))
legend = plt.legend(loc=8, bbox_to_anchor=(1.2, .8), ncol=1, title='Sign', fontsize=10)
plt.setp(legend.get_title(), fontsize=12)
plt.gcf().subplots_adjust(left=.5, right=.65)
if outpath:
fig.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
if return_top_contribs:
return tmp_df.Property.values.tolist()
def _change_height(self, ax, new_value):
"""Make bars in horizontal bar chart thinner"""
for patch in ax.patches:
current_height = patch.get_height()
diff = current_height - new_value
# we change the bar height
patch.set_height(new_value)
# we recenter the bar
patch.set_y(patch.get_y() + diff * .5)
def get_pca_ks_stats(self, maxrange=5):
"""Get a dictionary of PC#: K-S test stat for each """
pc_to_phenotype_pairs = {}
num_components = self.principal_observations_df.shape[1]
if num_components < maxrange:
maxrange = num_components
phenotypes = self.principal_observations_df.phenotype.unique().tolist()
for i in range(0, maxrange):
phenotype_pair_to_ks = {}
for p1, p2 in combinations(phenotypes, 2):
                p1_pc = self.principal_observations_df[self.principal_observations_df.phenotype == p1].iloc[:, i].values
                p2_pc = self.principal_observations_df[self.principal_observations_df.phenotype == p2].iloc[:, i].values
phenotype_pair_to_ks[(p1, p2)] = ks_2samp(p1_pc, p2_pc)
pc_to_phenotype_pairs[i + 1] = phenotype_pair_to_ks
return pc_to_phenotype_pairs
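# PCAMultiROS usage sketch (mirrors run_all below; variable names are illustrative):
#   runner = PCAMultiROS(features_df=percent_df, observations_df=obs_df, plot_title='my title')
#   runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=True)
#   runner.run_pca()
#   ks_stats = runner.get_pca_ks_stats()  # {PC number: {(phenotype1, phenotype2): K-S result}}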
def get_intra_inter_distances(feat_df, obs_df, normalize=False, plot=False):
# Drop zero rows, transpose, set type to float, and fill any missing values
feat_df = feat_df.loc[(feat_df != 0).any(axis=1)].T.astype(float).fillna(0)
if normalize:
feat_df = pd.DataFrame(preprocessing.scale(feat_df), columns=feat_df.columns, index=feat_df.index)
feat_obs_df = feat_df.join(obs_df, how='inner')
obs_intra_distances = {}
obs_inter_distances = {}
obs_feat_vectors = {}
for phenotype in feat_obs_df.phenotype.unique():
        feat_vectors = feat_obs_df[feat_obs_df.phenotype == phenotype].drop(columns=feat_obs_df.columns[-1]).values
obs_feat_vectors[phenotype] = feat_vectors
# Intra-distances
intra_distances_calc = pdist(feat_vectors)
obs_intra_distances[phenotype] = intra_distances_calc
# Inter-distances
    # Optionally subsample each group to the size of the smallest group: uncomment the next
    # two lines and use obs_feat_vector_choice instead of obs_feat_vectors below
# obs_shortest = min([x.shape[0] for x in obs_feat_vectors.values()])
# obs_feat_vector_choice = {k:(v[choice(v.shape[0], obs_shortest, replace=False), :] if v.shape[0]!=obs_shortest else v) for k,v in obs_feat_vectors.items()}
for inter1, inter2 in combinations(obs_feat_vectors, 2):
obs_inter_distances[(inter1, inter2)] = euclidean_distances(obs_feat_vectors[inter1],
obs_feat_vectors[inter2])
if plot:
df = pd.DataFrame()
for k, v in obs_intra_distances.items():
ser = pd.Series(v)
df[k] = ser
for k, v in obs_inter_distances.items():
ser = pd.Series(v.flatten())
df[str(k)] = ser
plotter = pd.melt(df, value_vars=df.columns.tolist())
plotter = plotter[pd.notnull(plotter.value)]
sns.violinplot(data=plotter, x='variable', y='value')
return obs_intra_distances, obs_inter_distances
def compute_skew_stats(intra, inter):
"""Returns two dictionaries reporting (skew, skew_pval) for all groups"""
# Intra (within a group) stats
intra_skew = {}
for k, v in intra.items():
skew = st.skew(v)
try:
skew_zstat, skew_pval = st.skewtest(v)
except ValueError: # if sample size too small
skew_zstat, skew_pval = (0, 1)
intra_skew[k] = (skew, skew_zstat, skew_pval)
# Inter (between groups) stats
inter_skew = {}
for k, v in inter.items():
# Inter skew stats
skew_sep = st.skew(v.flatten())
try:
skew_sep_zstat, skew_sep_pval = st.skewtest(v.flatten())
except ValueError:
skew_sep_zstat, skew_sep_pval = (0, 1)
inter_skew['-'.join(k)] = (skew_sep, skew_sep_zstat, skew_sep_pval)
# Significance of difference between intra and inter distributions
for intra_key in k:
try:
separation_zstat, separation_pval = mannwhitneyu(intra[intra_key],
v.flatten(),
alternative='less')
except ValueError: # All numbers are identical in mannwhitneyu
separation_zstat, separation_pval = (0, 1)
inter_skew['{}<{}'.format(intra_key, '-'.join(k))] = (separation_zstat, separation_pval)
return intra_skew, inter_skew
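# Sketch of combining the two distance helpers above (variables are illustrative):
#   intra, inter = get_intra_inter_distances(feat_df=runner.principal_df.T, obs_df=obs_df)
#   intra_skew, inter_skew = compute_skew_stats(intra=intra, inter=inter)
# run_all below applies exactly this pattern to the PCA-transformed features.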
def run_all(protgroup, memornot, subsequences, observation, proteomescale, base_outdir,
protgroup_dict, protein_feathers_dir, observation_dict, copynum_df, date,
errfile, subseqdim, statfile_duo, statfile_trio, cutoff_num_proteins=0, core_only_genes=None,
impute_counts=True, length_filter_pid=.8,
force_rerun_counts=False,
sc=None, run_percentages=True, force_rerun_percentages=False,
run_pca_and_stats=True, remove_correlated_feats=True, save_plots=False, force_rerun_pca=False):
import ssbio.utils
# Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything
import os
os.environ['OMP_NUM_THREADS'] = '1'
# First, filter down the protein group to the membrane/nonmembrane definition
prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
protgroup_dict=protgroup_dict,
protein_feathers_dir=protein_feathers_dir,
core_only_genes=core_only_genes)
num_proteins = len(prots_filtered_feathers)
if num_proteins <= cutoff_num_proteins:
return
# Make output directories
if proteomescale:
protscale = 'proteome_scaled'
else:
protscale = 'proteome_unscaled'
outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))
outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))
outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))
outdir_observ = ssbio.utils.make_dir(op.join(outdir_final, observation))
outdir_observ_subseqdim = ssbio.utils.make_dir(op.join(outdir_observ, subseqdim))
# Then load the protein feathers and add them all together to represent a "proteome"
# if sc:
# big_strain_counts_df = get_proteome_counts_sc(sc=sc, prots_filtered_feathers=prots_filtered_feathers,
# outpath=op.join(outdir_final,
# '{}-subsequence_proteome.fthr'.format(date)),
# copynum_scale=proteomescale, copynum_df=copynum_df,
# force_rerun=force_rerun_counts)
# else:
# big_strain_counts_df = get_proteome_counts(prots_filtered_feathers=prots_filtered_feathers,
# outpath=op.join(outdir_final,
# '{}-subsequence_proteome.fthr'.format(date)),
# copynum_scale=proteomescale, copynum_df=copynum_df,
# force_rerun=force_rerun_counts)
# if len(big_strain_counts_df) == 0:
# with open(errfile, "a") as myfile:
# myfile.write('COUNT ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
# return
# Impute averages for missing counts
if impute_counts:
big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_IMP.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
copynum_scale=proteomescale, copynum_df=copynum_df,
force_rerun=force_rerun_counts)
big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_IMP.fthr'.format(
date)),
force_rerun=force_rerun_percentages)
# Divide by totals to get percentages in a new dataframe
else:
try:
big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_percentages)
except:
with open(errfile, "a") as myfile:
myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(
observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
return
# Stop here if only percentages desired
if not run_pca_and_stats:
return
pca_pickle = op.join(outdir_observ_subseqdim, '{}-subsequence_pca.pckl'.format(date))
if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):
# Then, get filters for the columns to available strains in an observation for percentage df
keep_strains, observations_df = get_observed_strains_and_df(observation=observation,
observation_dict=observation_dict)
if keep_strains:
big_strain_percents_df = big_strain_percents_df[big_strain_percents_df.columns[big_strain_percents_df.columns.isin(keep_strains)]]
# Then, get filters for rows of the loaded feathers for interested subsequences
keep_subsequences = get_interested_subsequences(subsequences=subsequences)
# Some numbers: number of observations
num_obs = observations_df.phenotype.value_counts().to_dict()
        if len(num_obs) < 2:  # Only one phenotype class present -- nothing to compare
return
observations_string = ';'.join('{}:{}'.format(key, val) for key, val in num_obs.items())
# Some numbers: number of features
num_feats = len(big_strain_percents_df)
# Make an unwieldy title
big_title = 'LOC={0}; PROTGROUP={1}; PHENOTYPE={2}; PROTSCALE={3}; SUBSEQDIM={4};\n' \
'NUMPROTS={5}; NUMFEATS={6}; NUMSTRAINS={7}'.format('-'.join(memornot),
'-'.join(protgroup),
str(observation),
str(proteomescale),
subseqdim,
num_proteins,
num_feats,
observations_string)
# Run PCA and make plots
runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=observations_df, plot_title=big_title)
try:
runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)
except:
with open(errfile, "a") as myfile:
myfile.write(
'CLEAN ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
return
try:
runner.run_pca()
except:
with open(errfile, "a") as myfile:
myfile.write(
'PCA ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
return
if save_plots:
try:
runner.make_biplot(pc_x=1, pc_y=2,
outpath=op.join(outdir_observ_subseqdim, '{}-subsequence_biplot_1_2.png'.format(date)))
except:
with open(errfile, "a") as myfile:
myfile.write(
'PCA BIPLOT ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
try:
runner.make_pairplot(num_components_to_plot=4,
outpath=op.join(outdir_observ_subseqdim, '{}-subsequence_pairplot.png'.format(date)))
except:
with open(errfile, "a") as myfile:
myfile.write(
'PCA PAIRPLOT ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
try:
runner.make_3Dplot(outpath=op.join(outdir_observ_subseqdim, '{}-subsequence_3Dplot.png'.format(date)))
except:
with open(errfile, "a") as myfile:
myfile.write(
'PCA 3DPLOT ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
try:
runner.make_contribplot(pc_to_look_at=1, sigadder=0.01,
outpath=op.join(outdir_observ_subseqdim, '{}-subsequence_contribplot.png'.format(date)))
except:
with open(errfile, "a") as myfile:
myfile.write(
'PCA CONTRIBPLOT ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(
observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
with open(pca_pickle, 'wb') as f:
pickle.dump(runner, f)
else:
with open(pca_pickle, 'rb') as f:
runner = pickle.load(f)
# Get stats
try:
ks_stats = runner.get_pca_ks_stats()
except:
with open(errfile, "a") as myfile:
myfile.write(
'STAT K-S ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(
observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
return
try:
# perc_intra, perc_inter = get_intra_inter_distances(feat_df=runner.features_df,
# obs_df=runner.observations_df,
# normalize=True,
# plot=False)
# perc_stats = compute_skew_stats(intra=perc_intra, inter=perc_inter)
pca_intra, pca_inter = get_intra_inter_distances(feat_df=runner.principal_df.T,
obs_df=runner.observations_df,
normalize=False,
plot=False)
skew_stats = compute_skew_stats(intra=pca_intra, inter=pca_inter)
except:
with open(errfile, "a") as myfile:
myfile.write('STAT DIST ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(
observation) + '\t' + str(proteomescale) + '\t' + subseqdim + "\n")
return
# with open(statfile, 'a') as statf:
# statf.write('-'.join(memornot) + '\t' + '-'.join(protgroup) + '\t' + str(observation) + '\t' + str(
# proteomescale) + '\t' + subseqdim + '\t' + str(num_proteins) + '\t' + str(num_feats) + '\t' + str(
# num_obs) + '\t' + str(perc_stats) + '\t' + str(skew_stats) + '\t' + str(ks_stats) + '\n')
skew_pval_significant = True
skew_worst_pval = -1
ks_pval_significant = True
ks_worst_pval = -1
# Search for clusters not close to WT
if observation == 'ros_simulated':
for k, v in skew_stats[1].items():
if 'wt' in k and '<' in k:
pval = v[1]
if pval > skew_worst_pval:
skew_worst_pval = pval
if pval > 0.05:
skew_pval_significant = False
for k, v in ks_stats[1].items():
if 'wt' in k:
if v.pvalue > ks_worst_pval:
ks_worst_pval = v.pvalue
if v.pvalue > 0.05:
ks_pval_significant = False
# Combination stats
if ks_pval_significant and skew_pval_significant:
if ks_worst_pval != -1 and skew_worst_pval != -1:
with open(statfile_trio, 'a') as f:
f.write(str((protgroup, memornot, subsequences, observation, proteomescale)) + '\t' + str(ks_worst_pval) + '\t' + str(skew_worst_pval) + '\n')
# Search for G/NG clusters and pathotype_simple ones
else:
for k, v in skew_stats[1].items():
if '<' in k:
pval = v[1]
if pval > skew_worst_pval:
skew_worst_pval = pval
if pval > 0.05:
skew_pval_significant = False
for k, v in ks_stats[1].items():
if v.pvalue > ks_worst_pval:
ks_worst_pval = v.pvalue
if v.pvalue > 0.05:
ks_pval_significant = False
# Combination stats
if ks_pval_significant and skew_pval_significant:
if ks_worst_pval != -1 and skew_worst_pval != -1:
with open(statfile_duo, 'a') as f:
f.write(str((protgroup, memornot, subsequences, observation, proteomescale)) + '\t' + str(ks_worst_pval) + '\t' + str(skew_worst_pval) + '\n')
def run_all2(protgroup, memornot, subsequences, base_outdir,
protgroup_dict, protein_feathers_dir, date, errfile, impute_counts=True,
cutoff_num_proteins=0, core_only_genes=None,
length_filter_pid=.8, remove_correlated_feats=True,
force_rerun_counts=False, force_rerun_percentages=False, force_rerun_pca=False):
"""run_all but ignoring observations before pca"""
import ssbio.utils
# Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything
import os
os.environ['OMP_NUM_THREADS'] = '1'
# First, filter down the protein group to the membrane/nonmembrane definition
prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
protgroup_dict=protgroup_dict,
protein_feathers_dir=protein_feathers_dir,
core_only_genes=core_only_genes)
num_proteins = len(prots_filtered_feathers)
if num_proteins <= cutoff_num_proteins:
return
# Make output directories
protscale = 'proteome_unscaled'
outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))
outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))
outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))
if impute_counts:
big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_IMP.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_counts)
big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_IMP.fthr'.format(
date)),
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))
# Divide by totals to get percentages in a new dataframe
else:
try:
big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_AVG.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))
except:
with open(errfile, "a") as myfile:
myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):
# Then, get filters for rows of the loaded feathers for interested subsequences
keep_subsequences = get_interested_subsequences(subsequences=subsequences)
# Some numbers: number of features
num_feats = len(big_strain_percents_df)
# Make an unwieldy title
big_title = 'LOC={0}; PROTGROUP={1};\n' \
'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot),
'-'.join(protgroup),
num_proteins,
num_feats)
# Run PCA and make plots
runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(), plot_title=big_title)
try:
runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)
except:
with open(errfile, "a") as myfile:
myfile.write(
'CLEAN ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
# try:
runner.run_pca()
# except:
# with open(errfile, "a") as myfile:
# myfile.write(
# 'PCA ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
# return
with open(pca_pickle, 'wb') as f:
pickle.dump(runner, f)
else:
with open(pca_pickle, 'rb') as f:
runner = pickle.load(f)
def run_all_simple(protgroup, memornot, subsequences, observation, protgroup_dict, protein_feathers_dir, observation_dict, statfile, cutoff_num_proteins=0):
# Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything
import os
os.environ['OMP_NUM_THREADS'] = '1'
# First, filter down the protein group to the membrane/nonmembrane definition
prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
protgroup_dict=protgroup_dict,
protein_feathers_dir=protein_feathers_dir)
num_proteins = len(prots_filtered_feathers)
if num_proteins <= cutoff_num_proteins:
return
# WT HAVE MORE OF THESE...
sigdict = get_simple_sigdict(prots_filtered_feathers=prots_filtered_feathers,
subsequences=subsequences,
observation=observation,
observation_dict=observation_dict)
sig_predf = []
for protein, feat_and_pvals in sigdict['greater'].items():
for feat, pval in feat_and_pvals:
sig_predf.append((protein, feat, pval))
sig_df1 = pd.DataFrame(sig_predf, columns=['protein', 'feature', 'pval']).sort_values(by='pval', ascending=True)
if len(sig_df1) > 0:
num_prots1 = len(sig_df1.protein.unique())
num_feats1 = len(sig_df1.feature.unique())
        # Very significant features
sig_df2 = sig_df1[sig_df1.pval <= sig_df1.pval.quantile(.25)]
num_prots2 = len(sig_df2.protein.unique())
num_feats2 = len(sig_df2.feature.unique())
# Very significant features that are explainable
sig_df3 = sig_df2[sig_df2.feature.isin(expected_in_wt)]
num_prots3 = len(sig_df3.protein.unique())
num_feats3 = len(sig_df3.feature.unique())
wt_perc_sig_feats = num_feats3 / num_feats2
wt_perc_sig_prots = num_prots3 / num_proteins
else:
wt_perc_sig_feats = 0
wt_perc_sig_prots = 0
# MUT HAVE MORE OF THESE...
sig_predf = []
for protein, feat_and_pvals in sigdict['less'].items():
for feat, pval in feat_and_pvals:
sig_predf.append((protein, feat, pval))
sig_df1 = pd.DataFrame(sig_predf, columns=['protein', 'feature', 'pval']).sort_values(by='pval', ascending=True)
if len(sig_df1) > 0:
num_prots1 = len(sig_df1.protein.unique())
num_feats1 = len(sig_df1.feature.unique())
        # Very significant features
sig_df2 = sig_df1[sig_df1.pval <= sig_df1.pval.quantile(.25)]
num_prots2 = len(sig_df2.protein.unique())
num_feats2 = len(sig_df2.feature.unique())
# Very significant features that are explainable
sig_df3 = sig_df2[sig_df2.feature.isin(expected_in_mut)]
num_prots3 = len(sig_df3.protein.unique())
num_feats3 = len(sig_df3.feature.unique())
mut_perc_sig_feats = num_feats3 / num_feats2
mut_perc_sig_prots = num_prots3 / num_proteins
else:
mut_perc_sig_feats = 0
mut_perc_sig_prots = 0
with open(statfile, 'a') as f:
f.write(str((protgroup, memornot, observation)) + '\t' + str(wt_perc_sig_feats) + '\t' + str(wt_perc_sig_prots) + '\t' + str(mut_perc_sig_feats) + '\t' + str(mut_perc_sig_prots) + '\t' + str(num_proteins) + '\n')
expected_in_wt = ['aa_%_dis', 'disorder_2D_aa_%_dis', 'ss_disorder_2D_aa_%_dis', 'disorder_3D_aa_%_dis', 'ss_disorder_3D_aa_%_dis', 'dna_2_5D_aa_%_ord', 'aa_%_C', 'acc_2D_aa_%_C', 'acc_3D_aa_%_C', 'surface_3D_aa_%_C', 'metal_2_5D_aa_%_C', 'metal_3D_aa_%_C', 'csa_2_5D_aa_%_C', 'sites_2_5D_aa_%_C', 'aa_%_carb', 'acc_2D_aa_%_carb', 'acc_3D_aa_%_carb', 'surface_3D_aa_%_carb', 'metal_2_5D_aa_%_carb', 'metal_3D_aa_%_carb', 'aa_%_chrg', 'acc_2D_aa_%_chrg', 'acc_3D_aa_%_chrg', 'surface_3D_aa_%_chrg', 'metal_2_5D_aa_%_chrg', 'metal_3D_aa_%_chrg', 'csa_2_5D_aa_%_chrg', 'sites_2_5D_aa_%_chrg', 'acc_2D_aa_%_poschrg', 'acc_3D_aa_%_poschrg', 'surface_3D_aa_%_poschrg', 'metal_2_5D_aa_%_poschrg', 'metal_3D_aa_%_poschrg', 'acc_2D_aa_%_negchrg', 'acc_3D_aa_%_negchrg', 'surface_3D_aa_%_negchrg', 'tm_2D_aa_%_tmunstab', 'tm_3D_aa_%_tmunstab', 'surface_3D_aa_%_Y','aa_%_Y','acc_3D_aa_%_Y','acc_2D_aa_%_Y']
expected_in_mut = ['aa_%_ord', 'disorder_2D_aa_%_ord', 'ss_disorder_2D_aa_%_ord', 'disorder_3D_aa_%_ord', 'ss_disorder_3D_aa_%_ord', 'dna_2_5D_aa_%_dis', 'aa_%_M', 'acc_2D_aa_%_M', 'acc_3D_aa_%_M', 'surface_3D_aa_%_M', 'metal_2_5D_aa_%_M', 'metal_3D_aa_%_M', 'csa_2_5D_aa_%_M', 'sites_2_5D_aa_%_M', 'metal_2_5D_aa_%_negchrg', 'metal_3D_aa_%_negchrg', 'metal_2_5D_aa_%_bulk', 'metal_3D_aa_%_bulk', 'tm_2D_aa_%_M', 'tm_3D_aa_%_M', 'tm_2D_aa_%_tmstab', 'tm_3D_aa_%_tmstab']
_all_counts = ['aa_count_A', 'aa_count_C', 'aa_count_D', 'aa_count_E', 'aa_count_F', 'aa_count_G', 'aa_count_H',
'aa_count_I', 'aa_count_K', 'aa_count_L', 'aa_count_M', 'aa_count_N', 'aa_count_P', 'aa_count_Q',
'aa_count_R', 'aa_count_S', 'aa_count_T', 'aa_count_V', 'aa_count_W', 'aa_count_Y', 'aa_count_total',
'acc_2D_aa_count_A', 'acc_2D_aa_count_C', 'acc_2D_aa_count_D', 'acc_2D_aa_count_E', 'acc_2D_aa_count_F',
'acc_2D_aa_count_G', 'acc_2D_aa_count_H', 'acc_2D_aa_count_I', 'acc_2D_aa_count_K', 'acc_2D_aa_count_L',
'acc_2D_aa_count_M', 'acc_2D_aa_count_N', 'acc_2D_aa_count_P', 'acc_2D_aa_count_Q', 'acc_2D_aa_count_R',
'acc_2D_aa_count_S', 'acc_2D_aa_count_T', 'acc_2D_aa_count_V', 'acc_2D_aa_count_W', 'acc_2D_aa_count_Y',
'acc_2D_aa_count_carb', 'acc_2D_aa_count_chrg', 'acc_2D_aa_count_negchrg', 'acc_2D_aa_count_poschrg',
'acc_2D_aa_count_total', 'acc_3D_aa_count_A', 'acc_3D_aa_count_C', 'acc_3D_aa_count_D',
'acc_3D_aa_count_E', 'acc_3D_aa_count_F', 'acc_3D_aa_count_G', 'acc_3D_aa_count_H', 'acc_3D_aa_count_I',
'acc_3D_aa_count_K', 'acc_3D_aa_count_L', 'acc_3D_aa_count_M', 'acc_3D_aa_count_N', 'acc_3D_aa_count_P',
'acc_3D_aa_count_Q', 'acc_3D_aa_count_R', 'acc_3D_aa_count_S', 'acc_3D_aa_count_T', 'acc_3D_aa_count_V',
'acc_3D_aa_count_W', 'acc_3D_aa_count_Y', 'acc_3D_aa_count_carb', 'acc_3D_aa_count_chrg',
'acc_3D_aa_count_negchrg', 'acc_3D_aa_count_poschrg', 'acc_3D_aa_count_total', 'csa_2_5D_aa_count_A',
'csa_2_5D_aa_count_C', 'csa_2_5D_aa_count_D', 'csa_2_5D_aa_count_E', 'csa_2_5D_aa_count_F',
'csa_2_5D_aa_count_G', 'csa_2_5D_aa_count_H', 'csa_2_5D_aa_count_I', 'csa_2_5D_aa_count_K',
'csa_2_5D_aa_count_L', 'csa_2_5D_aa_count_M', 'csa_2_5D_aa_count_N', 'csa_2_5D_aa_count_P',
'csa_2_5D_aa_count_Q', 'csa_2_5D_aa_count_R', 'csa_2_5D_aa_count_S', 'csa_2_5D_aa_count_T',
'csa_2_5D_aa_count_V', 'csa_2_5D_aa_count_W', 'csa_2_5D_aa_count_Y', 'csa_2_5D_aa_count_chrg',
'csa_2_5D_aa_count_total', 'disorder_2D_aa_count_A', 'disorder_2D_aa_count_C', 'disorder_2D_aa_count_D',
'disorder_2D_aa_count_E', 'disorder_2D_aa_count_F', 'disorder_2D_aa_count_G', 'disorder_2D_aa_count_H',
'disorder_2D_aa_count_I', 'disorder_2D_aa_count_K', 'disorder_2D_aa_count_L', 'disorder_2D_aa_count_M',
'disorder_2D_aa_count_N', 'disorder_2D_aa_count_P', 'disorder_2D_aa_count_Q', 'disorder_2D_aa_count_R',
'disorder_2D_aa_count_S', 'disorder_2D_aa_count_T', 'disorder_2D_aa_count_V', 'disorder_2D_aa_count_W',
'disorder_2D_aa_count_Y', 'disorder_2D_aa_count_dis', 'disorder_2D_aa_count_ord',
'disorder_2D_aa_count_total', 'disorder_3D_aa_count_A', 'disorder_3D_aa_count_C',
'disorder_3D_aa_count_D', 'disorder_3D_aa_count_E', 'disorder_3D_aa_count_F', 'disorder_3D_aa_count_G',
'disorder_3D_aa_count_H', 'disorder_3D_aa_count_I', 'disorder_3D_aa_count_K', 'disorder_3D_aa_count_L',
'disorder_3D_aa_count_M', 'disorder_3D_aa_count_N', 'disorder_3D_aa_count_P', 'disorder_3D_aa_count_Q',
'disorder_3D_aa_count_R', 'disorder_3D_aa_count_S', 'disorder_3D_aa_count_T', 'disorder_3D_aa_count_V',
'disorder_3D_aa_count_W', 'disorder_3D_aa_count_Y', 'disorder_3D_aa_count_dis',
'disorder_3D_aa_count_ord', 'disorder_3D_aa_count_total', 'dna_2_5D_aa_count_A', 'dna_2_5D_aa_count_C',
'dna_2_5D_aa_count_D', 'dna_2_5D_aa_count_E', 'dna_2_5D_aa_count_F', 'dna_2_5D_aa_count_G',
'dna_2_5D_aa_count_H', 'dna_2_5D_aa_count_I', 'dna_2_5D_aa_count_K', 'dna_2_5D_aa_count_L',
'dna_2_5D_aa_count_M', 'dna_2_5D_aa_count_N', 'dna_2_5D_aa_count_P', 'dna_2_5D_aa_count_Q',
'dna_2_5D_aa_count_R', 'dna_2_5D_aa_count_S', 'dna_2_5D_aa_count_T', 'dna_2_5D_aa_count_V',
'dna_2_5D_aa_count_W', 'dna_2_5D_aa_count_Y', 'dna_2_5D_aa_count_dis', 'dna_2_5D_aa_count_ord',
'dna_2_5D_aa_count_total', 'metal_2_5D_aa_count_A', 'metal_2_5D_aa_count_C', 'metal_2_5D_aa_count_D',
'metal_2_5D_aa_count_E', 'metal_2_5D_aa_count_F', 'metal_2_5D_aa_count_G', 'metal_2_5D_aa_count_H',
'metal_2_5D_aa_count_I', 'metal_2_5D_aa_count_K', 'metal_2_5D_aa_count_L', 'metal_2_5D_aa_count_M',
'metal_2_5D_aa_count_N', 'metal_2_5D_aa_count_P', 'metal_2_5D_aa_count_Q', 'metal_2_5D_aa_count_R',
'metal_2_5D_aa_count_S', 'metal_2_5D_aa_count_T', 'metal_2_5D_aa_count_V', 'metal_2_5D_aa_count_W',
'metal_2_5D_aa_count_Y', 'metal_2_5D_aa_count_bulk', 'metal_2_5D_aa_count_carb',
'metal_2_5D_aa_count_chrg', 'metal_2_5D_aa_count_negchrg', 'metal_2_5D_aa_count_poschrg',
'metal_2_5D_aa_count_total', 'metal_3D_aa_count_bulk', 'metal_3D_aa_count_carb',
'metal_3D_aa_count_chrg', 'metal_3D_aa_count_negchrg', 'metal_3D_aa_count_poschrg',
'notdeep_3D_aa_count_A', 'notdeep_3D_aa_count_C', 'notdeep_3D_aa_count_D', 'notdeep_3D_aa_count_E',
'notdeep_3D_aa_count_F', 'notdeep_3D_aa_count_G', 'notdeep_3D_aa_count_H', 'notdeep_3D_aa_count_I',
'notdeep_3D_aa_count_K', 'notdeep_3D_aa_count_L', 'notdeep_3D_aa_count_M', 'notdeep_3D_aa_count_N',
'notdeep_3D_aa_count_P', 'notdeep_3D_aa_count_Q', 'notdeep_3D_aa_count_R', 'notdeep_3D_aa_count_S',
'notdeep_3D_aa_count_T', 'notdeep_3D_aa_count_V', 'notdeep_3D_aa_count_W', 'notdeep_3D_aa_count_Y',
'notdeep_3D_aa_count_total', 'sites_2_5D_aa_count_A', 'sites_2_5D_aa_count_C', 'sites_2_5D_aa_count_D',
'sites_2_5D_aa_count_E', 'sites_2_5D_aa_count_F', 'sites_2_5D_aa_count_G', 'sites_2_5D_aa_count_H',
'sites_2_5D_aa_count_I', 'sites_2_5D_aa_count_K', 'sites_2_5D_aa_count_L', 'sites_2_5D_aa_count_M',
'sites_2_5D_aa_count_N', 'sites_2_5D_aa_count_P', 'sites_2_5D_aa_count_Q', 'sites_2_5D_aa_count_R',
'sites_2_5D_aa_count_S', 'sites_2_5D_aa_count_T', 'sites_2_5D_aa_count_V', 'sites_2_5D_aa_count_W',
'sites_2_5D_aa_count_Y', 'sites_2_5D_aa_count_chrg', 'sites_2_5D_aa_count_total',
'ss_disorder_2D_aa_count_A', 'ss_disorder_2D_aa_count_C', 'ss_disorder_2D_aa_count_D',
'ss_disorder_2D_aa_count_E', 'ss_disorder_2D_aa_count_F', 'ss_disorder_2D_aa_count_G',
'ss_disorder_2D_aa_count_H', 'ss_disorder_2D_aa_count_I', 'ss_disorder_2D_aa_count_K',
'ss_disorder_2D_aa_count_L', 'ss_disorder_2D_aa_count_M', 'ss_disorder_2D_aa_count_N',
'ss_disorder_2D_aa_count_P', 'ss_disorder_2D_aa_count_Q', 'ss_disorder_2D_aa_count_R',
'ss_disorder_2D_aa_count_S', 'ss_disorder_2D_aa_count_T', 'ss_disorder_2D_aa_count_V',
'ss_disorder_2D_aa_count_W', 'ss_disorder_2D_aa_count_Y', 'ss_disorder_2D_aa_count_dis',
'ss_disorder_2D_aa_count_ord', 'ss_disorder_2D_aa_count_total', 'ss_disorder_3D_aa_count_A',
'ss_disorder_3D_aa_count_C', 'ss_disorder_3D_aa_count_D', 'ss_disorder_3D_aa_count_E',
'ss_disorder_3D_aa_count_F', 'ss_disorder_3D_aa_count_G', 'ss_disorder_3D_aa_count_H',
'ss_disorder_3D_aa_count_I', 'ss_disorder_3D_aa_count_K', 'ss_disorder_3D_aa_count_L',
'ss_disorder_3D_aa_count_M', 'ss_disorder_3D_aa_count_N', 'ss_disorder_3D_aa_count_P',
'ss_disorder_3D_aa_count_Q', 'ss_disorder_3D_aa_count_R', 'ss_disorder_3D_aa_count_S',
'ss_disorder_3D_aa_count_T', 'ss_disorder_3D_aa_count_V', 'ss_disorder_3D_aa_count_W',
'ss_disorder_3D_aa_count_Y', 'ss_disorder_3D_aa_count_dis', 'ss_disorder_3D_aa_count_ord',
'ss_disorder_3D_aa_count_total', 'surface_3D_aa_count_A', 'surface_3D_aa_count_C',
'surface_3D_aa_count_D', 'surface_3D_aa_count_E', 'surface_3D_aa_count_F', 'surface_3D_aa_count_G',
'surface_3D_aa_count_H', 'surface_3D_aa_count_I', 'surface_3D_aa_count_K', 'surface_3D_aa_count_L',
'surface_3D_aa_count_M', 'surface_3D_aa_count_N', 'surface_3D_aa_count_P', 'surface_3D_aa_count_Q',
'surface_3D_aa_count_R', 'surface_3D_aa_count_S', 'surface_3D_aa_count_T', 'surface_3D_aa_count_V',
'surface_3D_aa_count_W', 'surface_3D_aa_count_Y', 'surface_3D_aa_count_carb', 'surface_3D_aa_count_chrg',
'surface_3D_aa_count_negchrg', 'surface_3D_aa_count_poschrg', 'surface_3D_aa_count_total',
'tm_2D_aa_count_tmstab', 'tm_2D_aa_count_tmunstab', 'tm_3D_aa_count_tmstab', 'tm_3D_aa_count_tmunstab']
```
#### File: sequence/utils/alignment.py
```python
import logging
import os.path as op
import subprocess
import tempfile
from collections import defaultdict
from itertools import count, groupby
import numpy as np
import pandas as pd
from Bio import AlignIO
from Bio import pairwise2
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo as matlist
import ssbio.utils
import ssbio.protein.sequence.utils
# Quiet the SettingWithCopyWarning when converting dtypes in get_deletions/mutations methods
pd.options.mode.chained_assignment = None
log = logging.getLogger(__name__)
def pairwise_sequence_alignment(a_seq, b_seq, engine, a_seq_id=None, b_seq_id=None,
gapopen=10, gapextend=0.5,
outfile=None, outdir=None, force_rerun=False):
"""Run a global pairwise sequence alignment between two sequence strings.
Args:
a_seq (str, Seq, SeqRecord, SeqProp): Reference sequence
b_seq (str, Seq, SeqRecord, SeqProp): Sequence to be aligned to reference
engine (str): `biopython` or `needle` - which pairwise alignment program to use
a_seq_id (str): Reference sequence ID. If not set, is "a_seq"
b_seq_id (str): Sequence to be aligned ID. If not set, is "b_seq"
gapopen (int): Only for `needle` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for `needle` - Gap extension penalty is added to the standard gap penalty for each
base or residue in the gap
outfile (str): Only for `needle` - name of output file. If not set, is {id_a}_{id_b}_align.txt
outdir (str): Only for `needle` - Path to output directory. Default is the current directory.
force_rerun (bool): Only for `needle` - Default False, set to True if you want to rerun the alignment
if outfile exists.
Returns:
MultipleSeqAlignment: Biopython object to represent an alignment
"""
engine = engine.lower()
if engine not in ['biopython', 'needle']:
raise ValueError('{}: invalid engine'.format(engine))
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_seq = ssbio.protein.sequence.utils.cast_to_str(a_seq)
b_seq = ssbio.protein.sequence.utils.cast_to_str(b_seq)
if engine == 'biopython':
# TODO: allow different matrices? needle uses blosum62 by default, how to change that?
# TODO: how to define gap open/extend when using matrix in biopython global alignment?
log.warning('Gap penalties not implemented in Biopython yet')
blosum62 = matlist.blosum62
alignments = pairwise2.align.globaldx(a_seq, b_seq, blosum62) # TODO: add gap penalties
best_alignment = alignments[0]
a = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[0], id=a_seq_id)
b = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[1], id=b_seq_id)
alignment = MultipleSeqAlignment([a, b], annotations={"score": best_alignment[2],
"start": best_alignment[3],
"end" : best_alignment[4]})
alignment.annotations['percent_identity'] = get_percent_identity(best_alignment[0], best_alignment[1]) * 100
return alignment
if engine == 'needle':
alignment_file = run_needle_alignment(seq_a=a_seq, seq_b=b_seq, gapopen=gapopen, gapextend=gapextend,
write_outfile=True, # Has to be true, AlignIO parses files on disk
outdir=outdir, outfile=outfile, force_rerun=force_rerun)
log.debug('Needle alignment at {}'.format(alignment_file))
if not op.exists(alignment_file):
raise ValueError('{}: needle alignment file does not exist'.format(alignment_file))
# Use AlignIO to parse the needle alignment, alignments[0] is the first alignment (the only one in pairwise)
# alignments = list(AlignIO.parse(alignment_file, "emboss"))
# alignment = alignments[0]
alignment = needle_statistics_alignio(alignment_file)
# Rename the sequence IDs
alignment[0].id = a_seq_id
alignment[1].id = b_seq_id
# # Add needle statistics as annotations in the alignment object
# stats = needle_statistics(alignment_file)
# alignment_ids = list(stats.keys())
# if len(alignment_ids) > 1:
# raise ValueError('Needle alignment file contains more than one pairwise alignment')
# needle_id = alignment_ids[0]
# alignment.annotations['percent_identity'] = stats[needle_id]['percent_identity']
# alignment.annotations['percent_similarity'] = stats[needle_id]['percent_similarity']
# alignment.annotations['percent_gaps'] = stats[needle_id]['percent_gaps']
# alignment.annotations['score'] = stats[needle_id]['score']
return alignment
def run_needle_alignment(seq_a, seq_b, gapopen=10, gapextend=0.5, write_outfile=True,
outdir=None, outfile=None, force_rerun=False):
"""Run the needle alignment program for two strings and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Using strings as input: https://www.biostars.org/p/91124/
Args:
seq_a (str, Seq, SeqRecord): Reference sequence
seq_b (str, Seq, SeqRecord): Sequence to be aligned to the reference
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
write_outfile (bool): If True, write the alignment to a file and return the file path; if False, return the raw needle output
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, a temporary file named temp_alignment.needle is used.
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Path to the alignment file in srspair format if write_outfile is True, otherwise the raw needle output.
"""
# TODO: check if needle is installed and raise error if not
if not outdir:
outdir = ''
# TODO: rewrite using utils functions - does not report error if needle is not installed currently
# TODO: rethink outdir/outfile, also if this should return the tempfile or just a file object or whatever
if write_outfile:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
if not outfile:
outfile = op.join(tempfile.gettempdir(), 'temp_alignment.needle')
else:
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
cmd = 'needle -outfile="{}" -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(
outfile, seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile
else:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
cmd = 'needle -auto -stdout -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
stdout = command.stdout.read()
return stdout
def run_needle_alignment_on_files(id_a, faa_a, id_b, faa_b, gapopen=10, gapextend=0.5,
outdir='', outfile='', force_rerun=False):
"""Run the needle alignment program for two fasta files and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Args:
id_a: ID of reference sequence
faa_a: File path to reference sequence
id_b: ID of sequence to be aligned
faa_b: File path to sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format.
"""
# TODO: rewrite using utils functions so we can check for needle installation
# # If you don't want to save the output file, just run the alignment and return the raw results
# if not outfile and not outdir:
# needle_cline = NeedleCommandline(asequence=faa_a, bsequence=faa_b,
# gapopen=gapopen, gapextend=gapextend,
# stdout=True, auto=True)
# stdout, stderr = needle_cline()
# raw_alignment_text = stdout.decode('utf-8')
# Make a default name if no outfile is set
if not outfile:
outfile = op.join(outdir, '{}_{}.needle'.format(id_a, id_b))
else:
outfile = op.join(outdir, outfile)
# Check if the outfile already exists
if op.exists(outfile) and not force_rerun:
return outfile
# If it doesn't exist, or force_rerun=True, run the alignment
else:
cmd = 'needle -outfile="{}" -asequence="{}" -bsequence="{}" -gapopen={} -gapextend={}'.format(outfile,
faa_a,
faa_b,
gapopen,
gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile
def get_percent_identity(a_aln_seq, b_aln_seq):
"""Get the percent identity between two alignment strings"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
count = 0
gaps = 0
for n in range(0, len(a_aln_seq)):
if a_aln_seq[n] == b_aln_seq[n]:
if a_aln_seq[n] != "-":
count += 1
else:
gaps += 1
return count / float((len(a_aln_seq) - gaps))
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df
def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):
"""Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
"""
alignments = list(AlignIO.parse(alignment_file, "emboss"))
alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])
for alignment in alignments:
if not a_seq_id:
a_seq_id = list(alignment)[0].id
a_seq = str(list(alignment)[0].seq)
if not b_seq_id:
b_seq_id = list(alignment)[1].id
b_seq = str(list(alignment)[1].seq)
df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)
alignment_df = alignment_df.append(df).reset_index(drop=True)
return alignment_df
def get_mutations(aln_df):
"""Get a list of residue numbers (in the original sequence's numbering) that are mutated
Args:
aln_df (DataFrame): Alignment DataFrame
just_resnums: If only the residue numbers should be returned, instead of a list of tuples of
(original_residue, resnum, mutated_residue)
Returns:
list: Residue mutations
"""
mutation_df = aln_df[aln_df['type'] == 'mutation']
tuples = []
if not mutation_df.empty:
subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']]
subset['id_a_pos'] = subset['id_a_pos'].astype(int)
tuples = [tuple(x) for x in subset.values]
return tuples
def get_unresolved(aln_df):
"""Get a list of residue numbers (in the original sequence's numbering) that are unresolved
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: Residue numbers that are unresolved
"""
unresolved_df = aln_df[aln_df['type'] == 'unresolved']
unresolved = []
if not unresolved_df.empty:
unresolved_df['id_a_pos'] = unresolved_df['id_a_pos'].astype(int)
unresolved = unresolved_df.id_a_pos.tolist()
return unresolved
def get_deletions(aln_df):
"""Get a list of tuples indicating the first and last residues of a deletion region, as well as the length of the deletion.
Examples:
# Deletion of residues 1 to 4, length 4
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: 'M', 1: 'G', 2: 'I', 3: 'T'}, 'id_a_pos': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'id_b_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'type': {0: 'deletion', 1: 'deletion', 2: 'deletion', 3: 'deletion'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_deletions(my_alignment)
[((1.0, 4.0), 4)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((deletion_start_resnum, deletion_end_resnum), deletion_length)
"""
deletion_df = aln_df[aln_df['type'] == 'deletion']
if not deletion_df.empty:
deletion_df['id_a_pos'] = deletion_df['id_a_pos'].astype(int)
deletions = []
for k, g in groupby(deletion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
deletion_indices = (min(tmp), max(tmp))
deletion_start_ix = deletion_indices[0]
deletion_end_ix = deletion_indices[1]
deletion_length = deletion_end_ix - deletion_start_ix + 1
id_a_pos_deletion_start = aln_df.loc[deletion_start_ix].id_a_pos
id_a_pos_deletion_end = aln_df.loc[deletion_end_ix].id_a_pos
deletion_region = (id_a_pos_deletion_start, id_a_pos_deletion_end)
# Logging where the deletion is
log.debug('Deletion of length {} at residues {}'.format(deletion_length, deletion_region))
to_append = (deletion_region, deletion_length)
deletions.append(to_append)
return deletions
def get_insertions(aln_df):
"""Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion.
If the first tuple is:
(-1, 1) that means the insertion is at the beginning of the original protein
(X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein
Examples:
# Insertion at beginning, length 3
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_insertions(my_alignment)
[((-1, 1.0), 3)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length)
"""
insertion_df = aln_df[aln_df['type'] == 'insertion']
# if not insertion_df.empty: # don't need to do this for insertions
# insertion_df['id_a_pos'] = insertion_df['id_a_pos'].astype(int)
insertions = []
for k, g in groupby(insertion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
insertion_indices = (min(tmp), max(tmp))
insertion_start = insertion_indices[0] - 1
insertion_end = insertion_indices[1] + 1
# Checking if insertion is at the beginning or end
if insertion_start < 0:
insertion_start = insertion_indices[0]
insertion_length = insertion_end - insertion_start
elif insertion_end >= len(aln_df):
insertion_end = insertion_indices[1]
insertion_length = insertion_end - insertion_start
else:
insertion_length = insertion_end - insertion_start - 1
id_a_pos_insertion_start = aln_df.loc[insertion_start].id_a_pos
id_a_pos_insertion_end = aln_df.loc[insertion_end].id_a_pos
# Checking if insertion is at the beginning or end
if np.isnan(id_a_pos_insertion_start) and id_a_pos_insertion_end == 1:
insertion_region = (-1, id_a_pos_insertion_end)
elif np.isnan(id_a_pos_insertion_end):
insertion_region = (id_a_pos_insertion_start, float('Inf'))
else:
insertion_region = (id_a_pos_insertion_start, id_a_pos_insertion_end)
# Logging where the insertion is
if insertion_region[0] == -1:
log.debug('Insertion of length {} at beginning'.format(insertion_length))
elif insertion_region[1] == float('Inf'):
log.debug('Insertion of length {} at end'.format(insertion_length))
else:
log.debug('Insertion of length {} at residues {}'.format(insertion_length, insertion_region))
to_append = (insertion_region, insertion_length)
insertions.append(to_append)
return insertions
def map_resnum_a_to_resnum_b(resnums, a_aln, b_aln):
"""Map a residue number in a sequence to the corresponding residue number in an aligned sequence.
Examples:
>>> map_resnum_a_to_resnum_b([1,2,3], '--ABCDEF', 'XXABCDEF')
{1: 3, 2: 4, 3: 5}
>>> map_resnum_a_to_resnum_b(5, '--ABCDEF', 'XXABCDEF')
{5: 7}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF', 'ABCD--')
{}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF--', 'ABCD--GH')
{}
>>> map_resnum_a_to_resnum_b([9,10], '--MKCDLHRLE-E', 'VSNEYSFEGYKLD')
{9: 11, 10: 13}
Args:
resnums (int, list): Residue number or numbers in the first aligned sequence
a_aln (str, Seq, SeqRecord): Aligned sequence string
b_aln (str, Seq, SeqRecord): Aligned sequence string
Returns:
dict: Mapping of residue numbers in the first sequence to residue numbers in the second sequence; residues that cannot be mapped are omitted
"""
resnums = ssbio.utils.force_list(resnums)
aln_df = get_alignment_df(a_aln, b_aln)
maps = aln_df[aln_df.id_a_pos.isin(resnums)]
able_to_map_to_b = maps[pd.notnull(maps.id_b_pos)]
successful_map_from_a = able_to_map_to_b.id_a_pos.values.tolist()
mapping = dict([(int(a), int(b)) for a,b in zip(able_to_map_to_b.id_a_pos, able_to_map_to_b.id_b_pos)])
cant_map = list(set(resnums).difference(successful_map_from_a))
if len(cant_map) > 0:
log.warning('Unable to map residue numbers {} in first sequence to second'.format(cant_map))
return mapping
def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):
"""Get a report of a pairwise alignment.
Args:
reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form
Returns:
dict: Dictionary of information on mutations, insertions, sequence identity, etc.
"""
if len(reference_seq_aln) != len(other_seq_aln):
raise ValueError('Sequence lengths not equal - was an alignment run?')
reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)
other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)
infodict = {}
# Percent identity to the reference sequence
stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['percent_identity'] = stats_percent_ident
# Other alignment results
aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['deletions'] = get_deletions(aln_df)
infodict['insertions'] = get_insertions(aln_df)
infodict['mutations'] = get_mutations(aln_df)
infodict['unresolved'] = get_unresolved(aln_df)
return infodict
def needle_statistics(infile):
"""Reads in a needle alignment file and spits out statistics of the alignment.
Args:
infile (str): Alignment file name
Returns:
dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.
"""
alignments = list(AlignIO.parse(infile, "emboss"))
alignment_properties = defaultdict(dict)
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == '1':
a_id = parts[1].strip()
if key == '2':
b_id = parts[1].strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment_properties[a_id + '_' + b_id]['identity'] = ident_num
alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num
alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num
alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment_properties[a_id + '_' + b_id]['score'] = score
# And read in another line...
line = f.readline()
return alignment_properties
def needle_statistics_alignio(infile):
"""Reads in a needle alignment file and returns an AlignIO object with annotations
Args:
infile (str): Alignment file name
Returns:
AlignIO: annotated AlignIO object
"""
alignments = list(AlignIO.parse(infile, "emboss"))
if len(alignments) > 1:
raise ValueError('Alignment file contains more than one pairwise alignment')
alignment = alignments[0]
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment.annotations['identity'] = ident_num
alignment.annotations['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment.annotations['similarity'] = sim_num
alignment.annotations['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment.annotations['gaps'] = gap_num
alignment.annotations['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment.annotations['score'] = score
# And read in another line...
line = f.readline()
return alignment
```
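A minimal usage sketch for the alignment utilities above, assuming `ssbio` and Biopython are installed and the module is importable as `ssbio.protein.sequence.utils.alignment`; the sequences, IDs, and the example output are made up for illustration.
```python
from ssbio.protein.sequence.utils.alignment import (pairwise_sequence_alignment,
                                                    pairwise_alignment_stats)

ref = 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ'    # made-up reference sequence
other = 'MKTAYIAKQRQISFVKSNFSRQLEERLGLIEVQ'  # same sequence with one substitution

# Global pairwise alignment with the Biopython engine (no EMBOSS needle install needed)
aln = pairwise_sequence_alignment(ref, other, engine='biopython',
                                  a_seq_id='ref', b_seq_id='other')
print(aln.annotations['percent_identity'])

# Per-residue summary: mutations, insertions, deletions, unresolved residues
stats = pairwise_alignment_stats(str(aln[0].seq), str(aln[1].seq))
print(stats['mutations'])  # e.g. [('H', 18, 'N')]
```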
#### File: sequence/utils/blast.py
```python
import os
import subprocess
import pandas as pd
import os.path as op
from ssbio import utils
import logging
try:
from IPython.display import clear_output
have_ipython = True
from tqdm import tqdm_notebook as tqdm
except ImportError:
have_ipython = False
from tqdm import tqdm
date = utils.Date()
log = logging.getLogger(__name__)
def run_makeblastdb(infile, dbtype, outdir=''):
"""Make the BLAST database for a genome file.
Args:
infile (str): path to genome FASTA file
dbtype (str): "nucl" or "prot" - what format your genome files are in
outdir (str): path to directory to output database files (default is original folder)
Returns:
Paths to BLAST databases.
"""
# TODO: add force_rerun option
# TODO: rewrite using utils function command
# Output location
og_dir, name, ext = utils.split_folder_and_path(infile)
if not outdir:
outdir = og_dir
outfile_basename = op.join(outdir, name)
# Check if BLAST DB was already made
if dbtype == 'nucl':
outext = ['.nhr', '.nin', '.nsq']
elif dbtype == 'prot':
outext = ['.phr', '.pin', '.psq']
else:
raise ValueError('dbtype must be "nucl" or "prot"')
outfile_all = [outfile_basename + x for x in outext]
db_made = True
for f in outfile_all:
if not op.exists(f):
db_made = False
# Run makeblastdb if DB does not exist
if db_made:
log.debug('BLAST database already exists at {}'.format(outfile_basename))
return outfile_all
else:
retval = subprocess.call('makeblastdb -in {} -dbtype {} -out {}'.format(infile, dbtype, outfile_basename), shell=True)
if retval == 0:
log.debug('Made BLAST database at {}'.format(outfile_basename))
return outfile_all
else:
log.error('Error running makeblastdb, exit code {}'.format(retval))
def run_bidirectional_blast(reference, other_genome, dbtype, outdir=''):
"""BLAST a genome against another, and vice versa.
This function requires BLAST to be installed, do so by running:
sudo apt install ncbi-blast+
Args:
reference (str): path to "reference" genome, aka your "base strain"
other_genome (str): path to other genome which will be BLASTed to the reference
dbtype (str): "nucl" or "prot" - what format your genome files are in
outdir (str): path to folder where BLAST outputs should be placed
Returns:
Paths to BLAST output files.
(reference_vs_othergenome.out, othergenome_vs_reference.out)
"""
# TODO: add force_rerun option
if dbtype == 'nucl':
command = 'blastn'
elif dbtype == 'prot':
command = 'blastp'
else:
raise ValueError('dbtype must be "nucl" or "prot"')
r_folder, r_name, r_ext = utils.split_folder_and_path(reference)
g_folder, g_name, g_ext = utils.split_folder_and_path(other_genome)
# make sure BLAST DBs have been made
run_makeblastdb(infile=reference, dbtype=dbtype, outdir=r_folder)
run_makeblastdb(infile=other_genome, dbtype=dbtype, outdir=g_folder)
# Reference vs genome
r_vs_g = r_name + '_vs_' + g_name + '_blast.out'
r_vs_g = op.join(outdir, r_vs_g)
if op.exists(r_vs_g) and os.stat(r_vs_g).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(r_name, g_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, op.join(g_folder, g_name), r_vs_g)
log.debug('Running: {}'.format(cmd))
retval = subprocess.call(cmd, shell=True)
if retval == 0:
log.debug('BLASTed {} vs {}'.format(r_name, g_name))
else:
log.error('Error running {}, exit code {}'.format(command, retval))
# Genome vs reference
g_vs_r = g_name + '_vs_' + r_name + '_blast.out'
g_vs_r = op.join(outdir, g_vs_r)
if op.exists(g_vs_r) and os.stat(g_vs_r).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(g_name, r_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, op.join(r_folder, r_name), g_vs_r)
log.debug('Running: {}'.format(cmd))
retval = subprocess.call(cmd, shell=True)
if retval == 0:
log.debug('BLASTed {} vs {}'.format(g_name, r_name))
else:
log.error('Error running {}, exit code {}'.format(command, retval))
return r_vs_g, g_vs_r
def print_run_bidirectional_blast(reference, other_genome, dbtype, outdir):
"""Write torque submission files for running bidirectional blast on a server and print execution command.
Args:
reference (str): Path to "reference" genome, aka your "base strain"
other_genome (str): Path to other genome which will be BLASTed to the reference
dbtype (str): "nucl" or "prot" - what format your genome files are in
outdir (str): Path to folder where Torque scripts should be placed
"""
# TODO: add force_rerun option
if dbtype == 'nucl':
command = 'blastn'
elif dbtype == 'prot':
command = 'blastp'
else:
raise ValueError('dbtype must be "nucl" or "prot"')
r_folder, r_name, r_ext = utils.split_folder_and_path(reference)
g_folder, g_name, g_ext = utils.split_folder_and_path(other_genome)
# Reference vs genome
r_vs_g_name = r_name + '_vs_' + g_name
r_vs_g = r_vs_g_name + '_blast.out'
if op.exists(op.join(outdir, r_vs_g)) and os.stat(op.join(outdir, r_vs_g)).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(r_name, g_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, g_name, r_vs_g)
utils.write_torque_script(command=cmd, err=r_vs_g_name, out=r_vs_g_name, name=r_vs_g_name,
outfile=op.join(outdir, r_vs_g_name) + '.sh',
walltime='00:15:00', queue='regular')
# Genome vs reference
g_vs_r_name = g_name + '_vs_' + r_name
g_vs_r = g_vs_r_name + '_blast.out'
if op.exists(op.join(outdir, g_vs_r)) and os.stat(op.join(outdir, g_vs_r)).st_size != 0:
log.debug('{} vs {} BLAST already run'.format(g_name, r_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, r_name, g_vs_r)
utils.write_torque_script(command=cmd, err=g_vs_r_name, out=g_vs_r_name, name=g_vs_r_name,
outfile=op.join(outdir, g_vs_r_name) + '.sh',
walltime='00:15:00', queue='regular')
def calculate_bbh(blast_results_1, blast_results_2, r_name=None, g_name=None, outdir=''):
"""Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results.
Args:
blast_results_1 (str): BLAST results for reference vs. other genome
blast_results_2 (str): BLAST results for other vs. reference genome
r_name: Name of reference genome
g_name: Name of other genome
outdir: Directory where BLAST results are stored.
Returns:
Path to Pandas DataFrame of the BBH results.
"""
# TODO: add force_rerun option
cols = ['gene', 'subject', 'PID', 'alnLength', 'mismatchCount', 'gapOpenCount', 'queryStart', 'queryEnd',
'subjectStart', 'subjectEnd', 'eVal', 'bitScore']
if not r_name and not g_name:
r_name = op.basename(blast_results_1).split('_vs_')[0]
g_name = op.basename(blast_results_1).split('_vs_')[1].replace('_blast.out', '')
r_name2 = op.basename(blast_results_2).split('_vs_')[1].replace('_blast.out', '')
if r_name != r_name2:
log.warning('{} != {}'.format(r_name, r_name2))
outfile = op.join(outdir, '{}_vs_{}_bbh.csv'.format(r_name, g_name))
if op.exists(outfile) and os.stat(outfile).st_size != 0:
log.debug('{} vs {} BLAST BBHs already found at {}'.format(r_name, g_name, outfile))
return outfile
bbh1 = pd.read_csv(blast_results_1, sep='\t', names=cols)
bbh2 = pd.read_csv(blast_results_2, sep='\t', names=cols)
out = pd.DataFrame()
log.debug('Finding BBHs for {} vs. {}'.format(r_name, g_name))
for g in bbh1[pd.notnull(bbh1.gene)].gene.unique():
res = bbh1[bbh1.gene == g]
if len(res) == 0:
continue
best_hit = res.loc[res.PID.idxmax()].copy()
best_gene = best_hit.subject
res2 = bbh2[bbh2.gene == best_gene]
if len(res2) == 0:
continue
best_hit2 = res2.loc[res2.PID.idxmax()]
best_gene2 = best_hit2.subject
if g == best_gene2:
best_hit['BBH'] = '<=>'
else:
best_hit['BBH'] = '->'
out = pd.concat([out, pd.DataFrame(best_hit).transpose()])
out.to_csv(outfile)
log.debug('{} vs {} BLAST BBHs saved at {}'.format(r_name, g_name, outfile))
return outfile
def create_orthology_matrix(r_name, genome_to_bbh_files, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None,
outname='', outdir='', force_rerun=False):
"""Create an orthology matrix using best bidirectional BLAST hits (BBH) outputs.
Args:
r_name (str): Name of the reference genome
genome_to_bbh_files (dict): Mapping of genome names to the BBH csv output from the
:func:`~ssbio.protein.sequence.utils.blast.calculate_bbh` method
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
outname: Name of output file of orthology matrix
outdir: Path to output directory
force_rerun (bool): Force recreation of the orthology matrix even if the outfile exists
Returns:
str: Path to orthologous genes matrix.
"""
if outname:
outfile = op.join(outdir, outname)
else:
outfile = op.join(outdir, '{}_orthology.csv'.format(r_name))
if op.exists(outfile) and os.stat(outfile).st_size != 0 and not force_rerun:
log.info('{}: loaded existing orthology matrix'.format(outfile))
return outfile
if not pid_cutoff and not bitscore_cutoff and not evalue_cutoff:
log.warning('No cutoffs supplied, insignificant hits may be reported')
if not pid_cutoff:
pid_cutoff = 0
if not bitscore_cutoff:
bitscore_cutoff = 0
if not evalue_cutoff:
evalue_cutoff = float('Inf')
out = pd.DataFrame()
for g_name, bbh_path in genome_to_bbh_files.items():
df_bbh = pd.read_csv(bbh_path, index_col=0)
bidirectional = df_bbh[df_bbh.BBH == '<=>']
data = bidirectional[(bidirectional.PID > pid_cutoff) & (bidirectional.eVal < evalue_cutoff) & (bidirectional.bitScore > bitscore_cutoff)]
data.index = data.gene
data2 = data[['subject']]
if out.empty:
out = data2
out = out.rename(columns={'subject': g_name})
else:
out = pd.merge(out, data2, left_index=True, right_index=True, how='outer')
out = out.rename(columns={'subject': g_name})
out.to_csv(outfile)
log.debug('{} orthologous genes saved at {}'.format(r_name, outfile))
return outfile
```
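A hypothetical end-to-end sketch of the BLAST utilities above, assuming NCBI BLAST+ is installed, the module is importable as `ssbio.protein.sequence.utils.blast`, and the FASTA files and output folder already exist; all paths and genome names are made up.
```python
from ssbio.protein.sequence.utils.blast import (run_bidirectional_blast, calculate_bbh,
                                                create_orthology_matrix)

reference = 'genomes/ref_strain.faa'    # made-up protein FASTA of the reference strain
other = 'genomes/other_strain.faa'      # made-up protein FASTA of another strain

# BLAST reference vs. other and other vs. reference (makes the BLAST DBs if needed)
r_vs_g, g_vs_r = run_bidirectional_blast(reference, other, dbtype='prot', outdir='blast_out')

# Reduce the two result tables to best bidirectional hits
bbh_csv = calculate_bbh(r_vs_g, g_vs_r, outdir='blast_out')

# Combine BBH tables (one per genome) into a single orthology matrix
orth_csv = create_orthology_matrix(r_name='ref_strain',
                                   genome_to_bbh_files={'other_strain': bbh_csv},
                                   pid_cutoff=80, evalue_cutoff=1e-10,
                                   outdir='blast_out')
```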
#### File: protein/structure/chainprop.py
```python
from ssbio.core.object import Object
import logging
log = logging.getLogger(__name__)
from Bio.SeqFeature import FeatureLocation, CompoundLocation
from more_itertools import locate
import ssbio.utils
from ssbio.protein.sequence.seqprop import SeqProp
class ChainProp(Object):
"""Class for protein structural properties of a specific chain"""
def __init__(self, ident, pdb_parent, seq_record=None, description=None):
Object.__init__(self, id=ident, description=description)
self.pdb_parent = pdb_parent
self.seq_record = seq_record
if not self.description:
self.description = 'Chain {} from PDB parent {}'.format(self.id, self.pdb_parent)
def reset_seq_record(self):
self.seq_record = None
def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a list of residue numbers"""
# XTODO: documentation
if not self.seq_record:
raise ValueError('No chain sequence stored')
biop_compound_list = []
for resnum in resnums:
feat = FeatureLocation(resnum - 1, resnum)
biop_compound_list.append(feat)
if len(biop_compound_list) == 0:
log.info('Zero length subsequences')
return
elif len(biop_compound_list) == 1:
log.debug('Subsequence only one residue long')
sub_feature_location = biop_compound_list[0]
else:
sub_feature_location = CompoundLocation(biop_compound_list)
sub_feature = sub_feature_location.extract(self.seq_record)
if not new_id:
new_id = '{}_subseq'.format(self.id)
new_sp = SeqProp(id=new_id, seq=sub_feature)
if copy_letter_annotations:
new_sp.letter_annotations = sub_feature.letter_annotations
return new_sp
def get_subsequence_from_property(self, property_key, property_value, condition, return_resnums=False,
copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a certain property you want to find in
this chain's letter_annotation
See documentation for :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property`
Args:
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (str): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
return_resnums (bool): If resnums should be returned as well
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties
"""
if not self.seq_record:
raise ValueError('No chain sequence stored')
if property_key not in self.seq_record.letter_annotations:
log.error(KeyError('{}: {} not contained in the letter annotations'.format(self.seq_record.id, property_key)))
return
if condition == 'in':
subfeat_indices = list(locate(self.seq_record.letter_annotations[property_key],
lambda x: x in property_value))
else:
subfeat_indices = list(locate(self.seq_record.letter_annotations[property_key],
lambda x: ssbio.utils.check_condition(x, condition, property_value)))
subfeat_resnums = [x + 1 for x in subfeat_indices]
new_sp = self.get_subsequence(resnums=subfeat_resnums, new_id='{}_{}_{}_{}_{}_extracted'.format(self.pdb_parent,
self.id,
property_key,
condition,
property_value),
copy_letter_annotations=copy_letter_annotations)
if return_resnums:
return new_sp, subfeat_resnums
else:
return new_sp
```
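A minimal sketch of `ChainProp.get_subsequence_from_property`, assuming `ssbio` and Biopython are installed; the chain sequence and the `RSA` letter annotation are invented for illustration and would normally come from a parsed structure.
```python
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from ssbio.protein.structure.chainprop import ChainProp

# Made-up chain sequence with a made-up per-residue relative solvent accessibility track
record = SeqRecord(Seq('MKTAYIAKQR'), id='1abc_A')
record.letter_annotations['RSA'] = [0.9, 0.1, 0.8, 0.2, 0.1, 0.0, 0.7, 0.6, 0.1, 0.9]

chain = ChainProp(ident='A', pdb_parent='1abc', seq_record=record)

# Extract the residues with RSA > 0.5 as a new SeqProp, plus their residue numbers
surface, resnums = chain.get_subsequence_from_property(property_key='RSA',
                                                       property_value=0.5,
                                                       condition='>',
                                                       return_resnums=True)
print(resnums)  # [1, 3, 7, 8, 10]
```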
#### File: structure/properties/opm.py
```python
import requests
from bs4 import BeautifulSoup
import ssbio.utils
### Server
# http://opm.phar.umich.edu/server.php
#
# ### Definitions:
#
# Depth or hydrophobic thickness. This parameter indicates the calculated hydrophobic thickness
# (for TM proteins) or maximal penetration depth of protein atoms into the lipid hydrocarbon core
# (for peripheral/monotopic) proteins. The ± values for the depth and tilt angle show fluctuations of
# the corresponding parameters within 1 kcal/mol around the global minimum of transfer energy.
#
# Tilt angle is calculated between membrane normal (Z axis) and protein axis. The protein axis is
# calculated as the sum of TM secondary structure segment vectors (for TM proteins) or as the principal
# inertia axis (for peripheral proteins).
#
# Transfer energy of the protein from water to lipid bilayer. This energy roughly corresponds to the
# actual membrane binding energy for water-soluble peripheral proteins, unless: (a) some membrane-anchoring
# elements are missing or disordered in the crystal structure; (b) there is strong specific binding of lipids
# (e.g. in PH domains and other "lipid clamps"), or (c) membrane binding is coupled with significant structural
# changes of the protein (e.g. helix-coil transition for amphiphilic α-helices). In situations (a) and (b), the
# calculated membrane binding free energy is underestimated. In situation (c) it is usually overestimated.
#
# Table of membrane-embedded residues consists of two parts: (a) list of residues penetrating into the hydrocarbon
# core of the lipid bilayer for each subunit (tilt angles of individual subunits in this part are calculated based
# on the principal inertia axis), and (b) parts of transmembrane alpha-helices or beta-strands that are embedded
# into the hydrocarbon core (the entire secondary structures can be longer; tilt angles of individual subunits
# in this part are calculated as vector averages of TM secondary structure segment vectors).
#
# Output coordinate file for a protein positioned in the lipid bilayer. The origin of coordinates corresponds
# to the center of lipid bilayer. Z axis coincides with membrane normal; atoms with the positive sign of Z coordinate
# are arranged in the "outer" leaflet as defined by the user-specified topology. Positions of DUMMY atoms correspond
# to locations of lipid carbonyl groups.
#
# Diagnostic messages. The server produces diagnostic messages in a separate window. Please report these messages to
# developer if program aborts.
## SEE: https://structure.dynamic.ucsd.edu:9998/notebooks/projects_unsynced/sandbox/PPM_server_test.ipynb
def run_ppm_server(pdb_file, outfile, force_rerun=False):
"""Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
url = 'http://sunshine.phar.umich.edu/upload_file.php'
files = {'userfile': open(pdb_file, 'rb')}
r = requests.post(url, files=files)
info = r.text
# Save results in raw HTML format
with open(outfile, 'w') as f:
f.write(info)
else:
# Utilize existing saved results
with open(outfile, 'r') as f:
info = f.read()
# Clean up the HTML stuff
t = info.replace('\n', '')
tt = t.replace('\r', '')
ttt = tt.replace('\t', '')
soup = BeautifulSoup(ttt, "lxml")
# Find all tables in the HTML code
tables = soup.find_all("table", attrs={"class": "data"})
info_dict = {}
# There are multiple tables with information
table_index = 0
for t in tables:
data_index = 0
# "row1" contains data
for data in t.find_all('tr', attrs={"class": "row1"}):
data_list = list(data.strings)
if table_index == 0:
info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
info_dict['deltaG_transfer'] = data_list[2]
info_dict['Tilt Angle'] = data_list[3]
if table_index == 1 and data_index == 0:
info_dict['Embedded_residues_Tilt'] = data_list[0]
info_dict['Embedded_residues'] = data_list[1]
if table_index == 1 and data_index == 1:
info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
if table_index == 2:
info_dict['Output Messages'] = data_list[1]
if table_index == 3:
baseurl = 'http://sunshine.phar.umich.edu/'
a = data.find('a', href=True)
download_url = baseurl + a['href'].replace('./', '')
info_dict['Output file download link'] = download_url
data_index += 1
table_index += 1
return info_dict
```
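A hypothetical call to `run_ppm_server`, assuming network access to the PPM/OPM server, that `requests`, BeautifulSoup and `lxml` are installed, and that the module is importable as `ssbio.protein.structure.properties.opm`; the PDB file name is made up.
```python
from ssbio.protein.structure.properties.opm import run_ppm_server

# Upload a (made-up) membrane protein structure and cache the raw HTML results locally
results = run_ppm_server(pdb_file='my_membrane_protein.pdb',
                         outfile='my_membrane_protein_ppm.html')

print(results['Depth/Hydrophobic Thickness'])
print(results['Tilt Angle'])
print(results['Output file download link'])  # URL of the membrane-positioned coordinate file
```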
#### File: structure/utils/cleanpdb.py
```python
import argparse
import logging
import os
import os.path as op
import textwrap
from Bio import PDB
from tqdm import tqdm
import ssbio.utils
from ssbio.protein.structure.utils.structureio import StructureIO
log = logging.getLogger(__name__)
class CleanPDB(PDB.Select):
"""Selection rules to clean a PDB entry.
These rules aim to:
- Add missing chain identifiers to a PDB file
- Select a single chain if noted
- Remove alternate atom locations
- Add atom occupancies
- Add B (temperature) factors (default Biopython behavior)
"""
def __init__(self, remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None):
"""Initialize the parameters which indicate what cleaning will occur
Args:
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
"""
self.remove_atom_alt = remove_atom_alt
self.remove_atom_hydrogen = remove_atom_hydrogen
self.keep_atom_alt_id = keep_atom_alt_id
self.add_atom_occ = add_atom_occ
self.remove_res_hetero = remove_res_hetero
self.add_chain_id_if_empty = add_chain_id_if_empty
if not keep_chains:
self.keep_chains = []
else:
self.keep_chains = ssbio.utils.force_list(keep_chains)
if not keep_chemicals:
self.keep_chemicals = []
else:
self.keep_chemicals = ssbio.utils.force_list(keep_chemicals)
if not keep_res_only:
self.keep_res_only = []
else:
self.keep_res_only = ssbio.utils.force_list(keep_res_only)
def accept_chain(self, chain):
# If the chain does not have an ID, add one to it and keep it
# http://comments.gmane.org/gmane.comp.python.bio.devel/10639
if self.add_chain_id_if_empty and not chain.id.strip():
chain.id = self.add_chain_id_if_empty
return True
# If a chain is specified and the current chain equals that specified chain, keep it
elif self.keep_chains and chain.id in self.keep_chains:
return True
# If a chain is specified but the current chain does not equal that specified chain, remove it
elif self.keep_chains and chain.id not in self.keep_chains:
return False
# If no chain is specified, keep all chains
else:
return True
def accept_residue(self, residue):
hetfield, resseq, icode = residue.get_id()
if hetfield == '':
hetfield = ' '
if self.keep_res_only:
if residue.resname.strip() in self.keep_res_only:
return True
else:
return False
# If you want to remove residues that are not normal, remove them
if self.remove_res_hetero and hetfield[0] != ' ' and residue.resname.strip() not in self.keep_chemicals:
return False
else:
return True
def accept_atom(self, atom):
# If you want to remove hydrogens and the atom is an H, remove it
if self.remove_atom_hydrogen and atom.element == 'H':
return False
# If you want to remove alternate locations, and the alt location isn't the one you want to keep, remove it
elif self.remove_atom_alt and atom.is_disordered() and atom.get_altloc() != self.keep_atom_alt_id:
return False
else:
# Add occupancies if there are none and you want to
# http://comments.gmane.org/gmane.comp.python.bio.general/6289
if self.add_atom_occ and atom.occupancy is None:
atom.set_occupancy(1)
if self.remove_atom_alt:
atom.set_altloc(' ')
return True
def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean a PDB file.
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
outfile = ssbio.utils.outfile_maker(inname=pdb_file,
append_to_name=out_suffix,
outdir=outdir,
outext='.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
my_pdb = StructureIO(pdb_file)
my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains,
keep_chemicals=keep_chemicals)
my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix,
out_dir=outdir,
custom_selection=my_cleaner,
force_rerun=force_rerun)
return my_clean_pdb
else:
return outfile
if __name__ == '__main__':
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Clean PDB files - cleanpdb.py
-----------------------------
This script will automatically:
* Add missing chains to a PDB file
* Select a single chain or chains if noted
* Remove alternate atom locations
* Add atom occupancies
* Add B (temperature) factors (default Biopython behavior)
Cleaned PDBs will be in a clean_pdbs folder where the script is executed.
Example: script help
$ cleanpdb --help
Example: clean one PDB file
$ cleanpdb 1kf6.pdb
Example: clean one PDB file and keep only chains A and B
$ cleanpdb 1kf6.pdb --chain A,B
Example: clean multiple PDB files
$ cleanpdb *.pdb
Example: clean a whole directory of PDB files
$ cleanpdb /path/to/pdb/files
"""))
p.add_argument('infile', help='PDB file or folder you want to clean', nargs='+', type=str)
p.add_argument('--outsuffix', '-os', default='_clean', help='Suffix appended to PDB file')
p.add_argument('--outdir', '-od', default='clean_pdbs', help='Directory to output clean PDBs')
p.add_argument('--chain', '-c', default=None, help='Keep only specified chains')
p.add_argument('--keephydro', '-hy', action='store_false', help='Keep hydrogen atoms (default is to remove)')
p.add_argument('--keephetero', '-ht', action='store_false', help='Keep hetero atoms (default is to remove)')
# TODO: if this flag is present, the alternate positions seem to switch line positions
p.add_argument('--keepalt', '-ka', action='store_false', help='Keep alternate positions (default is to remove)')
p.add_argument('--force', '-f', action='store_true', help='Force rerunning of cleaning even if the clean PDB exists')
args = p.parse_args()
if args.chain:
args.chain = args.chain.split(',')
if not op.isdir(args.outdir):
os.mkdir(args.outdir)
infiles = ssbio.utils.input_list_parser(args.infile)
for pdb in tqdm(infiles):
outfile = ssbio.utils.outfile_maker(inname=pdb,
append_to_name=args.outsuffix,
outdir=args.outdir,
outext='.pdb')
if ssbio.utils.force_rerun(flag=args.force, outfile=outfile):
my_pdb = StructureIO(pdb)
my_cleaner = CleanPDB(remove_atom_alt=args.keepalt,
remove_atom_hydrogen=args.keephydro,
keep_atom_alt_id='A',
add_atom_occ=True,
remove_res_hetero=args.keephetero,
add_chain_id_if_empty='X',
keep_chains=args.chain)
my_clean_pdb = my_pdb.write_pdb(out_suffix=args.outsuffix,
out_dir=args.outdir,
custom_selection=my_cleaner,
force_rerun=args.force)
print('Clean PDBs at: {}'.format(args.outdir))
```
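A minimal sketch of calling `clean_pdb` from Python rather than from the command line, assuming `ssbio` and Biopython are installed and the module is importable as `ssbio.protein.structure.utils.cleanpdb`; the input path is made up and the output folder is assumed to exist.
```python
from ssbio.protein.structure.utils.cleanpdb import clean_pdb

# Keep only chain A, strip hydrogens and all HETATMs except the FAD cofactor
cleaned = clean_pdb('1kf6.pdb',
                    outdir='clean_pdbs',
                    keep_chains='A',
                    keep_chemicals='FAD',
                    remove_atom_hydrogen=True,
                    remove_res_hetero=True)
print(cleaned)  # e.g. clean_pdbs/1kf6_clean.pdb
```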
#### File: structure/utils/dock.py
```python
import os
import pandas as pd
import os.path as op
import logging
from ssbio.core.object import Object
import ssbio.utils
log = logging.getLogger(__name__)
class DOCK(Object):
"""Class to prepare a structure file for docking with DOCK6.
Attributes:
"""
def __init__(self, structure_id, pdb_file, amb_file, flex1_file, flex2_file, root_dir=None):
"""Initialize a DOCK6 project.
Args:
"""
super(DOCK, self).__init__(id=structure_id, description='DOCK6 preparation')
self._root_dir = None
self.structure_path = pdb_file
if root_dir:
self.root_dir = root_dir
else:
self.root_dir = self.structure_dir
self.dockprep_path = None
self.receptormol2_path = None
self.receptorpdb_path = None
self.dms_path = None
self.sphgen_path = None
self.bindingsite_path = None
self.sphsel_path = None
self.box_path = None
self.grid_path = None
self.dock_flexible_outfile = None
self.dock_flexible_scored_result = None
self.dock_flexible_conformers_result = None
self.amb_file = amb_file
self.flex1_file = flex1_file
self.flex2_file = flex2_file
log.debug('{}: created DOCK6 project folder at {}'.format(structure_id, self.dock_dir))
@property
def root_dir(self):
"""str: Directory where DOCK project folder is located"""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
raise ValueError('No path specified')
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.debug('Changing root directory of DOCK project for "{}" from {} to {}'.format(self.id, self.root_dir, path))
if not op.exists(op.join(path, self.id)):
raise IOError('DOCK project "{}" does not exist in folder {}'.format(self.id, path))
self._root_dir = path
for d in [self.dock_dir]:
ssbio.utils.make_dir(d)
@property
def dock_dir(self):
"""str: DOCK folder"""
if self.root_dir:
return op.join(self.root_dir, self.id + '_DOCK')
else:
log.warning('Root directory not set')
return None
@property
def structure_dir(self):
if not self._structure_dir:
raise OSError('No structure folder set')
return self._structure_dir
@structure_dir.setter
def structure_dir(self, path):
if path and not op.exists(path):
raise OSError('{}: folder does not exist'.format(path))
self._structure_dir = path
@property
def structure_path(self):
if not self.structure_file:
raise OSError('Structure file not loaded')
path = op.join(self.structure_dir, self.structure_file)
if not op.exists(path):
raise OSError('{}: file does not exist'.format(path))
return path
@structure_path.setter
def structure_path(self, path):
"""Provide pointers to the paths of the structure file
Args:
path: Path to structure file
"""
if not path:
self.structure_dir = None
self.structure_file = None
else:
if not op.exists(path):
raise OSError('{}: file does not exist!'.format(path))
if not op.dirname(path):
self.structure_dir = '.'
else:
self.structure_dir = op.dirname(path)
self.structure_file = op.basename(path)
def dockprep(self, force_rerun=False):
"""Prepare a PDB file for docking by first converting it to mol2 format.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running dock preparation...'.format(self.id))
prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))
prep_py = op.join(self.dock_dir, "prep.py")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):
with open(prep_py, "w") as f:
f.write('import chimera\n')
f.write('from DockPrep import prep\n')
f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\n')
f.write('prep(models)\n')
f.write('from WriteMol2 import writeMol2\n')
f.write('writeMol2(models, "{}")\n'.format(prep_mol2))
cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)
os.system(cmd)
os.remove(prep_py)
os.remove('{}c'.format(prep_py))
if ssbio.utils.is_non_zero_file(prep_mol2):
self.dockprep_path = prep_mol2
log.debug('{}: successful dockprep execution'.format(self.dockprep_path))
else:
log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path))
def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):
"""Isolate the receptor by stripping everything except protein and specified ligands.
Args:
keep_ligands (str, list): Ligand(s) to keep in PDB file
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running protein receptor isolation...'.format(self.id))
if not self.dockprep_path:
raise ValueError('Please run dockprep')
receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))
receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))
prly_com = op.join(self.dock_dir, "prly.com")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):
with open(prly_com, "w") as f:
f.write('open {}\n'.format(self.dockprep_path))
keep_str = 'delete ~protein'
if keep_ligands:
keep_ligands = ssbio.utils.force_list(keep_ligands)
for res in keep_ligands:
keep_str += ' & ~:{} '.format(res)
keep_str = keep_str.strip() + '\n'
f.write(keep_str)
f.write('write format mol2 0 {}\n'.format(receptor_mol2))
f.write('delete element.H\n')
f.write('write format pdb 0 {}\n'.format(receptor_noh))
cmd = 'chimera --nogui {}'.format(prly_com)
os.system(cmd)
os.remove(prly_com)
if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):
self.receptormol2_path = receptor_mol2
self.receptorpdb_path = receptor_noh
log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))
log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))
else:
log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))
def dms_maker(self, force_rerun=False):
"""Create surface representation (dms file) of receptor
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running surface representation maker...'.format(self.id))
if not self.receptorpdb_path:
raise ValueError('Please run protein_only_and_noH')
dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms):
cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms)
os.system(cmd)
self.dms_path = dms
if ssbio.utils.is_non_zero_file(dms):
self.dms_path = dms
log.debug('{}: successful dms execution'.format(self.dms_path))
else:
log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))
def sphgen(self, force_rerun=False):
"""Create sphere representation (sph file) of receptor from the surface representation
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running sphere generation...'.format(self.id))
if not self.dms_path:
raise ValueError('Please run dms_maker')
sph = op.join(self.dock_dir, '{}_receptor.sph'.format(self.id))
insph = op.join(self.dock_dir, 'INSPH')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=sph):
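# The values written to INSPH below follow the usual sphgen input order (see the DOCK
# documentation): surface (dms) file, sphere placement flag ("R" = spheres outside the
# receptor surface), surface point selection ("X" = use all points), distance to prevent
# sphere-surface overlap, maximum and minimum sphere radii in Angstroms, and the output
# .sph file name.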
with open(insph, "w") as f:
f.write("{}\n".format(self.dms_path))
f.write("R\n")
f.write("X\n")
f.write("0.0\n")
f.write("4.0\n")
f.write("1.4\n")
f.write("{}\n".format(sph))
os.chdir(self.dock_dir)
cmd = "sphgen_cpp"
os.system(cmd)
os.remove(insph)
if ssbio.utils.is_non_zero_file(sph):
self.sphgen_path = sph
log.debug('{}: successful sphgen execution'.format(self.sphgen_path))
else:
log.critical('{}: sphgen_cpp failed to run on dms file'.format(self.dms_path))
def binding_site_mol2(self, residues, force_rerun=False):
"""Create mol2 of only binding site residues from the receptor
This function will take in a .pdb file (preferably the _receptor_noH.pdb file)
and a string of residues (eg: '144,170,199') and delete all other residues in the
.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.
This mol2 file is later used by DOCK's sphere_selector to select spheres within the radius
of the binding site.
Args:
residues (str): Comma separated string of residues (eg: '144,170,199')
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running binding site isolation...'.format(self.id))
if not self.receptorpdb_path:
raise ValueError('Please run protein_only_and_noH')
prefix = self.id + '_' + 'binding_residues'
mol2maker = op.join(self.dock_dir, '{}_make_mol2.py'.format(prefix))
outfile = op.join(self.dock_dir, '{}.mol2'.format(prefix))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(mol2maker, 'w') as mol2_maker:
mol2_maker.write('#! /usr/bin/env python\n')
mol2_maker.write('from chimera import runCommand\n')
mol2_maker.write('runCommand("open {}")\n'.format(self.receptorpdb_path))
mol2_maker.write('runCommand("delete ~:{}")\n'.format(residues))
mol2_maker.write('runCommand("write format mol2 resnum 0 {}")\n'.format(outfile))
mol2_maker.write('runCommand("close all")')
cmd = 'chimera --nogui {}'.format(mol2maker)
os.system(cmd)
os.remove(mol2maker)
os.remove('{}c'.format(mol2maker))
if ssbio.utils.is_non_zero_file(outfile):
self.bindingsite_path = outfile
log.debug('{}: successful binding site isolation'.format(self.bindingsite_path))
else:
log.critical('{}: binding_site_mol2 failed to run on receptor file'.format(self.receptorpdb_path))
def sphere_selector_using_residues(self, radius, force_rerun=False):
"""Select spheres based on binding site residues
Args:
radius (int, float): Radius around binding residues to dock to
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running sphere selector...'.format(self.id))
if not self.sphgen_path or not self.bindingsite_path:
raise ValueError('Please run sphgen and binding_site_mol2')
selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):
cmd = "sphere_selector {} {} {}".format(self.sphgen_path, self.bindingsite_path, radius)
rename = "mv selected_spheres.sph {}".format(selsph)
os.system(cmd)
os.system(rename)
if ssbio.utils.is_non_zero_file(selsph):
self.sphsel_path = selsph
log.debug('{}: successful sphere selection'.format(self.sphsel_path))
else:
log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path))
# def split_sph(self, force_rerun=False):
# """TODO: documentation? what was this used for"""
#
# selsph = op.join(self.dock_dir, '{}_selsph.sph'.format(self.id))
#
# if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):
# with open(self.sphgen_path, "r") as f:
# text = f.read()
# f.seek(0)
# lines = f.readlines()
# paragraphs = re.split("cluster ... number of spheres in cluster ...\n", text)
#
# with open(selsph, "w") as f2:
# f2.write(lines[1])
# f2.write(paragraphs[1])
#
# return selsph
def showbox(self, force_rerun=False):
"""Create the dummy PDB box around the selected spheres.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running box maker...'.format(self.id))
if not self.sphsel_path:
raise ValueError('Please run sphere_selector_using_residues')
boxfile = op.join(self.dock_dir, "{}_box.pdb".format(self.id))
boxscript = op.join(self.dock_dir, "{}_box.in".format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=boxfile):
with open(boxscript, "w") as f:
f.write("Y\n")
f.write("0\n")
f.write("{}\n".format(op.basename(self.sphsel_path)))
f.write("1\n")
f.write("{}".format(op.basename(boxfile)))
cmd = "showbox < {}".format(boxscript)
os.chdir(self.dock_dir)
os.system(cmd)
if ssbio.utils.is_non_zero_file(boxfile):
self.box_path = boxfile
log.debug('{}: successful box creation'.format(self.box_path))
else:
log.critical('{}: showbox failed to run on selected spheres file'.format(self.sphsel_path))
def grid(self, force_rerun=False):
"""Create the scoring grid within the dummy box.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running grid maker...'.format(self.id))
if not self.receptormol2_path or not self.box_path:
raise ValueError('Please run protein_only_and_noH and showbox')
gridscript = op.join(self.dock_dir, "{}_grid.in".format(self.id))
out_name = op.join(self.dock_dir, "{}_grid.out".format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=out_name):
with open(gridscript, "w") as f:
grid_text = """compute_grids yes
grid_spacing 0.3
output_molecule no
contact_score yes
contact_cutoff_distance 4.5
energy_score yes
energy_cutoff_distance 9999
atom_model all
attractive_exponent 6
repulsive_exponent 12
distance_dielectric yes
dielectric_factor 4
bump_filter yes
bump_overlap 0.75
receptor_file {}
box_file {}
vdw_definition_file {}
score_grid_prefix {}_grid
""".format(op.basename(self.receptormol2_path), op.basename(self.box_path), self.amb_file, self.id)
f.write(grid_text)
os.chdir(self.dock_dir)
cmd = "grid -i {} -o {}".format(op.basename(gridscript), op.basename(out_name))
os.system(cmd)
if ssbio.utils.is_non_zero_file(out_name):
self.grid_path = out_name
log.debug('{}: successful grid creation'.format(self.grid_path))
else:
log.critical('{}: grid failed to run on receptor + box file'.format(self.box_path))
def do_dock6_flexible(self, ligand_path, force_rerun=False):
"""Dock a ligand to the protein.
Args:
ligand_path (str): Path to ligand (mol2 format) to dock to protein
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running DOCK6...'.format(self.id))
ligand_name = os.path.basename(ligand_path).split('.')[0]
in_name = op.join(self.dock_dir, "{}_{}_flexdock.in".format(self.id, ligand_name))
out_name = op.join(self.dock_dir, "{}_{}_flexdock.out".format(self.id, ligand_name))
conformers_out = op.join(self.dock_dir, '{}_{}_flexdock_conformers.mol2'.format(self.id, ligand_name))
scored_out = op.join(self.dock_dir, '{}_{}_flexdock_scored.mol2'.format(self.id, ligand_name))
ranked_out = op.join(self.dock_dir, '{}_{}_flexdock_ranked.mol2'.format(self.id, ligand_name))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=ranked_out):
with open(in_name, "w") as f:
dock_text = """ligand_atom_file {}
limit_max_ligands no
skip_molecule no
read_mol_solvation no
calculate_rmsd no
use_database_filter no
orient_ligand yes
automated_matching yes
receptor_site_file {}
max_orientations 500
critical_points no
chemical_matching no
use_ligand_spheres no
use_internal_energy yes
internal_energy_rep_exp 12
flexible_ligand yes
user_specified_anchor no
limit_max_anchors no
min_anchor_size 5
pruning_use_clustering yes
pruning_max_orients 100
pruning_clustering_cutoff 100
pruning_conformer_score_cutoff 100
use_clash_overlap no
write_growth_tree no
bump_filter yes
bump_grid_prefix {}
score_molecules yes
contact_score_primary no
contact_score_secondary no
grid_score_primary yes
grid_score_secondary no
grid_score_rep_rad_scale 1
grid_score_vdw_scale 1
grid_score_es_scale 1
grid_score_grid_prefix {}
multigrid_score_secondary no
dock3.5_score_secondary no
continuous_score_secondary no
descriptor_score_secondary no
gbsa_zou_score_secondary no
gbsa_hawkins_score_secondary no
SASA_descriptor_score_secondary no
amber_score_secondary no
minimize_ligand yes
minimize_anchor yes
minimize_flexible_growth yes
use_advanced_simplex_parameters no
simplex_max_cycles 1
simplex_score_converge 0.1
simplex_cycle_converge 1.0
simplex_trans_step 1.0
simplex_rot_step 0.1
simplex_tors_step 10.0
simplex_anchor_max_iterations 500
simplex_grow_max_iterations 500
simplex_grow_tors_premin_iterations 0
simplex_random_seed 0
simplex_restraint_min yes
simplex_coefficient_restraint 10.0
atom_model all
vdw_defn_file {}
flex_defn_file {}
flex_drive_file {}
ligand_outfile_prefix {}_{}_flexdock
write_orientations no
num_scored_conformers 20
write_conformations yes
cluster_conformations yes
rank_ligands yes
""".format(ligand_path, op.basename(self.sphsel_path), op.splitext(op.basename(self.grid_path))[0],
op.splitext(op.basename(self.grid_path))[0], self.amb_file, self.flex1_file, self.flex2_file,
self.id, ligand_name)
f.write(dock_text)
os.chdir(self.dock_dir)
cmd = "dock6 -i {} -o {} -v".format(in_name, out_name)
os.system(cmd)
if ssbio.utils.is_non_zero_file(ranked_out):
self.dock_flexible_outfile = out_name
self.dock_flexible_conformers_result = conformers_out
self.dock_flexible_scored_result = scored_out
log.debug('{}: successful docking!'.format(self.dock_flexible_outfile))
else:
log.error('{}+{}: empty DOCK6 ranked file, execution error (or ligand failed to dock)'.format(self.id,
op.basename(ligand_path)))
def auto_flexdock(self, binding_residues, radius, ligand_path=None, force_rerun=False):
"""Run DOCK6 on a PDB file, given its binding residues and a radius around them.
Provide a path to a ligand to dock a ligand to it. If no ligand is provided, DOCK6 preparations will be run on
that structure file.
Args:
binding_residues (str): Comma separated string of residues (eg: '144,170,199')
radius (int, float): Radius around binding residues to dock to
ligand_path (str): Path to ligand (mol2 format) to dock to protein
force_rerun (bool): If method should be rerun even if output files exist
"""
log.debug('\n{}: running DOCK6...\n'
'\tBinding residues: {}\n'
'\tBinding residues radius: {}\n'
'\tLigand to dock: {}\n'.format(self.id, binding_residues, radius, op.basename(ligand_path) if ligand_path else None))
self.dockprep(force_rerun=force_rerun)
self.protein_only_and_noH(force_rerun=force_rerun)
self.dms_maker(force_rerun=force_rerun)
self.sphgen(force_rerun=force_rerun)
self.binding_site_mol2(residues=binding_residues, force_rerun=force_rerun)
self.sphere_selector_using_residues(radius=radius, force_rerun=force_rerun)
self.showbox(force_rerun=force_rerun)
self.grid(force_rerun=force_rerun)
if ligand_path:
self.do_dock6_flexible(ligand_path=ligand_path, force_rerun=force_rerun)
def parse_results_mol2(mol2_outpath):
"""Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
"""
docked_ligands = pd.DataFrame()
lines = [line.strip() for line in open(mol2_outpath, 'r')]
props = {}
for i, line in enumerate(lines):
if line.startswith('########## Name:'):
ligand = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')[1]
line = lines[i + 1]
props = {}
props['Ligand'] = ligand
if line.startswith('##########'):
splitter = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')
props[splitter[0]] = float(splitter[1])
if line.startswith('@<TRIPOS>MOLECULE'):
if props:
docked_ligands = docked_ligands.append(props, ignore_index=True)
return docked_ligands
# def do_dock6_rigid(self, ligand_path, force_rerun=False):
# ligand_name = os.path.basename(args.ligand).split('.')[0]
# in_name = op.join(self.dock_dir, "{}_{}_dock.in".format(self.id, ligand_name))
# out_name = op.join(self.dock_dir, "{}_{}_dock.out".format (self.id, ligand_name))
#
# with open(in_name, "w") as f:
# dock_text = """ligand_atom_file {}
# limit_max_ligands no
# skip_molecule no
# read_mol_solvation no
# calculate_rmsd no
# use_database_filter no
# orient_ligand yes
# automated_matching yes
# receptor_site_file {}
# max_orientations 1000
# critical_points no
# chemical_matching no
# use_ligand_spheres no
# use_internal_energy yes
# internal_energy_rep_exp 12
# flexible_ligand no
# bump_filter no
# score_molecules yes
# contact_score_primary no
# contact_score_secondary no
# grid_score_primary yes
# grid_score_secondary no
# grid_score_rep_rad_scale 1
# grid_score_vdw_scale 1
# grid_score_es_scale 1
# grid_score_grid_prefix {}
# multigrid_score_secondary no
# dock3.5_score_secondary no
# continuous_score_secondary no
# descriptor_score_secondary no
# gbsa_zou_score_secondary no
# gbsa_hawkins_score_secondary no
# SASA_descriptor_score_secondary no
# amber_score_secondary no
# minimize_ligand yes
# simplex_max_iterations 1000
# simplex_tors_premin_iterations 0
# simplex_max_cycles 1
# simplex_score_converge 0.1
# simplex_cycle_converge 1.0
# simplex_trans_step 1.0
# simplex_rot_step 0.1
# simplex_tors_step 10.0
# simplex_random_seed 0
# simplex_restraint_min yes
# simplex_coefficient_restraint 10.0
# atom_model all
# vdw_defn_file {}
# flex_defn_file {}
# flex_drive_file {}
# ligand_outfile_prefix {}_{}_rigid
# write_orientations yes
# num_scored_conformers 20
# rank_ligands no
# """.format(ligand_path, self.sphsel_path, self.grid_path.split('.')[0],
# self.amb_file, self.flex1_file, self.flex2_file, self.id, ligand_name)
#
# f.write(dock_text)
#
# cmd = "dock6 -i {} -o {}".format(in_name, out_name)
# os.system(cmd)
# def do_dock6_amberscore(self, ligand_path, force_rerun=False):
# """INCOMPLETE"""
# ligand_name = os.path.basename(args.ligand).split('.')[0]
# in_name = op.join(self.dock_dir, "{}_{}_amberscore.in".format(self.id, ligand_name))
# out_name = op.join(self.dock_dir, "{}_{}_amberscore.out".format(self.id, ligand_name))
#
# with open(in_name, "w") as f:
# dock_text = """ligand_atom_file {}.amber_score.mol2
# limit_max_ligands no
# skip_molecule no
# read_mol_solvation no
# calculate_rmsd no
# use_database_filter no
# orient_ligand no
# use_internal_energy no
# flexible_ligand no
# bump_filter no
# score_molecules yes
# contact_score_primary no
# contact_score_secondary no
# grid_score_primary no
# grid_score_secondary no
# multigrid_score_primary no
# multigrid_score_secondary no
# dock3.5_score_primary no
# dock3.5_score_secondary no
# continuous_score_primary no
# continuous_score_secondary no
# descriptor_score_primary no
# descriptor_score_secondary no
# gbsa_zou_score_primary no
# gbsa_zou_score_secondary no
# gbsa_hawkins_score_primary no
# gbsa_hawkins_score_secondary no
# SASA_descriptor_score_primary no
# SASA_descriptor_score_secondary no
# amber_score_primary yes
# amber_score_secondary no
# amber_score_receptor_file_prefix {}
# amber_score_movable_region ligand
# amber_score_minimization_rmsgrad 0.01
# amber_score_before_md_minimization_cycles 100
# amber_score_md_steps 3000
# amber_score_after_md_minimization_cycles 100
# amber_score_gb_model 5
# amber_score_nonbonded_cutoff 18.0
# amber_score_temperature 300.0
# amber_score_abort_on_unprepped_ligand yes
# ligand_outfile_prefix output
# write_orientations no
# num_scored_conformers 1
# rank_ligands no
# """.format()
#
# f.write(dock_text)
#
# cmd = "dock6 -i {} -o {} -v".format(in_name, out_name)
# os.system(cmd)
# if __name__ == '__main__':
#
# import glob
# import argparse
# import shlex
#
# # load inputs from command line
# p = argparse.ArgumentParser(
# description='Run the DOCK steps on a folder of structure files. To run in the background, execute using nohup: nohup dock.py $BASENAME $NUMFRAMES /path/to/structures/ /path/to/parameters/ --ligand /path/to/ligand.mol2 --cofactors $COFACTORS --residues $RESIDUES > /path/to/logs/$LOGNAME &')
# p.add_argument('basename', help='base filename that you used to name your files')
# p.add_argument('numframes', help='total number of frames from your trajectory', type=int)
# p.add_argument('folder', help='path to folder with your structure files')
# p.add_argument('params', help='path to folder with parameter files')
# p.add_argument('--ligand', help='path to file of your ligand that you want to dock')
# p.add_argument('--cofactors',
# help='comma-separated string of cofactors that you want to keep while docking (e.g. SAM,DNC,WAT)')
# p.add_argument('--residues', help='comma-separated string of the binding residues')
# p.add_argument('--radius', help='radius around binding residues to dock to (default 9 A)', type=int, default=9)
# p.add_argument('--redock', help='run DOCK again for the specified ligand, even if docking files exist',
# default=False)
# args = p.parse_args()
#
# # file paths for docking parameters
# amb = os.path.join(args.params, 'vdw_AMBER_parm99.defn')
# f1 = os.path.join(args.params, 'flex.defn')
# f2 = os.path.join(args.params, 'flex_drive.tbl')
#
# print(args)
# # loading current files
# os.chdir(args.folder)
# # pdbs = glob.glob('{}-*.pdb'.format(args.basename))
# current_files = os.listdir(os.getcwd())
#
# # ligand name
# if args.ligand:
# ligandname = os.path.basename(args.ligand)
#
# # cofactors
# if args.cofactors:
# cofactors_list = shlex.split(args.cofactors)
# else:
# cofactors_list = []
#
# print('***************PARAMETERS***************')
# print('FULL LIST: {0}'.format(vars(args)))
# if args.ligand:
# print('LIGAND: {0}'.format(ligandname))
# if args.cofactors:
# print('COFACTORS: {0}'.format(cofactors_list))
# if args.residues:
# print('BINDING RESIDUES: {0}'.format(args.residues))
# print('RADIUS: {0}'.format(args.radius))
#
# counter = 1
# for frame in range(1, args.numframes + 1):
# # just a simple counter
# print(str(counter) + '/' + str(args.numframes))
# counter += 1
#
# # file_prefix = '{0}-{1:03d}'.format(args.basename, frame)
# file_prefix = '{0}'.format(args.basename)
# print(file_prefix)
#
# # DOCKPREP
# # example: 3bwm-440_prep.mol2
# pdb = '{0}.pdb'.format(file_prefix)
# prepped_check = '{}_prep.mol2'.format(file_prefix)
# if prepped_check in current_files:
# print('***DOCKPREP PREVIOUSLY RUN***')
# prepped_file = prepped_check
# else:
# print('RUNNING: DOCKPREP')
# prepped_file = dockprep(pdb, file_prefix)
#
# # ISOLATE RECEPTOR
# # example: 3bwm-440_receptor.mol2, 3bwm-440_receptor_noH.pdb
# receptor_check = '{}_receptor.mol2'.format(file_prefix)
# receptor_noH_check = '{}_receptor_noH.pdb'.format(file_prefix)
# if receptor_check in current_files and receptor_noH_check in current_files:
# print('***RECEPTOR FILES PREVIOUSLY GENERATED***')
# receptor, receptor_noH = receptor_check, receptor_noH_check
# else:
# print('RUNNING: ISOLATE RECEPTOR')
# receptor, receptor_noH = protein_only_and_noH(prepped_file, cofactors_list, file_prefix)
#
# # DMS
# # example: 3bwm-440_receptor.dms
# dms_check = '{}_receptor.dms'.format(file_prefix)
# if dms_check in current_files:
# print('***SURFACE PREVIOUSLY GENERATED***')
# dms = dms_check
# else:
# print('RUNNING: DMS')
# dms = dms_maker(receptor_noH, file_prefix)
#
# # SPHGEN
# # example: 3bwm-440_receptor.sph
# sph_check = '{}_receptor.sph'.format(file_prefix)
# if sph_check in current_files:
# print('***SPHERES PREVIOUSLY GENERATED***')
# sph = sph_check
# else:
# print('RUNNING: SPHGEN')
# sph = sphgen(dms, file_prefix)
#
# # SPHERE_SELECTOR
# # first choose binding site and save it as separate .mol2
# # example: 3BWY-418_binding_residues.mol2
# binding_site_mol2_file_check = '{}_binding_residues.mol2'.format(file_prefix)
# if binding_site_mol2_file_check in current_files:
# print('***BINDING SITE RESIDUES ALREADY DEFINED***')
# binding_site_mol2_file = binding_site_mol2_file_check
# else:
# print('RUNNING: BINDING SITE MOL2')
# binding_site_mol2_file = binding_site_mol2(receptor_noH, args.residues, file_prefix)
#
# # then select the spheres based on these binding residues
# # example: 3bwm-440_selected_spheres_using_binding_residues.sph
# sel_sph_check = '{}_selected_spheres_using_binding_residues.sph'.format(file_prefix)
# if sel_sph_check in current_files:
# print('***SPHERES ALREADY SELECTED***')
# sel_sph = sel_sph_check
# else:
# print('RUNNING: SPHERE_SELECTOR')
# sel_sph = sphere_selector_using_residues(sph, binding_site_mol2_file, args.radius, file_prefix)
#
# # SHOWBOX
# # example: 3bwm-440_box.pdb
# box_check = '{}_box.pdb'.format(file_prefix)
# if box_check in current_files:
# print('***BOX PREVIOUSLY MADE***')
# box = box_check
# else:
# print('RUNNING: SHOWBOX')
# box = showbox(sel_sph, file_prefix)
#
# # GRID
# # example: 3bwm-440_grid.out
# gr_check = '{}_grid.out'.format(file_prefix)
# if gr_check in current_files:
# print('***GRID PREVIOUSLY CALCULATED***')
# gr = gr_check
# else:
# print('RUNNING: GRID')
# gr = grid(receptor, box, amb, file_prefix)
#
# # DOCK
# if args.ligand:
# dock6_flexible_check = '{}_{}_flexible_scored.mol2'.format((file_prefix, ligandname.split('.')[0]))
# if dock6_flexible_check in current_files and not args.redock:
# print('***DOCK PREVIOUSLY RUN***')
# else:
# print('RUNNING: DOCK')
# do_dock6_flexible(args.ligand, sel_sph, gr, amb, f1, f2, file_prefix)
#
# print('***DOCKING COMPLETE***')
```
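The `DOCK` class above is normally driven through `auto_flexdock`, which chains the preparation steps and then docks a ligand. The sketch below is a hypothetical usage example only: the structure, ligand and parameter-file paths are placeholders (the parameter file names mirror those in the commented-out CLI section), and the `Grid_Score` column name is assumed from typical DOCK6 mol2 output headers.
```python
# Hypothetical usage sketch for the DOCK class above; all paths are placeholders.
params = '/path/to/dock_parameters'

project = DOCK(structure_id='3bwm',
               pdb_file='/path/to/3bwm.pdb',
               amb_file=params + '/vdw_AMBER_parm99.defn',
               flex1_file=params + '/flex.defn',
               flex2_file=params + '/flex_drive.tbl',
               root_dir='/path/to/projects')

# Run all preparation steps and dock a ligand around residues 144, 170 and 199
project.auto_flexdock(binding_residues='144,170,199',
                      radius=9,
                      ligand_path='/path/to/ligand.mol2')

# Parse the scored conformers into a DataFrame (parse_results_mol2 is defined above and,
# as written, takes only the path to the mol2 output file)
if project.dock_flexible_scored_result:
    df = parse_results_mol2(project.dock_flexible_scored_result)
    print(df.sort_values('Grid_Score').head())  # column name assumed from DOCK6 output headers
```
Note that every step shells out with `os.system`, so Chimera, `dms`, `sphgen_cpp`, `sphere_selector`, `showbox`, `grid` and `dock6` all need to be available on the PATH for this to run.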
#### File: structure/utils/foldx.py
```python
import os
import logging
import shutil
import ssbio.utils
import os.path as op
import pandas as pd
from ssbio.core.object import Object
log = logging.getLogger(__name__)
class FoldX(Object):
"""Class to run various commands from the FoldX suite of tools on a protein structure file.
Args:
pdb_path (str): Path to PDB file (PDB file format only!)
project_id (str): Name of your structure or this mini FoldX project
description (str): Short description of the PDB file being run on, e.g. describing the oligomeric state.
rotabase_path (str): Path to the rotabase.txt file
foldx_exec (str, optional): Path to the FoldX executable, if empty, tries to execute ``foldx`` from the shell
root_dir (str, optional): Path to directory where a new directory named after ``project_id`` and
``description`` with "_foldx" appended to it will be created.
Todo:
- Need checks to ensure only PDB files are being used
- STDOUT and STDERR logging in a file
"""
def __init__(self, pdb_path, project_id, description, rotabase_path, foldx_exec=None, root_dir=None):
super(FoldX, self).__init__(id=project_id, description=description)
# Create directories
self._foldx_dirname = '{}_{}_foldx'.format(self.id, self.description)
self._root_dir = None
self.root_dir = root_dir
# FoldX related
self.foldx_exec = foldx_exec
if not foldx_exec:
self.foldx_exec = 'foldx'
self.rotabase_path = rotabase_path
# Copy PDB file
self.pdb_path = shutil.copy2(pdb_path, self.foldx_dir)
self.pdb_file = op.basename(self.pdb_path)
# Copy rotabase.txt
self.rotabase_path = shutil.copy2(self.rotabase_path, self.foldx_dir)
# Output files
self.repaired_pdb_outfile = None
self.mutation_infile = None
self.mutation_ddG_avg_outfile = None
self.mutation_ddG_raw_outfile = None
# To keep track of results
self.mutation_index_to_group = {}
@property
def root_dir(self):
"""str: Path to where the folder named by this protein's ID will be created. Default is current working
directory."""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
path = os.getcwd()
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.debug('Changing root directory of FoldX {} from {} to {}'.format(self._foldx_dirname, self.root_dir, path))
if not op.exists(op.join(path, self.id)):
raise IOError('FoldX {} does not exist in folder {}'.format(self._foldx_dirname, path))
self._root_dir = path
for d in [self.foldx_dir]:
ssbio.utils.make_dir(d)
@property
def foldx_dir(self):
"""str: FoldX folder"""
if self.root_dir:
return op.join(self.root_dir, self._foldx_dirname)
else:
log.warning('Root directory not set')
return None
@property
def df_mutation_ddG_avg(self):
return pd.read_csv(op.join(self.foldx_dir, self.mutation_ddG_avg_outfile), skiprows=8, sep='\t')
@property
def df_mutation_ddG_raw(self):
return pd.read_csv(op.join(self.foldx_dir, self.mutation_ddG_raw_outfile), skiprows=8, sep='\t')
def run_repair_pdb(self, silent=False, force_rerun=False):
"""Run FoldX RepairPDB on this PDB file.
Original command::
foldx --command=RepairPDB --pdb=4bxi.pdb
Args:
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
"""
# Create RepairPDB command
foldx_repair_pdb = '{} --command=RepairPDB --pdb={}'.format(self.foldx_exec, self.pdb_file)
# Repaired PDB output file name
foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])
# Run RepairPDB
ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)
# TODO: write stdout/stderr to log file somewhere!
self.repaired_pdb_outfile = foldx_repair_outfile
def create_mutation_file(self, list_of_tuples):
"""Create the FoldX file 'individual_list.txt' to run BuildModel upon.
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::
[
(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')), # Mutation group 1
(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S')) # Mutation group 2
]
"""
self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')
idx = 1
with open(self.mutation_infile, 'w') as f:
for mutant_group in list_of_tuples:
# Write the mutation string to the file
mutstring = ''.join(list(map(lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3]), mutant_group)))
f.write(mutstring + '\n')
# Also keep track of the index being used for this mutation
self.mutation_index_to_group[idx] = mutant_group
idx += 1
def create_random_mutation_file(self, list_of_tuples, original_sequence,
randomize_resnums=False, randomize_resids=False,
skip_resnums=None):
"""Create the FoldX file 'individual_list.txt', but randomize the mutation numbers or residues that were input.
The randomize combinations can be a little confusing - this is what can happen:
- randomize_resnums=False, randomize_resids=False: no change, original mutations are carried out
- randomize_resnums=True, randomize_resids=False: mutations of resid X to resid Y will be carried out,
but on a different residue number where resid X is found
- randomize_resnums=False, randomize_resids=True: mutations of residue X# to a random residue will be
carried out
- randomize_resnums=True, randomize_resids=True: original mutations will be ignored, random mutation of
any residue will be carried out
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to be randomized.
original_sequence (str, Seq, SeqRecord): Original amino acid sequence
randomize_resnums (bool): If residue numbers should be randomized
randomize_resids (bool): If residues themselves should be randomized
skip_resnums (list):
"""
import random
def find(s, ch):
return [i for i, ltr in enumerate(s) if ltr == ch]
def run_build_model(self, num_runs=5, silent=False, force_rerun=False):
"""Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int):
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.
"""
# BuildModel output files
self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
# BuildModel command
foldx_build_model = '{} --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.foldx_exec,
self.repaired_pdb_outfile,
op.basename(self.mutation_infile),
num_runs)
ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir)
def get_ddG_results(self):
"""Parse the results from BuildModel and get the delta delta G's.
A positive ddG means that the mutation(s) are destabilizing, a negative ddG means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
"""
foldx_avg_df = self.df_mutation_ddG_avg
foldx_avg_ddG = {}
results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values()
for r in results:
ident = r['Pdb'].split('_')[-1]
ddG = r['total energy']
ddG_sd = r['SD']
foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
return foldx_avg_ddG
```
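As a rough illustration of how the `FoldX` class above is meant to be driven (a sketch only: the PDB, rotabase and executable paths are placeholders, and the `classify_ddg` helper simply restates the thresholds listed in the `get_ddG_results` docstring):
```python
# Hypothetical usage sketch for the FoldX class above; all paths are placeholders.
fx = FoldX(pdb_path='/path/to/4bxi.pdb',
           project_id='4bxi',
           description='monomer',
           rotabase_path='/path/to/rotabase.txt',
           foldx_exec='/path/to/foldx')

fx.run_repair_pdb()                                # writes <pdb>_Repair.pdb in the project folder
fx.create_mutation_file([
    (('N', 'A', 308, 'S'), ('S', 'A', 320, 'T')),  # mutation group 1
    (('S', 'A', 321, 'R'),),                       # mutation group 2
])
fx.run_build_model(num_runs=5)

def classify_ddg(ddg):
    """Bin a ddG value (kcal/mol) using the thresholds from the get_ddG_results docstring."""
    if ddg < -1.84: return 'highly stabilising'
    if ddg < -0.92: return 'stabilising'
    if ddg < -0.46: return 'slightly stabilising'
    if ddg <= 0.46: return 'neutral'
    if ddg <= 0.92: return 'slightly destabilising'
    if ddg <= 1.84: return 'destabilising'
    return 'highly destabilising'

for group, (ddg, ddg_sd) in fx.get_ddG_results().items():
    print(group, round(ddg, 2), '+/-', round(ddg_sd, 2), classify_ddg(ddg))
```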
#### File: structure/utils/mutatepdb.py
```python
import argparse
import tempfile
from Bio import PDB
from Bio.PDB.Polypeptide import aa1
from Bio.PDB.Polypeptide import aa3
from Bio.PDB.Polypeptide import one_to_three
from ssbio.protein.structure.utils.cleanpdb import CleanPDB
from ssbio.protein.structure.utils.structureio import StructureIO
class MutatePDB(PDB.Select):
"""Selection rules to mutate a PDB file
These rules aim to:
- Mutate a specified residue number to a new amino acid
"""
keep_atom_list = ['N', 'C', 'O', 'CA']
def __init__(self, mutation_list):
"""Initialize the parameters which indicate what mutations will occur
Args:
chain:
residue_number:
mutate_to:
"""
self.mutation_list = [(i[0], int(i[1]), self._standard_resname(i[2])) for i in mutation_list]
self.chains_and_residues = [(i[0], int(i[1])) for i in mutation_list]
def _standard_resname(self, res):
resname3 = res.upper()
if resname3 not in list(aa3) and resname3 not in list(aa1):
# TODO: mutation to selenocysteine (U;SEC) is not working
raise ValueError("Unrecognised residue {}".format(res))
if len(resname3) == 1:
resname3 = one_to_three(resname3)
return resname3
def accept_residue(self, residue):
hetfield, resseq, icode = residue.get_id()
chain = residue.get_parent()
chain_id = chain.get_id()
if (chain_id,resseq) in self.chains_and_residues:
prev_resname = residue.resname
get_index = self.chains_and_residues.index((chain_id,resseq))
residue.resname = self.mutation_list[get_index][2]
print("Mutated {0}.{1}.{2} to {0}.{1}.{3}".format(chain_id, resseq, prev_resname, residue.resname))
return True
def accept_atom(self, atom):
residue = atom.get_parent()
hetfield, resseq, icode = residue.get_id()
chain = residue.get_parent()
chain_id = chain.get_id()
if (chain_id,resseq) in self.chains_and_residues and atom.get_id() not in self.keep_atom_list:
# print("Removing atom {}.{}.{}".format(chain_id, resseq, atom.get_id()))
return False
return True
def parse_mutation_input(instr):
init_split = instr.split(',')
second_split = [tuple(i.split('.')) for i in init_split]
return second_split
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Mutates a PDB file')
p.add_argument('infile', help='PDB file you want to mutate')
p.add_argument('mutations', help='Mutations in the form of Chain1.ResNum1.Mutation1,Chain2.ResNum2.Mutation2. Example: A.4.TYR,B.4.TYR')
p.add_argument('--outsuffix', '-o', default='_mutated', help='Suffix appended to PDB file')
p.add_argument('--clean', '-c', action='store_true', help='Clean PDB and keep only chain with mutation')
args = p.parse_args()
mutations = parse_mutation_input(args.mutations)
my_pdb = StructureIO(args.infile)
if args.clean:
my_cleaner = CleanPDB(keep_chains=[x[0] for x in mutations])
my_clean_pdb = my_pdb.write_pdb(out_suffix='_clean', out_dir=tempfile.gettempdir(), custom_selection=my_cleaner)
my_pdb = StructureIO(my_clean_pdb)
my_mutation = MutatePDB(mutations)
my_mutated_pdb = my_pdb.write_pdb(out_suffix=args.outsuffix, out_dir='mutated_pdbs', custom_selection=my_mutation)
print('Mutated PDB at: {}'.format(my_mutated_pdb))
```
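For reference, a short sketch of what the helpers above produce when used programmatically rather than from the command line (the structure path is a placeholder):
```python
# parse_mutation_input turns the CLI mutation string into (chain, resnum, new_residue) tuples:
muts = parse_mutation_input('A.4.TYR,B.4.Y')
# -> [('A', '4', 'TYR'), ('B', '4', 'Y')]  (residue numbers are still strings here;
#    MutatePDB casts them to int and expands one-letter codes to three-letter codes)

# Hypothetical programmatic use, mirroring the __main__ block above:
my_pdb = StructureIO('/path/to/structure.pdb')
mutator = MutatePDB(muts)
out_pdb = my_pdb.write_pdb(out_suffix='_mutated', out_dir='mutated_pdbs',
                           custom_selection=mutator)
print('Mutated PDB at: {}'.format(out_pdb))
```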
#### File: ssbio/test/test_databases_kegg.py
```python
import pytest
import os.path as op
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from BCBio import GFF
from ssbio.databases.kegg import KEGGProp
@pytest.fixture(scope='class')
def seq_record_loaded_from_file_example(fasta_path):
"""Original SeqRecord loaded from sequence file"""
return SeqIO.read(fasta_path, "fasta")
@pytest.fixture(scope='module')
def kegg_id():
return 'mtu:Rv0417'
@pytest.fixture(scope='module')
def fasta_file():
return 'mtu-Rv0417.faa'
@pytest.fixture(scope='module')
def txt_file():
return 'mtu-Rv0417.kegg'
@pytest.fixture(scope='module')
def fasta_path(test_files_sequences, fasta_file):
return op.join(test_files_sequences, fasta_file)
@pytest.fixture(scope='module')
def txt_path(test_files_sequences, txt_file):
return op.join(test_files_sequences, txt_file)
@pytest.fixture(scope='class')
def keggprop_with_i(kegg_id):
return KEGGProp(id=kegg_id,
seq=None)
@pytest.fixture(scope='class')
def keggprop_with_i_s_m_f(kegg_id, fasta_path, txt_path):
return KEGGProp(id=kegg_id,
seq=None,
fasta_path=fasta_path,
txt_path=txt_path)
class TestKEGGPropWithId():
"""Class to test a bare KEGGProp object with just an ID"""
def test_init(self, keggprop_with_i, kegg_id):
"""Test initializing with just an ID"""
assert keggprop_with_i.id == kegg_id
# If just an ID initialized, everything should be empty
assert keggprop_with_i.seq == None
assert keggprop_with_i.name == '<unknown name>'
assert keggprop_with_i.description == '<unknown description>'
assert len(keggprop_with_i.annotations) == 0
assert len(keggprop_with_i.letter_annotations) == 0
assert len(keggprop_with_i.features) == 0
# Files should not exist and raise errors if accessed
assert keggprop_with_i.sequence_file == None
with pytest.raises(IOError):
keggprop_with_i.sequence_dir
with pytest.raises(IOError):
keggprop_with_i.sequence_path
assert keggprop_with_i.metadata_file == None
with pytest.raises(IOError):
keggprop_with_i.metadata_dir
with pytest.raises(IOError):
keggprop_with_i.metadata_path
assert keggprop_with_i.feature_file == None
with pytest.raises(IOError):
keggprop_with_i.feature_dir
with pytest.raises(IOError):
keggprop_with_i.feature_path
def test_set_sequence_path(self, keggprop_with_i, fasta_path, fasta_file, test_files_sequences):
"""Test setting the seq attribute with a sequence file"""
keggprop_with_i.sequence_path = fasta_path
# Test that file paths are correct
assert keggprop_with_i.sequence_path == fasta_path
assert keggprop_with_i.sequence_file == fasta_file
assert keggprop_with_i.sequence_dir == test_files_sequences
def test_set_feature_path(self, keggprop_with_i, features_loaded_from_file_example,
gff_path, gff_file, test_files_sequences):
"""Test loading a feature file, and that old features are overwritten"""
# Test that the existing feature set is not the same as the new one to be loaded
assert len(keggprop_with_i.features) != len(features_loaded_from_file_example)
keggprop_with_i.feature_path = gff_path
# Test that file paths are correct
assert keggprop_with_i.feature_path == gff_path
assert keggprop_with_i.feature_file == gff_file
assert keggprop_with_i.feature_dir == test_files_sequences
# Test that features cannot be changed
with pytest.raises(ValueError):
keggprop_with_i.features = ['NOFEATURES']
# Test that number of features stored is same
assert len(keggprop_with_i.features) == len(features_loaded_from_file_example)
def test_set_metadata_path(self, keggprop_with_i, txt_path, txt_file, test_files_sequences,
txt_record_loaded_from_file_example):
keggprop_with_i.metadata_path = txt_path
# Unset sequence and feature paths
keggprop_with_i.sequence_path = None
keggprop_with_i.feature_path = None
# Test that file paths are correct
assert keggprop_with_i.metadata_path == txt_path
assert keggprop_with_i.metadata_file == txt_file
assert keggprop_with_i.metadata_dir == test_files_sequences
# Test loaded information
assert keggprop_with_i.description == txt_record_loaded_from_file_example.description
assert keggprop_with_i.bigg == None
for k in ['ecj:JW4347', 'eco:b4384']:
assert k in keggprop_with_i.kegg
for r in ['NP_418801.1', 'WP_000224877.1']:
assert r in keggprop_with_i.refseq
assert keggprop_with_i.kegg == 'mtu:Rv0417'
assert keggprop_with_i.gene_name == 'deoD'
for p in ['1A69', '1ECP', '1K9S', '1OTX', '1OTY', '1OU4', '1OUM', '1OV6', '1OVG',
'3ONV', '3OOE', '3OOH', '3OPV', '3UT6', '4TS3', '4TS9', '4TTA', '4TTI',
'4TTJ', '5I3C', '5IU6']:
assert p in keggprop_with_i.pdbs
for g in ['GO:0004731', 'GO:0005829', 'GO:0006152', 'GO:0006974', 'GO:0016020', 'GO:0019686', 'GO:0042802']:
assert g in keggprop_with_i.go
assert keggprop_with_i.pfam == ['PF01048']
assert keggprop_with_i.ec_number == None ## TODO: parse
assert keggprop_with_i.reviewed == False ## TODO: parse
for u in ['Q2M5T3', 'P09743']:
assert u in keggprop_with_i.alt_keggs
assert keggprop_with_i.taxonomy == 'Escherichia coli (strain K12)'
assert keggprop_with_i.seq_version == 2
assert keggprop_with_i.seq_date == '2007-01-23'
assert keggprop_with_i.entry_version == 106
assert keggprop_with_i.entry_date == '2017-08-30'
# Test that features are loaded directly from this metadata file
assert len(keggprop_with_i.features) == len(txt_record_loaded_from_file_example.features)
class TestKEGGPropWithIdAndFiles():
"""Class to test a bare KEGGProp object with just an ID"""
def test_init(self, keggprop_with_i_s_m_f, kegg_id,
fasta_path, txt_path, gff_path, test_files_sequences,
fasta_file, txt_file, gff_file,
seq_record_loaded_from_file_example,
features_loaded_from_file_example,
txt_record_loaded_from_file_example):
"""Test initializing with assigned files"""
assert keggprop_with_i_s_m_f.id == kegg_id
assert keggprop_with_i_s_m_f.seq == seq_record_loaded_from_file_example.seq
assert keggprop_with_i_s_m_f.name == seq_record_loaded_from_file_example.name
assert keggprop_with_i_s_m_f.description == txt_record_loaded_from_file_example.description
assert keggprop_with_i_s_m_f.annotations == {} # No annotations will be loaded from files
assert keggprop_with_i_s_m_f.letter_annotations == txt_record_loaded_from_file_example.letter_annotations
assert len(keggprop_with_i_s_m_f.features) == len(features_loaded_from_file_example)
# Files should exist
assert keggprop_with_i_s_m_f.sequence_file == fasta_file
assert keggprop_with_i_s_m_f.sequence_dir == test_files_sequences
assert keggprop_with_i_s_m_f.sequence_path == fasta_path
assert keggprop_with_i_s_m_f.metadata_file == txt_file
assert keggprop_with_i_s_m_f.metadata_dir == test_files_sequences
assert keggprop_with_i_s_m_f.metadata_path == txt_path
assert keggprop_with_i_s_m_f.feature_file == gff_file
assert keggprop_with_i_s_m_f.feature_dir == test_files_sequences
assert keggprop_with_i_s_m_f.feature_path == gff_path
```
#### File: ssbio/test/test_protein_seqprop.py
```python
import pytest
import os.path as op
import ssbio.utils
from Bio import SeqIO
from BCBio import GFF
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from ssbio.protein.sequence.seqprop import SeqProp
@pytest.fixture(scope='class')
def seq_str_example():
"""Dummy sequence string to load"""
return "MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF"
@pytest.fixture(scope='class')
def seq_record_example():
"""Dummy SeqRecord to load"""
return SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
IUPAC.protein),
id="YP_025292.1", name="HokC",
description="toxic membrane protein, small",
annotations={'hello':'world'})
@pytest.fixture(scope='class')
def seq_record_loaded_from_file_example(sequence_path):
"""Original SeqRecord loaded from sequence file"""
return SeqIO.read(sequence_path, "fasta")
@pytest.fixture(scope='class')
def features_loaded_from_file_example(feature_path):
"""Original list of features"""
with open(feature_path) as handle:
feats = list(GFF.parse(handle))
return feats[0].features
@pytest.fixture(scope='module')
def sequence_id():
return 'P0ABP8'
@pytest.fixture(scope='module')
def sequence_file():
return 'P0ABP8.fasta'
@pytest.fixture(scope='module')
def metadata_file():
return 'P0ABP8.txt'
@pytest.fixture(scope='module')
def feature_file():
return 'P0ABP8.gff'
@pytest.fixture(scope='module')
def sequence_path(test_files_sequences, sequence_file):
"""Path to the FASTA file"""
return op.join(test_files_sequences, sequence_file)
@pytest.fixture(scope='module')
def metadata_path(test_files_sequences, metadata_file):
"""Path to the metadata file"""
return op.join(test_files_sequences, metadata_file)
@pytest.fixture(scope='module')
def feature_path(test_files_sequences, feature_file):
"""Path to the GFF file"""
return op.join(test_files_sequences, feature_file)
@pytest.fixture(scope='class')
def seqprop_with_i(sequence_id):
"""SeqProp with ID"""
return SeqProp(id=sequence_id, seq=None)
@pytest.fixture(scope='class')
def seqprop_with_i_seq(sequence_id, seq_str_example):
"""SeqProp with ID and sequence string"""
return SeqProp(id=sequence_id, seq=seq_str_example)
@pytest.fixture(scope='class')
def seqprop_with_i_s_m_f(sequence_id, sequence_path, metadata_path, feature_path):
"""SeqProp with ID + sequence file + metadata file + feature file"""
return SeqProp(id=sequence_id,
seq=None,
sequence_path=sequence_path,
metadata_path=metadata_path,
feature_path=feature_path)
class TestSeqPropWithId():
"""Class to test a bare SeqProp object with just an ID"""
def test_init(self, seqprop_with_i, sequence_id):
"""Test initializing with just an ID"""
assert seqprop_with_i.id == sequence_id
# If just an ID initialized, everything should be empty
assert seqprop_with_i.seq == None
assert seqprop_with_i.name == '<unknown name>'
assert seqprop_with_i.description == '<unknown description>'
assert len(seqprop_with_i.annotations) == 0
assert len(seqprop_with_i.letter_annotations) == 0
assert len(seqprop_with_i.features) == 0
# Files should not exist and raise errors if accessed
assert seqprop_with_i.sequence_file == None
with pytest.raises(IOError):
seqprop_with_i.sequence_dir
with pytest.raises(IOError):
seqprop_with_i.sequence_path
assert seqprop_with_i.metadata_file == None
with pytest.raises(IOError):
seqprop_with_i.metadata_dir
with pytest.raises(IOError):
seqprop_with_i.metadata_path
assert seqprop_with_i.feature_file == None
with pytest.raises(IOError):
seqprop_with_i.feature_dir
with pytest.raises(IOError):
seqprop_with_i.feature_path
def test_set_seq_with_str(self, seqprop_with_i, seq_str_example):
"""Test setting the seq attribute with a sequence string"""
seqprop_with_i.seq = seq_str_example
assert type(seqprop_with_i.seq) == Seq
assert str(seqprop_with_i.seq) == seq_str_example
def test_set_seq_with_seqrecord(self, seqprop_with_i, seq_record_example):
"""Test setting the seq attribute with a SeqRecord"""
seqprop_with_i.seq = seq_record_example
assert type(seqprop_with_i.seq) == Seq
assert seqprop_with_i.seq == seq_record_example.seq
assert seqprop_with_i.name == seq_record_example.name
assert seqprop_with_i.description == seq_record_example.description
assert seqprop_with_i.annotations == seq_record_example.annotations
def test_get_emboss_pepstats_failure(self, seqprop_with_i):
"""Test that EMBOSS pepstats does not run when no file has been written"""
with pytest.raises(IOError):
seqprop_with_i.get_emboss_pepstats()
def test_write_fasta_file(self, seqprop_with_i, tmpdir, test_files_outputs, seq_record_example):
"""Test that everything behaves properly when writing the SeqProp to a FASTA file"""
# Add dummy annotations to the SeqProp - check to see if they stay in the SeqProp even after Seq is written
seqprop_with_i.letter_annotations.update({'test_la_key': 'X' * len(seqprop_with_i.seq)})
seqprop_with_i.features.append(SeqFeature(FeatureLocation(1, 3)))
# Write the Seq to a FASTA file
outpath = tmpdir.join('test_seqprop_with_i_write_fasta_file.fasta').strpath
seqprop_with_i.write_fasta_file(outfile=outpath, force_rerun=True)
# Test that the file was written
assert op.exists(outpath)
assert op.getsize(outpath) > 0
# Test that file paths are correct
assert seqprop_with_i.sequence_path == outpath
assert seqprop_with_i.sequence_file == 'test_seqprop_with_i_write_fasta_file.fasta'
assert seqprop_with_i.sequence_dir == tmpdir
# Once a file is written, the annotations should not be lost, even though the sequence now
# loads from the written file as a Seq
assert seqprop_with_i.description == seq_record_example.description
assert seqprop_with_i.annotations == seq_record_example.annotations
assert seqprop_with_i.letter_annotations == {'test_la_key': 'X' * len(seq_record_example.seq)}
assert len(seqprop_with_i.features) == 1
# Test that sequence cannot be changed
with pytest.raises(ValueError):
seqprop_with_i.seq = 'THISWILLNOTBETHESEQ'
assert seqprop_with_i.seq == seq_record_example.seq
def test_get_residue_annotations(self, seqprop_with_i):
"""Test retrieval of residue letter_annotations"""
stuff = seqprop_with_i.get_residue_annotations(start_resnum=1, end_resnum=10)
assert stuff == {'test_la_key': 'XXXXXXXXXX'}
def test_get_biopython_pepstats(self, seqprop_with_i):
"""Test storing Biopython pepstats and consistency of results"""
seqprop_with_i.get_biopython_pepstats()
results = {'instability_index': 27.172727272727272, 'aromaticity': 0.022727272727272728,
'percent_turn_naive': 0.022727272727272728, 'percent_strand_naive': 0.2954545454545454,
'monoisotopic': False, 'isoelectric_point': 8.84234619140625, 'molecular_weight': 4820.8507,
'percent_helix_naive': 0.38636363636363635}
for k, v in results.items():
assert seqprop_with_i.annotations[k] == pytest.approx(v)
def test_get_emboss_pepstats_success(self, seqprop_with_i):
"""Test that EMBOSS pepstats does run when a file has been written"""
if ssbio.utils.program_exists('pepstats'):
seqprop_with_i.get_emboss_pepstats()
assert 'percent_charged' in seqprop_with_i.annotations
def test_set_metadata_path(self, seqprop_with_i, metadata_path, metadata_file, test_files_sequences):
"""Test setting the metadata file"""
seqprop_with_i.metadata_path = metadata_path
# Test that file paths are correct
assert seqprop_with_i.metadata_path == metadata_path
assert seqprop_with_i.metadata_file == metadata_file
assert seqprop_with_i.metadata_dir == test_files_sequences
def test_set_sequence_path(self, seqprop_with_i, seq_record_loaded_from_file_example,
sequence_path, sequence_file, test_files_sequences):
"""Test setting the seq attribute with a sequence file, and that seq is now loaded from file"""
seqprop_with_i.sequence_path = sequence_path
# Test that file paths are correct
assert seqprop_with_i.sequence_path == sequence_path
assert seqprop_with_i.sequence_file == sequence_file
assert seqprop_with_i.sequence_dir == test_files_sequences
# Test that the loaded sequence is the same as the original sequence
assert seqprop_with_i.seq == seq_record_loaded_from_file_example.seq
# Test that sequence cannot be changed
with pytest.raises(ValueError):
seqprop_with_i.seq = 'THISWILLNOTBETHESEQ'
def test_set_features(self, seqprop_with_i, features_loaded_from_file_example):
"""Test setting the features attribute in memory"""
seqprop_with_i.features = features_loaded_from_file_example[:5]
assert seqprop_with_i.features == features_loaded_from_file_example[:5]
assert seqprop_with_i.feature_file == None
def test_write_gff_file(self, seqprop_with_i, tmpdir):
"""Test writing the features, and that features are now loaded from a file"""
outpath = tmpdir.join('test_seqprop_with_i_write_gff_file.gff').strpath
seqprop_with_i.write_gff_file(outfile=outpath, force_rerun=True)
# Test that the file was written
assert op.exists(outpath)
assert op.getsize(outpath) > 0
# Test that file paths are correct
assert seqprop_with_i.feature_path == outpath
assert seqprop_with_i.feature_file == 'test_seqprop_with_i_write_gff_file.gff'
assert seqprop_with_i.feature_dir == tmpdir
# Test that features cannot be changed
with pytest.raises(ValueError):
seqprop_with_i.features = ['NOFEATURES']
def test_set_feature_path(self, seqprop_with_i, features_loaded_from_file_example,
feature_path, feature_file, test_files_sequences):
"""Test loading a feature file, and that old features are overwritten"""
# Test that the existing feature set is not the same as the new one to be loaded
assert len(seqprop_with_i.features) != len(features_loaded_from_file_example)
seqprop_with_i.feature_path = feature_path
# Test that file paths are correct
assert seqprop_with_i.feature_path == feature_path
assert seqprop_with_i.feature_file == feature_file
assert seqprop_with_i.feature_dir == test_files_sequences
# Test that features cannot be changed
with pytest.raises(ValueError):
seqprop_with_i.features = ['NOFEATURES']
# Test that number of features stored is same
assert len(seqprop_with_i.features) == len(features_loaded_from_file_example)
class TestSeqPropWithIdAndSeq():
"""Class to test a bare SeqProp object with just an ID"""
def test_init(self, seqprop_with_i_seq, sequence_id, seq_str_example):
"""Test initializing with an ID and sequence string"""
assert seqprop_with_i_seq.id == sequence_id
assert str(seqprop_with_i_seq.seq) == seq_str_example
assert type(seqprop_with_i_seq.seq) == Seq
# If just a sequence string initialized, everything should be empty
assert seqprop_with_i_seq.name == '<unknown name>'
assert seqprop_with_i_seq.description == '<unknown description>'
assert len(seqprop_with_i_seq.annotations) == 0
assert len(seqprop_with_i_seq.letter_annotations) == 0
assert len(seqprop_with_i_seq.features) == 0
# Files should not exist and raise errors if accessed
assert seqprop_with_i_seq.sequence_file == None
with pytest.raises(IOError):
seqprop_with_i_seq.sequence_dir
with pytest.raises(IOError):
seqprop_with_i_seq.sequence_path
assert seqprop_with_i_seq.metadata_file == None
with pytest.raises(IOError):
seqprop_with_i_seq.metadata_dir
with pytest.raises(IOError):
seqprop_with_i_seq.metadata_path
assert seqprop_with_i_seq.feature_file == None
with pytest.raises(IOError):
seqprop_with_i_seq.feature_dir
with pytest.raises(IOError):
seqprop_with_i_seq.feature_path
class TestSeqPropWithIdAndFiles():
"""Class to test a SeqProp object assigned files"""
def test_init(self, seqprop_with_i_s_m_f, sequence_id,
sequence_path, metadata_path, feature_path, test_files_sequences,
sequence_file, metadata_file, feature_file,
seq_record_loaded_from_file_example, features_loaded_from_file_example):
"""Test initializing with assigned files"""
assert seqprop_with_i_s_m_f.id == sequence_id
assert seqprop_with_i_s_m_f.seq == seq_record_loaded_from_file_example.seq
assert seqprop_with_i_s_m_f.name == seq_record_loaded_from_file_example.name
assert seqprop_with_i_s_m_f.description == seq_record_loaded_from_file_example.description
assert seqprop_with_i_s_m_f.annotations == seq_record_loaded_from_file_example.annotations
assert seqprop_with_i_s_m_f.letter_annotations == seq_record_loaded_from_file_example.letter_annotations
assert len(seqprop_with_i_s_m_f.features) == len(features_loaded_from_file_example)
# Files should exist
assert seqprop_with_i_s_m_f.sequence_file == sequence_file
assert seqprop_with_i_s_m_f.sequence_dir == test_files_sequences
assert seqprop_with_i_s_m_f.sequence_path == sequence_path
assert seqprop_with_i_s_m_f.metadata_file == metadata_file
assert seqprop_with_i_s_m_f.metadata_dir == test_files_sequences
assert seqprop_with_i_s_m_f.metadata_path == metadata_path
assert seqprop_with_i_s_m_f.feature_file == feature_file
assert seqprop_with_i_s_m_f.feature_dir == test_files_sequences
assert seqprop_with_i_s_m_f.feature_path == feature_path
def test_change_root_dir(self, seqprop_with_i_s_m_f, tmpdir):
# Test changing the sequence, metadata and feature dirs
t = tmpdir.strpath
seqprop_with_i_s_m_f.sequence_dir = t
seqprop_with_i_s_m_f.metadata_dir = t
seqprop_with_i_s_m_f.feature_dir = t
assert seqprop_with_i_s_m_f.sequence_dir == t
with pytest.raises(OSError): # Sequence path should throw an error since the file was never moved there
seqprop_with_i_s_m_f.sequence_path
assert seqprop_with_i_s_m_f.metadata_dir == t
with pytest.raises(OSError): # Metadata path should throw an error since the file was never moved there
seqprop_with_i_s_m_f.metadata_path
assert seqprop_with_i_s_m_f.feature_dir == t
with pytest.raises(OSError): # Feature path should throw an error since the file was never moved there
seqprop_with_i_s_m_f.feature_path
```
#### File: ssbio/test/test_protein_structure_properties_fatcat.py
```python
import unittest
import os.path as op
import ssbio.protein.structure.properties.fatcat as fatcat
class TestFATCAT(unittest.TestCase):
"""Unit tests for FATCAT"""
def test_run_fatcat_all_by_all(self):
OUT_DIR = op.join('test_files', 'out')
FATCAT_SH = '/home/nathan/software/fatcat/runFATCAT.sh'
structs = [op.join('test_files', 'structures', '12as-A_clean.pdb'),
op.join('test_files', 'structures', '1af6-A_clean.pdb'),
op.join('test_files', 'structures', '1a9x-A_clean.pdb')]
tm_scores = fatcat.run_fatcat_all_by_all(structs, fatcat_sh=FATCAT_SH, outdir=OUT_DIR)
```
#### File: ssbio/viz/nglview.py
```python
import logging
import ssbio.utils
import seaborn as sns
log = logging.getLogger(__name__)
def add_residues_highlight_to_nglview(view, structure_resnums, chain, res_color='red'):
"""Add a residue number or numbers to an NGLWidget view object.
Args:
view (NGLWidget): NGLWidget view object
structure_resnums (int, list): Residue number(s) to highlight, structure numbering
chain (str, list): Chain ID or IDs of which residues are a part of. If not provided, all chains in the
mapped_chains attribute will be used. If that is also empty, an exception is raised.
res_color (str): Color to highlight residues with
"""
chain = ssbio.utils.force_list(chain)
if isinstance(structure_resnums, list):
structure_resnums = list(set(structure_resnums))
elif isinstance(structure_resnums, int):
structure_resnums = ssbio.utils.force_list(structure_resnums)
else:
raise ValueError('Input must either be a residue number or a list of residue numbers')
to_show_chains = '( '
for c in chain:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
to_show_res = '( '
for m in structure_resnums:
to_show_res += '{} or '.format(m)
to_show_res = to_show_res.strip(' or ')
to_show_res += ' )'
log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res))
view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=res_color)
def add_scaled_residues_highlight_to_nglview(view, structure_resnums, chain_id, color='red',
unique_colors=False, opacity_range=(0.5,1), scale_range=(.7, 10)):
"""Add a list of residue numbers (which may contain repeating residues) to a view, or add a dictionary of
residue numbers to counts. Size and opacity of added residues are scaled by counts.
Args:
view (NGLWidget): NGLWidget view object
structure_resnums (int, list, dict): Residue number(s) to highlight, or a dictionary of residue number to
frequency count
chain_id (str, list): Chain ID or IDs of which residues are a part of.
color (str): Color to highlight residues with
unique_colors (bool): If each mutation should be colored uniquely (will override color argument)
opacity_range (tuple): Min/max opacity values (residues that have higher frequency counts will be opaque)
scale_range (tuple): Min/max size values (residues that have higher frequency counts will be bigger)
"""
# TODO: likely to move these functions to a separate nglview/utils folder since they are not coupled to the structure
# TODO: add color by letter_annotations!
chain_id = ssbio.utils.force_list(chain_id)
if isinstance(structure_resnums, dict):
opacity_dict = ssbio.utils.scale_calculator(opacity_range[0], structure_resnums, rescale=opacity_range)
scale_dict = ssbio.utils.scale_calculator(scale_range[0], structure_resnums, rescale=scale_range)
else:
opacity_dict = {x: max(opacity_range) for x in ssbio.utils.force_list(structure_resnums)}
scale_dict = {x: max(scale_range) for x in ssbio.utils.force_list(structure_resnums)}
if isinstance(structure_resnums, list):
structure_resnums = list(set(structure_resnums))
elif isinstance(structure_resnums, dict):
structure_resnums = list(structure_resnums.keys())
elif isinstance(structure_resnums, int):
structure_resnums = ssbio.utils.force_list(structure_resnums)
else:
raise ValueError('Input must either be a list of residue numbers or a dictionary of residue numbers '
'and their frequency.')
colors = sns.color_palette("hls", len(structure_resnums)).as_hex()
to_show_chains = '( '
for c in chain_id:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
for i, x in enumerate(structure_resnums):
if isinstance(x, tuple):
to_show_res = '( '
for mut in x:
to_show_res += '{} or '.format(mut)
to_show_res = to_show_res.strip(' or ')
to_show_res += ' )'
else:
to_show_res = x
log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res))
if unique_colors:
view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res),
color=colors[i], opacity=opacity_dict[x], scale=scale_dict[x])
else:
view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res),
color=color, opacity=opacity_dict[x], scale=scale_dict[x])
def add_features_to_nglview(view, seqprop, structprop, chain_id, use_representatives=False):
"""Add select features from the selected SeqProp object to an NGLWidget view object.
Currently parsing for:
* Single residue features (ie. metal binding sites)
* Disulfide bonds
Args:
view (NGLWidget): NGLWidget view object
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
"""
# Parse and store chain seq if not already stored
if not structprop.chains.has_id(chain_id):
structprop.parse_structure()
if not structprop.chains.has_id(chain_id):
raise ValueError('Chain {} not present in structure {}'.format(chain_id, structprop.id))
if not seqprop.features:
log.warning('{}: no stored features'.format(seqprop.id))
# Loop through any stored features
for f in seqprop.features:
# Display disulfide bonds
if f.type.lower() == 'disulfide bond':
# TODO: double check if .start or .start + 1
disulfide = map_seqprop_resnums_to_structprop_resnums(resnums=[f.location.start + 1, f.location.end],
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=False)
to_view = [str(x)+'.CA' for x in list(disulfide.values())]
view.add_distance(atom_pair=[to_view], color='black')
log.info('Disulfide bridge at residues {} & {}'.format(f.location.start + 1, f.location.end))
# Display DNA-binding regions
if f.type.lower() == 'dna-binding region' or f.type.lower() == 'nucleotide phosphate-binding region':
impres = map_seqprop_resnums_to_structprop_resnums(resnums=[f.location.start + 1,
f.location.end],
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
# TODO: need to check if f.location.start was mapped and if not, try incrementing. or input the list
# of resnums, not just the start and end
if f.location.start + 1 in impres and f.location.end in impres:
mapped_start = impres[f.location.start + 1]
mapped_end = impres[f.location.end]
view.add_ball_and_stick(selection=':{} and ( {}-{} )'.format(chain_id,
mapped_start,
mapped_end), color='black')
log.info('{} at sequence region {}-{}, structure residues {}-{}'.format(f.type,
f.location.start,
f.location.end,
mapped_start,
mapped_end))
# Display other single residues
if f.location.end - 1 == f.location.start:
if f.type.lower() == 'sequence variant' or f.type.lower() == 'mutagenesis site':
continue
impres = map_seqprop_resnums_to_structprop_resnums(resnums=f.location.end,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
if f.location.end in impres:
impres_mapped = impres[f.location.end]
view.add_ball_and_stick(selection=str(impres_mapped), color='black')
view.add_label(selection=':{} and {}'.format(chain_id, impres_mapped), label_type='res', color='black')
log.info('{} at sequence residue {}, structure residue {}'.format(f.type, f.location.end, impres_mapped))
def add_mutations_to_nglview(self, view, alignment_type='seqalign', alignment_ids=None,
seqprop=None, structprop=None, chain_id=None, use_representatives=False,
grouped=False, color='red', unique_colors=True,
opacity_range=(0.8,1), scale_range=(1,5)):
"""Add representations to an NGLWidget view object for residues that are mutated in the
``sequence_alignments`` attribute.
Args:
view (NGLWidget): NGLWidget view object
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
alignment_ids (str, list): Specified alignment ID or IDs to use
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
grouped (bool): If groups of mutations should be colored and sized together
color (str): Color of the mutations (overridden if unique_colors=True)
unique_colors (bool): If each mutation/mutation group should be colored uniquely
opacity_range (tuple): Min/max opacity values (mutations that show up more will be opaque)
scale_range (tuple): Min/max size values (mutations that show up more will be bigger)
"""
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError('Overriding sequence, structure, and chain IDs with representatives. '
'Set use_representatives to False if custom IDs are to be used.')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives '
'to True.')
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
log.debug('Using sequence: {}, structure: {}, chain: {}'.format(seqprop.id, structprop.id, chain_id))
# Get the summary of mutations
single, fingerprint = self.sequence_mutation_summary(alignment_type=alignment_type, alignment_ids=alignment_ids)
# Map residues from sequence to structure
if not grouped:
single_lens = {k: len(v) for k, v in single.items()}
single_map_to_structure = {}
for k, v in single_lens.items():
resnum = int(k[1])
resnum_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=resnum,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
if resnum not in resnum_to_structure:
log.warning('{}: residue is not available in structure {}'.format(resnum, structprop.id))
continue
new_key = resnum_to_structure[resnum]
single_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=single_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range)
else:
log.warning('Viewing mutation groups is currently in beta -- groups may overwrite each other')
fingerprint_lens = {k: len(v) for k, v in fingerprint.items()}
fingerprint_map_to_structure = {}
for k, v in fingerprint_lens.items():
k_list = [int(x[1]) for x in k]
resnums_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=k_list,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
new_key = tuple(y for y in resnums_to_structure.values())
fingerprint_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=fingerprint_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range)
def add_fingerprint_to_nglview(self, view, fingerprint,
seqprop=None, structprop=None, chain_id=None, use_representatives=False,
color='red', opacity_range=(0.8, 1), scale_range=(1, 5)):
"""Add representations to an NGLWidget view object for residues that are mutated in the
``sequence_alignments`` attribute.
Args:
view (NGLWidget): NGLWidget view object
fingerprint (dict): Single mutation group from the ``sequence_mutation_summary`` function
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
color (str): Color of the mutations (overridden if unique_colors=True)
opacity_range (tuple): Min/max opacity values (mutations that show up more will be opaque)
scale_range (tuple): Min/max size values (mutations that show up more will be bigger)
"""
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError('Overriding sequence, structure, and chain IDs with representatives. '
'Set use_representatives to False if custom IDs are to be used.')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives '
'to True.')
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
log.debug('Using sequence: {}, structure: {}, chain: {}'.format(seqprop.id, structprop.id, chain_id))
fingerprint_lens = {k: len(v) for k, v in fingerprint.items()}
fingerprint_map_to_structure = {}
for k, v in fingerprint_lens.items():
k_list = [int(x[1]) for x in k]
resnums_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=k_list,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
new_key = tuple(y for y in resnums_to_structure.values())
fingerprint_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=fingerprint_map_to_structure,
chain=chain_id,
color=color,
opacity_range=opacity_range,
scale_range=scale_range)
``` |
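The three standalone helpers above all funnel into the same NGL selection syntax before calling `view.add_ball_and_stick()`. The sketch below is not part of ssbio; it is a minimal, self-contained illustration of that selection-string format, with invented chain and residue values.
```python
# Illustrative sketch only -- mirrors the selection strings assembled by the helpers above,
# e.g. '( :A or :B ) and not hydrogen and ( 10 or 42 )'.
def build_selection(chains, resnums):
    chain_sel = '( ' + ' or '.join(':{}'.format(c) for c in chains) + ' )'
    res_sel = '( ' + ' or '.join(str(r) for r in resnums) + ' )'
    return '{} and not hydrogen and {}'.format(chain_sel, res_sel)

print(build_selection(['A', 'B'], [10, 42]))
# ( :A or :B ) and not hydrogen and ( 10 or 42 )
```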
{
"source": "JoshuaMeza/SupportBot",
"score": 2
} |
#### File: Controller/Commands/commandLogs.py
```python
import discord
from discord.ext import commands
from Model.model import *
class Logs(commands.Cog):
def __init__(self, client, model: Model):
self.client = client
self.model = model
def __verRole(self, roles):
flag = False
for role in roles:
if role.name == self.model.getLockRoleName():
flag = True
break
return flag
@commands.Cog.listener()
async def on_ready(self):
print('Command logs ready')
@commands.command(name='logs', aliases=['LOGS'], ignore_extra=False)
async def command_logs(self, ctx, arg: str):
arg = arg.upper()
desc = None
if arg == 'ADD' or arg == 'DEL':
# Role required
if self.__verRole(ctx.author.roles):
# Add
if arg == 'ADD':
if self.model.addSubjectToGuild(ctx.guild.id, ctx.channel.category.name):
desc = 'Category successfully added as a subject.'
else:
desc = 'Category failed to be added as a subject.'
# Delete
else:
if self.model.removeSubjectOfGuild(ctx.guild.id, ctx.channel.category.name):
desc = 'Subject successfully deleted.'
else:
desc = 'Subject failed to be deleted.'
else:
raise commands.MissingRole(self.model.getLockRoleName())
elif arg == 'VFY':
# Verify
if self.model.containsSubject(ctx.guild.id, ctx.channel.category.name):
desc = 'This category has been registered as a subject.'
else:
desc = 'This category hasn\'t been registered as a subject yet.'
else:
raise commands.BadArgument()
await ctx.send(embed=discord.Embed(
title='Logs command',
description=desc,
colour=self.model.getDefaultColor()
))
```
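The bot entry point that loads this cog is not part of this excerpt. Presumably the cog is registered on a `commands.Bot` roughly as in the hypothetical sketch below (discord.py 1.x style; the import path and token are placeholders).
```python
# Hypothetical wiring sketch -- the real bot setup file is not shown here.
from discord.ext import commands
from Model.model import Model
from Controller.Commands.commandLogs import Logs  # assumed module path

model = Model()
bot = commands.Bot(command_prefix=model.getPrefix())  # Model.getPrefix() returns '!'

bot.add_cog(Logs(bot, model))
# bot.run('YOUR_DISCORD_BOT_TOKEN')  # placeholder token
```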
#### File: Controller/Commands/commandUser.py
```python
import discord
from discord.ext import commands
from Model.model import *
class User(commands.Cog):
def __init__(self, client, model: Model):
self.client = client
self.model = model
def __verRole(self, roles):
flag = False
for role in roles:
if role.name == self.model.getLockRoleName():
flag = True
break
return flag
@commands.Cog.listener()
async def on_ready(self):
print('Command user ready')
@commands.command(name='user', aliases=['USER'], ignore_extra=False)
async def command_user(self, ctx, action: str, target: discord.Member):
action = action.upper()
desc = None
def check(m):
return m.author == ctx.author
if action == 'ADD':
if self.model.addStudentToSubjectFromGuild(ctx.guild.id, ctx.channel.category.name, target.id):
if self.model.getStudentFromSubject(ctx.guild.id, ctx.channel.category.name, target.id, True).split(',')[0] == 'unknown':
self.model.editStudentNamesFromGuild(ctx.guild.id, target.name, target.id)
desc = 'Student successfully added to this subject.'
else:
desc = 'Student failed to be added to this subject.'
elif action == 'MOD':
options = ['NAME','LNAME','ID','EMAIL','NUMBER']
option = None
data = None
# Ask for option
embed = discord.Embed(
title='User command',
description='Send a message with an option:\n'
'[name/lname/id/email/number]',
colour=self.model.getDefaultColor()
)
embed.set_footer(text='Timeout in 20 seconds!')
await ctx.send(embed=embed)
try:
msg = await self.client.wait_for('message', timeout=20.0, check=check)
option = msg.content.upper()
if not option in options: raise Exception()
except:
# Timeout
raise commands.BadArgument()
# Ask for content
embed = discord.Embed(
title='User command',
description='Send a message with the new information.',
colour=self.model.getDefaultColor()
)
embed.set_footer(text='Timeout in 30 seconds!')
await ctx.send(embed=embed)
try:
msg = await self.client.wait_for('message', timeout=30.0, check=check)
data = msg.content
except:
# Timeout
raise commands.BadArgument()
# Output
if option == options[0]:
# Name
if self.model.editStudentNamesFromGuild(ctx.guild.id, data, target.id):
desc = 'Name successfully modified.'
else:
desc = 'Name failed to be modified.'
elif option == options[1]:
# Last name
if self.model.editStudentLastNamesFromGuild(ctx.guild.id, data, target.id):
desc = 'Last name successfully modified.'
else:
desc = 'Last name failed to be modified.'
elif option == options[2]:
# Id
if not data.isdigit():
raise commands.BadArgument()
else:
data = int(data)
if self.model.editStudentCollegeIdFromGuild(ctx.guild.id, data, target.id):
desc = 'Id successfully modified.'
else:
desc = 'Id failed to be modified.'
elif option == options[3]:
# Email
if self.model.editStudentEmailFromGuild(ctx.guild.id, data, target.id):
desc = 'Email successfully modified.'
else:
desc = 'Email failed to be modified.'
elif option == options[4]:
# Number
if not data.isdigit():
raise commands.BadArgument()
else:
data = int(data)
if self.model.editStudentPhoneNumberFromGuild(ctx.guild.id, data, target.id):
desc = 'Phone number successfully modified.'
else:
desc = 'Phone number failed to be modified.'
else:
raise commands.BadArgument()
elif action == 'DEL':
if self.__verRole(ctx.author.roles):
if self.model.removeStudentOfSubjectFromGuild(ctx.guild.id, ctx.channel.category.name, target.id):
desc = 'Student successfully deleted from this subject.'
else:
desc = 'Student failed to be deleted from this subject.'
else:
raise commands.MissingRole(self.model.getLockRoleName())
else:
# Unknown action
raise commands.BadArgument()
await ctx.send(embed=discord.Embed(
title='User command',
description=desc,
colour=self.model.getDefaultColor()
))
```
#### File: SupportBot/Model/guildNode.py
```python
from Model.subject import *
class GuildNode:
def __init__(self, guildId: int):
self.guildId = guildId
self.subjects = []
self.noInfoAnswer = 'There\'s no information related'
# Getters and setters
def getGuildId(self) -> int:
return self.guildId
def setGuildId(self, guildId: int):
self.guildId = guildId
# Subjects management
def __verifySubjectExistence(self, subjectName: str) -> bool:
exists = False
for subject in self.subjects:
if subject.getName() == subjectName:
exists = True
break
return exists
def addSubject(self, subjectName: str) -> bool:
flag = True
try:
if not self.__verifySubjectExistence(subjectName):
s = Subject(subjectName)
self.subjects.append(s)
else:
flag = False
except:
flag = False
return flag
def removeSubject(self, subjectName: str) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
self.subjects.remove(subject)
flag = True
break
return flag
def containsSubject(self, subjectName: str) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = True
break
return flag
# Students management
def __studentSearcher(self, discordId) -> dict:
output = {}
for subject in self.subjects:
data = subject.getStudentData(discordId)
if data != {}:
# An student with that id was found
output = data
break
return output
def addStudent(self, subjectName: str, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
# What if the student actually exists?
data = self.__studentSearcher(discordId)
if data == {}:
# New student
flag = subject.addStudent(discordId)
else:
# Existent student was found
flag = subject.addStudentAndGenerate(data)
break
return flag
def removeStudent(self, subjectName: str, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = subject.removeStudent(discordId)
break
return flag
def editStudentNames(self, names: str, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.editStudentNames(names, discordId):
flag = True
return flag
def editStudentLastNames(self, lastNames: str, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.editStudentLastNames(lastNames, discordId):
flag = True
return flag
def editStudentCollegeId(self, collegeId: int, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.editStudentCollegeId(collegeId, discordId):
flag = True
return flag
def editStudentEmail(self, email: str, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.editStudentEmail(email, discordId):
flag = True
return flag
def editStudentPhoneNumber(self, phoneNum: int, discordId: int) -> bool:
flag = False
for subject in self.subjects:
if subject.editStudentPhoneNumber(phoneNum, discordId):
flag = True
return flag
# Links management
def addLink(self, subjectName: str, name: str, url: str) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = subject.addLink(name, url)
break
return flag
def removeLink(self, subjectName: str, index: int) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = subject.removeLink(index)
break
return flag
# Notes management
def addNote(self, subjectName: str, name: str, text: str) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = subject.addNote(name, text)
break
return flag
def removeNote(self, subjectName: str, index: int) -> bool:
flag = False
for subject in self.subjects:
if subject.getName() == subjectName:
flag = subject.removeNote(index)
break
return flag
# Printing data
def getAllStudents(self) -> str:
output = ''
for subject in self.subjects:
output = output + subject.getStudents() + '\n'
if output != '':
output = output[:-1]
else:
output = self.noInfoAnswer
return output
def getStudentsFromSubject(self, subjectName: str) -> str:
output = ''
for subject in self.subjects:
if subject.getName() == subjectName:
output = subject.getStudents()
break
if output == '':
output = self.noInfoAnswer
return output
def getStudentFromSubject(self, subjectName: str, discordId: int, CSV: bool) -> str:
output = ''
for subject in self.subjects:
if subject.getName() == subjectName:
output = subject.getStudent(discordId, CSV)
break
if output == '':
output = self.noInfoAnswer
return output
def getLinksFromSubject(self, subjectName: str) -> str:
output = ''
for subject in self.subjects:
if subject.getName() == subjectName:
output = subject.getLinks()
break
if output == '':
output = self.noInfoAnswer
return output
def getNotesFromSubject(self, subjectName: str) -> str:
output = ''
for subject in self.subjects:
if subject.getName() == subjectName:
output = subject.getNotes()
break
if output == '':
output = self.noInfoAnswer
return output
# Dictionary
def toDict(self) -> dict:
listDict = []
for subject in self.subjects:
listDict.append(subject.toDict())
return {
"GuildId" : self.guildId,
"Subjects" : listDict
}
```
#### File: SupportBot/Model/model.py
```python
import discord
from Model.fileHandler import *
from Model.guildNode import *
class Model:
def __init__(self):
self.errColor = discord.Colour.from_rgb(225, 7, 0)
self.defColor = discord.Colour.from_rgb(0, 239, 134)
self.prefix = '!'
self.fileName = 'memory.json'
self.noInfoAnswer = 'There\'s no information related'
self.lockRoleName = 'BotManager'
self.guilds = []
self.__buildGuilds()
# Getters and setters
def getPrefix(self) -> str:
return self.prefix
def getErrorColor(self) -> discord.Colour:
return self.errColor
def getDefaultColor(self) -> discord.Colour:
return self.defColor
def getLockRoleName(self) -> str:
return self.lockRoleName
# File management
def __readData(self) -> dict:
fileHandler = FileHandler()
return fileHandler.readJSON(self.fileName)
def __writeData(self, info: dict) -> bool:
fileHandler = FileHandler()
return fileHandler.writeJSON(self.fileName, info)
def __updatePersistentData(self) -> bool:
listDict = []
for guild in self.guilds:
listDict.append(guild.toDict())
return self.__writeData({"Guilds" : listDict})
# Guilds management
def __buildGuilds(self):
info = self.__readData()
index = 0
try:
for guild in info['Guilds']:
# Move through all guilds
if self.addGuild(guild['GuildId'], False):
# Avoid duplicates
for subject in guild['Subjects']:
# Move through all subjects
subjectName = subject['SubjectName']
if self.guilds[index].addSubject(subjectName):
# Avoid duplicates
# Start adding the subject data
for student in subject['Members']:
# Move through all students
discordId = student['DiscordId']
if self.guilds[index].addStudent(subjectName, discordId):
# Adding additional information of students
if student['Names'] is not None:
self.guilds[index].editStudentNames(student['Names'], discordId)
if student['LastNames'] is not None:
self.guilds[index].editStudentLastNames(student['LastNames'], discordId)
if student['CollegeId'] is not None:
self.guilds[index].editStudentCollegeId(student['CollegeId'], discordId)
if student['Email'] is not None:
self.guilds[index].editStudentEmail(student['Email'], discordId)
if student['PhoneNumber'] is not None:
self.guilds[index].editStudentPhoneNumber(student['PhoneNumber'], discordId)
for link in subject['Links']:
# Move through all links
self.guilds[index].addLink(subjectName, link['Name'], link['URL'])
for note in subject['Notes']:
# Move through all notes
self.guilds[index].addNote(subjectName, note['Name'], note['Text'])
index += 1
except KeyError:
print('Memory loading problem!')
def __verifyGuildExistence(self, guildId) -> bool:
exists = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
exists = True
break
return exists
def addGuild(self, guildId: int, overwrite = True) -> bool:
flag = True
try:
if not self.__verifyGuildExistence(guildId):
g = GuildNode(guildId)
self.guilds.append(g)
else:
flag = False
except:
flag = False
if flag and overwrite:
self.__updatePersistentData()
return flag
def removeGuild(self, guildId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
self.guilds.remove(guild)
flag = True
break
if flag:
self.__updatePersistentData()
return flag
# Subjects management
def addSubjectToGuild(self, guildId: int, subjectName: str) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.addSubject(subjectName)
break
if flag:
self.__updatePersistentData()
return flag
def removeSubjectOfGuild(self, guildId: int, subjectName: str) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.removeSubject(subjectName)
break
if flag:
self.__updatePersistentData()
return flag
def containsSubject(self, guildId: int, subjectName: str) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.containsSubject(subjectName)
break
return flag
# Students management
def addStudentToSubjectFromGuild(self, guildId: int, subjectName: str, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.addStudent(subjectName, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def removeStudentOfSubjectFromGuild(self, guildId: int, subjectName: str, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.removeStudent(subjectName, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def editStudentNamesFromGuild(self, guildId: int, names: str, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.editStudentNames(names, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def editStudentLastNamesFromGuild(self, guildId: int, lastNames: str, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.editStudentLastNames(lastNames, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def editStudentCollegeIdFromGuild(self, guildId: int, collegeId: int, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.editStudentCollegeId(collegeId, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def editStudentEmailFromGuild(self, guildId: int, email: str, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.editStudentEmail(email, discordId)
break
if flag:
self.__updatePersistentData()
return flag
def editStudentPhoneNumberFromGuild(self, guildId: int, phoneNum: int, discordId: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.editStudentPhoneNumber(phoneNum, discordId)
break
if flag:
self.__updatePersistentData()
return flag
# Links management
def addLinkToSubjectFromGuild(self, guildId: int, subjectName: str, name: str, url: str) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.addLink(subjectName, name, url)
break
if flag:
self.__updatePersistentData()
return flag
def removeLinkOfSubjectFromGuild(self, guildId: int, subjectName: str, index: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.removeLink(subjectName, index)
break
if flag:
self.__updatePersistentData()
return flag
# Notes management
def addNoteToSubjectFromGuild(self, guildId: int, subjectName: str, name: str, text: str) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.addNote(subjectName, name, text)
break
if flag:
self.__updatePersistentData()
return flag
def removeNoteOfSubjectFromGuild(self, guildId: int, subjectName: str, index: int) -> bool:
flag = False
for guild in self.guilds:
if guild.getGuildId() == guildId:
flag = guild.removeNote(subjectName, index)
break
if flag:
self.__updatePersistentData()
return flag
# Printing data
def getStudentsFromSubject(self, guildId: int, subjectName: str) -> str:
output = ''
for guild in self.guilds:
if guild.getGuildId() == guildId:
output = guild.getStudentsFromSubject(subjectName)
break
if output == '':
output = self.noInfoAnswer
return output
def getStudentFromSubject(self, guildId: int, subjectName: str, discordId: int, CSV: bool) -> str:
output = ''
for guild in self.guilds:
if guild.getGuildId() == guildId:
output = guild.getStudentFromSubject(subjectName, discordId, CSV)
break
if output == '':
output = self.noInfoAnswer
return output
def getLinksFromSubject(self, guildId: int, subjectName: str) -> str:
output = ''
for guild in self.guilds:
if guild.getGuildId() == guildId:
output = guild.getLinksFromSubject(subjectName)
break
if output == '':
output = self.noInfoAnswer
return output
def getNotesFromSubject(self, guildId: int, subjectName: str) -> str:
output = ''
for guild in self.guilds:
if guild.getGuildId() == guildId:
output = guild.getNotesFromSubject(subjectName)
break
if output == '':
output = self.noInfoAnswer
return output
```
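The `Subject` and `FileHandler` classes are not included in this excerpt, but the keys read by `__buildGuilds()` above imply that `memory.json` has roughly the following shape. Every value in the sketch below is made up.
```python
# Illustrative only: structure inferred from __buildGuilds(); all values are fabricated.
example_memory = {
    "Guilds": [
        {
            "GuildId": 123456789012345678,
            "Subjects": [
                {
                    "SubjectName": "Calculus",
                    "Members": [
                        {
                            "DiscordId": 111,
                            "Names": "Ada",
                            "LastNames": "Lovelace",
                            "CollegeId": 42,
                            "Email": "[email protected]",
                            "PhoneNumber": 5551234,
                        }
                    ],
                    "Links": [{"Name": "Syllabus", "URL": "https://example.com/syllabus"}],
                    "Notes": [{"Name": "Reminder", "Text": "Quiz on Friday"}],
                }
            ],
        }
    ]
}
```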
#### File: SupportBot/Model/note.py
```python
class Note:
def __init__(self, index: int, name: str, text: str):
self.index = index
self.name = name
self.text = text
# Getters and setters
def getIndex(self) -> int:
return self.index
def getName(self) -> str:
return self.name
def getText(self) -> str:
return self.text
def setIndex(self, index: int):
self.index = index
def setName(self, name: str):
self.name = name
def setText(self, text: str):
self.text = text
# Printing data
def toString(self) -> str:
return f'📝 Id: {self.index}, Name: {self.name},\nNote: {self.text}'
# Dictionary
def toDict(self) -> dict:
return {
"Name" : self.name,
"Text" : self.text
}
``` |
{
"source": "JoshuaMichaelKing/Stock-SentimentAnalysis",
"score": 3
} |
#### File: JoshuaMichaelKing/Stock-SentimentAnalysis/reviews_preprocessing.py
```python
from __future__ import print_function
from __future__ import division
import os, sys, codecs, logging
import jieba
import random
from math import log
import datetime as dt
import iohelper
import stocktime as st
reload(sys)
sys.setdefaultencoding('utf-8')
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = '<NAME> (<EMAIL>)'
'''
Python : Reviews preprocessing.
'''
def main():
FILE = os.curdir
logging.basicConfig(filename=os.path.join(FILE,'log.txt'), level=logging.ERROR)
# pos_neg_cut_test()
# pos_or_neg_reviews2pkl() # convert the manually labeled positive/negative review sets to pkl storage
# pos_tk_lst = iohelper.read_pickle2objects('./Reviews/pos_reviews.pkl') # read training corpus to list[[,...], [,...], ...]
# neg_tk_lst = iohelper.read_pickle2objects('./Reviews/neg_reviews.pkl') # same as above
# print('POS_REVIEWS_LENGTH %d\tNEG_REVIEWS_LENGTH %d' % (len(pos_tk_lst), len(neg_tk_lst)))
#-------------------------------------------------------------------------------
def pos_or_neg_reviews2pkl():
'''
convert the neg_reviews and pos_reviews to list[[,...], [,...],...]
save the list to pkl
'''
neg_list = iohelper.read_file2list('neg')
pos_list = iohelper.read_file2list('pos')
neg_tk_lst = word_tokenization(neg_list)
pos_tk_lst = word_tokenization(pos_list) # segmentation : [[,], [,], ...]
iohelper.save_objects2pickle(neg_tk_lst, './Reviews/neg_reviews.pkl')
iohelper.save_objects2pickle(pos_tk_lst, './Reviews/pos_reviews.pkl')
def word_tokenization(tick_blog_list):
'''
word tokenization by jieba to list
return list : [[,], [,], ...]
'''
count = 0
seg_list = []
try:
for blog in tick_blog_list:
count += 1
if blog != '':
segments = jieba.cut(blog)
tmp = []
for seg in segments:
tmp.append(seg)
seg_list.append(tmp)
else:
print('Line%d is empty!' % count)
except IOError as e:
logging.error('IOError %s' % e)
finally:
return seg_list
#-------------------------------------------------------------------------------
def pos_neg_cut_test():
'''
Based on the initially constructed stock-oriented lexicon, automatically separate the whole set of reviews into two parts: pos and neg
'''
# loading positive and negative sentiment lexicon
pos_lexicon_dict, neg_lexicon_dict = load_sentiment_lexicon()
# march + april + may = 40 (40*50=2000, 200 as test 1800 as train)
date_of_march = ['20160329', '20160331']
date_of_april = ['20160405', '20160406', '20160407', '20160408',
'20160411', '20160412', '20160413', '20160414', '20160415',
'20160418', '20160419', '20160420', '20160421',
'20160425', '20160426', '20160427', '20160429']
date_of_may = ['20160503', '20160504', '20160505', '20160506',
'20160509', '20160510', '20160511', '20160512', '20160513',
'20160516', '20160517', '20160518', '20160519', '20160520',
'20160523', '20160524', '20160525', '20160526', '20160527',
'20160530', '20160531']
review_list_day = []
review_list_day.extend(date_of_march)
review_list_day.extend(date_of_april)
review_list_day.extend(date_of_may)
# review_list_day = ['20160329'] # just for test one day : correct pos_reviews and neg_reviews manually
print('{0} {1}'.format(len(review_list_day), review_list_day))
opentime1 = st.opentime1
midclose = st.midclose
opentime2 = st.opentime2
closetime = st.closetime
tick_delta = dt.timedelta(minutes=5)
for subdir in review_list_day:
tick_now = opentime1
count = 0
pos_reviews = []
mid_reviews = []
neg_reviews = []
pos_scores = []
neg_scores = []
mid_scores = []
while True:
if (tick_now >= opentime1 and tick_now <= midclose) or (tick_now >= opentime2 and tick_now <= closetime):
hour = tick_now.hour
minute = tick_now.minute
fname = str(hour * 100 + minute)
tick_blog_list = iohelper.read_txt2list(fname, subdir)
# print(tick_blog_list[0])
# assert 0 == 1
count += len(tick_blog_list)
for each_blog in tick_blog_list:
if each_blog != '':
segments = jieba.cut(each_blog)
tmp = []
for seg in segments:
if is_word_invalid(seg) is False:
tmp.append(seg)
result = sentiment_logarithm_estimation(pos_lexicon_dict, neg_lexicon_dict, tmp)
if result == 0:
mid_scores.append(result)
mid_reviews.append(each_blog)
elif result < 0:
neg_scores.append(result)
neg_reviews.append(each_blog)
else:
pos_scores.append(result)
pos_reviews.append(each_blog)
tick_now += tick_delta
elif tick_now > midclose and tick_now < opentime2:
tick_now = opentime2
elif tick_now > closetime:
break
print('{0}-{1}'.format(subdir, count))
mid_reviews = random.sample(mid_reviews, 200)
iohelper.save_list2file(mid_reviews, './Data/' + subdir + '_mid_reviews')
print('save_list2file new word[mid polarity] list successfully!')
neg_reviews = random.sample(neg_reviews, 80)
pos_reviews = random.sample(pos_reviews, 80)
iohelper.save_list2file(neg_reviews, './Data/' + subdir + '_neg_reviews')
iohelper.save_list2file(pos_reviews, './Data/' + subdir + '_pos_reviews')
print('{0}-{1}-{2}'.format(len(neg_scores), len(mid_scores), len(pos_scores)))
# -----------------------------------------------------------------------------
def load_sentiment_lexicon():
'''
Load the positive and negative sentiment lexicons
'''
# loading positive and negative sentiment lexicon
pos_lexicon_dict = {}
neg_lexicon_dict = {}
lexicon = iohelper.read_lexicon2dict('positive.txt', True)
pos_lexicon_dict = dict(pos_lexicon_dict, **lexicon)
lexicon = iohelper.read_lexicon2dict('hownet-positive.txt')
pos_lexicon_dict = dict(pos_lexicon_dict, **lexicon)
lexicon = iohelper.read_lexicon2dict('ntusd-positive.txt')
pos_lexicon_dict = dict(pos_lexicon_dict, **lexicon)
lexicon = iohelper.read_lexicon2dict('negative.txt', True)
neg_lexicon_dict = dict(neg_lexicon_dict, **lexicon)
lexicon = iohelper.read_lexicon2dict('hownet-negative.txt')
neg_lexicon_dict = dict(neg_lexicon_dict, **lexicon)
lexicon = iohelper.read_lexicon2dict('ntusd-negative.txt')
neg_lexicon_dict = dict(neg_lexicon_dict, **lexicon)
return pos_lexicon_dict, neg_lexicon_dict
def sentiment_logarithm_estimation(pos_lexicon_dict, neg_lexicon_dict, sentence_blog_segments):
'''
compute every preprocessed sentence's sentiment index
using ln((1+sigma(pos))/(1+sigma(neg))) formula
return float : sentiment value
'''
pos_list = []
neg_list = []
tick_value_tmp = float(0)
pos_count = 0
neg_count = 0
for word in sentence_blog_segments:
if word in pos_lexicon_dict:
pos_count += pos_lexicon_dict[word]
elif word in neg_lexicon_dict:
neg_count += neg_lexicon_dict[word]
tick_value_tmp = log(float(1 + pos_count) / float(1 + neg_count))
return tick_value_tmp
def is_word_invalid(word):
'''
to judge the word is or not the chinese, if not return False, else return True.
'''
if 'sh' in word or 'sz' in word or 'SH' in word or 'SZ' in word or 'IF' in word or word.isdigit():
return True
if word[0] <= chr(127):
return True # is english, invalid chinese
isfloat = True
try:
fv = float(word)
except Exception as e:
isfloat = False
return isfloat
if __name__ == '__main__':
main()
```
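As a quick sanity check of the scoring rule in `sentiment_logarithm_estimation()` above: a sentence with two positive-lexicon hits and one negative hit scores ln((1+2)/(1+1)) = ln(1.5) ≈ 0.405, while a sentence with no hits scores ln(1/1) = 0. The self-contained sketch below uses made-up toy lexicons, not the real lexicon files.
```python
# Toy example of the ln((1 + pos) / (1 + neg)) scoring used above; lexicons are fabricated.
from math import log

pos_lex = {u'利好': 1, u'上涨': 1}
neg_lex = {u'下跌': 1}
segments = [u'利好', u'上涨', u'下跌']

pos_count = sum(pos_lex.get(w, 0) for w in segments)
neg_count = sum(neg_lex.get(w, 0) for w in segments)
score = log(float(1 + pos_count) / float(1 + neg_count))
print(score)  # ~0.405, i.e. mildly positive
```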
#### File: JoshuaMichaelKing/Stock-SentimentAnalysis/weibo.py
```python
from __future__ import print_function
from __future__ import division
from snspy import APIClient, SinaWeiboMixin # using SinaWeibo
from datetime import datetime
import datetime as dt
import stocktime as st
import sys, os, time, json, logging, codecs
reload(sys)
sys.setdefaultencoding('utf-8')
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = '<NAME> (<EMAIL>)'
'''
Python script to fetch Weibo posts for text mining. Requires Python 2.6/2.7.
'''
APP_KEY = '' # app key
APP_SECRET = '' # app secret
CALLBACK_URL = '' # callback url
def main():
FILE = os.curdir
logging.basicConfig(filename=os.path.join(FILE,'log.txt'), level=logging.INFO)
config_init()
print('APP_KEY:%s APP_SECRET:%s CALLBACK_URL:%s' % (APP_KEY, APP_SECRET, CALLBACK_URL))
client = APIClient(SinaWeiboMixin, app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
url = client.get_authorize_url() # redirect the user to 'url'
print(url)
# needGetCode = raw_input("Do you need to get code from callback in browser? Please input yes or no!")
needGetCode = 'no'
code = ''
r = {}
if needGetCode == 'yes':
code = raw_input("Please input the returned code : ") # redirect to url and get code
r = client.request_access_token(code)
print(r.access_token) # access token,e.g., <KEY>
print(r.expires) # token expires time, UNIX timestamp, e.g., 1384826449.252 (10:01 am, 19 Nov 2013, UTC+8:00)
# the test-version access_token is only valid for 1 day; obtain a new one after it expires
access_token = '<KEY>'
expires = 1622967755.0
client.set_access_token(access_token, expires)
# After Initialize API Authorization...
get_blog_and_process(client)
print('get_blog_and_process is over...')
# testing code
# test_get_blog_and_save(client)
# blogs = u'$上证指数 sh000001$ #聚焦2016年全国两会# 车易拍赶紧的'
# bloglists = sentence_cut_list(list(blogs))
# print(''.join(bloglists))
def config_init():
filename = 'config.ini'
f = open(filename, 'rb') # open in read-only mode
contents = f.read()
f.close()
allconfigs = contents.split('\n')
config = []
for cfg in allconfigs:
if cfg == '':
continue
tmp = cfg.replace('\r', '')
if '#' in tmp:
continue
config.append(tmp)
global APP_KEY
APP_KEY = config[0]
global APP_SECRET
APP_SECRET = config[1]
global CALLBACK_URL
CALLBACK_URL = config[2]
def make_dir(filepath):
if os.path.exists(str(filepath)):
pass
else:
os.mkdir(str(filepath))
def save2Txt(weibodict, filename, dir):
filepath = './Data/' + dir
make_dir(filepath)
filepath = filepath + '/'
f = codecs.open(filepath + filename + '.txt', 'a', 'utf-8')
for i in weibodict:
f.write(weibodict[i] + '\n')
f.close()
logging.info('Weibo save2txt success at %s %s' % (datetime.now(), filename))
def get_all_userid_sina(client):
'''
get all user ids from sina and backup to pickle
'''
userid = get_users(client, 'gqmk21st')
userdict = {}
userdict = print_users_list(userid)
userdict = read_file_dict()
print(len(userdict))
for k in userdict:
print('dict[%s] =' % k, userdict[k])
save_dict_file(userdict)
return userdict
def get_all_userid_pkl():
'''
get all user ids from pickle file
'''
userdict = {}
userdict = read_file_dict()
print(len(userdict))
for k in userdict:
print('dict[%s] =' % k, userdict[k])
return userdict
def print_users_list(ul):
'''
print all users info
'''
index = 0
userdict = {}
for user in ul:
uid = user["id"]
ugen = user["gender"]
uname = user["screen_name"]
# uloc = user["location"]
# udesc = user["description"]
# print('%-6d%-12d%-3s%s' % (index, uid, ugen, uname))
index += 1
userdict[uid] = uname
return userdict
def get_users(client, uname):
'''
API : get all users info from sina api
'''
fl = []
next_cursor = 0
while True:
raw_fl = client.friendships.friends.get(screen_name=uname, cursor=next_cursor, count=200)
fl.extend(raw_fl["users"])
next_cursor += 1
if next_cursor == 10:
break
time.sleep(1)
return fl
def get_newest_personalweibo(client, id):
'''
API : this api is just limited for authorized user
get designated following friends newest weibo by uid
'''
i = 1
while i < 5:
i += 1
jsdict = client.statuses.user_timeline.get(uid=id, page=i)
for status in jsdict.statuses:
# print(jsdict.statuses[m].user.id, jsdict.statuses[m])
print(status.created_at, status.user.id, status.user.name.encode("GBK", 'replace'), status.text.encode("GBK", 'replace'))
time.sleep(1)
def get_newest_publicweibo(client):
'''
get newest friend microblog
'''
jsdict = {}
try:
jsdict = client.statuses.friends_timeline.get(page=1, count=180)
except Exception as e:
logging.error('>>>>>>>>>>get_newest_publicweibo ERROR : %s %s' % (datetime.now(), e))
print('>>>>GET ERROR')
del client
time.sleep(10)
config_init()
client = APIClient(SinaWeiboMixin, app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
url = client.get_authorize_url() # redirect the user to 'url'
access_token = '<PASSWORD>'
expires = 1614584749.0
client.set_access_token(access_token, expires)
jsdict = get_newest_publicweibo(client)
logging.error('>>>>>>>>>> %s' % jsdict)
finally:
return jsdict
def send_one_message(client):
'''
API : send message or picture to sina weibo
'''
content = 'Hello World! By Python SDK'
client.statuses.update.post(status=content)
# print(client.statuses.upload.post(status=u'test weibo with picture', pic=open('/Users/michael/test.png')))
def sentence_cut_list(sentencelists):
'''
data clean process
to filter some irrelevant signal
to filter the sentence containing web link
to filter the words begin with # or $ and ends with # or $
'''
cutlist = "[。,!……!《》<>\"'::??、/\|“”‘’;]★@{}(){}【】(){}():,.;、~——+%%`:“”'‘\n\r".decode('utf-8')
l = []
length = len(sentencelists)
i = 0
is_to_skip = False
skip_to_num = 0
for wd in sentencelists:
if is_to_skip:
if i == skip_to_num:
is_to_skip = False
else:
i += 1
continue
if wd not in cutlist: # to filter some irrelevant signal
if wd == '#': # filter # #
cursor = i + 1
while cursor < length:
if sentencelists[cursor] == '#':
is_to_skip = True
skip_to_num = cursor + 1
break
else:
cursor += 1
if cursor - i > 100:
break
i += 1
continue
elif wd == '$':
cursor = i + 1
while cursor < length:
if sentencelists[cursor] == '$':
is_to_skip = True
skip_to_num = cursor + 1
break
else:
cursor += 1
if cursor - i > 100:
break
i += 1
continue
elif wd == 'h': # filter the text containing web link http://...
if (i + 3) < length:
if sentencelists[i+1] == 't' and sentencelists[i+2] == 't' and sentencelists[i+3] == 'p':
break
l.append(wd)
i += 1
return l
def get_blog_and_process(client):
# initialze time data
todaydate = st.todaydate
opentime1 = st.opentime1 # recommended to launch this script before 9:35
midclose = st.midclose
opentime2 = st.opentime2
closetime = st.closetime
tick_delta = dt.timedelta(minutes=5) # time minute delta, now condsider 5 minutes as a cycle
tick_start = opentime1 - tick_delta
tick_end = tick_start
nowtime = datetime.now()
if nowtime < opentime1:
print('it is before trading day!')
tick_end = opentime1
tick_start = tick_end - tick_delta * 3
elif nowtime > opentime1 and nowtime <= midclose:
print('it is in the first trading day!')
minute = nowtime.minute - nowtime.minute % 5
time_str = st.todaydate + ' ' + str(nowtime.hour) + ':' + str(minute) + ':00'
tick_end = datetime.strptime(time_str, '%Y%m%d %H:%M:%S')
tick_start = tick_end - tick_delta
elif nowtime > midclose and nowtime < opentime2:
print('it is in the mid time, not trading!')
tick_end = opentime2
tick_start = tick_end - tick_delta * 3
elif nowtime > opentime2 and nowtime <= closetime:
print('it is in the second trading day!')
minute = nowtime.minute - nowtime.minute % 5
time_str = st.todaydate + ' ' + str(nowtime.hour) + ':' + str(minute) + ':00'
tick_end = datetime.strptime(time_str, '%Y%m%d %H:%M:%S')
tick_start = tick_end - tick_delta
else:
print('it is time after trading time!')
return
print('>>>>>>>>>>>>>>>Weibo collector begin! %s %s' % (tick_start, tick_end))
counter = 0
counter_sina = 1
cache_weio_dict = {}
is_set_again = False
while True:
now = datetime.now()
if now > midclose and now < opentime2:
mid_delta = dt.timedelta(minutes=6) # avoid losing the last 5 minutes of data persistence
mid_later = midclose + mid_delta
if now > mid_later:
print('>>>>>>>>>>>>>>>Weibo collector middle end! %d' % now.second)
time.sleep(1)
if is_set_again == False:
tick_start = opentime2 - tick_delta * 3
tick_end = opentime2
is_set_again = True
continue
elif now > closetime:
close_delta = dt.timedelta(minutes=6) # avoid losing the last 5 minutes of data
close_later = closetime + close_delta
if now > close_later:
print('>>>>>>>>>>>>>>>Weibo collector end!')
break
# persist data one tick later, to allow for the lag in posts returned by Sina Weibo
tmp = tick_end + tick_delta
if now >= tmp:
hour = tick_end.hour
minute = tick_end.minute
# flush the cache weibo text to txt file in disk
filename = str(hour * 100 + minute)
save2Txt(cache_weio_dict, filename, todaydate)
cache_weio_dict.clear() # clear the cache for next saving dict
print('>>>>>>>>>>>tick start and end! %s %s' % (tick_start, tick_end))
tick_start = tick_end
tick_end = tmp
print('>>>>>>>>>>>tick start and end! %s %s' % (tick_start, tick_end))
if counter_sina == 1:
counter += 1
jsdict = get_newest_publicweibo(client)
for status in jsdict.statuses:
datetime_array = status.created_at.split(' ')
current_str = todaydate + ' ' + datetime_array[3]
current_time = datetime.strptime(current_str, '%Y%m%d %H:%M:%S')
if current_time >= tick_start and current_time <= tick_end:
cut_list = sentence_cut_list(list(status.text))
sentence = ('').join(cut_list)
cache_weio_dict[status.id] = sentence
counter_sina += 1
if counter_sina >= 90:
counter_sina = 1
print('counter : %d %d %d' % (counter_sina, counter, len(cache_weio_dict)))
time.sleep(1)
def test_get_blog_and_save(client):
'''
just testing
'''
cache_weio_dict = {}
tick_start = datetime.strptime(st.todaydate + ' 19:40:00', '%Y%m%d %H:%M:%S')
tick_end = datetime.strptime(st.todaydate + ' 20:00:00', '%Y%m%d %H:%M:%S')
jsdict = get_newest_publicweibo(client)
for status in jsdict.statuses:
datetime_array = status.created_at.split(' ')
current_str = st.todaydate + ' ' + datetime_array[3]
current_time = datetime.strptime(current_str, '%Y%m%d %H:%M:%S')
if current_time >= tick_start and current_time <= tick_end:
cut_list = sentence_cut_list(list(status.text))
sentence = ('').join(cut_list)
cache_weio_dict[status.id] = sentence
if len(cache_weio_dict) == 0:
print('microblog zero')
else:
print(len(cache_weio_dict))
save2Txt(cache_weio_dict, 'text', st.todaydate)
if __name__ == '__main__':
main()
``` |
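A rough usage sketch of `sentence_cut_list()` above, assuming the file is saved as `weibo.py` and importable; the microblog text below is invented. The `$...$` cashtag span is skipped and everything from the `http` link onward is dropped.
```python
# Illustrative only; the behaviour shown is what the filter above implements.
from weibo import sentence_cut_list  # assumes the module above is on the path as weibo.py

raw = u'看好 $上证指数 sh000001$ 继续上行 http://t.cn/xyz'
cleaned = ''.join(sentence_cut_list(list(raw)))
print(cleaned)  # roughly u'看好  继续上行 ' -- the cashtag span and the link are gone
```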
{
"source": "joshuaminwookang/mlrl_synthesis",
"score": 2
} |
#### File: mlrl_synthesis/analysis/plots.py
```python
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import glob, os.path, os, pickle
#global params
fig_dims = (12,8)
axis_label = 12
legend_label = 12
axis_scale = 2.0
default_abc9_script="&scorr;&sweep;&dc2;&dch -f;&if -W 300 -K 6 -v;&mfs;"
def load_data_pkl(pathname):
filename=os.path.basename(pathname)
tag=filename[filename.find('_') + 1 :filename.find('.pkl')]
data = pd.read_pickle(pathname)
data["Benchmark"] = tag
return data, tag
# Load the data as Pandas DataFrame
def load_data(pathname):
filename=os.path.basename(pathname)
tag=filename[filename.find('_') + 1 :filename.find('.out.csv')]
data = pd.read_csv(pathname, delimiter="\t")
data["Benchmark"] = tag
return data, tag
def load_data_from_dir(search):
dfs = []
search_path = os.path.normpath(os.path.join(os.getcwd(), search))
csvs = glob.glob(search_path)
for f in csvs:
data = pd.DataFrame(pd.read_pickle(f)).transpose()
dfs.append(data)
return dfs
"""
Plot a single (scatter/line/etc) plot for a benchmark
@params:
"""
def plot_single (df, title, y_vars, plot_type="scatter-ratios"):
sns.set_style("darkgrid")
fig = plt.gcf()
fig.set_size_inches(fig_dims)
yosys_baseline = df.loc[df['Sequence'] == default_abc9_script+"\n"]
with sns.plotting_context(font_scale=axis_scale):
plt.axvline(yosys_baseline.iloc[0][y_vars[0]], ls='--', color='black', lw=0.7)
plt.axhline(yosys_baseline.iloc[0][y_vars[1]], ls='--', color='black', lw=0.7)
if (plot_type == "scatter-ratios"):
df["Logic to Net Delay Ratio (%)"] = df["Logic_Delay_Percentage"]
ax = sns.scatterplot(x=y_vars[0], y = y_vars[1], data=df, hue="Logic to Net Delay Ratio (%)", palette="coolwarm");
elif (plot_type == "scatter"):
ax = sns.scatterplot(x=y_vars[0], y = y_vars[1], data=df);
else :
ax = sns.lineplot(x=y_vars[0], y = y_vars[1], data=df);
ax.set_xlabel(y_vars[0], fontsize=axis_label, weight='bold')
ax.set_ylabel(y_vars[1], fontsize=axis_label, weight='bold')
#plt.legend(fontsize=legend_label,loc=1, prop={'weight': 'bold'})
plt.title(title+" (N={})".format(df[y_vars[1]].count()), fontsize=axis_label, weight='bold')
plt.savefig(title+'_'+y_vars[0]+'_'+y_vars[1]+'.png', format='png', dpi=300)
plt.close()
"""
From a list of DataFrames, plot inidividual plots for each DF
"""
def plot_singles(dfs, title, y_vars, plot_type):
for df in dfs:
bmark = df["Benchmark"][0]
plot_single(df,title+"_"+bmark, y_vars, plot_type=plot_type)
"""
From a list of DataFrames, plot all data in a single plot (with legend)
Goal: compare the results of different benchmarks (of y_vars[1]) over some predictor (y_vars[0])
"""
def plot_stacked(dfs, y_vars, plot_type="scatter"):
total_df = pd.DataFrame()
min_size = np.amin([len(df.index) for df in dfs])
for df in dfs:
relative_df = df.copy()
relative_df[y_vars[1]] = df[y_vars[1]] / df[y_vars[1]].median()
total_df = total_df.append(relative_df)
total_df = total_df.pivot(index=y_vars[0], columns='Benchmark', values=y_vars[1])
fig = plt.gcf()
fig.set_size_inches(fig_dims)
sns.set_style("darkgrid")
with sns.plotting_context(font_scale=axis_scale):
if (plot_type == "scatter"):
ax = sns.scatterplot(data=total_df.iloc[100:200])
else :
ax = sns.lineplot(data=total_df.iloc[100:400])
ax.set_xlabel(y_vars[0]+' (Random Synthesis Flow)', fontsize=axis_label, weight='bold')
ax.set_ylabel('Relative '+y_vars[1]+' (Normalized to Median)', fontsize=axis_label, weight='bold')
plt.legend(fontsize=legend_label,loc=1, prop={'weight': 'bold'})
plt.savefig(y_vars[0]+'_'+y_vars[1]+'.png', format='png', dpi=300)
plt.close()
def main():
# Generate scatterplots for random runs
# dfs = load_data_from_dir("results/random*.csv")
# plot_singles(dfs, "random", ['Slice_LUTs', 'Path_Delay'], plot_type="scatter-ratios")
# Stacked plot to compare QoRs of same scripts on different benchmarks
# plot_stacked(dfs, ['Index', 'Path_Delay'], plot_type="scatter")
# plot_stacked(dfs, ['Index', 'Slice_LUTs'], plot_type="scatter")
# for df in dfs:
# if df.iloc[0]['Benchmark'] == "or1200":
# plot_single(df, "Vivado_vs_ABC", ['ABC_Delay', 'Path_Delay'], plot_type="scatter")
# plot_single(df, "Vivado_vs_ABC", ['ABC_Area', 'Slice_LUTs'], plot_type="scatter")
exh = load_data_from_dir("test*.pkl")
print(exh[0])
# plot_single(exh[0], "VTR_bgm", ['ABC_Delay', 'Path_Delay'], plot_type="scatter")
# plot_single(exh[0], "VTR_bgm", ['ABC_Area', 'Slice_LUTs'], plot_type="scatter")
# plot_single(exh[0], "Exhaustive_bgm", ['Slice_LUTs','Path_Delay'], plot_type="scatter-ratios")
if __name__ == "__main__":
main()
```
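The plotting helpers above expect per-flow rows keyed by the column names used in the code (`Sequence`, `Slice_LUTs`, `Path_Delay`, `Logic_Delay_Percentage`, plus the `Benchmark` tag added by the loaders). The fabricated two-row frame below only illustrates that layout; it assumes the module above is saved as `plots.py`.
```python
# Illustrative only: column names come from the code above, all values are made up.
import pandas as pd
from plots import plot_single, default_abc9_script  # assumed module name

toy = pd.DataFrame({
    'Sequence': [default_abc9_script + "\n", "&dc2;&if -W 300 -K 6 -v;&mfs;\n"],
    'Slice_LUTs': [1200, 1150],
    'Path_Delay': [8.4, 8.9],
    'Logic_Delay_Percentage': [35.0, 41.2],
})
toy['Benchmark'] = 'toy'
# plot_single(toy, 'toy', ['Slice_LUTs', 'Path_Delay'])  # would save toy_Slice_LUTs_Path_Delay.png
```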
#### File: mlrl_synthesis/dataset/circuit_graphs.py
```python
import networkx as nx
import json
import argparse
import glob, os.path, os
import subprocess
def run_yosys(verilog_path, output, ip, graph_type, cwd=os.getcwd(), stdout=None, stderr=None):
try:
if graph_type == "gl":
p = subprocess.check_call(['yosys', '-p', "read_verilog {}; proc; memory; opt; techmap; opt; gml -o {}/{}.gml".format(str(verilog_path), str(output),ip)], \
cwd=cwd, stdout=stdout, stderr=stderr)
else :
p = subprocess.check_call(['yosys', '-p', "read_verilog {}; opt; gml -o {}/{}.rtl.gml".format(str(verilog_path), str(output),ip)], \
cwd=cwd, stdout=stdout, stderr=stderr)
return True
except:
return False
def run_yosys_from_dir(load_dir_path,output_path,graph_type):
verilogs = glob.glob(os.path.normpath(os.path.join(os.getcwd(), load_dir_path+"/*.v")))
for verilog_file in verilogs:
ip = verilog_file[verilog_file.rindex('/')+1:verilog_file.find('.v')]
run_yosys(verilog_file, output_path, ip, graph_type)
if graph_type == "gl":
M = nx.read_gml("{}/{}.gml".format(output_path,ip))
G = nx.Graph(M)
nx.write_gml(G, "{}/{}.gml".format(output_path,ip))
else:
M = nx.read_gml("{}/{}.rtl.gml".format(output_path,ip))
G = nx.Graph(M)
nx.write_gml(G, "{}/{}.rtl.gml".format(output_path,ip))
def summary(result_dir):
filename = os.path.basename(os.path.dirname(result_dir))
gmls = glob.glob(os.path.normpath(os.path.join(result_dir,"*.gml")))
gmls.sort()
print(gmls)
results = {}
for gml_file in gmls:
ip = gml_file[gml_file.rindex('/')+1:gml_file.find('.gml')]
G = nx.read_gml(gml_file)
results[ip] = (len(G.nodes), len(G.edges))
json_file = filename+'_summary.json'
with open(json_file, 'w') as outfile:
json.dump(results, outfile)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', type=str, required=True, help="verilog file path")
parser.add_argument('-o','--output', type=str, required=True, help="verilog file path")
parser.add_argument('--graph_type', type=str, default="gl", help="gl or rtl")
parser.add_argument('-s','--summary', action='store_true')
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
graph_type = args.graph_type
if args.summary:
summary(input_dir)
else:
run_yosys_from_dir(input_dir,output_dir,graph_type)
summary(output_dir)
if __name__ == "__main__":
main()
``` |
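A usage sketch for the script above, assuming it is saved as `circuit_graphs.py` and `yosys` is on the PATH; the directory names are made up. Each Verilog design is techmapped and dumped as a GML graph, and `summary()` records (node, edge) counts per design in a JSON file.
```python
# Equivalent to: python circuit_graphs.py -i ./benchmarks -o ./graphs --graph_type gl
from circuit_graphs import run_yosys_from_dir, summary  # assumed module name

run_yosys_from_dir('./benchmarks', './graphs', 'gl')  # one .gml per .v in ./benchmarks
summary('./graphs')                                   # writes node/edge counts per design to JSON
```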
{
"source": "joshuamitchell192/DLS_Project",
"score": 3
} |
#### File: DLS_Project/GUI/app.py
```python
import configparser
import os
import sys
import threading
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from Controllers.controller import Controller
from Models.instructions import Instructions
from Models.sampleData import SampleData
from Services.serialConnection import SerialConnection
from Views.mainView import View
class App (QApplication):
def __init__(self, sys_argv):
super(App, self).__init__(sys_argv)
self.absoluteInstallationPath = os.path.dirname(os.path.abspath(__file__))
self.sampleData = SampleData()
port = self.loadConfigComPort()
self.serialConnection = SerialConnection(port)
self.controller = Controller(self.serialConnection, Instructions, self.sampleData)
self.view = View(self.serialConnection, self.controller, self.sampleData)
self.view.show()
thread = threading.Thread(target=self.controller.readLoop, args=())
# probably should use an signalling mechanism like an Event to stop gracefully
thread.daemon = True
thread.start()
self.loadConfig()
self.view.calculateExpectedDuration()
def loadConfigComPort(self):
config = configparser.ConfigParser()
config.read(self.absoluteInstallationPath + '/settings.ini')
if ('Default' in config):
if ('Port' in config['Default']):
return config['Default']['Port']
return ""
def loadConfig(self):
config = configparser.ConfigParser()
config.read(self.absoluteInstallationPath + '/settings.ini')
if ('Default' in config):
defaultSettings = config['Default']
if ("SampleDuration" in defaultSettings):
self.view.SmpDuration_SpinBox.setValue(float(defaultSettings['SampleDuration']))
else:
print("Failed to load 'SampleDuration' from 'Default' section from settings")
if ("mmBetweenSamples" in defaultSettings):
self.view.StepLength_LineEdit.setValue(float(defaultSettings['mmBetweenSamples']))
if ("StartPosition" in defaultSettings):
self.view.P1_Slider.setValue(int(defaultSettings["StartPosition"]))
self.view.P1_SpinBox.setValue(int(defaultSettings["StartPosition"]))
if ("EndPosition" in defaultSettings):
self.view.P2_Slider.setValue(int(defaultSettings["EndPosition"]))
self.view.P2_SpinBox.setValue(int(defaultSettings["EndPosition"]))
if ("AverageInterval" in defaultSettings):
self.view.AvgInterval_SpinBox.setValue(int(defaultSettings["AverageInterval"]))
if __name__ == '__main__':
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
QApplication.setAttribute(Qt.AA_DisableWindowContextHelpButton)
QApplication.setAttribute(Qt.AA_UseStyleSheetPropagationInWidgetStyles, True)
app = App(sys.argv)
dirname = os.path.dirname(os.path.abspath(__file__))
ssFile = os.path.join(dirname, "stylesheet.qss")
dropDownPath = os.path.join(dirname, "Assets/baseline_arrow_drop_down_black_18dp.png").replace('\\', '/')
with open(ssFile) as fh:
styleSheet = fh.read()
styleSheet += """
QComboBox::down-arrow {
image: url(""" + dropDownPath + """);
}
"""
app.setStyleSheet(styleSheet)
sys.exit(app.exec_())
```
#### File: GUI/Controllers/controller.py
```python
import struct
from enum import Enum
import pycrc.algorithms
from Services.settings import Settings
class State(Enum):
Idle = 0
ScanBetween = 1
StationarySample = 2
Calibration = 3
class Controller:
def __init__(self, serialConnection, Instructions, sampleData):
self.serialConnection = serialConnection
self.Instructions = Instructions
self.pause = False
self.stepsPerMM = 0.018
self.isSampling = False
self.crc = pycrc.algorithms.Crc(width=16, poly=0x8005, reflect_in=True, xor_in= 0x0000, reflect_out=True, xor_out = 0x0000)
self.state = State.Idle
self.sampleData = sampleData
self.settings = Settings()
self.settings.loadSettings()
def handleCalibrate(self):
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Calibrate)
def handleScanBetween(self, P1, P2, sampleDuration, mmBetweenSamples, stepMode):
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Pause)
self.serialConnection.sendInstruction(self.Instructions.StartProgram)
self.serialConnection.sendInstruction(self.Instructions.RapidPosition, [P1])
self.serialConnection.sendInstruction(self.Instructions.SampleDuration, [sampleDuration])
self.serialConnection.sendInstruction(self.Instructions.TurnOnAdc)
self.serialConnection.sendInstruction(self.Instructions.mmBetweenSamples, [mmBetweenSamples])
self.serialConnection.sendInstruction(self.Instructions.StepMode, [stepMode])
self.serialConnection.sendInstruction(self.Instructions.LinearPosition, [P2])
self.serialConnection.sendInstruction(self.Instructions.TurnOffAdc)
self.serialConnection.sendInstruction(self.Instructions.EndProgram)
self.serialConnection.sendInstruction(self.Instructions.Resume)
def handlePause(self):
""" Sets the pause boolean to true so that we cease reading samples.
"""
if (self.pause):
self.pause = False
self.serialConnection.sendInstruction(self.Instructions.Resume)
else:
self.pause = True
# Write to tiva to pause sampling on GUI
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Pause)
def handleGoToPoint(self, position):
""" sends the Rapid Position instruction to move to the given position
"""
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Pause)
self.serialConnection.sendInstruction(self.Instructions.TurnOffAdc)
self.serialConnection.sendInstruction(self.Instructions.RapidPosition, [position])
self.serialConnection.sendInstruction(self.Instructions.Resume)
def handleStartSample(self, averageInterval):
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Pause)
self.serialConnection.sendInstruction(self.Instructions.StartProgram)
self.serialConnection.sendInstruction(self.Instructions.AverageInterval, [averageInterval])
self.serialConnection.sendInstruction(self.Instructions.TurnOnAdc)
self.serialConnection.sendInstruction(self.Instructions.SampleAtPosition)
self.serialConnection.sendInstruction(self.Instructions.TurnOffAdc)
self.serialConnection.sendInstruction(self.Instructions.EndProgram)
self.serialConnection.sendInstruction(self.Instructions.Resume)
def readLoop(self):
while True:
messageType = self.serialConnection.ser.read(2)
if messageType == b'\xff\xff':
if (self.state != State.ScanBetween):
self.sampleData.linePlotData.addLine()
self.state = State.ScanBetween
self.readSampleData()
elif messageType == b'\xff\xfe':
self.state = State.Idle
elif messageType == b'\xff\xfd':
self.state = State.StationarySample
self.readSampleData()
elif(messageType == b'\xff\xfc'):
self.state = State.Calibration
stepsPerMMBytes = self.serialConnection.readInt()
stageCalibrationStepsPerMM = struct.unpack('h', stepsPerMMBytes)[0]
self.stepsPerMM = 1/stageCalibrationStepsPerMM
self.settings.saveSetting("Calibration", "stepspermm", str(stageCalibrationStepsPerMM))
def readSampleData(self):
sampleBytes = self.serialConnection.readInt()
remainder = self.readCrc(sampleBytes)
if (remainder != 0):
print("---------- TRANMISSION ERROR OCCURED ----------")
timeBytes = self.serialConnection.readFloat()
remainder = self.readCrc(timeBytes)
if (remainder != 0):
print("---------- TRANMISSION ERROR OCCURED ----------")
positionBytes = self.serialConnection.readFloat()
remainder = self.readCrc(positionBytes)
if (remainder != 0):
print("---------- TRANMISSION ERROR OCCURED ----------")
sample = struct.unpack('h', sampleBytes)[0]
time = struct.unpack('f', timeBytes)[0]
position = struct.unpack('f', positionBytes)[0]
#time = self.secondsToRTC(time)
if (self.state == State.ScanBetween):
self.sampleData.linePlotData.samples[self.sampleData.linePlotData.getLineIndex()].append(round(sample, 4))
self.sampleData.linePlotData.times[self.sampleData.linePlotData.getLineIndex()].append(time)
self.sampleData.linePlotData.positions[self.sampleData.linePlotData.getLineIndex()].append(round(position, 4))
elif (self.state == State.StationarySample):
self.sampleData.scatterPlotData.samples.append(round(sample, 4))
self.sampleData.scatterPlotData.times.append(round(time, 4))
self.sampleData.scatterPlotData.positions.append(round(position, 4))
def secondsToRTC(self, time):
minutes = time // 60
seconds = round(time - (minutes * 60), 4)
#milliSeconds = round(seconds // )
#hours = minutes // 60
return f'{int(minutes)} m : {seconds} s'#:{milliSeconds} ms'
def readCrc(self, data):
crc = self.serialConnection.ser.read(2)
return self.crc.table_driven(data + crc)
def handleClearQueue(self):
self.isSampling = False
self.serialConnection.sendInstruction(self.Instructions.Pause)
self.serialConnection.sendInstruction(self.Instructions.Clear)
self.serialConnection.sendInstruction(self.Instructions.Resume)
```
#### File: GUI/Views/dynamicCanvas.py
```python
import random
import matplotlib
import numpy as np
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QVBoxLayout
# Make sure that we are using QT5Agg
matplotlib.use('Qt5Agg')
import matplotlib.colors as mcolors
from matplotlib import cm, figure
from matplotlib.backends.backend_qt5agg import \
FigureCanvasQTAgg as FigureCanvas
from numpy import linspace
from Models.sampleData import SampleData
class MplCanvas(FigureCanvas):
""" Creates the matplotlib canvas to setup the basic figure base class so that it can be extended to be either static or dynamic.
"""
def __init__(self, parent=None, width=5, height=4, dpi=90):
""" Creates a matplot lib figure, subplot and links the data samples list.
"""
self.fig = figure.Figure(figsize=(width, height), dpi=dpi)
self.ax = self.fig.add_subplot(111)
self.setCanvasAxes()
self.noColours = 40
self.noColourRepitions = 3
self.maxLineCount = self.noColours * self.noColourRepitions
FigureCanvas.__init__(self, self.fig)
self.createColourMap()
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def setCanvasAxes(self):
self.ax.set_xlim((0, 10))
self.ax.set_ylim(0, 10)
self.ax.grid(color='darkgrey', linestyle='dashed', axis='both', linewidth=1)
self.ax.set_xlabel("Position")
self.ax.set_ylabel("Sensor Value")
self.fig.canvas.draw()
def createColourMap(self):
cm_subsection = linspace(0.0, 1.0, self.noColours)
tabbMap = [ cm.tab20b(x) for x in cm_subsection ]
tabcMap = [ cm.tab20c(x) for x in cm_subsection ]
colours = (tabbMap + tabcMap)
random.shuffle(colours)
self.colourMap = colours
for i in range(self.noColourRepitions):
random.shuffle(colours)
self.colourMap += colours
class DynamicMplCanvas(QtWidgets.QWidget):
""" Creates a dynamic canvas that updates based on the timers' frequency, changing if there's new data within the samples list.
"""
def __init__(self, sampleData, parent=None, width=5, height=4, dpi=90):
""" Initialises the basic canvas from the super class and setups of the timer for updating the graph
"""
QtWidgets.QMainWindow.__init__(self)
vbox = QVBoxLayout()
self.canvas = MplCanvas(parent=parent, width=width, height=height, dpi=dpi)
self.sampleData = sampleData
self.xAxisRange = AxisRange()
self.yAxisRange = AxisRange()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
self.lines = [self.canvas.ax.plot(0, 0, color=self.canvas.colourMap[i % self.canvas.noColours], label="Line")[0] for i in range(self.canvas.maxLineCount)]
self.scatterPlot = self.canvas.ax.plot(-10, -10, color='black', linestyle = 'None', marker='o', markersize=5)[0]
self.startTimer()
self.dataExists = False
def startTimer(self):
timer = QtCore.QTimer(self)
timer.timeout.connect(self.callback)
timer.start(64)
def callback(self):
try:
if (self.sampleData.linePlotData.getLineIndex() >= 0):
self.dataExists = True
self.lines[self.sampleData.linePlotData.getLineIndex()].set_data(self.sampleData.linePlotData.positions[self.sampleData.linePlotData.getLineIndex()], self.sampleData.linePlotData.samples[self.sampleData.linePlotData.getLineIndex()])
if (self.sampleData.scatterPlotData.getSampleCount() > 0):
self.dataExists = True
self.scatterPlot.set_data(self.sampleData.scatterPlotData.positions, self.sampleData.scatterPlotData.samples)
if (self.dataExists):
self.canvas.draw()
self.computeXAxisLimits()
self.computeYAxisLimits()
except:
pass
def computeXAxisLimits(self):
if (self.sampleData.scatterPlotData.getSampleCount() > 0):
latestScatterPosition = self.sampleData.scatterPlotData.positions[-1]
if (latestScatterPosition < self.xAxisRange.minValue):
self.xAxisRange.setMinValue(latestScatterPosition - 2)
lastRowMin = np.amin(self.sampleData.linePlotData.positions[-1])
if (lastRowMin < self.xAxisRange.minValue):
self.xAxisRange.setMinValue(lastRowMin - 2)
if (self.sampleData.scatterPlotData.getSampleCount() > 0):
latestScatterPosition = self.sampleData.scatterPlotData.positions[-1]
if (latestScatterPosition > self.xAxisRange.maxValue):
self.xAxisRange.maxValue = latestScatterPosition
lastRowMax = np.amax(self.sampleData.linePlotData.positions[-1])
if (lastRowMax > self.xAxisRange.maxValue):
self.xAxisRange.maxValue = lastRowMax
self.canvas.ax.set_xlim(self.xAxisRange.minValue, self.xAxisRange.maxValue + 2)
def computeYAxisLimits(self):
if (self.sampleData.scatterPlotData.getSampleCount() > 0):
latestScatterSample = self.sampleData.scatterPlotData.samples[-1]
if (latestScatterSample < self.yAxisRange.minValue):
self.yAxisRange.setMinValue(latestScatterSample - 10)
lastRowMin = np.amin(self.sampleData.linePlotData.samples[-1])
if (lastRowMin < self.yAxisRange.minValue):
self.yAxisRange.setMinValue(lastRowMin - 10)
if (self.sampleData.scatterPlotData.getSampleCount() > 0):
latestScatterSample = self.sampleData.scatterPlotData.samples[-1]
if (latestScatterSample > self.yAxisRange.maxValue):
self.yAxisRange.maxValue = latestScatterSample
lastRowMax = np.amax(self.sampleData.linePlotData.samples[-1])
if (lastRowMax > self.yAxisRange.maxValue):
self.yAxisRange.maxValue = lastRowMax
self.canvas.ax.set_ylim(self.yAxisRange.minValue, self.yAxisRange.maxValue + 10)
def resetCanvas(self):
""" Relinks the samples list for when the user clicks the clear samples button
:param: newSamples - the samples list after it's been reinitialised to an empty list.
"""
self.canvas.ax.clear()
self.canvas.setCanvasAxes()
self.lines = [self.canvas.ax.plot(0,0, color=self.canvas.colourMap[i % self.canvas.noColours], label="Line")[0] for i in range(self.canvas.maxLineCount)]
self.scatterPlot = self.canvas.ax.plot(0, 0, color='black', linestyle = 'None', marker='o', markersize=5)[0]
self.canvas.createColourMap()
class AxisRange:
def __init__(self):
self.minValue = 5000
self.maxValue = 0
def setMinValue(self, value):
if (value < 0):
self.minValue = 0
else:
self.minValue = value
``` |
{
"source": "JoshuaMitton/InvariantGraphNetworks",
"score": 2
} |
#### File: InvariantGraphNetworks/main_scripts/main_10fold_experiment.py
```python
import os
from data_loader.data_generator import DataGenerator
from models.invariant_basic import invariant_basic
from trainers.trainer import Trainer
from utils.config import process_config
from utils.dirs import create_dirs
from utils import doc_utils
from utils.utils import get_args
def main():
# capture the config path from the run arguments
# then process the json configuration file
try:
args = get_args()
config = process_config(args.config)
except Exception as e:
print("missing or invalid arguments %s" % e)
exit(0)
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import tensorflow as tf
import numpy as np
tf.set_random_seed(100)
np.random.seed(100)
print("lr = {0}".format(config.learning_rate))
print("decay = {0}".format(config.decay_rate))
print(config.architecture)
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
for exp in range(1, config.num_exp+1):
for fold in range(1, 11):
print("Experiment num = {0}\nFold num = {1}".format(exp, fold))
# create your data generator
config.num_fold = fold
data = DataGenerator(config)
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = Trainer(sess, model, data, config)
# here you train your model
acc, loss = trainer.train()
doc_utils.doc_results(acc, loss, exp, fold, config.summary_dir)
sess.close()
tf.reset_default_graph()
doc_utils.summary_10fold_results(config.summary_dir)
if __name__ == '__main__':
main()
``` |
{
"source": "JoshuaMitton/Rotation-Equivariant-Deforestation-Segmentation",
"score": 2
} |
#### File: Rotation-Equivariant-Deforestation-Segmentation/roteqseg/dataloader.py
```python
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
from rasterio.features import rasterize
from shapely.ops import cascaded_union
from shapely.geometry import Polygon
from torch.utils.data import Dataset
import pickle
class ForestDataset(Dataset):
"""Forest dataset."""
def __init__(self, csv_file, root_dir='/home/josh/Documents/Equivariance/ForestNet/ForestNetDataset/', transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.csv = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
self.label_to_int = {'Grassland shrubland':1, 'Other':2, 'Plantation':3, 'Smallholder agriculture':4}
def __len__(self):
return len(self.csv)
def poly_from_utm(self, polygon):
poly_pts = []
poly = cascaded_union(polygon)
for i in np.array(poly.exterior.coords):
poly_pts.append(tuple(i))
new_poly = Polygon(poly_pts)
return new_poly
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
label = self.csv.iloc[idx, 0]
merged_label = self.csv.iloc[idx, 1]
lat = self.csv.iloc[idx, 2]
long = self.csv.iloc[idx, 3]
year = self.csv.iloc[idx, 4]
folder = self.csv.iloc[idx, 5]
## load the image
image = io.imread(f'{self.root_dir}/{folder}/images/visible/composite.png')
## Get the segmentation map
with open(f'{self.root_dir}/{folder}/forest_loss_region.pkl', 'rb') as f:
data = pickle.load(f)
nx, ny = 332, 332
xy_array = np.empty((0,2))
if data.geom_type == 'Polygon':
data = [data]
elif data.geom_type == 'MultiPolygon':
data = list(data)
poly_shp = []
for poly_verts in data:
poly_shp.append(self.poly_from_utm(poly_verts))
mask = rasterize(shapes=poly_shp, out_shape=(332,332))
seg = np.array(mask)
image = torch.from_numpy(image).type(torch.float)
image = image.permute(2, 0, 1)
seg = torch.from_numpy(seg).type(torch.uint8)
merged_label = self.label_to_int[merged_label]
image = image[:, 86:246, 86:246]
seg = seg[86:246, 86:246]
return image, seg, merged_label
``` |
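Since `ForestDataset.__getitem__` returns an `(image, segmentation mask, label)` triple, the class slots directly into a standard PyTorch `DataLoader`. A minimal usage sketch, assuming the CSV path and dataset root below are placeholders:
```python
# Sketch only: csv_file and root_dir are placeholder paths, not real files.
from torch.utils.data import DataLoader

dataset = ForestDataset(csv_file="train.csv", root_dir="./ForestNetDataset/")
loader = DataLoader(dataset, batch_size=8, shuffle=True)

for images, masks, labels in loader:
    # images: (batch, channels, 160, 160) float tensors cropped to the central region
    # masks:  (batch, 160, 160) uint8 forest-loss masks; labels: integer class ids 1-4
    break
```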
{
"source": "joshuamoore-procore/spectrify",
"score": 2
} |
#### File: spectrify/spectrify/export.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from future.standard_library import install_aliases
install_aliases() # noqa
import boto3
import click
class RedshiftDataExporter:
UNLOAD_QUERY = """
UNLOAD ('select * from {}')
to %(s3_path)s
CREDENTIALS %(credentials)s
ESCAPE MANIFEST GZIP ALLOWOVERWRITE
MAXFILESIZE 256 mb;
"""
def __init__(self, sa_engine, s3_config, iam_role=None):
self.sa_engine = sa_engine
self.s3_config = s3_config
# iam_role (an ARN string) is optional; get_credentials falls back to boto3 session keys when it is None.
self.iam_role = iam_role
def export_to_csv(self, table_name):
s3_path = self.s3_config.get_csv_dir()
creds_str = self.get_credentials()
query = self.get_query(table_name)
with self.sa_engine.connect() as cursor:
click.echo('Exporting table to CSV...')
sql = (query % {
's3_path': s3_path,
'credentials': creds_str,
})
print("Export query is:\n{}".format(sql))
cursor.execute(sql)
click.echo('Done.')
def get_query(self, table_name):
return self.UNLOAD_QUERY.format(table_name)
def get_credentials(self):
if self.iam_role:
return 'aws_iam_role={}'.format(self.iam_role)
else:
session = boto3.Session()
credentials = session.get_credentials()
return 'aws_access_key_id={};aws_secret_access_key={}'.format(
credentials.access_key,
credentials.secret_key,
)
``` |
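`RedshiftDataExporter` only needs a SQLAlchemy engine and an object exposing `get_csv_dir()`, so wiring it up is short. A minimal sketch, assuming the connection string, bucket path, and the stand-in config class below are placeholders (the real spectrify S3 config object may differ):
```python
# Sketch only: the DSN, bucket path and the S3 config stand-in are assumptions, not spectrify's real API.
import sqlalchemy

class DummyS3Config:
    def get_csv_dir(self):
        return 's3://my-bucket/exports/my_table/csv/'  # placeholder bucket prefix

engine = sqlalchemy.create_engine('redshift+psycopg2://user:password@host:5439/db')  # placeholder DSN
exporter = RedshiftDataExporter(engine, DummyS3Config())
exporter.export_to_csv('my_table')  # UNLOADs the table to gzipped CSVs under the prefix
```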
{
"source": "joshuamorton/jml",
"score": 3
} |
#### File: experiments/utils/point.py
```python
import random
from typing import Tuple
import numpy as np
def random_point(width: int, height: int) -> Tuple[np.ndarray, Tuple[int, int]]:
img = np.zeros((width, height))
p = random.randint(0, width - 1), random.randint(0, height - 1)
img[p[0], p[1]] = 1
return img, p
def point(
width: int, height: int, x: int, y: int
) -> Tuple[np.ndarray, Tuple[int, int]]:
img = np.zeros((width, height))
img[x, y] = 1
return img, (x, y)
``` |
{
"source": "joshuamorton/PyMagic",
"score": 4
} |
#### File: PyMagic/test/test_color.py
```python
import unittest
from color import Color
class TestColor(unittest.TestCase):
"""
In Magic, the 5 colors can be considered ordered, since there is a fixed order in
which they always appear: because white always comes before blue, white is less than
blue. This is useful for programmatically representing mana costs later.
"""
def test_color_comparison(self):
self.assertTrue(Color.white < Color.blue < Color.black < Color.red < Color.green)
def test_le(self):
self.assertTrue(Color.white <= Color.white)
def test_ge(self):
self.assertTrue(Color.green >= Color.green >= Color.red >= Color.black)
def test_gt(self):
self.assertTrue(Color.green > Color.red > Color.black > Color.blue > Color.white)
def test_str(self):
self.assertEqual("Color red", str(Color.red))
``` |
{
"source": "joshuamorton/turnt-octo-tyrion",
"score": 3
} |
#### File: turnt-octo-tyrion/db/Database.py
```python
from contextlib import contextmanager
import sqlalchemy
import sqlalchemy.orm
from . import Tables
from db.Tables import Student
__author__ = 'Josh'
class Database:
def __init__(self, engine='sqlite:///', name='ClassRank.db', folder='data'):
self.account = Tables.Account
self.student = Tables.Student
self.rating = Tables.Rating
self.course = Tables.Course
self.section = Tables.Section
self.faculty = Tables.Faculty
self.school = Tables.School
self.engine = sqlalchemy.create_engine(engine + folder + "/" + name)
self.base = Tables.Base
self.metadata = self.base.metadata
self.metadata.create_all(self.engine)
self.Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
self.session = None
@property
@contextmanager
def scope(self):
"""
Context-managed session scope for the database. Allows clean usage such as
`with db.scope as session:` followed by queries; commits on success, rolls back on error.
:return: a session object
"""
session = self.Session()
self.session = session
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
self.session = None
def query_rating(self, user, course, attr) -> float:
"""
for use within a sessionmanager object
"""
ratings = self.session.query(self.rating).join(self.section).\
filter(self.rating.student == user,
self.section.course == course).all()
return sum(rating.__getattribute__(attr) for rating in ratings) / len(ratings)
def student_with_id(self, user_id) -> Student:
return self.session.query(self.student).filter(self.student.uid == user_id).one()
def student_with_name(self, user_name) -> Student:
return self.session.query(self.account).filter(self.account.username == user_name).one().student
```
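The `scope` property hides all session management: it yields a live session, commits on a clean exit, rolls back on an exception, and always closes. A minimal usage sketch, assuming the default sqlite layout (the `data/` folder must already exist):
```python
# Sketch only: assumes a 'data/' folder exists for the default sqlite file.
db = Database()
with db.scope as session:
    school = db.school(name="Georgia Tech", abbreviation="GT")
    session.add(school)
# leaving the block commits the insert (or rolls back on error) and closes the session
```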
#### File: turnt-octo-tyrion/db/Tables.py
```python
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import Session
__author__ = 'Josh'
Base = declarative_base()
class Account(Base):
__tablename__ = "account"
# basic (required) signup information
uid = Column(Integer, primary_key=True)
username = Column(String(32), nullable=False, unique=True)
email_address = Column(String(64), nullable=False, unique=True)
password_hash = Column(String(128), nullable=False)
password_salt = Column(String(16), nullable=False)
# An account could be both a student and a teacher, as in the case of a grad student
# that TAs or teaches an undergraduate class
student = relationship("Student", backref="account", uselist=False) # joins to student
faculty = relationship("Faculty", backref="account", uselist=False) # joins to faculty
class Rating(Base):
__tablename__ = "rating"
student_id = Column(Integer, ForeignKey('student.uid'), primary_key=True)
section_id = Column(Integer, ForeignKey('section.uid'), primary_key=True)
rating = Column(Integer, nullable=True)
section = relationship('Section', backref='ratings')
student = relationship('Student', backref='ratings')
@property
def course(self):
"""
returns the course that this rating is for (as in parent of the section)
:return: Course object
"""
return self.section.course
def __repr__(self):
return "<Rating of " + str(self.rating) + ">"
class Student(Base):
__tablename__ = "student"
uid = Column(Integer, primary_key=True)
school_id = Column(Integer, ForeignKey("school.uid")) # joins to school
user_id = Column(Integer, ForeignKey("account.uid")) # joins to account
sections = relationship('Section', secondary='rating', backref='student')
@property
def courses(self):
session = Session.object_session(self)
return session.query(Course).join(Section).filter(Section.uid.in_(
section.uid for section in self.sections)).all()
class Faculty(Base):
__tablename__ = "faculty"
uid = Column(Integer, primary_key=True)
school_id = Column(Integer, ForeignKey("school.uid")) # joins to school
user_id = Column(Integer, ForeignKey("account.uid"), nullable=True) # joins to account
sections = relationship("Section", backref="professor")
@property
def ratings(self):
"""
queries the database for all ratings for the professor/faculty member
:return: Iterable[Rating]
"""
session = Session.object_session(self)
return session.query(Rating).join(Section).filter(Section.professor == self).all()
class School(Base):
__tablename__ = "school"
uid = Column(Integer, primary_key=True)
name = Column(String(64), nullable=False, unique=True)
abbreviation = Column(String(16), nullable=False, unique=True)
students = relationship("Student", backref="school") # 1-m joins to faculty
faculty = relationship("Faculty", backref="school") # 1-m joins to student
courses = relationship("Course", backref="school") # 1-m joins to course
@property
def sections(self):
session = Session.object_session(self)
return session.query(Section).join(Course).filter(Course.school == self).all()
class Course(Base):
__tablename__ = "course"
uid = Column(Integer, primary_key=True)
school_id = Column(Integer, ForeignKey("school.uid"), nullable=False)
name = Column(String(64), nullable=False)
description = Column(String(2000), nullable=True)
abbreviation = Column(String(8), nullable=False)
sections = relationship("Section", backref="course")
@property
def professors(self):
session = Session.object_session(self)
return session.query(Faculty).join(Section).filter(Section.course == self).all()
class Section(Base):
__tablename__ = "section"
uid = Column(Integer, primary_key=True)
professor_id = Column(Integer, ForeignKey("faculty.uid"))
course_id = Column(Integer, ForeignKey('course.uid'))
semester = Column(Integer, nullable=False)
year = Column(Integer, nullable=False)
``` |
{
"source": "joshuamorton/zing",
"score": 3
} |
#### File: zing/parser/peg.py
```python
import re
class Node:
def __init__(self, t, parser):
self.parser = parser
self.type = t
self._string = None
self.children = []
@property
def string(self):
return self.__repr__() # "complex" recursive thing
def __repr__(self):
return self.type + ":" + str(self.children)
def __getitem__(self, i):
return self.children[i]
def __len__(self):
return len(self.children)
def __iter__(self):
return iter(self.children)
def handle(self, vals):
return self.__getattribute__(self.type)(self, vals)
class PEGNode(Node):
def expression(self, node):
for i in range(0, len(node), 2):
self.handle(node[i])
def sequence(self, node):
print(node)
def prefix(self, node):
pass
def suffix(self, node):
pass
class Terminal(Node):
def __init__(self, t, parser, string):
super().__init__(t, parser)
self.parser = parser
self.type = t
self._string = string
self.children = None
@property
def string(self):
return self._string
def __repr__(self):
if self.type == "space" or self.type == "eof":
return ""
return "Terminal:" + '"' + self.string + '"'
def __str__(self):
return self._string
class PEGTerminal(Terminal):
pass
class GrammarError(Exception):
pass
class TerminalError(GrammarError):
pass
class SlashError(GrammarError):
pass
class NotError(GrammarError):
pass
class P:
def __init__(self, grammar):
self.functions = dict()
self.grammar = grammar
self.syntax_tree, _ = self._grammar(grammar)
self.syntax_tree = self.syntax_tree[0]
def parse(self, string):
return self._parse(string, self.syntax_tree)
def _parse(self, string, node):
"""
recursively parse nodes from the syntax
"""
print(node.type)
# for bootstrapping the PEG parse tree
# these methods each are in the form
# function(String a) -> Tuple[Subtring, Node]
# where Substring is some substring of a, from an index x (can be 0)
# to the end
# and Node is a Node object that essentially represents part of the AST of
# the parser itself
def _grammar(self, grammar):
"""
all _x are of the form str -> ([Node], str)
Grammar <- Spacing Definition+ EndOfFile
"""
gram = Node("grammar", self)
spacing, rest = self._spacing(grammar)
children = spacing
definitions, rest = self._some(self._definition)(rest)
children += definitions
eof, rest = self._EOF(rest)
children += eof
gram.children = children
return [gram], rest # rest will be empty here
def _definition(self, rest):
"""
Definition <- Identifier LEFTARROW Expression
"""
defn = Node("definition", self)
ident, rest = self._IDENTIFIER(rest)
arrow, rest = self._LEFTARROW(rest)
exp, rest = self._expression(rest)
defn.children = ident + arrow + exp
self.functions[defn.children[0].string] = defn.children[4]
return [defn], rest
def _expression(self, rest):
"""
Expression <- Sequence (SLASH Sequence)*
"""
expr = Node("expression", self)
seq, rest = self._sequence(rest)
nodes, rest = self._maybe(self._some(self._paren(self._SLASH, self._sequence)))(rest)
expr.children = seq + nodes
return [expr], rest
def _sequence(self, rest):
"""
Sequence <- Prefix*
"""
seq = Node("sequence", self)
nodes, rest = self._maybe(self._some(self._prefix))(rest)
seq.children = nodes
return [seq], rest
def _prefix(self, rest):
"""
Prefix <- (AND / NOT)? Suffix
"""
prefix = Node("prefix", self)
nodes, rest = self._maybe(self._slashed(self._AND, self._NOT))(rest)
suffix, rest = self._suffix(rest)
prefix.children = nodes + suffix
return [prefix], rest
def _suffix(self, rest):
"""
Suffix <- Primary (QUESTION / STAR / PLUS)?
"""
suffix = Node("suffix", self)
prim, rest = self._primary(rest)
nodes, rest = self._maybe(self._slashed(self._QUESTION, self._STAR, self._PLUS))(rest)
suffix.children = prim + nodes
return [suffix], rest
def _primary(self, rest):
"""
Primary <- Identifier (!LEFTARROW) / (OPEN Expression CLOSE) / Literal / Class / DOT
"""
prim = Node("primary", self)
nodes, rest = self._slashed(self._paren(self._IDENTIFIER, self._not(self._LEFTARROW)), self._paren(self._OPEN, self._expression,self._CLOSE), self._literal, self._class, self._DOT)(rest)
prim.children = nodes
return [prim], rest
def _IDENTIFIER(self, rest):
"""
Identifier <- IdentStart IdentCont* Spacing
IdentStart <- [a-zA-Z_]
IdentCont <- IdentStart / [0-9]
"""
return self._terminal(r'[a-zA-Z_][a-zA-Z0-9_]*', "identifier")(rest)
def _literal(self, rest):
"""
Literal <- ['] (!['] Char)* ['] Spacing / ["] (!["] Char)* ["] Spacing
"""
try:
if rest[0] == "'":
return self._terminal(r"""\'([^']|\n|\r|\r\n)*?\'""", "literal")(rest)
else:
return self._terminal(r"""\"([^"]|\n|\r|\r\n)*?\"""", "literal")(rest)
except:
raise GrammarError
def _class(self, rest):
"""
Class <- '[' (!']' Range)* ']' Spacing
"""
return self._terminal(r'\[(.(-.)?)*\]', "range")(rest)
def _terminal(self, terminal, name):
"""
terminal: the raw string to match
name: the name of the node
"""
def inner(rest):
try:
pos = re.match(terminal, rest).end()
node = [Terminal(name, self, rest[:pos])]
rest = rest[pos:]
except:
raise TerminalError("Expected a {} at '".format(name) + rest[:min(10, len(rest))] + "'")
spacing, rest = self._spacing(rest)
return node + spacing, rest
return inner
def _LEFTARROW(self, rest):
"""
LEFTARROW <- '<-' Spacing
"""
return self._terminal(r'<-', "LEFTARROW")(rest)
def _SLASH(self, rest):
"""
SLASH <- '/' Spacing
"""
return self._terminal(r'/', "SLASH")(rest)
def _AND(self, rest):
"""
AND <- '&' Spacing
"""
return self._terminal(r'&', "AND")(rest)
def _NOT(self, rest):
"""
NOT <- '!' Spacing
"""
return self._terminal(r'!', "NOT")(rest)
def _QUESTION(self, rest):
"""
QUESTION <- '?' Spacing
"""
return self._terminal(r'\?', "QUESTION")(rest)
def _STAR(self, rest):
"""
STAR <- '*' Spacing
"""
return self._terminal(r'\*', "STAR")(rest)
def _PLUS(self, rest):
"""
PLUS <- '+' Spacing
"""
return self._terminal(r'\+', "PLUS")(rest)
def _OPEN(self, rest):
"""
OPEN <- '(' Spacing
"""
return self._terminal(r'\(', "OPEN")(rest)
def _CLOSE(self, rest):
"""
CLOSE <- ')' Spacing
"""
return self._terminal(r'\)', "CLOSE")(rest)
def _DOT(self, rest):
"""
DOT <- '.' Spacing
"""
return self._terminal(r'\.', "DOT")(rest)
def _spacing(self, rest):
"""
Spacing <- (Space / Comment)*
"""
spacing = Node("spacing", self)
nodes, rest = self._maybe(self._some(self._paren(self._slashed(self._SPACE, self._COMMENT))))(rest)
spacing.children = nodes
return [spacing], rest
def _COMMENT(self, rest):
try:
pos = re.match(r"#.*?(\n|\r|\r\n|$)", rest).end()
return [Terminal("comment", self, rest[:pos])], rest[pos:]
except:
raise TerminalError("Expected a comment at '" + rest[:min(10, len(rest))] + "'")
def _SPACE(self, rest):
try:
pos = re.match(r"( |\t|\r\n|\n|\r)+", rest).end()
return [Terminal("space", self, rest[:pos])], rest[pos:]
except:
raise TerminalError("Expected a space at '" + rest[:min(10, len(rest))] + "'")
def _EOF(self, rest):
if rest != "":
raise TerminalError("Expected an end of file at '" + rest[:min(10, len(rest))] + "'")
else:
return [Terminal("eof", self, None)], None
@staticmethod
def _some(parser):
"""
parses at least one of the passed in parser
"""
def inner(rest):
node, rest = parser(rest)
nodes = node
while True:
try:
node, rest = parser(rest)
nodes += node
except GrammarError:
break
return nodes, rest
return inner
@staticmethod
def _maybe(parser):
"""
parses an optional item
"""
def inner(rest):
try:
node, rest = parser(rest)
except GrammarError:
node, rest = [], rest
return node, rest
return inner
@staticmethod
def _paren(*parsers):
"""
parses a parenthetical
"""
def inner(rest):
nodes = []
for parser in parsers:
node, rest = parser(rest)
nodes += node
return nodes, rest
return inner
@staticmethod
def _slashed(*parsers):
"""
parses slash separated values
"""
def inner(rest):
for parser in parsers:
try:
node, rest = parser(rest)
return node, rest
except GrammarError:
pass
raise SlashError
return inner
@staticmethod
def _not(parser):
"""
parses a not lookahead
"""
def inner(rest):
try:
parser(rest)
except GrammarError:
return [], rest
raise GrammarError
return inner
``` |
{
"source": "joshuamosesb/gramex",
"score": 3
} |
#### File: guide/auth/authutil.py
```python
import os
import json
import random
import tornado
import datetime
import sqlalchemy
import pandas as pd
from gramex.config import str_utf8, CustomJSONEncoder
from passlib.hash import sha256_crypt
from six.moves.urllib.parse import urlencode
def create_user_database(url, table, user, password, salt, excel):
# Connect to the SQLAlchemy engine specified at url.
# For example, this could be sqlite:///auth.db
engine = sqlalchemy.create_engine(url, encoding=str_utf8)
# In the Gramex guide, we're using an sqlite3 database and Excel file.
# If the target folder doesn't exist, make sure we create it.
for path in (engine.url.database, excel):
folder = os.path.dirname(os.path.abspath(path))
if not os.path.exists(folder):
os.makedirs(folder)
# This method re-creates the user table each time.
# The table must have:
# a column for the username (typically called user)
# a column for the password (typically called password)
# and any other optional columns (here, we're adding email and role)
# We're using sha256_crypt as the password hash.
# Email IDs used are <EMAIL>, <EMAIL>, etc
email = '<EMAIL>'
data = pd.DataFrame([
['alpha', sha256_crypt.encrypt('alpha', salt=salt), email % 'alpha', 'admin manager'],
['beta', sha256_crypt.encrypt('beta', salt=salt), email % 'beta', 'manager employee'],
['gamma', sha256_crypt.encrypt('gamma', salt=salt), email % 'gamma', 'employee'],
['delta', sha256_crypt.encrypt('delta', salt=salt), email % 'delta', None],
], columns=[user, password, 'email', 'role'])
data.to_sql(table, engine, index=False, if_exists='replace')
data.to_excel(excel, index=False) # noqa - encoding not required
def store_value(handler):
handler.session.setdefault('randkey', random.randint(0, 1000))
return json.dumps(handler.session, indent=4, cls=CustomJSONEncoder)
async_http_client = tornado.httpclient.AsyncHTTPClient()
@tornado.gen.coroutine
def contacts(handler):
days = int(handler.get_argument('days', '30'))
start = (datetime.datetime.today() - datetime.timedelta(days=days))
result = yield async_http_client.fetch(
'https://www.google.com/m8/feeds/contacts/default/full?' + urlencode({
'updated-min': start.strftime('%Y-%m-%dT%H:%M:%S'),
'max-results': 500,
'alt': 'json',
}),
headers={'Authorization': 'Bearer ' + handler.session.get('google_access_token', '')},
)
try:
contacts = json.loads(result.body)['feed']
data = {'contacts': contacts.get('entry', [])}
except Exception as e:
data = {'error': repr(e)}
raise tornado.gen.Return(json.dumps(data, indent=4))
def signup_validate(handler, args):
# TODO Nikhil: Provide Sample validation
# What if the user returns a dict/list/tuple?
return False
```
#### File: guide/handlers/handlerutil.py
```python
from collections import Counter
from gramex.handlers import BaseHandler
class CustomHandler(BaseHandler):
def get(self):
self.write('This is a custom handler')
class SetupHandler(BaseHandler):
@classmethod
def setup(cls, **kwargs):
super(SetupHandler, cls).setup(**kwargs) # You MUST call the BaseHandler setup
cls.name = kwargs.get('name', 'NA') # Perform any one-time setup here
cls.count = Counter()
def initialize(self, **kwargs): # initialize() is called with same kwargs
super(SetupHandler, self).initialize(**kwargs) # You MUST call the BaseHandler initialize
self.count[self.name] += 1 # Perform any recurring operations here
def get(self):
self.write('Name %s was called %d times' % (self.name, self.count[self.name]))
```
#### File: guide/uicomponents/guide_ui_app.py
```python
from __future__ import unicode_literals
import os
from gramex.config import variables
from tornado.escape import xhtml_escape
folder = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(folder, 'config.yaml')
ui_config_file = os.path.join(variables['GRAMEXPATH'], 'apps', 'ui', 'config.yaml')
def view_source(html):
'''Return the HTML with an escaped view source block appended to it'''
s = html.decode('utf-8')
return ('<div class="viewsource-wrapper">' + s +
'<pre class="viewsource"><code class="language-html">' + xhtml_escape(s.strip()) +
'</code></pre></div>')
def only_source(html):
'''Return only the escaped view source block'''
s = html.decode('utf-8')
return ('<pre class="viewsource"><code class="language-html">' + xhtml_escape(s.strip()) +
'</code></pre>')
```
#### File: gramex/handlers/functionhandler.py
```python
import json
import tornado.web
import tornado.gen
from types import GeneratorType
from gramex.transforms import build_transform
from gramex.config import app_log, CustomJSONEncoder
from .basehandler import BaseHandler
from tornado.util import unicode_type
class FunctionHandler(BaseHandler):
'''
Renders the output of a function when the URL is called via GET or POST. It
accepts these parameters when initialized:
:arg string function: a string that resolves into any Python function or
method (e.g. ``str.lower``). By default, it is called as
``function(handler)`` where handler is this RequestHandler, but you can
override ``args`` and ``kwargs`` below to replace it with other
parameters. The result is rendered as-is (and hence must be a string, or
a Future that resolves to a string.) You can also yield one or more
results. These are written immediately, in order.
:arg list args: positional arguments to be passed to the function.
:arg dict kwargs: keyword arguments to be passed to the function.
:arg dict headers: HTTP headers to set on the response.
:arg list methods: List of HTTP methods to allow. Defaults to
`['GET', 'POST']`.
:arg string redirect: URL to redirect to when the result is done. Used to
trigger calculations without displaying any output.
'''
@classmethod
def setup(cls, headers={}, methods=['GET', 'POST'], **kwargs):
super(FunctionHandler, cls).setup(**kwargs)
# Don't use cls.info.function = build_transform(...) -- Python treats it as a method
cls.info = {}
cls.info['function'] = build_transform(kwargs, vars={'handler': None},
filename='url: %s' % cls.name)
cls.headers = headers
for method in (methods if isinstance(methods, (tuple, list)) else [methods]):
setattr(cls, method.lower(), cls._get)
@tornado.gen.coroutine
def _get(self, *path_args):
if self.redirects:
self.save_redirect_page()
if 'function' not in self.info:
raise ValueError('Invalid function definition in url:%s' % self.name)
result = self.info['function'](handler=self)
for header_name, header_value in self.headers.items():
self.set_header(header_name, header_value)
# Use multipart to check if the response has multiple parts. Don't
# flush unless it's multipart. Flushing disables Etag
multipart = isinstance(result, GeneratorType) or len(result) > 1
# build_transform results are iterable. Loop through each item
for item in result:
# Resolve futures and write the result immediately
if tornado.concurrent.is_future(item):
item = yield item
if isinstance(item, (bytes, unicode_type, dict)):
self.write(json.dumps(item, separators=(',', ':'), ensure_ascii=True,
cls=CustomJSONEncoder) if isinstance(item, dict) else item)
if multipart:
self.flush()
else:
app_log.warning('url:%s: FunctionHandler can write strings/dict, not %s',
self.name, repr(item))
if self.redirects:
self.redirect_next()
```
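Because `_get` simply iterates over whatever the configured function returns, a handler function can yield several strings and each one is written (and flushed) as it arrives. A minimal sketch of such a function and the YAML that would point at it; the module path and URL name are assumptions, not part of Gramex itself:
```python
# Sketch of a generator function a FunctionHandler could call (module path is an assumption).
# url:
#   slow-count:
#     pattern: /count
#     handler: FunctionHandler
#     kwargs:
#       function: mymodule.count_up(handler)
def count_up(handler):
    n = int(handler.get_argument('n', '3'))
    for i in range(n):
        yield 'part %d\n' % i  # each yielded string is written and flushed in order
```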
#### File: gramex/handlers/socialhandler.py
```python
import os
import six
import json
import gramex
import tornado.gen
from oauthlib import oauth1
from orderedattrdict import AttrDict
from tornado.web import HTTPError
from tornado.auth import TwitterMixin, FacebookGraphMixin
from tornado.httputil import url_concat, responses
from .basehandler import BaseHandler
from gramex.http import OK, BAD_REQUEST, CLIENT_TIMEOUT
custom_responses = {
CLIENT_TIMEOUT: 'Client Timeout'
}
store_cache = {}
class SocialMixin(object):
@classmethod
def setup_social(cls, user_info, transform={}, methods=['get', 'post'], **kwargs):
# Session key that stores the user info
cls.user_info = user_info
# Set up methods
if not isinstance(methods, list):
methods = [methods]
methods = set(method.lower().strip() for method in methods)
for method in ('get', 'post', 'put', 'patch'):
if method in methods:
setattr(cls, method, cls.run)
@tornado.gen.coroutine
def social_response(self, response):
# Set response headers
if response.code in responses:
self.set_status(response.code)
else:
self.set_status(response.code, custom_responses.get(response.code))
for header, header_value in response.headers.items():
# We're OK with anything that starts with X-
# Also set MIME type and last modified date
if header.startswith('X-') or header in {'Content-Type', 'Last-Modified'}:
self.set_header(header, header_value)
# Set user's headers
for header, header_value in self.kwargs.get('headers', {}).items():
self.set_header(header, header_value)
# Transform content
content = response.body
if content and response.code == OK:
content = yield gramex.service.threadpool.submit(self.run_transforms, content=content)
# Convert to JSON if required
if not isinstance(content, (six.binary_type, six.text_type)):
content = json.dumps(content, ensure_ascii=True, separators=(',', ':'))
raise tornado.gen.Return(content)
def run_transforms(self, content):
result = json.loads(content.decode('utf-8'))
for name, transform in self.transform.items():
for value in transform['function'](result):
result = value
for header, header_value in transform.get('headers', {}).items():
self.set_header(header, header_value)
return result
def write_error(self, status_code, **kwargs):
'''Write error responses in JSON'''
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.finish(json.dumps({'errors': [{
'code': status_code,
'message': self._reason,
}]}))
def _get_store_key(self):
'''
Allows social mixins to store information in a single global JSONStore.
Keys are "$YAMLPATH: url-key". Any value may be stored against it.
'''
if 'store' not in store_cache:
store_path = os.path.join(gramex.config.variables['GRAMEXDATA'], 'socialstore.json')
store_cache['store'] = gramex.handlers.basehandler.JSONStore(store_path, flush=60)
base_key = '{}: {}'.format(os.getcwd(), self.name)
return store_cache['store'], base_key
def read_store(self):
'''
Read from this URL handler's social store. Typically returns a dict
'''
cache, key = self._get_store_key()
return cache.load(key, {})
def write_store(self, value):
'''
Write to this URL handler's social store. Typically stores a dict
'''
cache, key = self._get_store_key()
cache.dump(key, value)
def get_token(self, key, fetch=lambda info, key, val: info.get(key, val)):
'''
Returns an access token / key / secret with the following priority:
1. If YAML config specifies "persist" for the token, get it from the last
stored value. If none is stored, save and use the current session's
token
2. If YAML config specifies a token, use it
3. If YAML config does NOT specify a token, use current sessions' token
If after all of this, we don't have a token, raise an exception.
'''
info = self.session.get(self.user_info, {})
token = self.kwargs.get(key, None) # Get from config
session_token = fetch(info, key, None)
if token == 'persist': # nosec
token = self.read_store().get(key, None) # If persist, use store
if token is None and session_token: # Or persist from session
self.write_store(info)
if token is None:
token = session_token # Use session token
if token is None: # Ensure token is present
raise HTTPError(BAD_REQUEST, reason='token %s missing' % key)
return token
class TwitterRESTHandler(SocialMixin, BaseHandler, TwitterMixin):
'''
Proxy for the Twitter 1.1 REST API via these ``kwargs``::
pattern: /twitter/(.*)
handler: TwitterRESTHandler
kwargs:
key: your-consumer-key
secret: your-consumer-secret
access_key: your-access-key # Optional -- picked up from session
access_secret: your-access-secret # Optional -- picked up from session
methods: [get, post] # HTTP methods to use for the API
path: /search/tweets.json # Freeze Twitter API request
Now ``POST /twitter/search/tweets.json?q=gramener`` returns the same response
as the Twitter REST API ``/search/tweets.json``.
If you only want to expose a specific API, specify a ``path:``. It overrides
the URL path. The query parameters will still work.
By default, ``methods`` is POST, and GET logs the user in, storing the access
token in the session for future use. But you can specify the ``access_...``
values and set ``methods`` to ``[get, post]`` to use both GET and POST
requests to proxy the API.
'''
@staticmethod
def get_from_token(info, key, val):
return info.get('access_token', {}).get(key.replace('access_', ''), val)
@classmethod
def setup(cls, **kwargs):
super(TwitterRESTHandler, cls).setup(**kwargs)
cls.setup_social('user.twitter', **kwargs)
@tornado.gen.coroutine
def run(self, path=None):
path = self.kwargs.get('path', path)
if not path and self.request.method == 'GET':
yield self.login()
raise tornado.gen.Return()
args = {key: val[0] for key, val in self.args.items()}
params = AttrDict(self.kwargs)
params['access_key'] = self.get_token('access_key', self.get_from_token)
params['access_secret'] = self.get_token('access_secret', self.get_from_token)
client = oauth1.Client(
client_key=params['key'],
client_secret=params['secret'],
resource_owner_key=params['access_key'],
resource_owner_secret=params['access_secret'])
endpoint = params.get('endpoint', 'https://api.twitter.com/1.1/')
path = params.get('path', path)
uri, headers, body = client.sign(url_concat(endpoint + path, args))
http = self.get_auth_http_client()
response = yield http.fetch(uri, headers=headers, raise_error=False)
result = yield self.social_response(response)
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.write(result)
@tornado.gen.coroutine
def login(self):
if self.get_argument('oauth_token', None):
info = self.session[self.user_info] = yield self.get_authenticated_user()
if (any(self.kwargs.get(key, None) == 'persist'
for key in ('access_key', 'access_secret'))):
self.write_store(info)
self.redirect_next()
else:
self.save_redirect_page()
yield self.authorize_redirect(callback_uri=self.request.protocol + "://" +
self.request.host + self.request.uri)
def _oauth_consumer_token(self):
return dict(key=self.kwargs['key'],
secret=self.kwargs['secret'])
class FacebookGraphHandler(SocialMixin, BaseHandler, FacebookGraphMixin):
'''
Proxy for the Facebook Graph API via these ``kwargs``::
pattern: /facebook/(.*)
handler: FacebookGraphHandler
kwargs:
key: your-consumer-key
secret: your-consumer-secret
access_token: your-access-token # Optional -- picked up from session
methods: [get, post] # HTTP methods to use for the API
scope: user_posts,user_photos # Permissions requested for the user
path: /me/feed # Freeze Facebook Graph API request
Now ``POST /facebook/me`` returns the same response as the Facebook Graph API
``/me``. To request specific access rights, specify the ``scope`` based on
`permissions`_ required by the `Graph API`_.
If you only want to expose a specific API, specify a ``path:``. It overrides
the URL path. The query parameters will still work.
By default, ``methods`` is POST, and GET logs the user in, storing the access
token in the session for future use. But you can specify the ``access_token``
values and set ``methods`` to ``[get, post]`` to use both GET and POST
requests to proxy the API.
.. _permissions: https://developers.facebook.com/docs/facebook-login/permissions
.. _Graph API: https://developers.facebook.com/docs/graph-api/reference
'''
@classmethod
def setup(cls, **kwargs):
super(FacebookGraphHandler, cls).setup(**kwargs)
cls.setup_social('user.facebook', **kwargs)
@tornado.gen.coroutine
def run(self, path=None):
path = self.kwargs.get('path', path)
if not path and self.request.method == 'GET':
yield self.login()
raise tornado.gen.Return()
args = {key: val[0] for key, val in self.args.items()}
args['access_token'] = self.get_token('access_token')
uri = url_concat(self._FACEBOOK_BASE_URL + '/' + self.kwargs.get('path', path), args)
http = self.get_auth_http_client()
response = yield http.fetch(uri, raise_error=False)
result = yield self.social_response(response)
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.write(result)
@tornado.gen.coroutine
def login(self):
redirect_uri = self.request.protocol + "://" + self.request.host + self.request.uri
if self.get_argument('code', False):
info = self.session[self.user_info] = yield self.get_authenticated_user(
redirect_uri=redirect_uri,
client_id=self.kwargs['key'],
client_secret=self.kwargs['secret'],
code=self.get_argument('code'))
if self.kwargs.get('access_token', None) == 'persist':
self.write_store(info)
self.redirect_next()
else:
self.save_redirect_page()
scope = self.kwargs.get('scope', 'user_posts,read_insights')
yield self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.kwargs['key'],
extra_params={'scope': scope})
```
#### File: gramex/testlib/test_app_formhandler.py
```python
import unittest
from nose.tools import eq_
from gramex.apps.formhandler.formhandler_utils import URLUpdate, SET, ADD, POP, XOR
class TestURLUpdate(unittest.TestCase):
def test_url(self):
u = URLUpdate('')
eq_(u(), '') # Default test
eq_(u(SET, 'x', '1'), 'x=1') # SET value
eq_(u(SET, 'x', '1', SET, 'y', '2'), 'x=1&y=2') # SET multiple values
eq_(u(SET, 'x', '1', SET, 'x', None), '') # SET None clears
eq_(u(ADD, 'x', '1'), 'x=1') # ADD new value
eq_(u(SET, 'x', '1', ADD, 'x', '2'), 'x=1&x=2') # ADD to existing value
eq_(u(ADD, 'x', '1', ADD, 'x', '2'), 'x=1&x=2') # ADD multiple values
eq_(u(SET, 'x', '1', POP, 'x'), '') # POP removes all values
eq_(u(SET, 'x', '1', POP, 'x', None), '') # POP None removes all values
eq_(u(SET, 'x', '1', POP, 'x', '1'), '') # POP removes value
eq_(u(SET, 'x', '1', POP, 'x', '0'), 'x=1') # POP ignores missing vals
eq_(u(XOR, 'x', '1'), 'x=1') # XOR sets missing
eq_(u(SET, 'x', '1', XOR, 'x', '1'), '') # XOR clears existing
u = URLUpdate('?x=1&x=2&y=3')
eq_(u(), 'x=1&x=2&y=3')
eq_(u(SET, 'x', '1'), 'x=1&y=3') # SET value
eq_(u(SET, 'x', '1', SET, 'y', '2'), 'x=1&y=2') # SET multiple values
eq_(u(SET, 'x', '1', SET, 'x', None), 'y=3') # SET None clears
eq_(u(ADD, 'x', '1'), 'x=1&x=2&y=3') # ADD new value
eq_(u(SET, 'x', '1', ADD, 'x', '2'), 'x=1&x=2&y=3') # ADD to existing value
eq_(u(ADD, 'x', '1', ADD, 'x', '2'), 'x=1&x=2&y=3') # ADD multiple values
eq_(u(SET, 'x', '1', POP, 'x', '1'), 'y=3') # POP removes value
eq_(u(SET, 'x', '1', POP, 'x', '0'), 'x=1&y=3') # POP ignores missing vals
eq_(u(POP, 'x'), 'y=3') # POP removes all values
eq_(u(POP, 'x', None), 'y=3') # POP None removes all values
eq_(u(POP, 'x', '1'), 'x=2&y=3') # POP removes part value
eq_(u(POP, 'x', '2', POP, 'y', '3'), 'x=1') # POP multiple values
eq_(u(POP, 'x', ADD, 'y', '4'), 'y=3&y=4') # POP in middle removes values
eq_(u(POP, 'y', SET, 'x', '1'), 'x=1') # POP in middle removes values
eq_(u(XOR, 'x', '1'), 'x=2&y=3') # XOR sets missing
eq_(u(XOR, 'x', '2', XOR, 'y', '4'), 'x=1&y=3&y=4') # XOR clears existing
```
#### File: gramex/testlib/test_ml.py
```python
import os
import unittest
import pandas as pd
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
import gramex.ml
import gramex.cache
from nose.tools import eq_, ok_
from pandas.util.testing import assert_frame_equal as afe
from . import folder
class TestClassifier(unittest.TestCase):
model_path = os.path.join(folder, 'model.pkl')
def test_01_train(self):
path = os.path.join(folder, 'iris.csv')
data = pd.read_csv(path, encoding='utf-8')
# Model can be trained without specifying a model class, input or output
model1 = gramex.ml.Classifier()
model1.train(data)
ok_(isinstance(model1.model, BernoulliNB))
eq_(model1.input, data.columns[:4].tolist())
eq_(model1.output, data.columns[-1])
# Train accepts explicit model_class, model_kwargs, input and output
inputs = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width']
model2 = gramex.ml.Classifier(
model_class='sklearn.svm.SVC', # Any sklearn model
# Optional model parameters
model_kwargs={'kernel': 'sigmoid'},
input=inputs,
output='species')
model2.train(data) # DataFrame with input & output columns
eq_(model2.input, inputs)
eq_(model2.output, model1.output)
ok_(isinstance(model2.model, SVC))
eq_(model2.model.kernel, 'sigmoid')
# Test predictions. Note: this is manually crafted. If it fails, change the test case
result = model1.predict([
{'sepal_length': 5, 'sepal_width': 3,
'petal_length': 1.5, 'petal_width': 0},
{'sepal_length': 5, 'sepal_width': 2,
'petal_length': 5.0, 'petal_width': 1},
{'sepal_length': 6, 'sepal_width': 3,
'petal_length': 4.8, 'petal_width': 2},
])
eq_(result.tolist(), ['setosa', 'versicolor', 'virginica'])
# Test saving
expected = model1.predict(data[model1.input])
if os.path.exists(self.model_path):
os.remove(self.model_path)
model1.save(self.model_path)
actuals = gramex.ml.load(self.model_path).predict(data[model1.input])
eq_(actuals.tolist(), expected.tolist())
def test_02_load(self):
model = gramex.ml.load(self.model_path)
result = model.predict([{
'sepal_length': 5.7,
'sepal_width': 4.4,
'petal_length': 1.5,
'petal_width': 0.4,
}])
eq_(result.tolist(), ['setosa'])
# def test_linear_model_with_controlled_data(self):
# ...
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.model_path):
os.remove(cls.model_path)
class TestAutolyse(unittest.TestCase):
path = os.path.join(folder, 'auto_test.csv')
df = gramex.cache.open(path, encoding='utf-8')
def base(self, groups, numbers, check_string):
eq_(gramex.ml.groupmeans(self.df, groups, numbers).to_json(), check_string)
def test_groupmeans_unicode_col_names(self):
'''Unicode column names and categorical column values. '''
autolysis_string = ''.join([
'{"group":{"0":"A\\u00e4"},"number":{"0":"Xfloat\\u00e9"},"biggies":{',
'"0":{"A0":0.5147290217,"A1":0.43041003,"A10":0.4747865202,"A11":0.4814285354,',
'"A12":0.4106736393,"A13":0.6440158478,"A14":0.4499212197,"A15":0.5564064238,',
'"A16":0.5736623215,"A17":0.4890015995,"A18":0.6202282336,"A2":0.4501432661,',
'"A3":0.4593324615,"A4":0.4611977511,"A5":0.4260692432,"A6":0.410675212,',
'"A7":0.560958454,"A8":0.4463740271,"A9":0.4476561046}},"gain":{"0":0.3144803738},',
'"means":{"0":{"Xfloat\\u00e9":{"A12":0.4106736393,"A6":0.410675212,',
'"A5":0.4260692432,',
'"A1":0.43041003,"A8":0.4463740271,"A9":0.4476561046,"A14":0.4499212197,',
'"A2":0.4501432661,"A3":0.4593324615,"A4":0.4611977511,"A10":0.4747865202,',
'"A11":0.4814285354,"A17":0.4890015995,"A0":0.5147290217,"A15":0.5564064238,',
'"A7":0.560958454,"A16":0.5736623215,"A18":0.6202282336,"A13":0.6440158478},',
'"#":{"A12":21,"A6":21,"A5":21,"A1":21,"A8":21,"A9":21,"A14":21,"A2":21,',
'"A3":21,"A4":21,"A10":21,"A11":21,"A17":21,"A0":22,"A15":21,"A7":21,"A16":21,',
'"A18":21,"A13":21}}},"prob":{"0":0.0046080226}}'
])
self.base([u'Aä'], [u'Xfloaté'], autolysis_string)
def test_numbers_sparse(self):
autolysis_string = ''.join([
'{"group":{"0":"R\\u00e9gions"},"number":{"0":"`Numbers1"},"biggies":{"0":{',
'"IL":2.0,"NO":2.3333333333,"CE":1.6666666667,"Bretagne":2.5,',
'"\\u00cele-de-France":2.5833333333,"Aquitaine":2.75,"Picardie":1.75,',
'"PR":2.3333333333,"Bourgogne":1.5,"HA":3.5,"BR":3.0,"PA":1.1666666667,',
'"Nord-Pas-de-Calais":3.0,"Pays de la Loire":5.0,',
'"Provence-Alpes-C\\u00f4te d\'Azur":4.5}},"gain":{"0":0.9157088123},',
'"means":{"0":{"`Numbers1":{"BO":0.0,"Lorraine":0.0,"PI":0.0,"PO":0.0,',
'"BA":0.0,"Centre":1.0,"PA":1.1666666667,"CH":1.5,"Bourgogne":1.5,',
'"CE":1.6666666667,"Picardie":1.75,"IL":2.0,"LO":2.0,"NO":2.3333333333,',
'"PR":2.3333333333,"AU":2.5,"Poitou-Charentes":2.5,"Bretagne":2.5,',
'"\\u00cele-de-France":2.5833333333,"Aquitaine":2.75,"Nord-Pas-de-Calais":3.0,',
'"BR":3.0,"HA":3.5,"AL":4.0,"Provence-Alpes-C\\u00f4te d\'Azur":4.5,',
'"Pays de la Loire":5.0,"Haute-Normandie":5.0,"Languedoc-Roussillon":6.0,',
'"MI":6.0,"AQ":6.5,"Alsace":8.0},"#":{"BO":4,"Lorraine":8,"PI":4,"PO":4,"BA":4,',
'"Centre":8,"PA":24,"CH":8,"Bourgogne":16,"CE":12,"Picardie":16,"IL":36,"LO":4,',
'"NO":24,"PR":24,"AU":8,"Poitou-Charentes":8,"Bretagne":16,"\\u00cele-de-France":48,',
'"Aquitaine":16,"Nord-Pas-de-Calais":12,"BR":16,"HA":16,"AL":4,',
'"Provence-Alpes-C\\u00f4te d\'Azur":16,"Pays de la Loire":12,"Haute-Normandie":8,',
'"Languedoc-Roussillon":8,"MI":4,"AQ":8,"Alsace":4}}},"prob":{"0":0.0004737537}}'
])
self.base([u'Régions'], [u'`Numbers1'], autolysis_string)
def test_non_normal_dist(self):
self.base(['GroupWith1Name'], [u'Xfloaté', u'Yfloaté',
u'Zfloaté', u'Numbérs', u'`Numbers'], '{"index":{}}')
def test_only_ints(self):
autolysis_string = ''.join([
'{"group":{"0":"IntCats"},"number":{"0":"`Numbers"},"biggies":{"0":{"1":5.40625,',
'"3":5.625,"4":4.4722222222}},"gain":{"0":0.0943579767},"means":{"0":{',
'"`Numbers":{"4":4.4722222222,"1":5.40625,"3":5.625},"#":{"4":144,"1":128,',
'"3":128}}},"prob":{"0":0.0002885852}}'
])
self.base(['IntCats'], ['`Numbers'], autolysis_string)
def test_col_only_dates(self):
self.base([u'Dätes'], [u'Numbérs'], '{"index":{}}')
def test_floats_col_sparse(self):
autolysis_string = ''.join([
'{"group":{"0":"R\\u00e9gions"},"number":{"0":"FloatsWithZero"},"biggies":{"0":{',
'"IL":0.012747739,"NO":-0.0352614186,"CE":0.0,"Bretagne":0.0058930111,',
'"\\u00cele-de-France":0.0109352419,"Aquitaine":0.1169812762,',
'"Picardie":-0.0470094696,"PR":0.0432100751,"Bourgogne":0.0563597848,',
'"HA":0.005033869,"BR":0.072658035,"PA":0.0747856489,',
'"Nord-Pas-de-Calais":0.0547388023,"Pays de la Loire":0.035744271,',
'"Provence-Alpes-C\\u00f4te d\'Azur":-0.1231877406}},"gain":{"0":10.4323598287},',
'"means":{"0":{"FloatsWithZero":{"PO":-0.2453796284,"Haute-Normandie":-0.2237780122,',
'"BA":-0.1698339283,"AU":-0.1614843193,',
'"Provence-Alpes-C\\u00f4te d\'Azur":-0.1231877406,"Centre":-0.0720992128,',
'"AQ":-0.0665866815,"Picardie":-0.0470094696,"NO":-0.0352614186,"LO":0.0,',
'"PI":0.0,"CE":0.0,"AL":0.0,"MI":0.0,"HA":0.005033869,"Bretagne":0.0058930111,',
'"\\u00cele-de-France":0.0109352419,"IL":0.012747739,"Pays de la Loire":0.035744271,',
'"CH":0.0377686544,"PR":0.0432100751,"Nord-Pas-de-Calais":0.0547388023,',
'"Bourgogne":0.0563597848,"Languedoc-Roussillon":0.0726263707,"BR":0.072658035,',
'"PA":0.0747856489,"Lorraine":0.115573334,"Aquitaine":0.1169812762,',
'"Poitou-Charentes":0.150149774,"Alsace":0.1766370569,"BO":0.1967609903},',
'"#":{"PO":4,"Haute-Normandie":8,"BA":4,"AU":8,',
'"Provence-Alpes-C\\u00f4te d\'Azur":16,"Centre":8,"AQ":8,"Picardie":16,"NO":24,',
'"LO":4,"PI":4,"CE":12,"AL":4,"MI":4,"HA":16,"Bretagne":16,"\\u00cele-de-France":48,',
'"IL":36,"Pays de la Loire":12,"CH":8,"PR":24,"Nord-Pas-de-Calais":12,',
'"Bourgogne":16,"Languedoc-Roussillon":8,"BR":16,"PA":24,"Lorraine":8,',
'"Aquitaine":16,"Poitou-Charentes":8,"Alsace":4,"BO":4}}},"prob":{"0":0.0000015713}}'
])
self.base([u'Régions'], ['FloatsWithZero'], autolysis_string)
# Map (q, source, target) -> (detected source, translation)
_translate = {
('Apple', None, 'nl'): ('en', 'appel'),
('Orange', None, 'nl'): ('en', 'Oranje'),
('Apple', 'en', 'nl'): ('en', 'appel'),
('Orange', 'en', 'nl'): ('en', 'Oranje'),
('Apple', 'en', 'de'): ('en', 'Apfel'),
('Orange', 'en', 'de'): ('en', 'Orange'),
('apfel', '', 'en'): ('de', 'apple'), # used by tests/test_translater
}
_translate_count = []
def translate_mock(q, source, target, key='...'):
'''Mock the Google Translate API results'''
_translate_count.append(1)
vals = [_translate[item, source, target] for item in q]
return {
'source': [v[0] for v in vals],
'target': [target] * len(q),
'q': q,
't': [v[1] for v in vals],
}
class TestTranslate(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cache = os.path.join(folder, 'translate.xlsx')
gramex.ml.translate_api['mock'] = translate_mock
def test_translate(self):
def check(q, result, **kwargs):
kwargs['api'] = 'mock'
actual = gramex.ml.translate(*q, **kwargs)
expected = pd.DataFrame([
{'source': item[0], 'target': item[1], 'q': item[2], 't': item[3]}
for item in result
])
actual.index = expected.index
afe(actual, expected, check_like=True)
check(['Apple'], [
['en', 'nl', 'Apple', 'appel']
], target='nl')
check(['Apple', 'Orange'], [
['en', 'nl', 'Apple', 'appel'],
['en', 'nl', 'Orange', 'Oranje']
], target='nl')
check(['Apple', 'Orange'], [
['en', 'de', 'Apple', 'Apfel'],
['en', 'de', 'Orange', 'Orange']
], source='en', target='de')
check(['Orange', 'Apple'], [
['en', 'de', 'Orange', 'Orange'],
['en', 'de', 'Apple', 'Apfel'],
], source='en', target='de')
if os.path.exists(self.cache):
os.remove(self.cache)
cache = {'url': self.cache}
count = len(_translate_count)
check(['Apple'], [['en', 'nl', 'Apple', 'appel']], target='nl', cache=cache)
eq_(len(_translate_count), count + 1)
check(['Apple'], [['en', 'nl', 'Apple', 'appel']], target='nl', cache=cache)
eq_(len(_translate_count), count + 1)
check(['Apple'], [['en', 'de', 'Apple', 'Apfel']], source='en', target='de', cache=cache)
eq_(len(_translate_count), count + 2)
check(['Apple'], [['en', 'de', 'Apple', 'Apfel']], source='en', target='de', cache=cache)
eq_(len(_translate_count), count + 2)
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.cache):
os.remove(cls.cache)
```
#### File: gramex/tests/test_proxyhandler.py
```python
import requests
from gramex.http import METHOD_NOT_ALLOWED
from . import TestGramex
class TestProxyHandler(TestGramex):
def test_proxyhandler(self):
session = requests.Session()
r = self.check('/auth/session', session=session)
session_id = r.json()['id']
r = self.check('/xsrf', session=session)
xsrf_token = r.cookies['_xsrf']
for method in ['get', 'post', 'put']:
request_headers = {}
if method != 'get':
request_headers['X-Xsrftoken'] = xsrf_token
r = self.check(
'/proxy/httpbin/?a=1&z=5',
session=session,
method=method,
request_headers=request_headers,
headers={
# modify: adds the request method as a header
'X-Modify': method.upper(),
# headers: adds a custom HTTP header
'X-Proxy-Custom': 'custom-header',
})
# ProxyHandler returns the actual URL mapped
self.assertIn('X-Proxy-Url', r.headers)
result = r.json()
self.assertDictContainsSubset({
# request_headers: true translates to the passed value
'User-Agent': 'python-requests/' + requests.__version__,
# request_headers: value passed to the target
'X-From': 'ProxyHandler',
# request_headers: value formatted with handler
'Session': 'Session ' + session_id,
# prepare: adds HTTP headers
'X-Prepare': method.upper(),
}, result['headers'], )
self.assertEquals({
# default: keys are passed as args
'y': ['1', '2'],
# URL arguments are also applied
'x': ['1', '2'],
# Proxy request args are passed
'a': ['1'],
# Proxy request args over-rides default: value and URL value
'z': ['5'],
# prepare: can modify request arguments
'b': ['1'],
}, result['args'])
# PATCH method does not work because /httpbin does not support it
r = self.check('/proxy/httpbin/', session=session, method='patch',
request_headers={'X-Xsrftoken': xsrf_token},
code=METHOD_NOT_ALLOWED,
headers={
'X-Proxy-Url': True,
'X-Proxy-Custom': 'custom-header',
'X-Modify': 'PATCH',
})
# DELETE method does not work because /proxy/httpbin/ does not support it
r = self.check('/proxy/httpbin/', method='delete', session=session,
request_headers={'X-Xsrftoken': xsrf_token},
code=METHOD_NOT_ALLOWED,
headers={
'X-Proxy-Url': False,
'X-Proxy-Custom': False,
'X-Modify': False,
})
# URL pattern wildcards
result = self.check('/proxy/httpbinprefix/suffix', session=session).json()
self.assertEquals({
'pre': ['prefix'], # path_args from the url requested
'post': ['suffix'], # path_args from the url requested
'y': ['1', '2'], # from default:
'x': ['1', '2'], # from url:
'z': ['1'], # from url:
'b': ['1'], # from prepare:
}, result['args'])
``` |
{
"source": "joshuamsalazar/micromagnetic-computations-fun-uw",
"score": 3
} |
#### File: joshuamsalazar/micromagnetic-computations-fun-uw/streamlit_app.py
```python
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
from scipy.integrate import *
import scipy.optimize
import matplotlib.pyplot as plt
from functools import partial
import os, sys
st.sidebar.markdown("## Parameters used in the simulation")
st.sidebar.markdown("Enter your own custom values to run the model")
je = float(st.sidebar.text_input('Current density j_e [10^10 A/m^2]', 10))
periSampl = 1000 #
class Parameters:
gamma = 2.2128e5
alpha = float(st.sidebar.text_input('Gilbert damping constant', 1))
K1 = float(st.sidebar.text_input('Anisotropy constant K_1 [J/m^3]', 1.5 * 9100))
Js = float(st.sidebar.text_input('Saturation magnetization Js [T]', 0.65))
RAHE = float(st.sidebar.text_input('Anomalous Hall effect coefficient', 0.65))
d = float(st.sidebar.text_input('FM layer thickness [nm]', (0.6+1.2+1.1) * 1e-9))
frequency = float(st.sidebar.text_input('AC frequency [Hz]', 0.1e9))
currentd = je * 1e10
hbar = 1.054571e-34
e = 1.602176634e-19
mu0 = 4 * 3.1415927 * 1e-7
easy_axis = np.array([0,0,1])
p_axis = np.array([0,-1,0])
etadamp = float(st.sidebar.text_input('Damping like torque term coefficient', 0.084))
etafield = float(st.sidebar.text_input('Field like torque term', 0.008)) # etafield/etadamp=eta
eta = etafield/etadamp
hext = np.array([1.0 * K1/Js,0,0])
def lockin(sig, t, f, ph):
ref = np.cos(2 * 2*np.pi*f*t + ph/180.0*np.pi)
#ref = np.sin(2*np.pi*f*t + ph/180.0*np.pi)
comp = np.multiply(sig,ref)
#print(t[-1]) #plot real part fft
return comp.mean()*2
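# Illustrative usage sketch (added for clarity; not called by the app): for a pure
# second-harmonic test signal the lock-in above returns its amplitude, provided the
# time axis spans an integer number of periods. All numbers below are made up.
#   t = np.linspace(0, 4/0.1e9, 4000, endpoint=False)
#   sig = 23 * np.cos(2 * 2*np.pi * 0.1e9 * t)
#   lockin(sig, t, 0.1e9, 0)   # ~23, the amplitude of the 2f component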
def fft(sig, t, f):
sample_dt = np.mean(np.diff(t))
N = len(t)
yfft = np.fft.rfft(sig)
yfft_abs = np.abs(yfft) #!!!
xfft = np.array(np.fft.rfftfreq(N, d=sample_dt))
stride =max(int(2*f*0.1*sample_dt),2)
idxF = np.argmin(np.abs(xfft-2*f))
tmpmax = 0
tmpj = 0
for j in range(-stride, stride+1):
if yfft_abs[idxF+j] > tmpmax:
tmpmax = yfft_abs[idxF+j]
tmpj = j
idxF = idxF+tmpj
return 2./N*(yfft.real[idxF])
def fields(t,m,p):
#Get the H^{DL} at (t, m, p)
Hk = 2 * p.K1/p.Js
Hd = p.etadamp * p.currentd * p.hbar/(2*p.e*p.Js*p.d)
return (Hk, Hd)
def f(t, m, p):
j = p.currentd * np.cos(2 * 3.1415927 * p.frequency * t)
prefactorpol = j * p.hbar/(2 * p.e * p.Js * p.d)
hani = 2 * p.K1/p.Js * p.easy_axis * np.dot(p.easy_axis,m)
h = p.hext+hani
H = - prefactorpol * (p.etadamp * np.cross(p.p_axis,m) + p.etafield * p.p_axis)
mxh = np.cross( m, h-prefactorpol*( p.etadamp * np.cross(p.p_axis,m) + p.etafield * p.p_axis ) ) #Corrected from Dieter
mxmxh = np.cross( m, mxh)
rhs = - p.gamma/(1+p.alpha**2) * mxh-p.gamma * p.alpha/(1+p.alpha**2) * mxmxh
p.result.append([t,m[0],m[1],m[2],H[0],H[1],H[2]])
return [rhs]
def calc_equilibrium(m0_,t0_,t1_,dt_,paramters_):
t0 = t0_
m0 = m0_
dt = dt_
r = ode(f).set_integrator('vode', method='bdf',atol=1e-14,nsteps =500000)
r.set_initial_value(m0_, t0_).set_f_params(paramters_).set_jac_params(2.0)
t1 = t1_
#Creating a counter and an array to store the magnetization directions
count = 0
magList = [[],[],[],[]]
testSignal = []
while r.successful() and r.t < t1: # and count < (periSampl + 1): #OLD: XXX
#To make sure the steps are equally spaced
        #Hayashi et al. (2014), after eqn 45, suggests dividing one period into
        # 200 time steps to get an accurate temporal variation of the Hall voltages
mag=r.integrate(r.t+dt)
magList[0].append(r.t)
magList[1].append(mag[0])
magList[2].append(mag[1])
magList[3].append(mag[2])
#testSignal.append( 23 * np.cos(2 * 2 * np.pi * paramters_.frequency * r.t) )
#Computing the H^{DL} at each time step
Hs = fields(r.t,mag,paramters_)
count += 1
#if count%100 == 0: print(count)
magList = np.array(magList)
#print(magList[0][0], magList[0][-1] )
return(r.t,magList,Hs, testSignal)
def calc_w1andw2(m0_,t0_,t1_,dt_,paramters_):
paramters_.result = []
t1,magList, Hs, testSignal = calc_equilibrium(m0_,t0_,t1_,dt_,paramters_)
npresults = np.array(paramters_.result)
time = np.array( magList[0] )
sinwt = np.sin( 2 * 3.1415927 * paramters_.frequency * time)
cos2wt = np.cos( 2 * 2 * 3.1415927 * paramters_.frequency * time)
current = paramters_.currentd * np.cos(2 * 3.1415927 * paramters_.frequency * time)
    # time step sizes; the first entry repeats the first step so len(dt) == len(time)
    dt = np.empty_like(time)
    dt[1:] = np.diff(time)
    dt[0] = time[1] - time[0]
#Computing the voltage from R_{AHE}
voltage = current * magList[3] * paramters_.RAHE * (2e-6 * 6e-9)
voltage = voltage[periSampl:]
current = current[periSampl:]
time = time[periSampl:]
sinwt = sinwt[periSampl:]
cos2wt = cos2wt[periSampl:]
dt = dt[periSampl:]
#nR2w = np.sum(voltage/paramters_.currentd * cos2wt * dt)*(2/time[-1])
R1w = np.sum(voltage * sinwt * dt)*(2 / (time[-1]*(3/4)) )
R2w = np.sum(voltage * cos2wt * dt)*(2 / (time[-1]*(3/4)) )
#R2w = np.sum(testSignal[periSampl:] * cos2wt * dt)*(2 / (time[-1]*(3/4)) )
#R1w = np.dot( voltage * dt,sinwt )/( np.dot(sinwt * dt,sinwt) * paramters_.currentd)
#nR2w = np.dot( voltage * dt,cos2wt )/( np.dot(cos2wt * dt, cos2wt) * paramters_.currentd)
fR2w = fft( voltage, magList[0][periSampl:], paramters_.frequency)
lR2w = lockin( voltage, magList[0][periSampl:], paramters_.frequency, 0)
#nR2w = np.fft.fft(magList[3], 2)/2
nR2w = lockin( voltage/paramters_.currentd, magList[0][periSampl:], paramters_.frequency, 90)
#Checking the magnetization time evolution at each external field value:
#plt.plot(time, magList[1], label = 'mx')
#plt.plot(time, magList[2], label = 'my')
#plt.plot(time, magList[3][periSampl:], label = 'mz tree periods')
#plt.plot(magList[0], magList[3], label = 'mz_full period')
#plt.title("H_x = " + str(paramters_.hext[0]*paramters_.mu0) + "[T]" )
#plt.legend()
#plt.show()
#plt.plot(time, mzlowfield(time, paramters_), label = 'test')
#plt.plot(time, np.full(time.shape, sum(magList[1]) / len(magList[1]) ), label = 'mx')
#plt.plot(time, np.full(time.shape, sum(magList[2]) / len(magList[2]) ), label = 'my')
#plt.plot(time, np.full(time.shape, sum(magList[3]) / len(magList[3]) ), label = 'mz')
#plt.plot(time, testSignal, label = 'cos(X)')
#plt.plot(time, voltage, label = 'cos(X)')
#Checking the current-induced fields time evolution at each external field value:
#plt.plot(time, npresults[:,4], label = 'Hx')
#plt.plot(time, npresults[:,5], label = 'Hy')
#plt.plot(time, npresults[:,6], label = 'Hz')
#plt.legend()
#plt.show()
#Final value of the current-induced field
#H_eff = print(npresults[-1,4],npresults[-1,5],npresults[-1,6])
#return(R1w,R2w,npresults[-1,4],npresults[-1,5],npresults[-1,6],npresults[-1,1],npresults[-1,2],npresults[-1,3], Hs, nR2w, lR2w, fR2w)
return(R1w,R2w,
magList[0], # ZZZ re-write function to save memory (duplicated time array)
npresults[:,4],npresults[:,5],npresults[:,6],
magList[1], magList[2], magList[3],
Hs, nR2w, lR2w, fR2w)
paramters = Parameters()
n = 21
phirange = np.linspace(-np.pi/2, np.pi*3/2, num=n)
signalw = []
signal2w = []
nsignal2w = []
lsignal2w = []
fsignal2w = []
timeEvol = []
Hx,Hy,Hz = [[],[],[]]
Mx,My,Mz = [[],[],[]]
m_eqx, m_eqy, m_eqz = [[],[],[]]
aheList, amrList = [[],[]]
fieldrangeT =[]
phirangeRad=[]
orgdensity = paramters.currentd
longitudinalSweep = True
rotationalSweep = False
if longitudinalSweep:
name = "_HSweep"
fieldrange = np.linspace(-0.1/paramters.mu0, 0.1/paramters.mu0, num = n )
for i in fieldrange:
paramters.currentd = orgdensity
paramters.hext = np.array([i,0,0])
initm=[0,0,1]
initm=np.array(initm)/np.linalg.norm(initm)
R1w,R2w, t,hx,hy,hz, mx,my,mz, Hs, nR2w, lR2w, fR2w = calc_w1andw2(m0_=initm,
t0_=0,
t1_=4/paramters.frequency,
dt_=1/(periSampl * paramters.frequency),
paramters_=paramters)
#Storing each current-induced field and magnetization state for each ext field value
timeEvol.append(t)
Hx.append(hx)
Hy.append(hy)
Hz.append(hz)
Mx.append(mx)
My.append(my)
Mz.append(mz)
m_eqx.append(mx[-1])
m_eqy.append(my[-1])
m_eqz.append(mz[-1])
fieldrangeT.append(i * paramters.mu0)
signalw.append(R1w)
signal2w.append(R2w)
nsignal2w.append(nR2w)
lsignal2w.append(lR2w)
fsignal2w.append(fR2w)
phirangeRad.append(0)
#AHE & AMR
paramters.currentd = -paramters.currentd
it1,imagList, iHs, itestSignal = calc_equilibrium(m0_=initm,t0_=0,t1_=4/paramters.frequency,dt_=1/(periSampl * paramters.frequency), paramters_=paramters)
aheList.append(mz[-1]-imagList[3][-1])
amrList.append(mx[-1]*mx[-1])
#Live prompt
#print(i, R1w, R2w, '\tHk,Hd', round(Hs[0]), round(Hs[1]), mx[-1], my[-1], mz[-1])
if rotationalSweep:
name = "_HconsRotat"
fieldrange = np.linspace(0, 0.8/paramters.mu0, num= int((n-1)/10) )
for h in fieldrange:
ipMagnitude = 0.05/paramters.mu0 # 0.05/paramters.mu0 # in Tesla
for i in phirange:
paramters.currentd = orgdensity
paramters.hext = np.array([ np.cos(i) * ipMagnitude , np.sin(i) * ipMagnitude , h])
initm=[0,0,-1]
initm=np.array(initm)/np.linalg.norm(initm)
            # unpack all 13 return values of calc_w1andw2 (same order as in the longitudinal sweep)
            R1w,R2w, t,hx,hy,hz, mx,my,mz, Hs, nR2w, lR2w, fR2w = calc_w1andw2(m0_=initm,t0_=0,t1_=1/paramters.frequency,dt_=1/(periSampl * paramters.frequency), paramters_=paramters)
#Storing each current-induced field and magnetization state for each ext field value
Hx.append(hx)
Hy.append(hy)
Hz.append(hz)
Mx.append(mx)
My.append(my)
Mz.append(mz)
phirangeRad.append(i*180/np.pi)
fieldrangeT.append(h)
signalw.append(R1w)
signal2w.append(R2w)
nsignal2w.append(nR2w)
#Live prompt
print( h, R1w, R2w, 'Pi:'+str(i%(2*np.pi)), '\tHk,Hd', round(Hs[0]), round(Hs[1]), mx, my, mz)
def savedata(p, sig, fieldrangeT, name):
    #Storing the data into a dat file with the following structure:
    #Delta denotes current-induced fields
    # ` denotes equilibrium
    # Current | H_ext | R2w | \Delta H_x | \Delta H_y | \Delta H_z | 7 mx` | my` | mz` | Rw | 11 phi rad
with open( "v2o_" + str(name) + "_j" + str(p.currentd/1e10) + "e10.dat", "w") as f:
i = 0
for sig in signal2w:
f.write( str(p.currentd) + "\t" + str(fieldrangeT[i]) + "\t" + str(sig) + "\t"
+ str(Hx[i]) + "\t" + str(Hy[i]) + "\t" + str(Hz[i]) +'\t'
+ str(Mx[i]) + "\t" + str(My[i]) + "\t" + str(Mz[i]) + '\t' + str(signalw[i]) + "\t" + str(phirangeRad[i])
+ "\n")
i += 1
f.write("Hk\tHdamp\teta(f/d)\t t\t freq\n")
f.write( str(Hs[0]) + '\t' + str(Hs[1]) + "\t" + str(p.etafield/p.etadamp) + "\t" + str(p.d)
+ '\t' + str(p.frequency) + '\n')
f.close()
def graph(x, y, xlab, ylab, pltlabel, plthead):
fig, ax = plt.subplots()
plt.plot(x, y, label = pltlabel)
ax.set(xlabel = xlab, ylabel = ylab)
plt.title(plthead)
plt.legend()
return fig
def graphm(t, mx, my, mz, xlab, ylab, plthead):
fig, ax = plt.subplots()
plt.plot(t, mx, label = r'$x$')
plt.plot(t, my, label = r'$y$')
plt.plot(t, mz, label = r'$z$')
ax.set(xlabel = xlab, ylabel = ylab)
plt.title(plthead)
plt.legend()
return fig
st.title('Magnetization dynamics for FM/HM interfaces, a single-spin model')
st.header('Online LLG integrator')
st.caption("<NAME>, <NAME>, <NAME>, <NAME>")
st.caption("Physics of Functional Materials")
st.caption("University of Vienna")
st.write('The following page describes the details to consider in order to efficiently simulate an FM/HM interface. This model is based on the Landau-Lifshitz-Gilbert equation, and the equation is integrated using the _scipy_ Python libraries. Hence, the magnetization dynamics is computed with this model, which also contains routines to calculate the first and second harmonics of the Anomalous Hall Voltage (from the AH Effect). This interactive tool is designed to allow quick computations and a detailed understanding of the considerations made to simulate such FM/HM interfaces. ')
st.write('The parameters used in the computation of the live plot results can be freely manipulated using the left sidebar (_available by clicking the arrowhead at the top left of this web app_). Feel free to perform computations with the desired values. ')
st.subheader('Theoretical description')
st.write('The system described by the model is a typical FM/HM interface. In our specific case, a Hall cross with a thin ferromagnetic layer displaying an out of plane magnetization (fig. 1). ')
st.image("https://journals.aps.org/prb/article/10.1103/PhysRevB.89.144425/figures/1/medium",
caption = "*Fig. 1* Hall bar structure. Adapted from Phys. Rev. B 89, 144425 (2014)",
width = 400 )
#($\eta_\text{DL}$ and $\eta_\text{FL}$)
st.write(r'The LLG equation employed in the model is in explicit form and takes the Slonczewski spin-orbit-torque coefficients as input. It goes as follows:')
st.latex(r''' \frac{\partial \vec{m}}{\partial t} = -
\frac{\gamma}{1+\alpha^2} (\vec{m} \times \vec{H}_{\text{eff}}) -
\frac{\gamma \alpha}{1+\alpha^2} \:\vec{m} \times (\vec{m} \times \vec{H}_{\text{eff}})''')
st.write(r'Where $m$ represents the magnetization unit vector, $\alpha$ the Gilbert damping constant, $\gamma$ the gyromagnetic ratio, and $\vec{H}_{\text{eff}}$ is the effective magnetic field. The effective magnetic field contains contributions of the applied external field, the effective anisotropy field, and the current-induced fields via spin-orbit torque effects. It reads as follows:')
st.latex(r''' \vec{ H }_{\text{eff}} =
\vec{ H }_{\text{ext}} + \vec{ H }_{\text{k}} +
\vec{ H }^{\text{SOT}}_{\text{FL}} +
\vec{ H }^{\text{SOT}}_{\text{DL}} \\ \:\\ \:\\
\vec{ H }_{\text{k}} = \frac{2\vec{K}_1}{Js} \\ \:\\
\vec{ H }^{\text{SOT}}_{\text{FL}} = \eta_\text{FL} \frac{ j_e \hbar }{ 2 e t \mu_0 M_s }\:\vec{m} \times (\vec{m} \times \vec{p}) \\ \:\\
\vec{ H }^{\text{SOT}}_{\text{DL}} = \eta_\text{DL} \frac{ j_e \hbar }{ 2 e t \mu_0 M_s }\:(\vec{m} \times \vec{p})
''')
st.write(r"The $\vec{p}$ vector represents the spin polarization of electrons. For a current flowing along the x direction, the vector is $(0,-1,0)$. As the here simulated system presents out of plane magnetization along the +z axis, the $\vec{K}_1$ anisotropy constant is represented by $(0,0,K_1)$")
st.write("Therefore, this simplified model just describes out-of-plane systems with negligible Planar Hall Effect, compared to the Anomalous Hall Effect. It will get improved soon.")
st.caption("Performing the integration")
st.write("In order to accurately compute the first and second harmonic components of the Anomalous Hall Voltage, the period is, at least, split in 1000 equidistand time steps. This will ensure an accurate description of the time variation of the voltage induced by the AC current. Additionaly, it will improve the computation of the numerical Fourier integrals for getting the harmonic responses.")
st.write("Under AC, the voltage is made up by the following harmonics:")
st.latex(r''' V_{xy}(t) = V^{xy}_0 + V^{xy}_\omega\sin(\omega t) + V^{xy}_{2\omega}\cos(2\omega t) + ...''')
st.write("Those harmonic components can be isolated by applying the Fourier series coefficient integral definition, integrating over one full period.")
st.latex(r'''
V^{xy}_{\omega}=\frac{2}{T}\int_{T} V(t)\sin(\omega t)\text{dt} \\ \: \\
V^{xy}_{2\omega}=\frac{2}{T}\int_{T} V(t)\cos(2\omega t)\text{dt}
''')
st.write(r"As the system starts fully pointing in the z direction, it is important to simulate the electric current with a cosine wave $J_x=j_e \cos(\omega t)$. ")
if st.checkbox("Show relaxation of magnetization", True):
    selected_field = st.select_slider('Slide the bar to check the trajectories for a specific field value [A/m]',
options = fieldrange.tolist())
st.write("Field value equivalent to", str( round(selected_field*paramters.mu0, 3) ), "[T]")
s_index = fieldrange.tolist().index(selected_field)
figtraj = graphm(timeEvol[s_index], Mx[s_index], My[s_index], Mz[s_index],
"time [ns]", r'$m_i$',
"Evolution at " + str( round(selected_field*paramters.mu0, 3) ) + "[T]")
st.pyplot(figtraj)
st.write(r"As can be noted in the magnetization dynamics for a given external field value, the system quickly gets its magnetization direction according to the applied AC current. However, if we just employ a single period for the time integration, the result of the Fourier integral may differ from the actual coefficient, as the first time steps do not have a pure wave behavior.")
st.caption("Computing the harmonics")
st.write(r"Therefore, in order to accurately compute the integral, each time integration of the LLG equation, for each $H_{\text{ext,x}}$ value, is performed over 4 complete periods $t_f=4/f$. Then, for computing the Fourier integral, the initial period of the time integration of the LLG equation is ommited from the computation. Furthermore, to improve the accuracy of the calculated harmonic component of the voltage, the remaining three periods are integrated and the normalization factor of the Fourier integral is adjusted accordingly. Finally, the integral is numerically approximated by the following sum:")
st.latex(r'''
V^{xy}_{ \omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \sin(\omega t_i) (\Delta t)_i \\ \: \\
V^{xy}_{2\omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \cos(2\omega t_i) (\Delta t)_i
''')
st.write(r'Where $i$ represents an index of the elements of the lists containing the values of each step of the simulation (_Note that one period has been split into 1000 equidistant steps_). Inside the simulation the voltage is computed as $V^{xy}(t)=J_x(t) m_z(t) R_{AHE} \sigma$, where $\sigma$ is the cross section area of the conducting element. In our case $\sigma=(2 \mu m \times 6 \text{nm})$ ')
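# Minimal sketch (added for illustration; not used by the app) of the discrete Fourier
# sums written above. It assumes `voltage` and `time` are equal-length numpy arrays
# spanning an integer number of periods of the AC current.
def _sketch_harmonic_coefficients(voltage, time, frequency):
    dt = np.empty_like(time)
    dt[1:] = np.diff(time)
    dt[0] = dt[1]
    span = time[-1] - time[0]
    # first harmonic: project onto sin(wt); second harmonic: project onto cos(2wt)
    v_w = (2.0 / span) * np.sum(voltage * np.sin(2 * np.pi * frequency * time) * dt)
    v_2w = (2.0 / span) * np.sum(voltage * np.cos(2 * 2 * np.pi * frequency * time) * dt)
    return v_w, v_2w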
st.write("Lastly, the resulting transfer curves using the Fourier series integral definition are: ")
figv2w = graph(fieldrangeT, signal2w, r'$\mu_0 H_x$ (T)', r'$V_{2w} [V]$ ', "V2w", "Second harmonic voltage" )
figv1w = graph(fieldrangeT, signalw, r'$\mu_0 H_x$ (T)', r'$V_{w} [V]$ ', "Vw", "First harmonic voltage" )
figamr = graph(fieldrangeT, amrList, r'$\mu_0 H_x$ (T)', r'$m_x^2$', r'$m_x^2$','AMR effect')
figahe = graph(fieldrangeT, aheList, r'$\mu_0 H_x$ (T)', r'$m_{z,+j_e}-m_{z,-j_e}$', r'$m_{z,+j_e}-m_{z,ij_e}$','AHE effect')
figmag = graphm(fieldrangeT, m_eqx, m_eqy, m_eqz, r'$\mu_0 H_x$ (T)', r'$m_i$', "Equilibrium direction of m") #index denotes field sweep step
##plt.plot(fieldrangeT, lsignal2w, label = 'lock in r2w')
##plt.plot(fieldrangeT, fsignal2w, label = 'fft r2w')
##plt.plot(fieldrangeT, H,'r')
##ax.set(xlabel=r'$\phi$ [grad]',ylabel = r'$m_{i}$ ')
st.pyplot(figv1w)
st.pyplot(figv2w)
st.write('If we only take into consideration the magnetization components to describe the AMR and AHE effects, the transfer curves are:')
st.pyplot(figahe)
st.pyplot(figamr)
st.write("It is important to highligh that by inducing an AC there is no an exact static point for equilibrium magnetization. However, when the system reaches equilibrium with respect to the AC current, the magnetization direction of the last time step of each period may be regarded as equilibrium magnetization (check ref. [X] Phys. Rev. B 89, 144425 (2014))")
st.pyplot(figmag)
#Pending code sections
#if st.checkbox("Show fields evolution", False):
# figfields = graphm(timeEvol[s_index], Hx[s_index], Hy[s_index], Hz[s_index],
# "time [ns]", r'$m_i$',
# "Current induced fields at H_ext:" + str( round(selected_field*paramters.mu0, 3) ) + "[T]")
#
# st.pyplot(figfields)
``` |
{
"source": "joshuamschmidt/set_perm",
"score": 2
} |
#### File: set_perm/set_perm/set_perm.py
```python
import pandas as pd
import pyranges as pr
import numpy as np
import concurrent.futures as cf
from itertools import repeat
from scipy.stats import rankdata
from scipy.sparse import csr_matrix
import time
from random import sample
# --- global functions
def permutation_fset_intersect(args):
permutation_array = args[0]
function_array = args[1]
max_z = max(permutation_array.max(), function_array.max()) + 1
def csr_sparse(a, z):
m, n = a.shape
indptr = np.arange(0, m * n + 1, n)
data = np.ones(m * n, dtype=np.uint16)
return csr_matrix((data, a.ravel(), indptr), shape=(m, z))
intersection = csr_sparse(permutation_array, max_z) * csr_sparse(function_array, max_z).T
intersection = intersection.todense()
return np.squeeze(np.asarray(intersection))
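# Illustrative sketch (added for clarity; not part of the original API). It shows how
# permutation_fset_intersect counts shared indices between every permutation row and
# every function-set row via sparse indicator-matrix multiplication. The arrays are
# made-up toy data; index 0 acts as padding in real inputs.
def _sketch_fset_intersect_example():
    perms = np.array([[1, 2, 3], [2, 4, 5]], dtype='uint16')
    fsets = np.array([[2, 3, 0], [4, 5, 0]], dtype='uint16')
    # result[i, j] counts elements shared by perms[i] and fsets[j]: [[2, 0], [1, 2]] here
    return permutation_fset_intersect((perms, fsets))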
def listnp_to_padded_nparray(listnp):
max_width = np.max([np.size(sublist) for sublist in listnp])
padded_array = np.asarray(
[np.pad(sublist, (0, max_width - np.size(sublist)), mode='constant', constant_values=(0, 0))
for sublist
in listnp])
return padded_array.astype('uint16')
def annotation_sets_to_array(annotation, features, min_size=3):
sets = annotation.join(features.set_index('feature'), on='feature').groupby('id')['idx'].apply(list)
set_array = [s for s in sets if len(s) >= min_size]
set_array = np.sort(listnp_to_padded_nparray(set_array))
set_names = [i for i, s in enumerate(sets) if len(s) >= min_size]
set_names = sets.index[set_names]
return set_array, set_names
def sample_from_feature_list(feature_list, n_total, sample_ratio):
out = pd.unique([item for sublist in sample(feature_list, int(round(n_total*sample_ratio,0))) for item in sublist])
while len(out) < n_total:
out = np.append(out, pd.unique([item for sublist in sample(feature_list, n_total) for item in sublist]))
out = pd.unique(out)
out = out[:n_total]
#out = np.sort(out)
return out.astype('uint16')
def array_of_resamples_tup(args):
feature_list, n_total, n_reps = args[0], args[1], args[2]
out = np.ndarray((n_reps, n_total), dtype='uint16')
for i in range(n_reps):
out[i] = sample_from_feature_list(feature_list, n_total, 1.4)
return out
def n_jobs_core_list(n_reps, n_cores):
quotient, remainder = divmod(n_reps, n_cores)
n_per_core = [quotient] * n_cores
for i in range(remainder):
n_per_core[i] = n_per_core[i] + 1
return n_per_core
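# Example (added for clarity): splitting 10 permutation replicates over 4 cores gives
#   n_jobs_core_list(10, 4)   # -> [3, 3, 2, 2]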
def multicore_resample(n_features, n_reps, n_cores, feature_list):
n_per_core = n_jobs_core_list(n_reps, n_cores)
with cf.ProcessPoolExecutor(max_workers=n_cores) as executor:
results = executor.map(array_of_resamples_tup, zip(repeat(feature_list), repeat(n_features), n_per_core))
results = list(results)
return np.concatenate(results)
def multicore_intersect(permutation_array, functionalset_array, n_cores):
split_permutation_array = np.array_split(permutation_array, n_cores)
with cf.ProcessPoolExecutor(max_workers=n_cores) as executor:
results = executor.map(permutation_fset_intersect, zip(split_permutation_array, repeat(functionalset_array)))
results = list(results)
return np.concatenate(results)
def calculate_p_values(c_set_n, p_set_n):
p_e = []
p_d = []
n_perm = p_set_n.shape[0]
if n_perm == 1:
#p_e.append((np.size(np.where(p_set_n >= c_set_n)) + 1) / (n_perm + 1))
#p_d.append((np.size(np.where(p_set_n <= c_set_n)) + 1) / (n_perm + 1))
raise ValueError("can only calculate p-val;ues if there is more than one permutation!")
else:
if(len(p_set_n.shape)>1):
for i in range(p_set_n.shape[1]):
p_e.append((np.size(np.where(p_set_n[:, i] >= c_set_n[i])) + 1) / (n_perm + 1))
p_d.append((np.size(np.where(p_set_n[:, i] <= c_set_n[i])) + 1) / (n_perm + 1))
if(len(p_set_n.shape)==1):
p_e.append( (np.size(np.where(p_set_n >= c_set_n)) + 1) / (n_perm + 1) )
p_d.append( (np.size(np.where(p_set_n <= c_set_n)) + 1) / (n_perm + 1) )
return p_e, p_d
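# Illustrative sketch (added for clarity): empirical permutation p-values with the
# usual +1 correction, on made-up counts for two sets and four permutations.
#   observed = np.array([5, 1])
#   permuted = np.array([[3, 2], [4, 1], [6, 0], [2, 3]])
#   calculate_p_values(observed, permuted)   # -> ([0.4, 0.8], [0.8, 0.6])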
def make_results_table(test_obj, function_obj, set_perm_obj, annotation_obj):
out = function_obj.function_sets.groupby('Id', as_index=False).agg({'FunctionName': pd.Series.unique})
out = out[out['Id'].isin(function_obj.function_array2d_ids)]
out['obs_n'] = test_obj.n_candidate_per_function
out['perm_mean_n'] = set_perm_obj.mean_per_set
sem=set_perm_obj.sd_per_set / np.sqrt(set_perm_obj.n_permutations)
out['perm_sem'] = sem
e_array = np.asarray(out['obs_n'] / out['perm_mean_n'].values)
sem_array = e_array * np.sqrt(np.square(2*sem/set_perm_obj.mean_per_set))
log_e = np.log2(e_array, out=np.empty((np.shape(e_array)[0],)) * np.nan, where=(e_array!=0))
out['enrich(log2)'] = log_e
out['u_95%(CI)'] = np.log2(e_array+sem_array, out=np.empty((np.shape(sem_array)[0],)) * np.nan, where=(e_array+sem_array!=0))
out['l_95%(CI)'] = np.log2(e_array-sem_array, out=np.empty((np.shape(sem_array)[0],)) * np.nan, where=(e_array-sem_array!=0))
out['p_enrich'] = set_perm_obj.p_enrichment
out['fdr_enrich'] = fdr_from_p_matrix(set_perm_obj.set_n_per_perm, out['p_enrich'], method='enrichment')
out['BHfdr_enrich'] = p_adjust_bh(out['p_enrich'])
out['p_deplete'] = set_perm_obj.p_depletion
out['fdr_deplete'] = fdr_from_p_matrix(set_perm_obj.set_n_per_perm, out['p_deplete'], method='depletion')
out['BHfdr_deplete'] = p_adjust_bh(out['p_deplete'])
#out_genes = candidates_per_set(test_obj, function_obj, annotation_obj)
out = pd.merge(out, test_obj.candidates_in_functions_df, on='Id', how='outer')
out = out.sort_values('p_enrich')
return out
def make_nested_results_table(test_obj, function_obj, set_perm_obj, annotation_obj):
out = function_obj.function_sets.groupby('Id', as_index=False).agg({'FunctionName': pd.Series.unique})
out = out[out['Id'].isin(function_obj.function_array2d_ids)]
out['obs_n'] = test_obj.n_candidate_per_function
out['perm_mean_n'] = set_perm_obj.mean_per_set
sem=set_perm_obj.sd_per_set / np.sqrt(set_perm_obj.n_permutations)
out['perm_sem'] = sem
e_array = np.asarray(out['obs_n'] / out['perm_mean_n'].values)
sem_array = e_array * np.sqrt(np.square(2*sem/set_perm_obj.mean_per_set))
log_e = np.log2(e_array, out=np.empty((np.shape(e_array)[0],)) * np.nan, where=(e_array!=0))
out['enrich(log2)'] = log_e
#out['u_95%(CI)'] = np.log2(e_array+sem_array, out=np.empty((np.shape(sem_array)[0],)) * np.nan, where=(e_array+sem_array!=0))
#out['l_95%(CI)'] = np.log2(e_array-sem_array, out=np.empty((np.shape(sem_array)[0],)) * np.nan, where=(e_array-sem_array!=0))
out['p_enrich'] = set_perm_obj.p_enrichment
out['within_fdr_enrich'] = fdr_from_p_matrix(set_perm_obj.set_n_per_perm, out['p_enrich'], method='enrichment')
out['within_BHfdr_enrich'] = p_adjust_bh(out['p_enrich'])
out['p_deplete'] = set_perm_obj.p_depletion
out['within_fdr_deplete'] = fdr_from_p_matrix(set_perm_obj.set_n_per_perm, out['p_deplete'], method='depletion')
out['within_BHfdr_deplete'] = p_adjust_bh(out['p_deplete'])
#out_genes = candidates_per_set(test_obj, function_obj, annotation_obj)
out = pd.merge(out, test_obj.candidates_in_functions_df, on='Id', how='outer')
return out
def combine_nested_results_table(results_list, per_set_list, nested_names, index_by_list_size, ):
mod_tables = [None] * len(nested_names)
set_n_per_perm_list = [None] * len(nested_names)
for i, size_index in enumerate(index_by_list_size):
this_name=nested_names[size_index]
this_table=results_list[i]
this_table['Label']=this_name
mod_tables[i]=this_table
this_per_set=per_set_list[i]
set_n_per_perm_list[i]=this_per_set.set_n_per_perm
merged_results=pd.concat(mod_tables)
merged_set_n_per_perm=np.concatenate(set_n_per_perm_list, axis=0)
merged_results['all_fdr_enrich'] = fdr_from_p_matrix(merged_set_n_per_perm, merged_results['p_enrich'], method='enrichment')
merged_results['all_BHfdr_enrich'] = p_adjust_bh(merged_results['p_enrich'])
merged_results['all_fdr_deplete'] = fdr_from_p_matrix(merged_set_n_per_perm, merged_results['p_deplete'], method='depletion')
merged_results['all_BHfdr_deplete'] = p_adjust_bh(merged_results['p_deplete'])
    # reorganise column order
merged_results=merged_results[['Label','Id','FunctionName','obs_n','perm_mean_n','enrich(log2)','p_enrich','within_fdr_enrich','within_BHfdr_enrich','all_fdr_enrich','all_BHfdr_enrich','p_deplete','within_fdr_deplete','within_BHfdr_deplete','all_fdr_deplete','all_BHfdr_deplete','Genes']]
merged_results = merged_results.sort_values('p_enrich')
return merged_results
def fdr_from_p_matrix(perm_n_per_set, obs_p, method='enrichment'):
p_matrix = perm_p_matrix(perm_n_per_set, method)
obs_p_arr = np.asarray(obs_p)
n_perm = p_matrix.shape[0]
fdr_p = np.empty(len(obs_p), dtype='float64')
obs_order = np.argsort(obs_p_arr)
p_val, p_counts = np.unique(p_matrix, return_counts=True)
current_max_fdr = 0
for i, p_idx in enumerate(obs_order):
if current_max_fdr == 1:
fdr_p[p_idx] = 1
else:
obs = np.size(np.where(obs_p_arr <= obs_p_arr[p_idx]))
exp = np.sum(p_counts[np.where(p_val <= obs_p_arr[p_idx])]) / n_perm
i_fdr = exp / obs
if current_max_fdr <= i_fdr < 1:
fdr_p[p_idx] = i_fdr
current_max_fdr = i_fdr
elif current_max_fdr > i_fdr and i_fdr < 1:
fdr_p[p_idx] = current_max_fdr
else:
fdr_p[p_idx] = 1
current_max_fdr = 1
return fdr_p
def p_adjust_bh(p):
"""Benjamini-Hochberg p-value correction for multiple hypothesis testing."""
p = np.asfarray(p)
by_descend = p.argsort()[::-1]
by_orig = by_descend.argsort()
steps = float(len(p)) / np.arange(len(p), 0, -1)
q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))
return q[by_orig]
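# Usage sketch (added for clarity), on a made-up p-value list:
#   p_adjust_bh([0.01, 0.04, 0.03, 0.20])   # -> array([0.04, 0.0533..., 0.0533..., 0.2])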
def load_variants(variant_file):
variants = None
cols = ["Chromosome", "Start", "End"]
possible_chr = ["CHR","CHROMOSOME","chr","chromosome","Chromosome"]
variant_header= pd.read_table(variant_file, header=None, nrows=1)
ncols=len(variant_header.columns)
dtype_dict = {"Chromosome": str, "Start": int, "End": int}
if(ncols < 2 or ncols > 3):
raise ValueError("variant inputs nust have 2 or 3 columns only!")
has_header=0
if(variant_header.iloc[0][0] not in possible_chr):
has_header='infer'
try:
variants = pd.read_table(
variant_file,
header=has_header,
names=cols[:ncols],
dtype={col: dtype_dict[col] for col in cols[:ncols]}
)
except pd.errors.ParserError:
        print(f'The file: {variant_file} could not be parsed. Please check the format and try again.')
return pr.PyRanges(variants.drop_duplicates())
# wrapper for results table writing
def results_writer(results_df, name_str, function_name_str, user_prefix):
if(user_prefix==""):
outfile_name = name_str + "-" + function_name_str + ".set_perm.txt"
else:
outfile_name = user_prefix + "-" + name_str + "-" + function_name_str + ".set_perm.txt"
results_df.to_csv(path_or_buf=outfile_name, sep='\t',header=True, index=False, float_format='%.8f',mode='w+')
return None
# global functions used in class constructors/__init__
def load_annotation_table(annotation_file):
annotation_table = pd.read_table(
annotation_file,
header=0,
names=['Chromosome', "Start", "End", "Annotation"],
dtype={"Chromosome": str, "Start": int, "End": int, "Annotation": str}
)
annotation_table['Idx'] = np.arange(len(annotation_table))+1
return annotation_table
def modify_annotation_table(annotation_table, range_modification):
annotation_table['Start'] = annotation_table['Start'] - range_modification
annotation_table['End'] = annotation_table['End'] + range_modification
return annotation_table
def load_function_sets(function_set_file):
function_sets = pd.read_table(
function_set_file,
header=0,
names=['Id', "Annotation", "FunctionName"],
dtype={"Id": str, "Annotation": str, "FunctionName": str}
)
return function_sets
def function_sets_to_array(function_sets, min_set_size, annotation_obj):
sets = function_sets.join(annotation_obj.annotation_table.set_index('Annotation'), on='Annotation').groupby('Id')[
'Idx'].apply(list)
set_array = [s for s in sets if len(s) >= min_set_size]
set_names = [i for i, s in enumerate(sets) if len(s) >= min_set_size]
function_array = np.sort(listnp_to_padded_nparray(set_array))
function_array_ids = sets.index[set_names]
return function_array, function_array_ids
# function to combine candidate gene by function lists form two or more objects
def make_combined_candidate_by_function_df(df_list):
zip_gene_lists = list(zip(*[df['Genes'].values for df in df_list]))
collapsed_lists=["; ".join(g_list) for g_list in zip_gene_lists ]
ids=df_list[0]['Id'].values
combined = {'Id':ids, 'Genes': collapsed_lists}
combined_df = pd.DataFrame(data=combined)
return combined_df
# --- classes
class AnnotationSet:
# constructor
def __init__(self, annotation_file='', range_modification=None):
self.annotation_file = annotation_file
self.range_modification = range_modification
self.annotation_table = load_annotation_table(self.annotation_file)
if range_modification is None:
return
self.annotation_table = modify_annotation_table(self.annotation_table, self.range_modification)
self.num_annotations = self.annotation_table.shape[0]
class FunctionSets:
# constructor
def __init__(self, function_set_file='', min_set_size=0, annotation_obj=None):
self.function_set_file = function_set_file
self.min_set_size = min_set_size
self.n_genes_with_variant = None
self.function_sets = load_function_sets(self.function_set_file)
self.function_array2d, self.function_array2d_ids = function_sets_to_array(self.function_sets,
self.min_set_size,
annotation_obj)
self.n_per_set = np.asarray([np.size(np.where(function_array != 0)) for function_array in self.function_array2d], dtype='uint16')
def update_from_gene_list(self, gene_list=None, annotation_obj=None):
self.function_sets = self.function_sets[self.function_sets['Annotation'].isin(gene_list)]
self.function_array2d, self.function_array2d_ids = function_sets_to_array(self.function_sets,
self.min_set_size,
annotation_obj)
self.n_per_set = np.asarray([np.size(np.where(function_array != 0)) for function_array in self.function_array2d], dtype='uint16')
@classmethod
def background_refined(cls, base_fs_obj, annotation_obj, variant_obj):
obj = cls.__new__(cls)
obj.function_set_file = base_fs_obj.function_set_file
obj.min_set_size = base_fs_obj.min_set_size
obj.n_genes_with_variant = np.size(np.unique(variant_obj.annotated_variants['Idx'].values))
obj.function_sets = base_fs_obj.function_sets
return obj
class Variants:
# constructor
def __init__(self, variant_file=''):
self.variant_file = variant_file
self.variants = load_variants(self.variant_file)
self.num_variants = self.variants.df.shape[0]
self.annotated_variants = None
def annotate_variants(self, annotation_obj):
self.annotated_variants = self.variants.join(pr.PyRanges(annotation_obj.annotation_table)).df
self.annotated_variants['Id'] = self.annotated_variants.Chromosome.astype(str).str.cat(
self.annotated_variants.Start.astype(str), sep='_')
def is_subset_of(self, other):
#return pd.merge(self.variants, other.variants).equals(self.variants)
return self.variants.df.shape[0]==self.variants.intersect(other.variants).df.shape[0]
def annotation_with_variant(self):
return self.annotated_variants['Annotation'].unique()
def multicore_make_id_idx_map_list(annotated_variants, n_cores):
split_annotated_variant_tables = np.array_split(annotated_variants, n_cores)
with cf.ProcessPoolExecutor(max_workers=n_cores) as executor:
results = executor.map(make_id_idx_map_list, split_annotated_variant_tables)
results = list(results)
flat_results = [item for sublist in results for item in sublist]
return flat_results
def make_id_idx_map_list(annotated_variants): # should make this a multiprocess function!
map_list = annotated_variants.groupby('Id')['Idx'].apply(list).tolist()
return map_list
def get_idx_array(annotated_variants):
"""returns array with shape, enables compatibility with permutation_fset_intersect"""
tmp_idx_array = np.asarray(np.unique(annotated_variants['Idx']))
idx_array = np.ndarray((1, np.size(tmp_idx_array)), dtype='uint16')
idx_array[0] = tmp_idx_array
return idx_array.astype('uint16')
def n_candidates_per_set(annotation_obj, function_obj):
candidate_set = set(annotation_obj.annotation_table['Annotation'].values)
candidates_in_function_sets = function_obj.function_sets.groupby('Id')['Annotation'].apply(
lambda x: np.unique(list(set(x).intersection(candidate_set))))
candidates_in_function_sets = pd.DataFrame(candidates_in_function_sets[pd.Index(function_obj.function_array2d_ids)])
candidates_in_function_sets = candidates_in_function_sets.reset_index(level=['Id'])
candidates_in_function_sets.columns = ['Id', 'CandidateAnnotations']
candidates_in_function_sets['n_CandidatesInSet'] = candidates_in_function_sets['CandidateAnnotations'].apply(
lambda x: len(x))
return candidates_in_function_sets
def candidates_per_set(candidate_array, function_obj, annotation_obj):
candidate_idx_in_function_set = [np.intersect1d(i, candidate_array) if len(np.intersect1d(i, candidate_array)) > 0 else np.asarray(-9) for i in function_obj.function_array2d]
candidate_genes_in_function_set = [np.sort(annotation_obj.annotation_table.loc[annotation_obj.annotation_table['Idx'].isin(candidate_idxs)]['Annotation'].values) if np.all(candidate_idxs!=-9) else np.asarray(None) for candidate_idxs in candidate_idx_in_function_set]
candidate_genes_collapsed = [np.array2string(f_set,separator=", ", max_line_width=10e6, threshold=10e6) for f_set in candidate_genes_in_function_set]
d = {'Id': function_obj.function_array2d_ids, 'Genes': candidate_genes_collapsed}
df = pd.DataFrame(data=d)
return df
class TestObject:
# constructor
def __init__(self, candidate_obj, background_obj, function_set_obj, annotation_obj, n_cores=1):
if not candidate_obj.is_subset_of(background_obj):
print("error: candidate set is not a subset of the background")
return
self.background_id_idx_map = multicore_make_id_idx_map_list(background_obj.annotated_variants, n_cores)
self.candidate_array = get_idx_array(candidate_obj.annotated_variants)
self.n_candidates = np.size(self.candidate_array)
self.n_candidate_per_function = permutation_fset_intersect(
(self.candidate_array, function_set_obj.function_array2d))
self.candidates_in_functions_df = candidates_per_set(self.candidate_array, function_set_obj, annotation_obj)
@classmethod
def add_objects(cls, *args):
obj = cls.__new__(cls)
obj.background_id_idx_map = None
obj.candidate_array = [ obj.candidate_array[0] for obj in args ]
obj.n_candidates = sum([ obj.n_candidates for obj in args])
obj.n_candidate_per_function = sum([ obj.n_candidate_per_function for obj in args])
obj.candidates_in_functions_df = make_combined_candidate_by_function_df([obj.candidates_in_functions_df for obj in args])
return obj
@classmethod
def nested_test(cls, cand_obj, function_set_obj, annotation_obj):
obj = cls.__new__(cls)
obj.background_id_idx_map = None
obj.candidate_array = get_idx_array(cand_obj.annotated_variants)
obj.n_candidates = np.size(obj.candidate_array)
obj.n_candidate_per_function = permutation_fset_intersect(
(obj.candidate_array, function_set_obj.function_array2d))
obj.candidates_in_functions_df = candidates_per_set(obj.candidate_array, function_set_obj, annotation_obj)
return obj
@classmethod
def union_of_objects(cls, a_obj, b_obj):
obj = cls.__new__(cls)
obj.candidate_file = [a_obj.candidate_file, b_obj.candidate_file]
obj.background_file = [a_obj.background_file, b_obj.background_file]
return obj
class Permutation:
# constructor
def __init__(self, test_obj, n_permutations, n_cores):
self.n_permutations = n_permutations
self.permutations = multicore_resample(test_obj.n_candidates, self.n_permutations, n_cores, test_obj.background_id_idx_map)
@classmethod
def nested_perm(cls, perm_obj, n_genes):
"""Return a new Permutation object, taking the first n_genes
from each permutation"""
obj = cls.__new__(cls)
obj.n_permutations = perm_obj.n_permutations
obj.permutations = perm_obj.permutations[:,:n_genes]
return obj
class SetPerPerm:
# constructor
def __init__(self, permutation_obj, function_set_obj, test_obj, n_cores):
self.set_n_per_perm = multicore_intersect(permutation_obj.permutations, function_set_obj.function_array2d, n_cores)
self.mean_per_set = np.array(np.mean(self.set_n_per_perm, axis=0))
self.sd_per_set = np.array(np.std(self.set_n_per_perm, axis=0))
# var is additive. helps with joins....
        self.var_per_set = np.array(np.var(self.set_n_per_perm, axis=0))  # variance, not SD, so join_objects can sum it
self.p_enrichment, self.p_depletion = calculate_p_values(test_obj.n_candidate_per_function, self.set_n_per_perm)
self.n_candidate_per_function = test_obj.n_candidate_per_function
self.n_permutations = permutation_obj.n_permutations
@classmethod
def join_objects(cls, *args):
"""Return a new SetPerPerm object, equivalent to a + b.
Used because addition is too complex for default __init__"""
obj = cls.__new__(cls)
# objects should have same number of permutations!
n_perm_list= [ obj.n_permutations for obj in args ]
if(n_perm_list.count(n_perm_list[0]) == len(n_perm_list)):
obj.n_permutations=n_perm_list[0]
obj.set_n_per_perm = sum([ obj.set_n_per_perm for obj in args])
obj.mean_per_set = sum([ obj.mean_per_set for obj in args])
obj.var_per_set = sum([ obj.var_per_set for obj in args])
obj.sd_per_set = np.sqrt(obj.var_per_set)
obj.n_candidate_per_function = sum([ obj.n_candidate_per_function for obj in args])
obj.p_enrichment, obj.p_depletion = calculate_p_values(obj.n_candidate_per_function, obj.set_n_per_perm)
return obj
else:
raise ValueError("Objects must have the same number of permutations!")
# --- redundant and/or not used anymore
def perm_p_matrix(perm_n_per_set, method='enrichment'):
if(len(perm_n_per_set.shape)>1):
n_perms, n_sets = perm_n_per_set.shape
else:
n_perms = perm_n_per_set.size
n_sets = 1
out = np.ndarray((n_perms, n_sets), dtype='float64')
method_int = 1
if method == 'enrichment':
method_int = -1
if(len(perm_n_per_set.shape)>1):
for i in range(n_sets):
out[:, i] = rankdata(method_int * perm_n_per_set[:, i], method='max') /n_perms
else:
out=rankdata(method_int * perm_n_per_set, method='max')/n_perms
return out
def array_of_resamples(feature_list, n_total, n_reps):
out = np.ndarray((n_reps, n_total), dtype='uint16')
for i in range(n_reps):
out[i] = sample_from_feature_list(feature_list, n_total, 1.4)
return out
def random_check_intersection(n_per_set, perms, sets, check_n):
check_idxs = []
n_perms = np.shape(perms)[0]
n_sets = np.shape(sets)[0]
for i in range(check_n):
j = sample(range(0, n_perms - 1), 1)[0]
k = sample(range(0, n_sets - 1), 1)[0]
check_idxs.append(len(set(perms[j]).intersection(set(sets[k]))) == n_per_set[j][k])
return check_idxs
# scratch
def contiguous_feature_coordinates(feature_table):
out_df = pd.DataFrame({'Chromosome': [], 'Start': [], 'End': [], 'idx': []})
for c in feature_table['Chromosome'].unique():
sub_starts = feature_table[feature_table['Chromosome'] == c]['Start'].values
sub_ends = feature_table[feature_table['Chromosome'] == c]['End'].values
sub_lengths = sub_ends - sub_starts
for i in range(len(sub_starts)):
if i == 0:
sub_starts = sub_starts - sub_starts[i] + 1
sub_ends[i] = sub_starts[i] + sub_lengths[i]
elif i > 0:
sub_starts[i] = sub_ends[i - 1] + 1
sub_ends[i] = sub_starts[i] + sub_lengths[i]
c_df = pd.DataFrame(
zip(repeat(c), sub_starts, sub_ends, feature_table[feature_table['Chromosome'] == c]['idx'].values),
columns=['Chromosome', 'Start', "End", "idx"])
out_df = pd.concat([out_df, c_df])
return out_df
``` |
{
"source": "joshuanazareth97/popviz",
"score": 3
} |
#### File: popviz/cli/__main__.py
```python
import sys
import argparse
from scraper import IMDBScraper
from search import search_imdb
from reports import TVReport
def get_results_from_imdb(query):
results = search_imdb(query)[:10]
if not results:
print("No results found! Check the search term.")
sys.exit(1)
elif len(results) == 1:
chosen = results[0]
print(
f"Found a single result: {chosen['showname']} ({chosen['year']}) with ID {chosen['id']}"
)
input("Continue?")
print()
else:
for num, result in enumerate(results):
name = result["showname"]
cat = result["category"]
date = result["year"]
print(f"{num+1}. {name}\t({date})\t[{cat}]")
print()
while True:
choice = input("Choose one of the above shows (Or enter 0 to exit) > ")
try:
choice = int(choice) - 1
except ValueError:
print("Please enter a valid (numerical) choice.\n")
continue
if choice == -1: # User input 0, which gets saved as -1
print("Exiting app. Bye bye!")
sys.exit(0)
try:
chosen = results[choice]
except IndexError:
print("Please enter a number present in the list above.\n")
continue
break
print(f"You have chosen {chosen['showname']} with ID: {chosen['id']}\n")
return chosen
def main():
print()
parser = argparse.ArgumentParser(
description="Generate beautiful reports of IMDb ratings data."
)
input_term_group = parser.add_mutually_exclusive_group()
input_term_group.add_argument(
"-s", "--search", help="Search for a television show."
)
# TODO: Add test to check if ID format is correct.
input_term_group.add_argument(
"-i", "--id", help="Directly provide the IMDb ID of a television show."
)
parser.add_argument(
"-o",
"--output",
help="Specify a filename for the output. Defaults to the name of the show.",
default=None,
)
parser.add_argument(
"-c",
"--colorscheme",
help="Set the heatmap colorscheme. Defaults to blue.",
choices=["red", "blue"],
default="blue",
)
args = parser.parse_args()
if not args.id:
if not args.search:
query = input("Enter a search term for a television show > ")
print()
else:
query = args.search.strip()
print(f'Searching for "{query}" on IMDb...')
chosen = get_results_from_imdb(query)
chosen_id = chosen["id"]
else:
chosen_id = args.id
print("Retrieving show data...")
scraper = IMDBScraper(chosen_id)
reporter = TVReport(data_provider=scraper)
print("\nGenerating report...")
reporter.heatmap(color=args.colorscheme)
file = reporter.save_file(output_dir="./data", filename=args.output)
print(f"Report saved to {file.absolute()}.")
if __name__ == "__main__":
main()
```
#### File: popviz/reports/tv_report_gen.py
```python
from pathlib import Path
import math
import numpy as np
import seaborn as sns
from matplotlib import gridspec, offsetbox, pyplot as plt
from reports.utils import pad_nan, wrap_text, format_filename
class TVReport:
def __init__(self, data_provider):
sns.set(font_scale=0.7)
self.data = data_provider.seasons
self.show_metadata = data_provider.show_metadata
self.ratings = self._get_2d_array()
self.mean = math.floor(np.nanmean(self.ratings))
self.median = math.floor(np.nanmedian(self.ratings))
self.n_seasons, self.n_episodes = self.ratings.shape
self.inverted = False
axis = 1
average_shape = (self.n_seasons, 1)
# Always ensure that the matrix is more or less landscape
if self.n_seasons > self.n_episodes and not self.is_square:
            # make seasons the rows so the matrix stays landscape
self.inverted = True
axis = 0
self.ratings = self.ratings.transpose()
average_shape = (1, self.n_seasons)
self.season_averages = np.nanmean(self.ratings, axis=axis).reshape(
average_shape
)
@property
def is_square(self):
return 1 <= max(self.ratings.shape) / min(self.ratings.shape) < 1.3
def _get_2d_array(self):
ratings = []
for season in self.data:
episodes = [
                float(episode["rating"])  # plain float; np.float is removed in recent numpy
for episode in season["episodes"]
if episode["rating"]
]
ratings.append(episodes)
return pad_nan(np.array(ratings))
def _setup_page_layout(self, size="A4"):
size_map = {"A4": (11.69, 8.27), "A3": (16.53, 11.69)} # (width, height)
assert size in size_map
page_width, page_height = size_map[size]
relative_heights = [2, 5.5, 1, 2.5]
title_params = dict(
xy=(0.5, 0.9), xycoords="axes fraction", va="center", ha="center", size=28
)
subtitle_params = dict(
xy=(0.5, 0.65), xycoords="axes fraction", va="center", ha="center", size=14
)
metadata_params = dict(xycoords="axes fraction", va="center", size=12,)
if self.is_square:
# Change orientation to portrait
page_height, page_width = page_width, page_height
subtitle_params["size"] = 18
relative_heights = [2.5, 5.5, 0.5, 2]
fig = plt.figure(
figsize=(page_width, page_height), dpi=300, constrained_layout=True
)
fig.set_size_inches(page_width, page_height)
spec = gridspec.GridSpec(
figure=fig, ncols=1, nrows=4, height_ratios=relative_heights
)
if self.is_square:
ep_info = gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=spec[3, :])
else:
ep_info = gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=spec[2:, :])
with sns.axes_style("white"):
title_ax = fig.add_subplot(spec[0, :])
spacer_ax = fig.add_subplot(spec[2, :])
misc = fig.add_subplot(ep_info[:, 2])
best_ep_ax = fig.add_subplot(ep_info[:, :2])
worst_ep_ax = fig.add_subplot(ep_info[:, 3:])
for ax in [title_ax, best_ep_ax, worst_ep_ax, spacer_ax, misc]:
sns.despine(ax=ax, bottom=True, left=True) # , top=True, right=True)
ax.axes.xaxis.set_ticks([])
ax.axes.yaxis.set_ticks([])
title_ax.annotate(f"{self.show_metadata['title']}", **title_params)
running_date = self.show_metadata.get(
"running_date", ""
) # running date can be blank
if running_date:
title_ax.annotate(f"({running_date})", **subtitle_params)
plot = wrap_text(self.show_metadata["plot_summary"], 110)
# title_ax.annotate(
# "\n".join(plot), xy=(0.5, 0.55), style="italic", **metadata_params
# )
metadata_params["ha"] = "left"
# vertical_dist = 0.5 - 0.05 * len(plot)
vertical_dist = 0.5
writers = self.show_metadata.get("creators")
if writers:
writers = ", ".join(writers)
title_ax.annotate(
f"Writer(s): {writers}", xy=(0, vertical_dist), **metadata_params
)
vertical_dist -= 0.2
cast = self.show_metadata.get("stars")[:-1]
if cast:
cast = ", ".join(cast)
title_ax.annotate(f"Cast: {cast}", xy=(0, vertical_dist), **metadata_params)
vertical_dist -= 0.2
genres = self.show_metadata.get("tags")
if genres:
genres = ", ".join(genres)
title_ax.annotate(
f"Genre(s): {genres}", xy=(0, vertical_dist), **metadata_params
)
self._fill_episode_info(best_ep_ax, cat="best")
self._fill_episode_info(worst_ep_ax, cat="worst")
self.page = {
"fig": fig,
"axes": [title_ax, best_ep_ax, worst_ep_ax],
"gridspec": spec,
}
def _fill_episode_info(self, ax, cat="best"):
best = cat == "best"
info_params = dict(
xycoords="axes fraction", va="center", ha="left" if best else "right",
)
ep_list = self._get_episode(cat=cat)
horizontal_margin = 0.02 if best else 0.98
rating = ep_list[0]["rating"]
rating_box_params = dict()
rating_color = "#27ae60" if best else "#e74c3c"
# at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
# at.patch.set_edgecolor(rating_color)
# at.patch.set_facecolor(rating_color)
# ax.add_artist(at)
title = ax.annotate(
f"{cat.title()} rated episode(s)",
xy=(horizontal_margin, 0.8),
color="#2980b9",
size=16,
**info_params,
)
title_box = title.get_window_extent(
renderer=title.get_figure().canvas.get_renderer()
)
rating_loc = (title_box.width + 50, 0) if best else (-title_box.width - 50, 0)
rating = ax.annotate(
rating,
xy=(horizontal_margin, 0.8),
size=14,
color="w",
xytext=rating_loc,
textcoords="offset pixels",
bbox=dict(boxstyle="round", fc=rating_color),
**info_params,
)
vertical = 0.6
info_params.update(
{
# "ha": "left" if cat == "best" else "right",
"size": 12,
}
)
vertical_dist = 0.2 if self.is_square else 0.1
for ep in ep_list:
title = ep["title"]
s = ep["season"]
e = int(ep["episode_number"])
column_width = 40 if self.is_square else 45
plot = wrap_text(ep["plot"], column_width)
ax.annotate(
f"S{s:02d}E{e:02d} - {title}",
xy=(horizontal_margin, vertical),
**info_params,
)
vertical -= vertical_dist
if len(ep_list) == 1:
info_params["size"] = 11
info_params["va"] = "top"
ep_plot = ax.annotate(
"\n".join(plot),
xy=(horizontal_margin, vertical),
style="italic",
**info_params,
)
info_params["size"] = 12
vertical -= 0.05 * len(plot)
# vertical -= 0.05 # Episode differentiation buffer
if vertical < 0:
break
def _get_episode(self, cat="best"):
criteria = np.nanmax if cat == "best" else np.nanmin
result = np.where(
self.ratings == criteria(self.ratings)
) # two tuples, with row, column indices respectively
episode_list = []
for season, episode in zip(*result):
if self.inverted:
episode, season = season, episode # season will be across columns
episode_data = self.data[season]["episodes"][episode]
episode_data["season"] = int(season) + 1
episode_list.append(episode_data)
return episode_list
def heatmap(
self, color="red",
):
colormap = {
"red": sns.color_palette("YlOrRd", 10),
"blue": sns.color_palette("YlGnBu", 10),
}
height, width = self.ratings.shape
yticks = np.arange(1, height + 1)
y_label = "Season"
xticks = np.arange(1, width + 1)
x_label = "Episode"
# Setting up matplotlib and seaborn
self._setup_page_layout()
fig = self.page["fig"]
# Set up heatmap specific layout
title_ax, best_ep_ax, worst_ep_ax = self.page["axes"]
if self.inverted:
main_section = gridspec.GridSpecFromSubplotSpec(
2,
2,
height_ratios=[height, 1],
width_ratios=[width, 0.1],
subplot_spec=self.page["gridspec"][1, :],
)
main_ax = fig.add_subplot(main_section[0, 0])
average_ax = fig.add_subplot(main_section[1, 0])
cbar_ax = fig.add_subplot(main_section[:, 1])
else:
main_section = gridspec.GridSpecFromSubplotSpec(
1,
3,
width_ratios=[width, 1, 0.5],
subplot_spec=self.page["gridspec"][1, :],
)
main_ax = fig.add_subplot(main_section[:, 0])
average_ax = fig.add_subplot(main_section[:, 1])
cbar_ax = fig.add_subplot(main_section[:, 2])
main_ax.xaxis.set_ticks_position("top")
main_ax.xaxis.set_label_position("top")
average_ax.xaxis.set_ticks_position("top")
opts = {
"vmax": 10, # min(10, median + 3),
"vmin": math.floor(np.nanmin(self.ratings)), # max(0, median - 3),
"cmap": colormap[
color
], # sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95, reverse=True, as_cmap=True),#sns.color_palette("cubehelix_r", 10),
"mask": (self.ratings == 0),
"annot": True,
"square": True,
"linewidths": 1,
"xticklabels": xticks,
"yticklabels": yticks,
"ax": main_ax,
"cbar": False
# "cbar_ax": cbar
}
average_opts = opts.copy()
average_opts.update(
{"yticklabels": False, "xticklabels": ["Average"], "ax": average_ax}
)
if self.inverted:
y_label, x_label = x_label, y_label
average_opts.update(
{"yticklabels": ["Average"], "xticklabels": False,}
)
average_opts.pop("mask")
sns.heatmap(self.ratings, **opts)
sns.heatmap(self.season_averages, **average_opts)
sm = plt.cm.ScalarMappable(cmap="YlOrRd", norm=plt.Normalize(vmin=0, vmax=1))
fig.colorbar(main_ax.collections[0], cax=cbar_ax)
main_ax.set_xlabel(x_label)
main_ax.set_ylabel(y_label)
# plt.show()
self.fig = fig
def save_file(self, filename=None, output_dir=".", file_format="png"):
if getattr(self, "fig", None) is None:
print(
"Could not find a figure. Ensure that you have called the heatmap function."
)
return None
if filename is None:
filename = format_filename(self.show_metadata["title"])
filename = f"{filename}.{file_format}"
output_dir = Path(output_dir)
if not Path.exists(output_dir):
Path.mkdir(output_dir, parents=True)
output_file = output_dir / filename
self.fig.savefig(output_file, dpi=300, bbox_inches="tight", pad_inches=0.2)
plt.close(self.fig)
return output_file
```
#### File: popviz/reports/utils.py
```python
import numpy as np
from regex import regex as re
def pad_nan(matrix):
"""
Take a jagged array of rows and pad it into a rectangular matrix by filling rows shorter than the longest with NaN.
"""
lens = np.array(list(map(len, matrix)))
mask = np.arange(lens.max()) < lens[:, np.newaxis]
output = np.empty(mask.shape, dtype=float)
output.fill(np.nan)
output[mask] = np.concatenate(matrix)
return output
def wrap_text(text, column_width=60):
char_count = 0
lines = []
line = ""
for word in text.split(" "):
char_count += len(word)
line += word + " "
if char_count >= column_width:
char_count = 0
lines.append(line)
line = ""
if line:
lines.append(line)
return lines
def format_filename(string):
string = string.lower()
filename = re.sub(r"[<>:\'\"\/\|?.*]", "", string)
filename = filename.replace(" ", "_")
return filename
``` |
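A minimal usage sketch for the two helpers above; the sample values are illustrative, and the import path mirrors the one used in tv_report_gen.py:
```python
import numpy as np

from reports.utils import pad_nan, wrap_text  # same module path as the file header above

# Jagged per-season ratings are padded with NaN so every row has the same length.
padded = pad_nan([[8.1, 7.9, 9.0], [8.4, 8.8]])
print(padded.shape)             # (2, 3); the short row ends in nan
print(np.isnan(padded[1, 2]))   # True

# Long plot summaries are broken into lines of roughly `column_width` characters.
for line in wrap_text("A very long plot summary that needs to be wrapped for display", column_width=20):
    print(line)
```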
{
"source": "joshuanunn/algorithms_unlocked",
"score": 4
} |
#### File: algorithms_unlocked/algorithms/chapter3.py
```python
import sys
INF_INTEGER = sys.maxsize
def binary_search(array, search_term):
"""
Binary search algorithm for finding an int in a sorted array.
Returns an index of search_term if present, otherwise None.
"""
p = 0
r = len(array) - 1
while p <= r:
q = (p + r) // 2
value = array[q]
if value == search_term:
return q
elif value > search_term:
r = q - 1
else: # value < search_term
p = q + 1
return None
def recursive_binary_search(array, search_term, p=0, r=None):
"""
Binary search algorithm for finding an int in a sorted array, using recursion.
Returns an index of search_term if present, otherwise None.
"""
if r is None:
r = len(array) - 1
if p > r:
return None
else:
q = (p + r) // 2
value = array[q]
if value == search_term:
return q
elif value > search_term:
return recursive_binary_search(array, search_term, p, q - 1)
else: # value < search_term
return recursive_binary_search(array, search_term, q + 1, r)
def selection_sort(array):
"""
Selection sort algorithm for sorting an array of ints.
Returns sorted array.
Running time = O(n^2) to O(n^2)
"""
A = array.copy()
n = len(array)
# Loop over the first to penultimate elements in array
for i in range(0, n - 1):
# Find smallest element in subarray
smallest = i
for j in range(i + 1, n):
if A[j] < A[smallest]:
smallest = j
# Swap current element with smallest in subarray
A[i], A[smallest] = A[smallest], A[i]
return A
def insertion_sort(array):
"""
Insertion sort algorithm for sorting an array of ints.
Returns sorted array.
Running time = O(n^2) to O(n)
"""
A = array.copy()
n = len(array)
# Loop over the second to last elements in array
for i in range(1, n):
# Insert A[i] into the sorted subarray A[0..i-1]
key = A[i]
j = i - 1
while j >= 0 and A[j] > key:
A[j + 1] = A[j]
j -= 1
A[j + 1] = key
return A
def merge_sort(array, p=0, r=None):
"""
Merge sort algorithm for sorting an array of ints.
Returns sorted array.
Running time = O(n lg n)
"""
if r is None:
array = array.copy()
r = len(array)
if r - p <= 1:
return
q = (p + r + 1) // 2
merge_sort(array, p, q)
merge_sort(array, q, r)
merge(array, p, q, r)
return array
def merge(A, p, q, r):
"""
Merge sort supporting algorithm.
"""
# Using sort key of largest system integer to represent infinity
B = A[p:q].copy() + [INF_INTEGER]
C = A[q:r].copy() + [INF_INTEGER]
i = 0
j = 0
for k in range(p, r):
if B[i] <= C[j]:
A[k] = B[i]
i += 1
else: # B[i] > C[j]
A[k] = C[j]
j += 1
def quicksort(array, p=0, r=None):
"""
Quicksort algorithm for sorting an array of ints.
Regular / deterministic algorithm.
Running time = O(n^2) to O(n lg n)
"""
if r is None:
array = array.copy()
r = len(array)
if r - p <= 1:
return
q = partition(array, p, r - 1)
quicksort(array, p, q)
quicksort(array, q + 1, r)
return array
def partition(A, p, r):
"""
Quicksort supporting algorithm.
"""
q = p
for u in range(p, r):
if A[u] <= A[r]:
A[q],A[u] = A[u],A[q]
q += 1
A[q],A[r] = A[r],A[q]
return q
``` |
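A short usage sketch of the functions above; the sample list is illustrative, and the import path is assumed from the file layout (e.g. `algorithms.chapter3`):
```python
from algorithms.chapter3 import (
    binary_search, recursive_binary_search,
    selection_sort, insertion_sort, merge_sort, quicksort,
)

data = [9, 3, 7, 1, 4, 8]

# All four sorts return a new sorted list and leave the input untouched.
sorted_data = merge_sort(data)
assert sorted_data == [1, 3, 4, 7, 8, 9]
assert selection_sort(data) == insertion_sort(data) == quicksort(data) == sorted_data

# Binary search requires a sorted array and returns an index or None.
assert binary_search(sorted_data, 7) == 3
assert recursive_binary_search(sorted_data, 2) is None
```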
{
"source": "joshuanunn/python-ffi-demo",
"score": 2
} |
#### File: ffi_demo_ctypes/core/interface.py
```python
import ctypes
import numpy as np
from ctypes import byref, POINTER, Structure
from ctypes import c_char, c_double, c_int, c_ubyte
from pathlib import Path
MODULE_ROOT = Path(__file__).parent.resolve()
C_UCHAR_SS = POINTER(POINTER(c_ubyte))
C_DOUBLE_SS = POINTER(POINTER(c_double))
PGCATS = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6}
RESOLUTION = {'LOW': 0, 'MEDIUM': 1, 'HIGH': 2, 'EXTREME': 3}
GRIDTYPE = {'PLAN': 0, 'SECTION': 1}
ROUGHNESS = {'urban': 0, 'rural': 1}
class MetHour(Structure):
_fields_ = [
('hours', c_int),
('wspd', c_double),
('wdir', c_double),
#('temp', c_double),
('pgcat', c_int),
]
class Domain(Structure):
_fields_ = [
('xr_min', c_int), ('xr_max', c_int),
('yr_min', c_int), ('yr_max', c_int),
('xh_min', c_int), ('xh_max', c_int),
('zh_min', c_int), ('zh_max', c_int),
('xr_spacing', c_int),
('yr_spacing', c_int),
('xh_spacing', c_int),
('zh_spacing', c_int),
('xr_points', c_int),
('yr_points', c_int),
('xh_points', c_int),
('zh_points', c_int)
]
class Source(Structure):
_fields_ = [
('x', c_double), ('y', c_double),
('height', c_double),
('diameter', c_double),
('velocity', c_double),
('temp', c_double),
('emission', c_double)
]
# Import compiled c code using ctypes
try:
_disperse = ctypes.CDLL(MODULE_ROOT / 'disperse.so')
except:
raise ValueError('C shared object file [disperse.so] is missing, complete module setup to compile.')
### Setup ctypes functions ###
# double get_sigma_y(char pgcat, double x)
_disperse.get_sigma_y.argtypes = [c_char, c_double]
_disperse.get_sigma_y.restype = c_double
# double get_sigma_z(char pgcat, double x)
_disperse.get_sigma_z.argtypes = [c_char, c_double]
_disperse.get_sigma_z.restype = c_double
# double calc_uz(double uz_ref, double z, double z_ref, char pgcat, char roughness)
_disperse.calc_uz.argtypes = [c_double, c_double, c_double, c_char, c_char]
_disperse.calc_uz.restype = c_double
# void plume_rise(double* dH, double* Xf, double us, double vs, double ds, double Ts, double Ta, char pgcat)
_disperse.plume_rise.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_double, c_double, c_double, c_double, c_char]
_disperse.plume_rise.restype = None
# double conc(double x, double y, double z, double u_z, double Q, double H, double s_y, double s_z)
_disperse.conc.argtypes = [c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
_disperse.conc.restype = c_double
# void iter_disp(double* rgrid, double* hgrid, Domain* domain, Source* source, MetHour* met)
_disperse.iter_disp.argtypes = [C_DOUBLE_SS, C_DOUBLE_SS, POINTER(Domain), POINTER(Source), POINTER(MetHour)]
_disperse.iter_disp.restype = None
# void create_image(unsigned char* destgrid, double* grid, Domain* domain, bool vertical)
_disperse.create_image.argtypes = [C_UCHAR_SS, C_DOUBLE_SS, POINTER(Domain), c_int]
_disperse.create_image.restype = None
# MetHour new_methour()
_disperse.new_methour.argtypes = None
_disperse.new_methour.restype = MetHour
# Domain new_domain(int resolution)
_disperse.new_domain.argtypes = [c_int]
_disperse.new_domain.restype = Domain
# Source new_source()
_disperse.new_source.argtypes = None
_disperse.new_source.restype = Source
def get_sigma_y(pgcat, x):
return _disperse.get_sigma_y(c_char(PGCATS[pgcat]), c_double(x))
def get_sigma_z(pgcat, x):
return _disperse.get_sigma_z(c_char(PGCATS[pgcat]), c_double(x))
def calc_uz(uzref, z, zref, pgcat, roughness):
return _disperse.calc_uz(c_double(uzref), c_double(z), c_double(zref), c_char(PGCATS[pgcat]), c_char(ROUGHNESS[roughness]))
def wind_components(wind_components, e_r, n_r, e_s, n_s, sin_phi, cos_phi):
return _disperse.wind_components(wind_components, c_double(e_r), c_double(n_r), c_double(e_s), c_double(n_s), c_double(sin_phi), c_double(cos_phi))
def plume_rise(dH, Xf, us, vs, ds, Ts, Ta, pgcat):
return _disperse.plume_rise(dH, Xf, c_double(us), c_double(vs), c_double(ds), c_double(Ts), c_double(Ta), c_char(PGCATS[pgcat]))
def conc(x, y, z, u_z, Q, H, s_y, s_z):
return _disperse.conc(c_double(x), c_double(y), c_double(z), c_double(u_z), c_double(Q), c_double(H), c_double(s_y), c_double(s_z))
def new_methour():
return _disperse.new_methour()
def new_domain(resolution):
return _disperse.new_domain(c_int(RESOLUTION[resolution]))
def new_source():
return _disperse.new_source()
def iter_disp(r_grid_np, h_grid_np, domain, source, methour):
return _disperse.iter_disp(r_grid_np.ctypes.data_as(C_DOUBLE_SS), h_grid_np.ctypes.data_as(C_DOUBLE_SS), byref(domain), byref(source), byref(methour))
def create_image(png_grid_np, grid_np, domain, gridtype):
return _disperse.create_image(png_grid_np.ctypes.data_as(C_UCHAR_SS), grid_np.ctypes.data_as(C_DOUBLE_SS), byref(domain), c_int(GRIDTYPE[gridtype]))
def new_grids(domain):
r_grid = np.zeros((domain.xr_points * domain.yr_points), dtype=np.float64)
h_grid = np.zeros((domain.xh_points * domain.zh_points), dtype=np.float64)
return r_grid, h_grid
def new_images(domain):
r_image = np.zeros((domain.xr_points * domain.yr_points), dtype=np.uint8)
h_image = np.zeros((domain.xh_points * domain.zh_points), dtype=np.uint8)
return r_image, h_image
```
#### File: ffi_demo_ctypes/tests/runtests.py
```python
import math
import unittest
from ..core import *
class TestSigmaY(unittest.TestCase):
""" Range of testcases for get_sigma_y function. """
def test_1(self):
# stability class D, 0.5km downwind, example from:
# http://faculty.washington.edu/markbenj/CEE357/CEE%20357%20air%20dispersion%20models.pdf
self.assertAlmostEqual(get_sigma_y('D', 0.5), 36.146193496038)
def test_2(self):
# stability class A, 0.997km downwind
self.assertAlmostEqual(get_sigma_y('A', 0.997), 208.157523627706)
def test_3(self):
# stability class B, 12.345m downwind
self.assertAlmostEqual(get_sigma_y('B', 0.012345), 2.835970876943)
def test_4(self):
# stability class C, 27.85km downwind
self.assertAlmostEqual(get_sigma_y('C', 27.85), 2025.696103458910)
def test_5(self):
# stability class D, 5.78m upwind
self.assertTrue(math.isnan(get_sigma_y('D', -0.00578)))
def test_6(self):
# stability class E, 445m downwind
self.assertAlmostEqual(get_sigma_y('E', 0.445), 24.275915684479)
def test_7(self):
# stability class F, 7.5558km downwind
self.assertAlmostEqual(get_sigma_y('F', 7.5558), 210.931775211803)
class TestSigmaZ(unittest.TestCase):
""" Range of testcases for get_sigma_z function. """
def test_1(self):
# stability class D, 0.5km downwind, example from:
# http://faculty.washington.edu/markbenj/CEE357/CEE%20357%20air%20dispersion%20models.pdf
self.assertAlmostEqual(get_sigma_z('D', 0.5), 18.296892641654)
def test_2(self):
# stability class D, 5.78m upwind
self.assertTrue(math.isnan(get_sigma_z('D', -0.00578)))
def test_3(self):
# stability class A, 50m downwind
self.assertAlmostEqual(get_sigma_z('A', 0.05), 7.246283645973)
def test_4(self):
# stability class A, 270m downwind
self.assertAlmostEqual(get_sigma_z('A', 0.27), 41.523682287423)
def test_5(self):
# stability class A, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('A', 2.86), 4196.204889704382)
def test_6(self):
# stability class A, 54km downwind
self.assertAlmostEqual(get_sigma_z('A', 54.0), 5000.0)
def test_7(self):
# stability class B, 50m downwind
self.assertAlmostEqual(get_sigma_z('B', 0.05), 5.558326444834)
def test_8(self):
# stability class B, 270m downwind
self.assertAlmostEqual(get_sigma_z('B', 0.27), 27.177523893054)
def test_9(self):
# stability class B, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('B', 2.86), 346.177898273921)
def test_10(self):
# stability class B, 54km downwind
self.assertAlmostEqual(get_sigma_z('B', 54.0), 5000.0)
def test_11(self):
# stability class C, 50m downwind
self.assertAlmostEqual(get_sigma_z('C', 0.05), 3.947711911749)
def test_12(self):
# stability class C, 270m downwind
self.assertAlmostEqual(get_sigma_z('C', 0.27), 18.459902569036)
def test_13(self):
# stability class C, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('C', 2.86), 159.862915743170)
def test_14(self):
# stability class C, 54km downwind
self.assertAlmostEqual(get_sigma_z('C', 54.0), 2348.910612301645)
def test_15(self):
# stability class D, 50m downwind
self.assertAlmostEqual(get_sigma_z('D', 0.05), 2.545334368597)
def test_16(self):
# stability class D, 270m downwind
self.assertAlmostEqual(get_sigma_z('D', 0.27), 11.034101898944)
def test_17(self):
# stability class D, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('D', 2.86), 63.142784897226)
def test_18(self):
# stability class D, 54km downwind
self.assertAlmostEqual(get_sigma_z('D', 54.0), 339.310493995667)
def test_19(self):
# stability class E, 50m downwind
self.assertAlmostEqual(get_sigma_z('E', 0.05), 1.979015073784)
def test_20(self):
# stability class E, 270m downwind
self.assertAlmostEqual(get_sigma_z('E', 0.27), 7.978143439122)
def test_21(self):
# stability class E, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('E', 2.86), 41.083717338729)
def test_22(self):
# stability class E, 54km downwind
self.assertAlmostEqual(get_sigma_z('E', 54.0), 155.031915174584)
def test_23(self):
# stability class F, 50m downwind
self.assertAlmostEqual(get_sigma_z('F', 0.05), 1.321315762922)
def test_24(self):
# stability class F, 270m downwind
self.assertAlmostEqual(get_sigma_z('F', 0.27), 5.178781257565)
def test_25(self):
# stability class F, 2.86km downwind
self.assertAlmostEqual(get_sigma_z('F', 2.86), 26.282658227590)
def test_26(self):
# stability class F, 54km downwind
self.assertAlmostEqual(get_sigma_z('F', 54.0), 80.882017663045)
class TestCalcUz(unittest.TestCase):
""" Testcase for calc_uz function. """
def test_1(self):
uzref = 3.5
z = 100.0
zref = 10.0
pgcat = 'D'
roughness = 'rural'
u_adj = calc_uz(uzref, z, zref, pgcat, roughness)
self.assertAlmostEqual(u_adj, 4.943881406180)
uzref = 10.0
z = 50.0
zref = 45.0
pgcat = 'A'
roughness = 'urban'
u_adj = calc_uz(uzref, z, zref, pgcat, roughness)
self.assertAlmostEqual(u_adj, 10.159296222811)
class TestPlumeRise(unittest.TestCase):
""" Testcase for plume_rise function. """
def test_1(self):
# Example from:
# https://ceprofs.civil.tamu.edu/qying/cven301_fall2014_arch/lecture7_c.pdf
vs = 20.0 # m/s
ds = 5.0 # m
U = 6.0 # m/s
Ts = 400.0 # K
Ta = 280.0 # K
pgcat = 'D'
dH = c_double(0.0)
Xf = c_double(0.0)
plume_rise(byref(dH), byref(Xf), U, vs, ds, Ts, Ta, pgcat)
self.assertAlmostEqual(dH.value, 223.352113600373)
self.assertAlmostEqual(Xf.value, 1264.034881130080)
class TestConc(unittest.TestCase):
""" Testcase for conc function. """
def test_1(self):
# Example from:
# http://faculty.washington.edu/markbenj/CEE357/CEE%20357%20air%20dispersion%20models.pdf
x = 0.5 # 500 m downwind
y = 0.0 # along plume centreline
z = 0.0 # ground level
u_z = 6.0 # 6 m/s wind speed at height of 50 m
pgcat = 'D' # Neutral stability
# Source centred on (0,0), height 50 m, 10 g/s mass emission rate
Q = 10.0 # source.emission
H = 50.0 # source.height
# Calculate concentration at (x,y,z) == 19.2 ug/m3
s_y = get_sigma_y(pgcat, x)
s_z = get_sigma_z(pgcat, x)
test_conc = conc(x, y, z, u_z, Q, H, s_y, s_z)
self.assertAlmostEqual(test_conc, 1.917230120488e-05)
class TestIterDisp(unittest.TestCase):
""" Testcase for iter_disp function. """
def test_1(self):
domain = new_domain('MEDIUM')
source = new_source()
methour = new_methour()
source.height = 10.0
source.temp = 100.0
source.emission = 1.0
source.velocity = 10.0
source.diameter = 0.5
methour.hours = 1
methour.wspd = 2.0
methour.wdir = 130.0 * math.pi / 180.0
methour.pgcat = PGCATS['A']
r_grid, h_grid = new_grids(domain)
iter_disp(r_grid, h_grid, domain, source, methour)
self.assertAlmostEqual(r_grid[23 * 250 + 14], 6.366502967443e-08)
self.assertAlmostEqual(h_grid[41 * 250 + 181], 3.963714618520e-07)
class TestCreateImage(unittest.TestCase):
""" Testcase for create_image function. """
def test_1(self):
domain = new_domain('MEDIUM')
source = new_source()
methour = new_methour()
source.height = 10.0
source.temp = 100.0
source.emission = 1.0
source.velocity = 10.0
source.diameter = 0.5
methour.hours = 1
methour.wspd = 2.0
methour.wdir = 130.0 * math.pi / 180.0
methour.pgcat = PGCATS['A']
r_grid, h_grid = new_grids(domain)
iter_disp(r_grid, h_grid, domain, source, methour)
r_grid_image, h_grid_image = new_images(domain)
create_image(r_grid_image, r_grid, domain, 'PLAN')
create_image(h_grid_image, h_grid, domain, 'SECTION')
self.assertAlmostEqual(r_grid_image[48 * 250 + 17], 6)
self.assertAlmostEqual(r_grid_image[87 * 250 + 80], 8)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshuanwalker/VRM_IMPORTER_for_Blender2_8",
"score": 2
} |
#### File: VRM_IMPORTER_for_Blender2_8/importer/binaly_loader.py
```python
import struct
from ..gl_const import GL_CONSTANS
class Binaly_Reader:
def __init__(self, data: bytes)->None:
self.data = data
self.pos = 0
def set_pos(self, pos):
self.pos = pos
def read_str(self, size):
result = self.data[self.pos: self.pos + size]
self.pos += size
return result.decode("utf-8")
def read_binaly(self, size):
result = self.data[self.pos: self.pos + size]
self.pos += size
return result
def read_uint(self):
# struct.unpack always returns a tuple regardless of the number of items, so [0] is needed
result = struct.unpack('<I',self.data[self.pos:self.pos + 4])[0]
self.pos += 4
return result
def read_int(self):
result = struct.unpack('<i', self.data[self.pos:self.pos + 4])[0]
self.pos += 4
return result
def read_ushort(self):
result = struct.unpack('<H', self.data[self.pos:self.pos + 2])[0]
self.pos += 2
return result
def read_short(self):
result = struct.unpack('<h', self.data[self.pos:self.pos + 2])[0]
self.pos += 2
return result
def read_float(self):
result = struct.unpack('<f', self.data[self.pos:self.pos + 4])[0]
self.pos += 4
return result
def read_ubyte(self):
result = struct.unpack('<B', self.data[self.pos:self.pos + 1])[0]
self.pos += 1
return result
def read_as_dataType(self,dataType:GL_CONSTANS):
if dataType == GL_CONSTANS.UNSIGNED_INT:
return self.read_uint()
elif dataType == GL_CONSTANS.INT:
return self.read_int()
elif dataType == GL_CONSTANS.UNSIGNED_SHORT:
return self.read_ushort()
elif dataType == GL_CONSTANS.SHORT:
return self.read_short()
elif dataType == GL_CONSTANS.FLOAT:
return self.read_float()
elif dataType == GL_CONSTANS.UNSIGNED_BYTE:
return self.read_ubyte()
else:
print("unsuppoted type : {}".format(dataType))
raise Exception
if "__main__" == __name__:
Binaly_Reader(None)
```
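A small illustrative driver for the reader above; the byte payload is made up for the example and packed with `struct` in the same little-endian layout the class expects:
```python
import struct

# Pack a uint32, an int16 and a float32 (little-endian), then read them back in order.
payload = struct.pack('<Ihf', 42, -7, 1.5)

reader = Binaly_Reader(payload)  # class defined above in importer/binaly_loader.py
assert reader.read_uint() == 42
assert reader.read_short() == -7
assert abs(reader.read_float() - 1.5) < 1e-6
```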
#### File: VRM_IMPORTER_for_Blender2_8/misc/armature_maker.py
```python
import bpy
from mathutils import Matrix
from math import radians,sqrt
import json
class ICYP_OT_MAKE_ARAMATURE(bpy.types.Operator):
bl_idname = "icyp.make_basic_armature"
bl_label = "(WIP)basic armature"
bl_description = "make armature and simple setup for VRM export"
bl_options = {'REGISTER', 'UNDO'}
# height in meters
tall: bpy.props.FloatProperty(default=1.70, min=0.3, step=0.001)
# head-to-height ratio (number of heads tall)
head_ratio: bpy.props.FloatProperty(default=8.0, min=4, step=0.05)
# leg-to-torso ratio: 0 gives more child-like, 1 more adult-like proportions (effective at low head counts)
aging_ratio: bpy.props.FloatProperty(default=0.5, min=0, max=1, step=0.1)
# eye depth (how deep-set the eyes are)
eye_depth: bpy.props.FloatProperty(default=-0.03, min=-0.1, max=0, step=0.005)
# shoulder width
shoulder_in_width: bpy.props.FloatProperty(default=0.2125, min=0.01, step=0.005)
shoulder_width: bpy.props.FloatProperty(default=0.08, min=0.01, step=0.005)
# arm length ratio
arm_length_ratio : bpy.props.FloatProperty(default=1, min=0.5, step=0.01)
# hand
hand_size :bpy.props.FloatProperty(default=0.18, min=0.01, step=0.005)
finger_1_2_ratio :bpy.props.FloatProperty(default=0.75, min=0.5,max=1, step=0.005)
finger_2_3_ratio :bpy.props.FloatProperty(default=0.75, min=0.5,max=1, step=0.005)
# leg
leg_length_ratio : bpy.props.FloatProperty(default=0.5, min=0.3, max=0.6,step=0.01)
leg_width: bpy.props.FloatProperty(default=0.1, min=0.01, step=0.005)
leg_size: bpy.props.FloatProperty(default=0.26, min=0.05, step=0.005)
def execute(self, context):
armature,compare_dict = self.make_armature(context)
self.setup_as_vrm(armature,compare_dict)
return {"FINISHED"}
def make_armature(self, context):
bpy.ops.object.add(type='ARMATURE', enter_editmode=True, location=(0,0,0))
armature = context.object
armature.name = "skelton"
armature.show_in_front = True
bone_dic = {}
def bone_add(name, head_pos, tail_pos, parent_bone=None):
added_bone = armature.data.edit_bones.new(name)
added_bone.head = head_pos
added_bone.tail = tail_pos
if parent_bone is not None:
added_bone.parent = parent_bone
bone_dic.update({name:added_bone})
return added_bone
def x_mirror_bones_add(base_name, right_head_pos, right_tail_pos, parent_bones):
left_bone = bone_add(base_name + "_L", right_head_pos, right_tail_pos, parent_bones[0])
right_bone = bone_add(base_name + "_R",
[pos*axis for pos, axis in zip(right_head_pos, (-1, 1, 1))],
[pos*axis for pos, axis in zip(right_tail_pos, (-1, 1, 1))],
parent_bones[1]
)
return left_bone,right_bone
def x_add(posA, add_x):
pos = [pA + _add for pA, _add in zip(posA, [add_x, 0, 0])]
return pos
def y_add(posA, add_y):
pos = [pA + _add for pA, _add in zip(posA, [0, add_y, 0])]
return pos
def z_add(posA, add_z):
pos = [pA+_add for pA,_add in zip(posA,[0,0,add_z])]
return pos
root = bone_add("root", (0, 0, 0), (0, 0,0.3))
head_size = self.tall / self.head_ratio
# lower body (first value: inseam share of rise/inseam for an 8-head figure; second: the same for a 4-head figure, linearly interpolated by aging_ratio) (a high rise breaks the rig)
eight_upside_ratio, four_upside_ratio = 1-self.leg_length_ratio, (2.5/4)*(1-self.aging_ratio)+(1-self.leg_length_ratio)*self.aging_ratio
hip_up_down_ratio = eight_upside_ratio * (1 - (8 - self.head_ratio) / 4) + four_upside_ratio * (8 - self.head_ratio) / 4
# ratio of below-chest to chest-to-neck
upper_chest_neck_ratio = (1-(8-self.head_ratio)/4)*(1/3) + ((8-self.head_ratio)/4)*0.1
# torso
neck_len = (1-upper_chest_neck_ratio)*(self.tall*(1-hip_up_down_ratio)/2)/3
upper_chest_len = (self.tall*hip_up_down_ratio - head_size - neck_len)/3
chest_len = upper_chest_len
spine_len = chest_len
Hips = bone_add("Hips", (0,0, self.tall*(1-hip_up_down_ratio) ), (0,0.1,self.tall*(1-hip_up_down_ratio)),root)
Spine = bone_add("Spine",Hips.head,z_add(Hips.head,spine_len),Hips)
Chest = bone_add("Chest", Spine.tail, z_add(Spine.tail,chest_len), Spine)
upperChest = bone_add("upperChest", Chest.tail, z_add(Chest.tail,upper_chest_len), Chest)
Neck = bone_add("Neck", upperChest.tail, z_add(upperChest.tail,neck_len), upperChest)
Head = bone_add("Head", (0,0, self.tall-head_size), (0,0, self.tall), Neck)
# eyes
eye_depth = self.eye_depth
eyes = x_mirror_bones_add("eye", (head_size / 5, 0, Head.head[2] + head_size / 2),
(head_size / 5, eye_depth, Head.head[2] + head_size / 2),
(Head, Head))
# legs
leg_width = self.leg_width
leg_size = self.leg_size
leg_bone_lengh =( self.tall*(1-hip_up_down_ratio) - self.tall*0.05 )/2
upside_legs = x_mirror_bones_add("Upper_Leg",
x_add(Hips.head, leg_width),
z_add(x_add(Hips.head, leg_width), -leg_bone_lengh),
(Hips, Hips)
)
lower_legs = x_mirror_bones_add("Lower_Leg",
upside_legs[0].tail,
(leg_width,0,self.tall*0.05),
upside_legs
)
Foots = x_mirror_bones_add("Foot",
lower_legs[0].tail,
(leg_width,-leg_size*(2/3),0),
lower_legs
)
Toes = x_mirror_bones_add("Toes",
Foots[0].tail,
(leg_width,-leg_size,0),
Foots
)
# shoulders to fingers
shoulder_in_pos = self.shoulder_in_width / 2
shoulders = x_mirror_bones_add("shoulder",
x_add(upperChest.tail, shoulder_in_pos),
x_add(upperChest.tail, shoulder_in_pos + self.shoulder_width),
(upperChest,upperChest))
arm_lengh = head_size * (1*(1-(self.head_ratio-6)/2)+1.5*((self.head_ratio-6)/2)) * self.arm_length_ratio
arms = x_mirror_bones_add("Arm",
shoulders[0].tail,
x_add(shoulders[0].tail,arm_lengh),
shoulders)
hand_size = self.hand_size
forearms = x_mirror_bones_add("forearm",
arms[0].tail,
# a closed fist is roughly half an open hand; with a fist, forearm plus hand is about the same length as the upper arm
x_add(arms[0].tail,arm_lengh - hand_size/2),
arms)
hands = x_mirror_bones_add("hand",
forearms[0].tail,
x_add(forearms[0].tail,hand_size/2),
forearms
)
def fingers(finger_name,proximal_pos,finger_len_sum):
finger_normalize = 1/(self.finger_1_2_ratio*self.finger_2_3_ratio+self.finger_1_2_ratio+1)
proximal_finger_len = finger_len_sum*finger_normalize
intermediate_finger_len = finger_len_sum*finger_normalize*self.finger_1_2_ratio
distal_finger_len = finger_len_sum*finger_normalize*self.finger_1_2_ratio*self.finger_2_3_ratio
proximal_bones = x_mirror_bones_add(f"{finger_name}_proximal",proximal_pos,x_add(proximal_pos,proximal_finger_len),hands)
intermediate_bones = x_mirror_bones_add(f"{finger_name}_intermidiate",proximal_bones[0].tail,x_add(proximal_bones[0].tail,intermediate_finger_len),proximal_bones)
distal_bones = x_mirror_bones_add(f"{finger_name}_distal",intermediate_bones[0].tail,x_add(intermediate_bones[0].tail,distal_finger_len),intermediate_bones)
return proximal_bones,intermediate_bones,distal_bones
finger_y_offset = -hand_size/10
thumbs = fingers(
"finger_thumbs",
y_add(hands[0].head,finger_y_offset - hand_size/5),
hand_size/2
)
mats = [thumbs[0][i].matrix.translation for i in [0,1]]
mats = [Matrix.Translation(mat) for mat in mats]
for j in range(3):
for n,angle in enumerate([-45,45]):
thumbs[j][n].transform( mats[n].inverted() )
thumbs[j][n].transform( Matrix.Rotation(radians(angle),4,"Z") )
thumbs[j][n].transform( mats[n] )
index_fingers = fingers(
"finger_index",
y_add(hands[0].tail,-hand_size/5 +finger_y_offset),
(hand_size/2)-(1/2.3125)*(hand_size/2)/3
)
middle_fingers = fingers(
"finger_middle",
y_add(hands[0].tail,finger_y_offset),
hand_size/2
)
ring_fingers = fingers(
"finger_ring",
y_add(hands[0].tail,hand_size/5 +finger_y_offset),
(hand_size/2)-(1/2.3125)*(hand_size/2)/3
)
little_fingers = fingers(
"finger_little",
y_add(hands[0].tail,2*hand_size/5 +finger_y_offset),
((hand_size/2)-(1/2.3125)*(hand_size/2)/3) * ((1/2.3125)+(1/2.3125)*0.75)
)
# each entry is a (left, right) tuple
body_dict = {
"hips":Hips.name,
"spine":Spine.name,
"chest":Chest.name,
"upperChest":upperChest.name,
"neck":Neck.name,
"head":Head.name
}
left_right_body_dict = {
f"{left_right}{bone_name}":bones[lr].name
for bone_name,bones in {
"Eye":eyes,
"UpperLeg":upside_legs,
"LowerLeg":lower_legs,
"Foot":Foots,
"Toes":Toes,
"Shoulder":shoulders,
"UpperArm":arms,
"LowerArm":forearms,
"Hand":hands
}.items()
for lr,left_right in enumerate(["left","right"])
}
# VRM-style finger name keys
fingers_dict={
f"{left_right}{finger_name}{position}":finger[i][lr].name
for finger_name,finger in zip(["Thumb","Index","Middle","Ring","Little"],[thumbs,index_fingers,middle_fingers,ring_fingers,little_fingers])
for i,position in enumerate(["Proximal","Intermediate","Distal"])
for lr,left_right in enumerate(["left","right"])
}
#VRM bone name : blender bone name
bone_name_all_dict = {}
bone_name_all_dict.update(body_dict)
bone_name_all_dict.update(left_right_body_dict)
bone_name_all_dict.update(fingers_dict)
context.scene.update()
bpy.ops.object.mode_set(mode='OBJECT')
return armature,bone_name_all_dict
def setup_as_vrm(self,armature,compaire_dict):
for vrm_bone_name,blender_bone_name in compaire_dict.items():
armature.data.bones[blender_bone_name]["humanBone"] = vrm_bone_name
def write_textblock_and_assgin_to_armature(block_name,value):
text_block = bpy.data.texts.new(name=f"{armature.name}_{block_name}.json")
text_block.write(json.dumps(value,indent = 4))
armature[f"{block_name}"] = text_block.name
#param_dicts are below of this method
write_textblock_and_assgin_to_armature("humanoid_params",self.humanoid_params)
write_textblock_and_assgin_to_armature("firstPerson_params",self.firstPerson_params)
write_textblock_and_assgin_to_armature("blendshape_group",self.blendshape_group)
write_textblock_and_assgin_to_armature("spring_bone",[])
vrm_metas = [
"version",#model version (not VRMspec etc)
"author",
"contactInformation",
"reference",
"title",
"otherPermissionUrl",
"otherLicenseUrl"
]
for v in vrm_metas:
armature[v] = ""
required_vrm_metas = {
"allowedUserName":"Disallow",
"violentUssageName":"Disallow",
"sexualUssageName":"Disallow",
"commercialUssageName":"Disallow",
"licenseName":"Redistribution_Prohibited",
}
for k,v in required_vrm_metas.items():
armature[k] = v
return
humanoid_params = {
"armStretch": 0.05,
"legStretch": 0.05,
"upperArmTwist": 0.5,
"lowerArmTwist": 0.5,
"upperLegTwist": 0.5,
"lowerLegTwist": 0.5,
"feetSpacing": 0,
"hasTranslationDoF": False
}
firstPerson_params = {
"firstPersonBone": "Head",
"firstPersonBoneOffset": {
"x": 0,
"y": 0,
"z": 0
},
"meshAnnotations": [],
"lookAtTypeName": "Bone",
"lookAtHorizontalInner": {
"curve": [
0,0,0,1,
1,1,1,0
],
"xRange": 90,
"yRange": 8
},
"lookAtHorizontalOuter": {
"curve": [
0,0,0,1,
1,1,1,0
],
"xRange": 90,
"yRange": 12
},
"lookAtVerticalDown": {
"curve": [
0,0,0,1,
1,1,1,0
],
"xRange": 90,
"yRange": 10
},
"lookAtVerticalUp": {
"curve": [
0,0,0,1,
1,1,1,0
],
"xRange": 90,
"yRange": 10
}
}
blendshape_group = [
{
"name": "Neutral",
"presetName": "neutral",
"binds": [],
"materialValues": []
},
{
"name": "A",
"presetName": "a",
"binds": [],
"materialValues": []
}
]
#TODO spring_bone sample
``` |
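A hedged sketch of driving the operator above from Blender's Python console, assuming the add-on has been installed and has registered the class (so it is reachable through its `bl_idname`):
```python
import bpy

# Invoke the operator with a few of its properties overridden; the remaining
# properties keep the defaults declared on the class above.
bpy.ops.icyp.make_basic_armature(tall=1.6, head_ratio=7.0, hand_size=0.17)

# The operator leaves the new armature active and attaches the VRM metadata
# text blocks ("humanoid_params", "firstPerson_params", ...) as custom properties.
armature = bpy.context.object
print(armature["humanoid_params"])  # name of the generated JSON text block
```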
{
"source": "joshuaochia/Django-API-eComemrce",
"score": 2
} |
#### File: api_project/core/models.py
```python
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin
)
from django.utils.text import slugify
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from django_countries.fields import CountryField
from products.models import ProductVariant
# from colorfield import fields
class UserManager(BaseUserManager):
"""
Creates and save a User with the given email,
username, firstname, lastname, and password.
"""
# Create User
def create_user(
self, username, email,
first_name, last_name,
password=None
):
# check if there's username and email
if not email or not username:
raise ValueError('User must have an email and username')
# Create a user using the params of create_user
user = self.model(
username=username,
email=self.normalize_email(email),
first_name=first_name,
last_name=last_name,
)
# set password then hash it and saving the new model
user.set_password(password)
user.save(using=self._db)
return user
# Create super user
def create_superuser(
self, username, email,
first_name, last_name, password
):
# using the create_user function to create new user
user = self.create_user(
username, email, first_name,
last_name, password
)
# setting the new user to admin and superuser
user.is_admin = True
user.is_superuser = True
user.is_staff = True
# saving the changes and returning it
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
""" Custom User """
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True
)
username = models.CharField(max_length=55, unique=True)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
""" Fields """
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email', 'first_name', 'last_name']
""" Creating user Handler """
objects = UserManager()
def __str__(self):
# string representation for DB
return f'{self.username}'
def has_perm(self, perm, obj=None):
# User permission
return True
def has_module_perms(self, app_label):
# User permission to view the ap modules
return True
class Profile(models.Model):
""" Separate Profile for security """
profile = models.ImageField(upload_to='profile_pics/')
slug = models.SlugField()
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='profiles'
)
full_name = models.CharField(max_length=256)
first_name = models.CharField(max_length=256)
last_name = models.CharField(max_length=256)
cart_cost = models.IntegerField(null=True)
def save(self, *args, **kwargs):
self.full_name = f'{self.user.first_name} {self.user.last_name}'
self.slug = slugify(self.user.username)
return super().save(*args, **kwargs)
def __str__(self):
return f'{self.first_name} {self.last_name}'
class ProfileCartManager(models.Manager):
""" Managing cart item when created"""
def create(self, profile, product, quantity, cost,):
# Add the new cost to user current cart cost.
money = cost * quantity
profile.cart_cost += money
# create the cart
cart = self.model(
profile=profile,
product=product,
quantity=quantity,
cost=cost
)
# saving both changes
profile.save()
cart.save(using=self._db)
return cart
class ProfileProductCart(models.Model):
""" Creating a cart item specific for one product """
profile = models.ForeignKey(
Profile,
on_delete=models.CASCADE,
)
product = models.ForeignKey(
ProductVariant,
on_delete=models.CASCADE,
related_name='cart_p',
null=True
)
quantity = models.PositiveSmallIntegerField()
cost = models.PositiveSmallIntegerField()
objects = ProfileCartManager()
# Create a profile instance after the user is created
@receiver(post_save, sender=User)
def after_user_created(sender, instance, created, **kwargs):
profile = Profile()
profile.user = instance
profile.slug = instance.username
profile.first_name = instance.first_name
profile.last_name = instance.last_name
profile.save()
class Address(models.Model):
""" Address connected to single user """
# Different address may apply for buying products
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='user_address'
)
street_address = models.CharField(max_length=255)
apartment_address = models.CharField(max_length=255)
country = CountryField()
zip_code = models.IntegerField()
default = models.BooleanField(default=True)
def __str__(self):
return f'{self.user} Address'
``` |
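An illustrative Django-shell sketch of the cart manager above; the module paths and field values are assumptions based on the project layout, and it presumes at least one `ProductVariant` exists and that `cart_cost` starts from a number rather than NULL:
```python
from django.contrib.auth import get_user_model
from core.models import ProfileProductCart   # app label assumed from api_project/core/
from products.models import ProductVariant

user = get_user_model().objects.create_user(
    username='jane', email='jane@example.com',
    first_name='Jane', last_name='Doe', password='example-pass',
)
profile = user.profiles          # created automatically by the post_save signal
profile.cart_cost = 0
profile.save()

variant = ProductVariant.objects.first()   # assumed to already exist
ProfileProductCart.objects.create(profile=profile, product=variant, quantity=2, cost=150)

profile.refresh_from_db()
print(profile.cart_cost)         # 300; the manager adds cost * quantity to the cart total
```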
{
"source": "joshuaochia/School-Management-API",
"score": 2
} |
#### File: finance/test/test_check_signals.py
```python
from school.test.test_api_school_department import department_sample
from django.contrib.auth import get_user_model
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from .. import models
from students.models import Students
from school.models import (
Courses, School, Schedule, Section, TeacherSubject, Subjects, Employees
)
from faker import Faker
import random
fake = Faker()
def sched_sample():
days = [
('Monday', 'Monday'),
('Tuesday', 'Tuesday'),
('Wednesday', 'Wednesday'),
('Thursday', 'Thursday'),
('Friday', 'Friday'),
('Saturday', 'Saturday'),
('Sunday', 'Sunday'),
]
sched = Schedule.objects.create(
start=fake.time(),
end=fake.time(),
day=random.choice(days)[0]
)
return sched
def section_sample():
return Section.objects.create(name='Sample', code='Sample')
def subject_sample(sample_course, school):
sub = Subjects.objects.create(
name='TEST SUB',
course=sample_course,
unit=0,
lab=0,
cost = 200,
school = school
)
return sub
def teacher_sub_sample(subject, schedule, section):
teach_sub = TeacherSubject.objects.create(
subject=subject,
schedule=schedule,
section=section
)
return teach_sub
def sample_students(school, course):
user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
first_name='Test',
last_name='User',
middle_name='Test',
)
student = Students.objects.create(
user=user,
school=school,
course=course,
)
return student
def school_sample():
school = School.objects.create(
name='Test School',
vision='Test Vision',
mission='Test Mission',
street='Test Street',
city='Test City',
zip_code='900'
)
return school
def course_sample(school_sample):
course = Courses.objects.create(
school=school_sample,
course='Bachelor of Science in Business',
major='Marketing'
)
return course
class StudentBalanceTest(TestCase):
def setUp(self) -> None:
self.client = Client()
self.school = school_sample()
self.course = course_sample(self.school)
self.student = sample_students(self.school, self.course)
self.sub = subject_sample(self.course, self.school)
self.sched = sched_sample()
self.section = section_sample()
self.teach_sub = teacher_sub_sample(
subject=self.sub,
schedule=self.sched,
section=self.section
)
def test_student_signal(self):
all_balance = models.StudentBalance.objects.all()
self.assertEqual(len(all_balance), 1)
self.assertIn(self.student.bal, all_balance)
def test_student_payment(self):
self.student.bal.balance = 3000
self.student.bal.save()
payment= models.StudentPayment.objects.create(
balance = self.student.bal,
money = 200
)
all_payments = models.StudentPayment.objects.all()
self.student.bal.refresh_from_db()
self.assertEqual(len(all_payments), 1)
self.assertEqual(self.student.bal.balance, 2800)
def test_student_add_sub(self):
self.student.bal.balance = 0
self.student.bal.save()
student_sub = models.StudentSubject.objects.create(
student = self.student,
subject = self.teach_sub
)
self.student.bal.refresh_from_db()
self.assertEqual(self.student.bal.balance, 200)
class EmployeeSalaryTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
first_name='Normal',
last_name='User',
middle_name='placeholder',
password='<PASSWORD>'
)
self.created_by = get_user_model().objects.create_superuser(
email='<EMAIL>',
first_name='created',
last_name='User',
middle_name='By',
password='<PASSWORD>'
)
self.school = school_sample()
self.department = department_sample(self.school)
self.employee = Employees.objects.create(
created_by=self.created_by,
user=self.user,
school=self.school,
department=self.department,
rate=365,
days_week=5,
salary=5000
)
def test_employee_ot(self):
emp_ot = models.EmployeeOT.objects.create(
salary=self.employee,
hrs=2,
day=fake.date()
)
all_ot = models.EmployeeOT.objects.all()
self.assertEqual(len(all_ot), 1)
emp_ot.salary.refresh_from_db()
total = int(5000 + (365/8) * 2)
# emp = Employees.refresh_from_db()
self.assertEqual(self.employee.salary, total)
def test_employee_leave(self):
emp_leave = models.EmployeeLeave.objects.create(
salary=self.employee,
day=fake.date()
)
emp_leave.salary.refresh_from_db()
self.assertEqual(self.employee.salary, 4635)
```
#### File: school/api/serializers.py
```python
from rest_framework import serializers
from .. import models
from django.contrib.auth import get_user_model
from django_countries.serializers import CountryFieldMixin
class OwnProfileSerializer(serializers.ModelSerializer):
"""
Serializer for editing your own profile as an employee
"""
school = serializers.StringRelatedField(read_only=True)
class Meta:
model = models.Employees
fields = (
'id', 'bday', 'city', 'zip_code',
'sex', 'civil_status', 'department',
'school', 'user', 'slug'
)
read_only_fields = ('id', 'school', 'user', 'department')
class EmployeesSerializer(serializers.ModelSerializer):
"""
Only For Creating New Employees and Listing all of them
"""
password = serializers.CharField(
style={"input_type": "password"},
write_only=True
)
email = serializers.EmailField(write_only=True)
first_name = serializers.CharField(max_length=99, write_only=True)
last_name = serializers.CharField(max_length=99, write_only=True)
middle_name = serializers.CharField(max_length=99, write_only=True)
class Meta:
model = models.Employees
fields = '__all__'
read_only_fields = (
'id','school', 'slug', 'created_by',
'user', 'is_hr', 'is_employee', 'is_teacher'
)
def create(self, validated_data):
email = validated_data.pop('email')
f_name = validated_data.pop('first_name')
l_name = validated_data.pop('last_name')
m_name = validated_data.pop('middle_name')
password = validated_data.pop('password')
user = get_user_model().objects.create_user(
email=email,
first_name=f_name,
last_name=l_name,
password=password,
middle_name=m_name
)
validated_data['user'] = user
position = validated_data.get('position')
if position == 'Teacher':
validated_data['is_teacher'] = True
if position == 'HR':
validated_data['is_hr'] = True
return super().create(validated_data)
class CoursesSerializer(serializers.ModelSerializer):
"""
Serializer for Courses model - for editing, adding, and deleting.
"""
class Meta:
model = models.Courses
fields = '__all__'
read_only_fields = ('id', 'school')
class DepartmentSerializer(serializers.ModelSerializer):
"""
Serializer for department model - for editing, adding, and deleting.
"""
class Meta:
model = models.Department
fields = '__all__'
read_only_fields = ('id', 'school')
class PoliciesSerializer(serializers.ModelSerializer):
"""
Serializer for policies model - for editing, adding, and deleting.
"""
class Meta:
model = models.Policies
fields = '__all__'
read_only_fields = ('id', 'school')
class SchoolSerializer(CountryFieldMixin, serializers.ModelSerializer):
"""
Serializer for the school
"""
policies = PoliciesSerializer(many=True, read_only=True)
departments = DepartmentSerializer(many=True, read_only=True)
courses = CoursesSerializer(many=True, read_only=True)
class Meta:
model = models.School
fields = (
'id', 'name', 'vision', 'mission', 'country',
'street', 'city', 'zip_code', 'date_funded',
'policies', 'departments', 'courses', 'employees',
)
read_only_fields = ('id', )
class SectionSerializer(serializers.ModelSerializer):
"""
Save new student.models.section or edit existing one
"""
class Meta:
model = models.Section
fields = '__all__'
read_only_fields = ('id',)
class ScheduleSerializer(serializers.ModelSerializer):
"""
Save a new school.models.Schedule instance or edit an existing one
"""
class Meta:
model = models.Schedule
fields = '__all__'
read_only_fields = ('id',)
class SubjectSerializer(serializers.ModelSerializer):
"""
Create new subject for the school.
Also: Nested serializer for StudentSubjectSerializer for reading
"""
schedule = serializers.StringRelatedField(read_only=True)
section = serializers.StringRelatedField(read_only=True)
class Meta:
model = models.Subjects
fields = '__all__'
read_only_fields = ('id',)
class TeacherAddSubject(serializers.ModelSerializer):
section = serializers.StringRelatedField(read_only=True)
subject = serializers.StringRelatedField(read_only=True)
schedule = serializers.StringRelatedField(read_only=True)
section_id = serializers.PrimaryKeyRelatedField(
queryset=models.Section.objects.all(),
write_only=True
)
subject_id = serializers.PrimaryKeyRelatedField(
queryset=models.Subjects.objects.all(),
write_only=True
)
schedule_id = serializers.PrimaryKeyRelatedField(
queryset=models.Schedule.objects.all(),
write_only=True
)
class Meta:
model = models.TeacherSubject
fields = '__all__'
read_only_fields = ('id', 'teacher')
def create(self, validated_data):
section_id = validated_data.pop('section_id')
schedule_id = validated_data.pop('schedule_id')
subject_id = validated_data.pop('subject_id')
validated_data['subject'] = subject_id
validated_data['section'] = section_id
validated_data['schedule'] = schedule_id
q = models.TeacherSubject.objects.create(**validated_data)
return q
```
#### File: management/commands/policies.py
```python
from django.core.management.base import BaseCommand
from django.shortcuts import get_object_or_404
from school.models import Policies, School
from faker import Faker
fake = Faker()
class Command(BaseCommand):
help = 'Populate fake Policies data'
def add_arguments(self, parser):
parser.add_argument('first', type=int, help='A number less than 100')
def handle(self, *args, **options):
school = get_object_or_404(School, pk=1)
success = 0
for _ in range(options['first']):
policy = Policies.objects.get_or_create(
policy=fake.text(),
school=school
)
if policy[1]:
policy[0].save()
self.stdout.write(self.style.SUCCESS('Creating data....'))
success += 1
else:
self.stdout.write(
self.style.WARNING('Duplicated data found..')
)
self.stdout.write(
self.style.SUCCESS(f'Success, created {success} fake data')
)
```
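A hedged sketch of running the command above programmatically (it can equally be invoked from the shell as `manage.py policies <count>`); like the command itself, it assumes a `School` row with pk=1 already exists:
```python
from django.core.management import call_command

# Create up to 5 fake Policies rows for the School with pk=1; duplicates produced by
# Faker are skipped via get_or_create and reported as warnings.
call_command('policies', 5)
```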
#### File: school/test/test_api_school_policy.py
```python
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from .. import models
from school.api import serializers
def detail_url(school_id):
return reverse('api_school:institution-detail', args=[school_id])
def school_sample():
school = models.School.objects.create(
name='Test School',
vision='Test Vision',
mission='Test Mission',
street='Test Street',
city='Test City',
zip_code='900'
)
return school
def policy_sample(school_sample):
policy = models.Policies.objects.create(
school=school_sample,
policy='Sample'
)
return policy
def school_policy_url(school_id):
return reverse('api_school:institution-policies', args=[school_id])
class PoliciesPublicAPI(TestCase):
"""
TDD for end point institution-policies and institution-detail
"""
def setUp(self) -> None:
self.client = APIClient()
def test_school_detail(self):
"""
*Check for getting a detail view with no authentication
Expected result: 200
"""
school = school_sample()
url = detail_url(school.id)
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_school_policies_get_post(self):
"""
* Check for getting the API if not authorized
Expected result: 200
* Check for posting with no authentication
Expected result: 401
* Check if the data posted is not created
Expected result: False
"""
school = school_sample()
policy_sample(school)
data = {
'school': school,
'policy': 'sample'
}
url = school_policy_url(school.id)
res = self.client.get(url)
res2 = self.client.post(url, data)
object = models.Policies.objects.filter(**data).exists()
self.assertEqual(res2.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertFalse(object)
class PoliciesPrivateAPI(TestCase):
def setUp(self) -> None:
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
first_name='Normal',
last_name='User',
middle_name='placeholder',
password='<PASSWORD>'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_policy_user_post_get(self):
"""
*Check if the get method is avail if user authenticated
Expected result: 200
*Check the status code when posting as an authenticated non-admin user
Expected result: 403
*Check if the model is created after posting
Expected result: False
"""
school = school_sample()
data = {
'school': school,
'policy': 'sample'
}
url = school_policy_url(school.id)
res = self.client.get(url)
res2 = self.client.post(url, data)
object = models.Policies.objects.filter(**data).exists()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res2.status_code, status.HTTP_403_FORBIDDEN)
self.assertFalse(object)
class PolicySuperuserAPI(TestCase):
"""
TDD for end point institution-policies if superuser
"""
def setUp(self) -> None:
self.user = get_user_model().objects.create_superuser(
email='<EMAIL>',
first_name='Super',
last_name='User',
middle_name='placeholder',
password='<PASSWORD>'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_policy_superuser_get_post(self):
"""
*Check the get method
Expected result: 200
*Check the post status code if superuser
Expected result: 200
*Check if the data existed after posting
Expected result: True or 1 == 1
*Check the filter 'id' and if it's get the equal data
Expected result: True
"""
school = school_sample()
data = {
'school': school,
'policy': 'sample'
}
url = school_policy_url(school.id)
res = self.client.get(url)
res2 = self.client.post(url, data)
object = models.Policies.objects.get(**data)
res3 = self.client.get(url, {'id': f'{object.id}'})
query = models.Policies.objects.all()
serializer = serializers.PoliciesSerializer(object)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res2.status_code, status.HTTP_200_OK)
self.assertIn(object, query)
self.assertEqual(len(query), 1)
self.assertEqual(serializer.data, res3.data)
```
#### File: SchoolManagement_API/students/models.py
```python
from django.db import models
from django.conf import settings
from school.models import School, Courses, TeacherSubject
from school.conf import sex, status
from django_countries.fields import CountryField
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from django.utils import timezone
# Create your models here.
sem = [
('First', 'First'),
('Second', 'Second')
]
grade_status = [
('Passed', 'Passed'),
('Completed', 'Completed'),
('Failed', 'Failed'),
('INC', 'INC')
]
class Students(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='student',
null=True
)
pic = models.ImageField(
upload_to='student_pic',
null=True,
blank=True
)
school = models.ForeignKey(
School,
on_delete=models.CASCADE,
related_name='students'
)
course = models.ForeignKey(
Courses,
on_delete=models.CASCADE,
related_name='students'
)
bday = models.DateField(null=True, verbose_name='Birth Day')
country = CountryField(default='PH')
city = models.CharField(max_length=255, null=True)
zip_code = models.PositiveSmallIntegerField(
verbose_name='Zip Code',
null=True
)
sex = models.CharField(max_length=55, choices=sex, null=True)
civil_status = models.CharField(
max_length=55,
choices=status,
verbose_name='Civil Status',
null=True
)
subjects = models.ManyToManyField(TeacherSubject, through='StudentSubject')
school_yr = models.CharField(default='2012', max_length=255)
sem = models.CharField(choices=sem, max_length=55)
slug = models.SlugField(null=True)
class Meta:
verbose_name = _('Student')
verbose_name_plural = _('Students')
def save(self, *args, **kwargs):
self.slug = slugify(self.user.first_name + '' + self.user.last_name)
return super().save(*args, **kwargs)
@property
def get_year(self):
date = timezone.datetime.strptime(self.school_yr, '%Y')
return date
def __str__(self):
return f"{self.user}"
class StudentSubject(models.Model):
student = models.ForeignKey(
Students,
on_delete=models.CASCADE,
related_name='student_sub'
)
subject = models.ForeignKey(
TeacherSubject,
on_delete=models.CASCADE,
related_name='student'
)
period_1 = models.IntegerField(default=0)
abs_1 = models.IntegerField(default=0)
period_2 = models.IntegerField(default=0)
abs_2 = models.IntegerField(default=0)
period_3 = models.IntegerField(default=0)
abs_3 = models.IntegerField(default=0)
avg = models.IntegerField(default=0)
status = models.CharField(max_length=255, choices=grade_status)
def __str__(self):
return f'{self.student}'
class Project(models.Model):
subject = models.ForeignKey(
TeacherSubject,
on_delete=models.CASCADE,
related_name='projects'
)
title = models.CharField(max_length=255)
dead_line = models.DateTimeField(verbose_name='Dead Line')
description = models.TextField(max_length=5006)
assign = models.ManyToManyField(
StudentSubject,
related_name='my_project',
verbose_name='Members'
)
sample = models.FileField(
upload_to='project/',
null=True
)
files = models.ManyToManyField(
StudentSubject,
through='FileProject',
blank=True
)
class Assignment(models.Model):
subject = models.ForeignKey(
TeacherSubject,
on_delete=models.CASCADE,
related_name='assignment'
)
title = models.CharField(max_length=255)
dead_line = models.DateTimeField(verbose_name='Dead Line')
description = models.TextField(max_length=5006)
assign = models.ManyToManyField(
StudentSubject,
related_name='my_assignment',
verbose_name='Members',
blank=True
)
sample = models.FileField(
upload_to='assignment/',
null=True
)
files = models.ManyToManyField(
StudentSubject,
through='FileAssignment',
blank=True
)
class FileAssignment(models.Model):
student = models.ForeignKey(
StudentSubject,
on_delete=models.CASCADE,
related_name='assignment_file',
null=True
)
assignment = models.ForeignKey(
Assignment,
on_delete=models.CASCADE,
related_name='assignment_files',
null=True
)
file = models.FileField(upload_to='assignment/')
class FileProject(models.Model):
student = models.ForeignKey(
StudentSubject,
on_delete=models.CASCADE,
related_name='project_file',
null=True
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name='project_files',
null=True
)
file = models.FileField(upload_to='project/')
``` |
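For clarity, a brief usage sketch of the through-model relationship declared above; the `student` and `subject` instances are hypothetical and assumed to already exist:

```python
# Hypothetical usage of the Students <-> TeacherSubject link via StudentSubject.
enrollment = StudentSubject.objects.create(student=student, subject=subject, period_1=85)

student.subjects.all()      # TeacherSubject rows linked through StudentSubject
student.student_sub.all()   # the StudentSubject grade rows themselves (related_name='student_sub')
subject.student.all()       # reverse side: StudentSubject rows for this subject (related_name='student')
```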
{
"source": "JoshuaOkafor/powerapps-docstring",
"score": 3
} |
#### File: powerapps-docstring/powerapps_docstring/parser.py
```python
import os
import json
import yaml
class Parser():
def __init__(self, source) -> None:
self.source_path = source
def get_connections(self) -> dict:
"""Read existing connections
"""
connections = {} # create empty dict
connections_file = os.path.join(self.source_path, "Connections", "Connections.json")
if os.path.isfile(connections_file):
with open(connections_file, "r") as file:
connections = json.load(file)
return connections
def _get_screen_content(self, screen_name):
screen_path = os.path.join(self.source_path, "Src", screen_name)
screen_content = {}
with open(screen_path, "r", encoding='utf8') as file:
screen_content = yaml.load(file, Loader=yaml.BaseLoader)
return screen_content
def get_screen_objects(self, screen_name) -> tuple:
screen_content = self._get_screen_content(screen_name)
# print(screen_content)
screen_name = screen_name.replace(".fx.yaml", "")
return screen_name, screen_content
def get_canvas_manifest(self):
# get name from CanvasManifest.json
manifest_file = os.path.join(self.source_path, "CanvasManifest.json")
if os.path.isfile(manifest_file):
with open(manifest_file, "r", encoding="utf-8") as file:
canvas_manifest = json.load(file)
return canvas_manifest
```
#### File: powerapps-docstring/powerapps_docstring/powerapp.py
```python
import os
class UnknownSourceException(Exception):
pass
class CanvasManifestNotFoundInSourceException(Exception):
pass
class PowerApp():
def __init__(self, source) -> None:
self.source = os.path.normpath(source)
self.source_type = self._check_source_type()
def get_pa_src_path(self):
source_path = None
if self.source_type == "directory":
source_path = self.source
elif self.source_type == "zip":
# TODO: unzip and unpack msssap to retrieve src folder
pass
elif self.source_type == "msapp":
# TODO: unpack msssap to retrieve src folder
pass
return source_path
def _check_source_type(self) -> str:
if os.path.isdir(self.source):
if not os.path.isfile(os.path.join(self.source, "CanvasManifest.json")):
path_to_canvas_manifest = self.find("CanvasManifest.json", self.source)
if path_to_canvas_manifest is not None:
print(f"CanvasManifest.json could not be found in provided source path: {self.source}")
print(f"did you mean: {path_to_canvas_manifest}")
raise CanvasManifestNotFoundInSourceException()
return "directory"
elif os.path.isfile(self.source) and self.source.endswith(".zip"):
return "zip"
elif os.path.isfile(self.source) and self.source.endswith(".msapp"):
return "msapp"
else:
raise UnknownSourceException
def _unpack_zip(self):
pass
def _unpack_mssap(self):
pass
def find(self, name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
```
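A small usage sketch of the source-type detection above; the directory path mirrors the SOURCE_PATH used in the tests that follow, and the printed values assume a valid unpacked app:

```python
# Assumes an unpacked canvas app directory containing CanvasManifest.json.
pa = PowerApp("example/src/meetingcapturedemo/")
print(pa.source_type)        # "directory"
print(pa.get_pa_src_path())  # the normalised directory path; zip/msapp unpacking is still a TODO
```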
#### File: powerapps_docstring/tests/test_parser.py
```python
import pytest
SOURCE_PATH = "example/src/meetingcapturedemo/"
def test_instantiate_parser():
from powerapps_docstring.parser import Parser
p = Parser(SOURCE_PATH)
assert isinstance(p, Parser), ("Should be an instance of parser object")
assert p.source_path == SOURCE_PATH
def test_get_connections():
from powerapps_docstring.parser import Parser
p = Parser(SOURCE_PATH)
connections = p.get_connections()
# spot check on some values in connection
for con in connections.items():
assert "connectionInstanceId" in con[1].keys()
assert "connectionParameters" in con[1].keys()
assert "connectionRef" in con[1].keys()
def test_get_screen_objects():
from powerapps_docstring.parser import Parser
p = Parser(SOURCE_PATH)
result = p.get_screen_objects("WelcomeScreen.fx.yaml")
assert result[0] == "WelcomeScreen", "Should return on index 0 the name of the screen"
assert isinstance(result[1], dict), "Shall return on index 1 the screen contents as dict"
def test_get_canvas_manifest():
from powerapps_docstring.parser import Parser
p = Parser(SOURCE_PATH)
result = p.get_canvas_manifest()
assert isinstance(result, dict), "Shall return a dict with CanvasManifest content"
``` |
{
"source": "Joshua-Oladeji/Automate-the-boring-stuff",
"score": 4
} |
#### File: Automate-the-boring-stuff/Chapter_5-Dictionaries/answers.py
```python
import pprint
import copy
inventory = { 'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12 }
def displayInventory(listing):
total = 0
print('\nInventory: ')
for k, v in listing.items():
print(f'{v} {k}')
total += v
print(f'Total number of items: {total}')
displayInventory(inventory)
# List to Dictionary Function for Fantasy Game Inventory
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
def addToInventory(weapons, loot):
# prevents the original inventory from being modified
new_inventory = copy.copy(weapons)
for item in loot:
new_inventory.setdefault(item, 0)
new_inventory[item] += 1
return new_inventory
displayInventory(addToInventory(inventory, dragonLoot))
``` |
{
"source": "JoshuaOloton/FLASKAPP",
"score": 2
} |
#### File: app/api/posts.py
```python
from flask import jsonify, current_app, url_for, g, request
from flask_login import current_user
from app.api.errors import forbidden
from app.models import Permission, Post, User
from app.api import api
from app.api.decorators import permission_required
from app import db
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/')
def get_posts():
page = request.args.get('page',1,type=int)
pagination = Post.query.paginate(
page,per_page=current_app.config['POSTS_PER_PAGE'],error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_posts',page=page-1)
next = None
if pagination.has_next:
next = url_for('api.get_posts',page=page+1)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev_url': prev,
'next_url': next,
'count': pagination.total
})
@api.route('/posts/',methods=['POST'])
@permission_required(Permission.WRITE)
def new_post():
post = Post.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'location':url_for('api.get_post',id=post.id)}
@api.route('/posts/<int:id>',methods=['PUT'])
@permission_required(Permission.WRITE)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and not g.current_user.can(Permission.ADMIN):
return forbidden('Insufficient permissions!')
post.body = request.json.get('body',post.body)
db.session.commit()
return jsonify(post.to_json())
@api.route('/posts/<int:id>',methods=['DELETE'])
@permission_required(Permission.WRITE)
def delete_post(id):
post = Post.query.get_or_404(id)
if post.author != g.current_user and not g.current_user.can(Permission.ADMIN):
return forbidden('Insufficient permissions')
db.session.delete(post)
db.session.commit()
return jsonify({'message':'post successfully deleted'})
```
#### File: FLASKAPP/app/email.py
```python
from flask import current_app, render_template, url_for
import requests, os
def send_reset_email(token,email):
return requests.post(
f"https://api.mailgun.net/v3/{os.environ.get('SANDBOX_DOMAIN_NAME')}/messages",
auth=("api", f"{os.environ.get('API_KEY')}"),
data={"from": f"Flask App <<EMAIL>('SANDBOX_DOMAIN_NAME')}>",
"to": [f"{email}"],
"subject": "Reset your Password",
"text": f""" Please follow this link to reset your account
{url_for("auth.password_reset",token=token)}
Please ignore this mail if you did not make the following request."""})
def send_confirmation_email(token,email):
return requests.post(
f"https://api.mailgun.net/v3/{os.environ.get('SANDBOX_DOMAIN_NAME')}/messages",
auth=("api", f"{os.environ.get('API_KEY')}"),
data={"from": f"Flask App <<EMAIL>('SANDBOX_DOMAIN_<EMAIL>')}>",
"to": [f"{email}"],
"subject": "Confirm Your Account",
"text": f""" Please follow this link to verify your account
{url_for("auth.confirm",_external=True,token=token)}
Please ignore this mail if you did not make the following request."""})
```
#### File: FLASKAPP/app/generate_post_comments.py
```python
from random import randint
from sqlalchemy.exc import IntegrityError
from faker import Faker
from app import db
from app.models import User, Post, Comment
def post_comments():
fake = Faker()
user_count = User.query.count()
post_count = Post.query.count()
for i in range(1,post_count+1):
p = Post.query.get(i)
for i in range(randint(10,15)):
u = User.query.offset(randint(0, user_count - 1)).first()
comment = Comment(
body=fake.text(),
date_created=fake.past_date(),
author=u,
post=p)
db.session.add(comment)
db.session.commit()
``` |
{
"source": "JoshuaOndieki/cohort-19-day2",
"score": 4
} |
#### File: JoshuaOndieki/cohort-19-day2/data_types_lab.py
```python
def data_type(inputs):
type_of_input = type(inputs)
if type_of_input == str:
return len(inputs)
elif type_of_input == bool:
return inputs
elif type_of_input == int:
if inputs < 100:
return "less than 100"
elif inputs == 100:
return "equal to 100"
else:
return "more than 100"
elif type_of_input == list:
if len(inputs) < 3:
return None
else:
return inputs[2]
else:
return "no value"
``` |
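A few illustrative calls showing the value each branch above returns:

```python
data_type("hello")       # 5 -- length of a string
data_type(True)          # True -- booleans are echoed back
data_type(42)            # "less than 100"
data_type(100)           # "equal to 100"
data_type([1, 2, 3, 4])  # 3 -- the third element of a list with three or more items
data_type(None)          # "no value"
```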
{
"source": "JoshuaOndieki/cohort-19-day4",
"score": 4
} |
#### File: JoshuaOndieki/cohort-19-day4/binary_search_lab.py
```python
class BinarySearch(list):
def __init__(self, a, b):
self.length = a
self.append(b)
list_len = 1
while list_len < a: #Loop to generate the list
self.append(self[list_len - 1] + b)
list_len += 1
def search(self, query):
first = 0
last = self.length - 1
found = False
result = {'count': 0, 'index': -1}
if query == self[first]:
result['index'] = 0
return result
elif query == self[last]:
result['index'] = last
return result
while first<=last and not found:
midpoint = (first + last)//2
if self[midpoint] == query:
result['count'] += 1
result['index'] = midpoint
found = True
else:
if query < self[midpoint]:
last = midpoint-1
else:
first = midpoint+1
return result
```
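A quick sanity check of the class above: the constructor builds an arithmetic sequence of `a` terms with step `b`, and `search` reports the index (or -1 when absent):

```python
bs = BinarySearch(10, 3)  # builds [3, 6, 9, ..., 30]
print(bs.search(12))      # {'count': 1, 'index': 3}
print(bs.search(7))       # {'count': 0, 'index': -1} -- 7 is not in the sequence
print(bs.search(3))       # {'count': 0, 'index': 0} -- first element matches before the loop runs
```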
#### File: JoshuaOndieki/cohort-19-day4/missing_number_lab.py
```python
def find_missing(array1,array2):
#if given arrays are both null return 0
if len(array1)==0 and len(array2)==0:
return 0
# if array1 is longer than array2 loop to see which value is missing in array2
elif len(array1)>len(array2):
for i in array1:
if i in array2:
pass
else:
return i #return the missing value
#if array2 is longer than array1 loop to see which value is missing in array1
elif len(array2)>len(array1):
for i in array2:
if i in array1:
pass
else:
return i #return the missing value
#else the arrays are equal then just like the null arrays, return 0
else:
return 0
``` |
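Example runs of `find_missing`, matching the branches above:

```python
print(find_missing([1, 2, 3, 4], [1, 2, 4]))  # 3 -- the value absent from the shorter list
print(find_missing([7], [7, 9]))              # 9 -- works in either direction
print(find_missing([], []))                   # 0 -- both lists empty
print(find_missing([5, 6], [6, 5]))           # 0 -- equal lengths are treated as nothing missing
```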
{
"source": "JoshuaOndieki/contacts",
"score": 3
} |
#### File: contacts/tests/test_contact_model.py
```python
import unittest
from models.contact import Contact
class TestCreateContact(unittest.TestCase):
def test_creates_contact_instance(self):
self.contact = Contact('Joshua', 'Ondieki', 254700009999)
self.assertTrue('Joshua' == self.contact.firstname, msg="Should get first name")
self.assertTrue('Ondieki' == self.contact.surname, msg="Should get last name")
self.assertTrue(254700009999 == self.contact.number, msg="Phone number should be saved with accuracy")
if __name__ == '__main__':
unittest.main()
```
#### File: JoshuaOndieki/contacts/ui.py
```python
from termcolor import colored
from prettytable import PrettyTable
def success(text):
"""Returns a green colored text
"""
return(colored(text, 'green', attrs=['blink', 'bold']))
def magenta(text):
"""Returns a magenta colored text
"""
return(colored(text, 'magenta', attrs=['blink', 'bold']))
def error(text):
"""Returns a red colored text
"""
return(colored(text, 'red', attrs=['blink', 'bold']))
def table(data):
"""Generates and returns a table
"""
x = PrettyTable()
name = colored("Name", 'blue')
phone_number = colored("Phone Number", 'blue')
x.field_names = [name, phone_number]
for contact in data:
contact_name = data[contact][0] + " " + data[contact][1]
x.add_row([contact_name, contact])
x.sortby = name
return(x)
``` |
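The `table` helper expects a dict keyed by phone number whose values are `[firstname, surname]` pairs; a minimal, hypothetical example:

```python
# Hypothetical contact data in the shape table() expects.
contacts = {254700009999: ["Joshua", "Ondieki"], 254711112222: ["Jane", "Doe"]}
print(table(contacts))
print(success("Contacts rendered"))
```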
{
"source": "JoshuaOndieki/full-contact",
"score": 3
} |
#### File: JoshuaOndieki/full-contact/app.py
```python
from docopt import docopt,DocoptExit
from functions import FullContact
import cmd, os, sys
from termcolor import colored,cprint
from prettytable import *
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system('cls' if os.name == 'nt' else 'clear')
print(__doc__)
def whois(criteria,args):
if criteria == 'email':
data = FullContact.whois(email = args["<query>"])
elif criteria == 'phone':
data = FullContact.whois(phone = args["<query>"])
elif criteria == 'twitter':
data = FullContact.whois(twitter = args["<query>"])
if isinstance(data, str):
print(colored(data,'red', attrs = ['bold']))
else:
if data["status"] is not 200 :
print(colored('\t\t\t\t'+data["message"], 'red', attrs=['bold']))
print(colored('\t\t\t\t\t\t\tSTATUS: '+str(data["status"]), 'red', attrs=['bold']))
else:
print(colored('\t\t\t\tID: '+data["requestId"], 'green', attrs=['bold']))
print(colored('\t\t\t\t\t\t\tSTATUS: '+str(data["status"]), 'green', attrs=['bold']))
datasheet = PrettyTable(['#','Info'])
try:
datasheet.add_row(['Full Name',data['contactInfo']['fullName']])
except KeyError:
pass
try:
datasheet.add_row(['Given Name',data['contactInfo']['givenName']])
except KeyError:
pass
try:
datasheet.add_row(['Location',data['demographics']['locationDeduced']['normalizedLocation']])
except KeyError:
pass
try:
datasheet.add_row(['Gender',data['demographics']['gender']])
except KeyError:
pass
social = PrettyTable(['Social Network','url','username','id'])
for profile in data['socialProfiles']:
social.add_row([profile['typeName'], profile['url'],profile['username'],profile['id']])
print(datasheet)
print(social)
class FContact(cmd.Cmd):
text = colored('FullContact$$$', 'green', attrs=['blink'])
prompt = text
@docopt_cmd
def do_mail(self,args):
"""Usage: mail <query>"""
whois('email', args = args)
@docopt_cmd
def do_phone(self,args):
"""Usage: phone <query>"""
whois('phone',args)
@docopt_cmd
def do_twitter(self,args):
"""Usage: twitter <query>"""
whois('twitter',args)
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('cls' if os.name == 'nt' else 'clear')
print ('Full Contact has quit')
exit()
if __name__ == "__main__":
try:
intro()
FContact().cmdloop()
except KeyboardInterrupt:
os.system('cls' if os.name == 'nt' else 'clear')
print('Full Contact has quit')
``` |
{
"source": "JoshuaOndieki/google-search-cli",
"score": 3
} |
#### File: JoshuaOndieki/google-search-cli/app.py
```python
from docopt import docopt,DocoptExit
from functions import GoogleSearchApi as gsa
import cmd
import os
from termcolor import colored,cprint
from prettytable import *
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class GSA(cmd.Cmd):
text = colored('GSA$$$', 'green', attrs=['blink'])
prompt = text
@docopt_cmd
def do_search(self, arg):
"""Usage: search <your_query>"""
search_term=arg['<your_query>']
search_data=gsa.search(search_term)
if isinstance(search_data,str):
print(search_data)
exit()
kind_data=search_data['kind']
queries_data=search_data['queries']
search_info=search_data['searchInformation']
items_data=search_data['items']
heading=PrettyTable(['Heading'])
heading.add_row([kind_data])
heading.add_row(['Total Results : '+queries_data['request'][0]['totalResults']])
print(heading)
print()
for item in items_data:
item_title=item['title']
data_table=PrettyTable([item_title])
data_table.add_row([item['kind']])
data_table.add_row([item['snippet']])
data_table.add_row([item['link']])
print(data_table)
print()
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('GSA has quit')
exit()
if __name__ == "__main__":
try:
intro()
GSA().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('GSA has quit')
``` |
{
"source": "JoshuaOndieki/joshua-ondieki-bootcamp-17",
"score": 4
} |
#### File: Day 1/prime_numbers/TestCases.py
```python
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_0_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertFalse(any(n in prime_numbers(6) for n in [0, 1, 4, 6]))
if __name__=='__main__':
unittest.main()
```
#### File: Day 1/tdd/TestCases.py
```python
import unittest
from loan_calculator import loan_calculator
class Loan(unittest.TestCase):
def test_month_is_not_greater_than_twelve(self):
self.assertEquals(loan_calculator(100000, 11, 13), "Invalid Number of months!")
def test_it_works(self):
self.assertEquals(loan_calculator(100000, 12, 1), 112000)
def test_loan_is_a_float(self):
loan = loan_calculator(100000, 12, 1)
self.assertIsInstance(loan, float, "The returned loan should be a float")
def test_amount_is_not_negative(self):
self.assertEquals(loan_calculator(-895, 12, 1), "Invalid amount!", "Loan amount should not be negative!")
def test_rate_is_not_greater_than_hundred(self):
self.assertEquals(loan_calculator(100000, 101, 13), "Invalid rate!")
if __name__=='__main__':
unittest.main()
``` |
{
"source": "JoshuaOndieki/mordor",
"score": 2
} |
#### File: JoshuaOndieki/mordor/functions.py
```python
class Mordor():
skills={}
def skills_import(skills):
return skills
def add_skill():
pass
def complete_skill():
pass
def view_skills():
pass
def progress(skills):
pass
``` |
{
"source": "JoshuaOndieki/oneforma-fashion-attribute",
"score": 3
} |
#### File: static/src/srcjpy.py
```python
import json
def restructure_category_data():
with open('categoryimages.json', 'r') as jfile:
new_data = json.load(jfile)
with open('category.json') as jfile:
data = json.load(jfile)
with open('category.json', 'w') as jfile:
for item in data:
item['IMAGES'] = new_data[item['NAME']]
json.dump(data, jfile, sort_keys=True, indent=4)
def restructure_part_data():
with open('partimages.json', 'r') as jfile:
new_data = json.load(jfile)
with open('part.json') as jfile:
data = json.load(jfile)
with open('part.json', 'w') as jfile:
for item in data:
item['IMAGES'] = new_data[item['PART']]
json.dump(data, jfile, sort_keys=True, indent=4)
def restructure_texture_data():
with open('textureimages.json', 'r') as jfile:
new_data = json.load(jfile)
with open('texture.json') as jfile:
data = json.load(jfile)
with open('texture.json', 'w') as jfile:
for item in data:
item['IMAGES'] = new_data[item['TEXTURE']]
json.dump(data, jfile, sort_keys=True, indent=4)
restructure_category_data()
restructure_part_data()
restructure_texture_data()
``` |
{
"source": "JoshuaOndieki/ridemyway-api",
"score": 3
} |
#### File: ridemyway/controllers/ride_controller.py
```python
from datetime import datetime
from flask import current_app as app
from ridemyway.models.ride import Ride
from ridemyway.utils.response import Response
from ridemyway.utils.validators import date_has_passed
class RideController():
"""
Controls all CRUD operations of the Ride object.
"""
def create_ride(self, **kwargs):
"""
Creates and adds a ride to the app database.
Returns:
A success status if success adding ride,
failed status otherwise.
"""
ride_ids = [x for x in app.database['Rides']]
if ride_ids:
ride_id = max(ride_ids) + 1
else:
ride_id = 1
date_offered = datetime.now().strftime('%b %d %Y %H:%M%p')
self.new_ride = Ride(
ride_id=ride_id,
departure=kwargs['departure'],
origin=kwargs['origin'],
destination=kwargs['destination'],
vehicle_number_plate=kwargs['vehicle_number_plate'],
capacity=kwargs['capacity'],
cost=kwargs['cost'],
date_offered=date_offered,
availability='available')
ride = self.new_ride.__dict__
app.database['Rides'][self.new_ride.ride_id] = ride
message = 'Ride created successfully'
attributes = {
'location': '/rides/' + str(ride['ride_id']),
'repr': self.new_ride.__repr__()
}
return Response.success(message=message, attributes=attributes)
def fetch_one(self, ride_id):
"""
Fetches a single ride from the app database.
Returns:
The requested ride,
failed status if no such ride exists.
"""
try:
self.ride = app.database['Rides'][ride_id]
message = 'Ride fetched successfully'
return Response.success(message=message, data=self.ride), 200
except KeyError:
meta = {'errors': 1, 'source': '/rides/' + str(ride_id)}
message = 'NOT FOUND'
info = 'That ride does not exist'
response = Response.failed(meta=meta,
message=message,
info=info)
return response, 404
def fetch_all(self):
"""
Fetches all available rides from the app database.
Returns:
All available rides,
"""
rides_count = 0
self.fetched_rides = {}
message = 'Rides fetched successfully'
for key, value in app.database['Rides'].items():
if date_has_passed(value['departure']):
continue
rides_count += 1
self.fetched_rides[key] = value
response = Response.success(message=message,
data=self.fetched_rides,
meta={'rides': rides_count})
return response
```
#### File: ridemyway/controllers/ride_request_controller.py
```python
from flask import current_app as app
from ridemyway.models.request import Request
from ridemyway.utils.response import Response
class RequestController():
"""
Controls all CRUD operations of the Request object.
"""
def create_request(self, **kwargs):
"""
Creates and adds a request to the app database.
Returns:
A success status if success adding ride,
failed status otherwise.
"""
if kwargs['ride_id'] in app.database['Rides']:
request_ids = [x for x in app.database['Requests']]
if request_ids:
request_id = max(request_ids) + 1
else:
request_id = 1
self.new_request = Request(
request_id=request_id,
ride_id=kwargs['ride_id'],
status='available'
)
request = self.new_request.__dict__
app.database['Requests'][request_id] = request
message = 'Ride request created successfully'
attributes = {
'location': '/api/v1/rides/' + str(request_id) + '/requests'
}
response = Response.success(message=message, attributes=attributes)
return response, 201
meta = {'errors': 1,
'source': '/' + str(kwargs['ride_id']) + '/requests'}
message = 'NOT FOUND'
return Response.failed(meta=meta, message='NOT FOUND',
info='The ride requested does not exist'), 404
```
#### File: ridemyway/models/request.py
```python
class Request():
"""
Creates Request objects.
**kwargs:
ride_id: A unique identifier of the ride the request is
being made to.
request_id: A unique identifier for the request.
status: Status of the request.
"""
def __init__(self, **kwargs):
"""
Request object initializer.
Returns:
Object
"""
self.request_id = kwargs['request_id']
self.ride_id = kwargs['ride_id']
self.status = kwargs['status']
```
#### File: ridemyway/tests/__init__.py
```python
import unittest
from ridemyway import create_app
class BaseTest(unittest.TestCase):
"""
Base class for testing
"""
def setUp(self):
"""
Set up tests
"""
self.app = create_app(config_name='testing')
self.client = self.app.test_client
self.headers = {'content-type': 'application/json'}
self.context = self.app.app_context()
self.context.push()
def tearDown(self):
"""
Teardown all test files and instances created
"""
self.context.pop()
# TEST DATA
VALID_RIDE_DATASET = {
'departure': 'Jun 25 2050 1:30PM',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': 350,
'vehicle_number_plate': 'KBC-A21',
'capacity': 3}
VALID_RIDE_DATASET_1 = {
'departure': 'Jun 28 2050 7:00AM',
'origin': 'Garissa',
'destination': 'Nairobi',
'cost': 500,
'vehicle_number_plate': 'KBC-A21',
'capacity': 3}
INVALID_DATE_DATASET = {
'departure': 'Not a date',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': 350, 'vehicle_number_plate':
'KBC-A21', 'capacity': 3
}
PAST_DATE_DATASET = {
'departure': 'Jun 25 1901 1:30PM',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': 350, 'vehicle_number_plate':
'KBC-A21', 'capacity': 3
}
INVALID_COST_DATASET = {
'departure': 'Jun 25 2018 1:30PM',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': '%^$', 'vehicle_number_plate':
'KBC-A21', 'capacity': 3
}
INVALID_CAPACITY_DATASET = {
'departure': 'Jun 25 2018 1:30PM',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': 350, 'vehicle_number_plate':
'KBC-A21', 'capacity': 3.5
}
INVALID_VEHICLE_NUMBER_PLATE = {
'departure': 'Jun 25 2018 1:30PM',
'origin': 'Nairobi',
'destination': 'Garissa',
'cost': 350, 'vehicle_number_plate':
2121, 'capacity': 3
}
``` |
{
"source": "JoshuaOxley/test2",
"score": 3
} |
#### File: JoshuaOxley/test2/test.py
```python
import unittest
import calculator
class Test(unittest.TestCase):
def test_add(self):
self.assertEqual(calculator.add(2, 3), 5)
def test_sub(self):
self.assertEqual(calculator.sub(2, 3), -1)
def test_mult(self):
self.assertEqual(calculator.mult(2, 3), 6)
def test_div(self):
self.assertEqual(calculator.div(2, 3), 2)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshua-paperspace/ResNet-Streamlit-App",
"score": 2
} |
#### File: joshua-paperspace/ResNet-Streamlit-App/app-flask.py
```python
from flask import Flask, request, render_template, redirect, url_for
from resnet import resnet18
from preprocess import imgToTensor
import torch
from PIL import Image
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
MODEL_PATH = './models/resnet18-epochs-5.pth'
app = Flask(__name__)
@app.route('/')
def home_page():
return render_template('index.html')
@app.route('/', methods=['POST'])
def predict_img():
image = Image.open(request.form['img'])
filename = 'static/uploads/uploaded_image.png'
image.save(filename)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = resnet18(3, 10)
model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
tensor = imgToTensor(image)
output = model(tensor)
_, predicted = torch.max(output.data, 1)
print(predicted)
prediction = classes[predicted]
return render_template('result.html', prediction=prediction)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port='8000')
``` |
{
"source": "joshua-paperspace/sentiment-analysis-flask",
"score": 2
} |
#### File: joshua-paperspace/sentiment-analysis-flask/app.py
```python
from flask import Flask, request
from sentiment_analysis import sentiment_analysis  # assumed module name: a hyphenated "sentiment-analysis" is not importable in Python
app = Flask(__name__)
@app.route('/')
def hello_world():
phrase = request.data
return sentiment_analysis(phrase)
# return 'Hey, we have Flask in a Docker container!'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port='8000')
``` |
{
"source": "joshuapatel/PewDiePie",
"score": 3
} |
#### File: src/cogs/economy_phrases.py
```python
import discord
from discord.ext import commands
class EconomyPhrases(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def update_shovel(self):
self.bot.econ["pos"] = await self.bot.pool.fetch("SELECT name, id FROM shovel WHERE fate = true")
self.bot.econ["neg"] = await self.bot.pool.fetch("SELECT name, id FROM shovel WHERE fate = false")
async def update_crime(self):
self.bot.econ["crime"]["pos"] = await self.bot.pool.fetch("SELECT name, id FROM crime WHERE fate = true")
self.bot.econ["crime"]["neg"] = await self.bot.pool.fetch("SELECT name, id FROM crime WHERE fate = false")
@commands.group(invoke_without_command = True)
async def phrase(self, ctx, pid: int, u: str = "shovel"):
if "crime" in u:
table = "crime"
else:
table = "shovel"
pcheck = await self.bot.pool.fetchrow(f"SELECT name, fate FROM {table} WHERE id = $1", pid)
if pcheck == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Phrase Not Found", value = f"Phrase #{pid} could not be found")
await ctx.send(embed = em)
return
fate = pcheck["fate"]
p = pcheck["name"]
if fate:
em = discord.Embed(color = discord.Color.green())
else:
em = discord.Embed(color = discord.Color.red())
em.add_field(name = "Raw Phrase", value = p)
em.set_footer(text = f"Phrase #{pid}")
await ctx.send(embed = em)
@phrase.command()
@commands.is_owner()
async def add(self, ctx, fate: bool, *, phrase: str):
if phrase.startswith("<-- ADD CRIME -->"):
phrase = phrase.replace("<-- ADD CRIME -->", "")
table = "crime"
else:
table = "shovel"
await self.bot.pool.execute(f"INSERT INTO {table} VALUES ($1, $2)", phrase, fate)
pid = await self.bot.pool.fetchval(f"SELECT id FROM {table} WHERE name = $1 AND fate = $2", phrase, fate)
if fate:
em = discord.Embed(color = discord.Color.green())
else:
em = discord.Embed(color = discord.Color.red())
em.add_field(name = "Added Phrase", value = f"The phrase has been added to the {table} command. Fate: {fate}")
em.set_footer(text = f"Phrase #{pid}")
await ctx.send(embed = em)
if table == "shovel":
await self.update_shovel()
else:
await self.update_crime()
@phrase.command()
@commands.is_owner()
async def edit(self, ctx, pid: int, *, phrase: str):
if phrase.startswith("<-- EDIT CRIME -->"):
phrase = phrase.replace("<-- EDIT CRIME -->", "")
table = "crime"
else:
table = "shovel"
pcheck = await self.bot.pool.fetchrow(f"SELECT * FROM {table} WHERE id = $1", pid)
if pcheck == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Phrase Not Found", value = f"Phrase #{pid} could not be found")
await ctx.send(embed = em)
return
await self.bot.pool.execute(f"UPDATE {table} SET name = $1 WHERE id = $2", phrase, pid)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Phrase Updated", value = f"Phrase #{pid} has been updated")
await ctx.send(embed = em)
if table == "shovel":
await self.update_shovel()
else:
await self.update_crime()
@phrase.command(aliases = ["remove"])
@commands.is_owner()
async def delete(self, ctx, pid: int, crime: bool = False):
if crime:
table = "crime"
else:
table = "shovel"
pcheck = await self.bot.pool.fetchrow(f"SELECT * FROM {table} WHERE id = $1", pid)
if pcheck == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Phrase Not Found", value = f"Phrase #{pid} could not be found")
await ctx.send(embed = em)
return
await self.bot.pool.execute(f"DELETE FROM {table} WHERE id = $1", pid)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Phrase Removed", value = f"Phrase #{pid} has been removed")
await ctx.send(embed = em)
if table == "shovel":
await self.update_shovel()
else:
await self.update_crime()
@commands.group(invoke_without_command = True)
async def crimephrase(self, ctx, pid: int):
await ctx.invoke(self.bot.get_command("phrase"), pid = pid, u = "crime")
@crimephrase.command(name = "add")
@commands.is_owner()
async def crime_add(self, ctx, fate: bool, *, phrase: str):
phrase = "<-- ADD CRIME -->" + phrase
await ctx.invoke(self.bot.get_command("phrase add"), fate = fate, phrase = phrase)
@crimephrase.command(name = "edit")
@commands.is_owner()
async def crime_edit(self, ctx, pid: int, *, phrase: str):
phrase = "<-- EDIT CRIME -->" + phrase
await ctx.invoke(self.bot.get_command("phrase edit"), pid = pid, phrase = phrase)
@crimephrase.command(name = "delete", aliases = ["remove"])
@commands.is_owner()
async def crime_delete(self, ctx, pid: int):
await ctx.invoke(self.bot.get_command("phrase delete"), pid = pid, crime = True)
def setup(bot):
bot.add_cog(EconomyPhrases(bot))
```
#### File: src/cogs/economy_shop.py
```python
import discord
from discord.ext import commands
import datetime
class AmountConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
return int(argument)
except:
pass
if "all" in argument:
coins = await ctx.bot.pool.fetchval("SELECT coins FROM econ WHERE userid = $1 AND guildid = $2", ctx.author.id, ctx.guild.id)
if ctx.command.name == "transfer":
coins = round(coins * 0.5)
return coins
elif "," in argument:
return int(argument.replace(",", ""))
else:
return 0
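# Illustrative conversions for AmountConverter above (added note): "2,500" -> 2500,
# a plain integer string -> that integer, "all" -> the caller's full coin balance
# (halved when invoked from the transfer command), anything else -> 0.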
class EconomyShop(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.tcoinimage = "<:bro_coin:541363630189576193>"
async def cad_user(ctx): # pylint: disable=E0213
dbcheck = await ctx.bot.pool.fetchrow("SELECT * FROM econ WHERE userid = $1 AND guildid = $2", ctx.author.id, ctx.guild.id) # pylint: disable=E1101
if dbcheck == None or dbcheck == []:
await ctx.bot.pool.execute("INSERT INTO econ VALUES ($1, $2, $3)", 0, ctx.author.id, ctx.guild.id) # pylint: disable=E1101
return True
else:
return True
return False
@commands.group(invoke_without_command = True)
async def shop(self, ctx):
roles = await self.bot.pool.fetch("""
SELECT * FROM econshop WHERE guildid = $1 ORDER BY reqamount DESC
""", ctx.guild.id)
em = discord.Embed(color = discord.Color.dark_red())
em.set_thumbnail(url = ctx.guild.icon_url)
em.set_author(name = f"{ctx.guild.name}'s Shop")
for r in roles:
role = ctx.guild.get_role(r["roleid"])
if role == None:
await self.bot.pool.execute("DELETE FROM econshop WHERE roleid = $1", r["roleid"])
continue
em.add_field(name = f"Role: {role.name}", value = f"Required amount: {r['reqamount']:,d} {self.tcoinimage}", inline = False)
if len(em.fields) == 0:
em.set_author(name = "")
em.add_field(name = "No Roles", value = f"No roles have been found for {ctx.guild.name}")
await ctx.send(embed = em)
@shop.command(aliases = ["role", "make"])
@commands.bot_has_permissions(manage_roles = True)
@commands.has_permissions(manage_roles = True)
async def add(self, ctx, req_amount: AmountConverter, *, role: discord.Role):
rolecheck = await self.bot.pool.fetchrow("SELECT * FROM econshop WHERE roleid = $1 AND guildid = $2", role.id, ctx.guild.id)
if rolecheck != None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Role Found", value = "This role is already in the shop. Use the `shop edit` command to edit it")
await ctx.send(embed = em)
return
if 0 >= req_amount:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Too Small", value = f"You cannot set the amount at 0 or below")
await ctx.send(embed = em)
return
await self.bot.pool.execute("INSERT INTO econshop VALUES ($1, $2, $3)", role.id, ctx.guild.id, req_amount)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Role Added", value = f"`{role.name}` has been added to the shop and requires {req_amount:,d} {self.tcoinimage} to purchase")
await ctx.send(embed = em)
@shop.command(aliases = ["purchase", "spend", "get"])
@commands.bot_has_permissions(manage_roles = True)
@commands.check(cad_user)
async def buy(self, ctx, *, role: discord.Role):
if role in ctx.author.roles:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Role in Possession", value = f"You already have the `{role.name}` role therefore you cannot buy it")
await ctx.send(embed = em)
return
req_amount = await self.bot.pool.fetchval("SELECT reqamount FROM econshop WHERE roleid = $1 AND guildid = $2", role.id, ctx.guild.id)
if req_amount == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Role Not Found", value = "This role has not been added to this shop. Use the `shop add` command to add it")
await ctx.send(embed = em)
return
user_amount = await self.bot.pool.fetchval("SELECT coins FROM econ WHERE userid = $1 AND guildid = $2", ctx.author.id, ctx.guild.id)
if user_amount >= req_amount:
try:
await ctx.author.add_roles(role, reason = f"Purchased from the shop costing {req_amount:,d} Bro Coins")
except:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Forbidden", value = f"""
It looks like I am not able to give the user this role. Please check that my role is **above** the role you are trying to give.
""")
await ctx.send(embed = em)
return
await self.bot.pool.execute("UPDATE econ SET coins = coins - $1 WHERE userid = $2 AND guildid = $3", req_amount, ctx.author.id, ctx.guild.id)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Purchased Role", value = f"{ctx.author.mention} bought the `{role.name}` role costing {req_amount:,d} {self.tcoinimage}")
em.timestamp = datetime.datetime.utcnow()
await ctx.send(embed = em)
else:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Not Enough", value = f"""
You need {req_amount - user_amount:,d} more {self.tcoinimage} to buy the `{role.name}` role.
""")
await ctx.send(embed = em)
@shop.command(aliases = ["change", "adjust"])
@commands.has_permissions(manage_roles = True)
async def edit(self, ctx, req_amount: AmountConverter, *, role: discord.Role):
rolecheck = await self.bot.pool.fetchrow("SELECT * FROM econshop WHERE roleid = $1 AND guildid = $2", role.id, ctx.guild.id)
if rolecheck == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Role Not Found", value = "This role could not be found in the shop. You can create on using the `shop add` command")
await ctx.send(embed = em)
return
if 0 >= req_amount:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Too Small", value = f"You cannot set the amount at 0 or below")
await ctx.send(embed = em)
return
await self.bot.pool.execute("UPDATE econshop SET reqamount = $1 WHERE roleid = $2 AND guildid = $3", req_amount, role.id, ctx.guild.id)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Role Updated", value = f"`{role.name}`'s required amount to purchase has been changed to {req_amount:,d} {self.tcoinimage}")
await ctx.send(embed = em)
@shop.command(aliases = ["remove"])
@commands.has_permissions(manage_roles = True)
async def delete(self, ctx, *, role: discord.Role):
rolecheck = await self.bot.pool.fetchrow("SELECT * FROM econshop WHERE roleid = $1 AND guildid = $2", role.id, ctx.guild.id)
if rolecheck == None:
em = discord.Embed(color = discord.Color.dark_teal())
em.add_field(name = "Role Not Found", value = "This role could not be found in the shop. You can create on using the `shop add` command")
await ctx.send(embed = em)
return
await self.bot.pool.execute("DELETE FROM econshop WHERE roleid = $1 AND guildid = $2", role.id, ctx.guild.id)
em = discord.Embed(color = discord.Color.dark_red())
em.add_field(name = "Role Deleted", value = f"`{role.name}` has been removed from the shop")
await ctx.send(embed = em)
def setup(bot):
bot.add_cog(EconomyShop(bot))
```
#### File: src/cogs/events.py
```python
import discord
from discord.ext import commands, tasks
# -> Miscellaneous
import datetime
# -> Loop
import aiohttp
import asyncio
# -> Configuration
import sys
sys.path.append("../")
import config
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.JOIN_LEAVE_LOG = 501089724421767178
self.autostatus.start()
def cog_unload(self):
self.autostatus.cancel()
@commands.Cog.listener()
async def on_command_completion(self, ctx):
prefix = self.bot.prefixes.get(ctx.guild.id, None) or "p."
print()
print(f"COMPLETED COMMAND: {ctx.command.name}. Invoked by: {ctx.author.name}#{ctx.author.discriminator}")
print(f"GUILD: {ctx.guild.name} | GUILD ID: {ctx.guild.id}\nUSER ID: {ctx.author.id} | CHANNEL ID: {ctx.channel.id}")
await ctx.send(f"The bot is going away. Read more in the announcements channel in the Discord server (run `{prefix}info` to get the invite).")
@commands.Cog.listener()
async def on_guild_join(self, guild):
print(f"Joined guild named '{guild.name}' with {guild.member_count} members")
logchannel = self.bot.get_channel(self.JOIN_LEAVE_LOG)
em = discord.Embed(title = "Joined Guild", color = discord.Color.teal())
bot_count = len([b for b in guild.members if b.bot])
em.set_thumbnail(url = guild.icon_url)
em.add_field(name = "Name", value = guild.name)
em.add_field(name = "ID", value = str(guild.id))
em.add_field(name = "Owner", value = str(guild.owner))
em.add_field(name = "Member Count", value = f"{guild.member_count:,d}")
em.add_field(name = "Bot Count", value = format(bot_count, ",d"))
em.add_field(name = "Human Count", value = format(guild.member_count - bot_count, ",d"))
em.add_field(name = "Verification Level", value = str(guild.verification_level))
em.add_field(name = "Channel Count", value = f"{len(guild.channels):,d}")
em.add_field(name = "Creation Time", value = guild.created_at)
em.timestamp = datetime.datetime.utcnow()
await logchannel.send(embed = em)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
print(f"Left guild named '{guild.name}' that had {guild.member_count} members")
logchannel = self.bot.get_channel(self.JOIN_LEAVE_LOG)
em = discord.Embed(title = "Left Guild", color = discord.Color.purple())
bot_count = len([b for b in guild.members if b.bot])
em.set_thumbnail(url = guild.icon_url)
em.add_field(name = "Name", value = guild.name)
em.add_field(name = "ID", value = str(guild.id))
em.add_field(name = "Owner", value = str(guild.owner))
em.add_field(name = "Member Count", value = f"{guild.member_count:,d}")
em.add_field(name = "Bot Count", value = format(bot_count, ",d"))
em.add_field(name = "Human Count", value = format(guild.member_count - bot_count, ",d"))
em.add_field(name = "Verification Level", value = str(guild.verification_level))
em.add_field(name = "Channel Count", value = f"{len(guild.channels):,d}")
em.add_field(name = "Creation Time", value = guild.created_at)
em.timestamp = datetime.datetime.utcnow()
await logchannel.send(embed = em)
@tasks.loop(seconds = 30)
async def autostatus(self):
watching = "The bot is going away"
await self.bot.change_presence(activity = discord.Activity(type = discord.ActivityType.watching, name = watching))
@autostatus.before_loop
async def before_autostatus(self):
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(Events(bot))
```
#### File: PewDiePie/src/pewdiepie.py
```python
import discord
from discord.ext import commands
# -> Configuration
import config
# -> Miscellaneous
import random
import datetime
# -> Database
import asyncpg
# -> Loop
import asyncio
import sys
import os
# Supports asyncio subprocesses for Windows (Python 3.7+)
if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
async def custom_prefix(bot, message):
await bot.wait_until_ready()
try:
prefix = bot.prefixes.get(message.guild.id)
except AttributeError:
return str(random.random())
if prefix is None:
return commands.when_mentioned_or(*bot.default_prefixes)(bot, message)
else:
return commands.when_mentioned_or(prefix)(bot, message)
extensions = ["jishaku", "cogs.functions"]
for f in os.listdir("cogs"):
if f.endswith(".py") and not f"cogs.{f[:-3]}" in extensions:
extensions.append("cogs." + f[:-3])
class PewDiePie(commands.AutoShardedBot):
def __init__(self):
super().__init__(
command_prefix = custom_prefix,
case_insensitive = True,
max_messages = 500,
fetch_offline_members = False,
reconnect = True
)
self.pool = None
self.prefixes = {}
self.owner_role = config.owner
self.owners = set()
async def on_ready(self):
# Owners
if len(self.owner_role) == 2:
guild = self.get_guild(self.owner_role[0])
role = guild.get_role(self.owner_role[1])
self.owners.update(r.id for r in role.members)
else:
app = await self.application_info()
self.owners.add(app.owner.id)
if not hasattr(self, "uptime"):
self.uptime = datetime.datetime.utcnow()
print(f"{self.user.name} is ready!")
async def is_owner(self, user):
return user.id in self.owners
async def on_connect(self):
# Database
pool_creds = {
"user": config.db_user,
"password": <PASSWORD>_password,
"port": 5432,
"host": "localhost",
"database": "tseries"
}
try:
self.pool = await asyncpg.create_pool(**pool_creds)
except Exception as error:
print("There was a problem")
print("\n" + str(error))
await super().logout()
with open("../schema.sql", "r") as schema:
await self.pool.execute(schema.read())
# Prefixes
self.default_prefixes = [
"p.", "P.", "p!", "P!",
"t.", "t!", "ts!", "ts.",
"Ts!", "tS!", "TS!", "T.", "T!",
"Ts.", "tS.", "TS."
]
self.prefixes = dict(await self.pool.fetch("SELECT * FROM prefixes"))
self.prepare_extensions()
def prepare_extensions(self):
for extension in extensions:
try:
self.load_extension(extension)
except Exception as error:
print(f"There was a problem loading in the {extension} extension")
print("\n" + str(error))
async def start(self):
await self.login(config.pubtoken) # pylint: disable=no-member
try:
await self.connect()
except KeyboardInterrupt:
await self.stop()
async def stop(self):
await self.pool.close()
await super().logout()
def run(self):
loop = self.loop
try:
loop.run_until_complete(self.start())
except KeyboardInterrupt:
loop.run_until_complete(self.stop())
if __name__ == "__main__":
PewDiePie().run()
``` |
{
"source": "joshuap-cfy/frontier-versa-sdwan-poc-0117",
"score": 2
} |
#### File: tests/integration/test_cgnat.py
```python
import unittest
import requests
from versa_plugin.versaclient import VersaClient
import versa_plugin.cgnat
from versa_plugin.cgnat import AddressRange
import configuration
requests.packages.urllib3.disable_warnings()
class CgnatTestCase(unittest.TestCase):
def setUp(self):
self.config = configuration.data
def notest_get_pools(self):
with VersaClient(self.config) as client:
app = 'mytestapp'
org = 'mytestorg'
print versa_plugin.cgnat.get_list_nat_pools(client, app, org)
def notest_get_rules(self):
with VersaClient(self.config) as client:
app = 'mytestapp'
org = 'mytestorg'
print versa_plugin.cgnat.get_list_nat_rules(client, app, org)
def notest_create_pool(self):
with VersaClient(self.config) as client:
app = 'testapp'
org = 'child'
pool_name = "testpool"
routing = "vr",
provider = "mytestorg"
addr_range = [AddressRange("range",
"172.16.17.32", "172.16.58.3")]
versa_plugin.cgnat.create_pool(client, app, org, pool_name,
addr_range,
routing, provider)
def notest_delete_pool(self):
with VersaClient(self.config) as client:
app = 'testapp'
org = 'child'
pool_name = "testpool"
versa_plugin.cgnat.delete_pool(client, app, org, pool_name)
def notest_create_rule(self):
with VersaClient(self.config) as client:
app = 'testapp'
org = 'child'
pool_name = "testpool"
rule_name = "testrule"
source_addr = ["1.2.3.0/24"]
versa_plugin.cgnat.create_rule(client, app, org, rule_name,
source_addr, pool_name)
def test_delete_rule(self):
with VersaClient(self.config) as client:
app = 'testapp'
org = 'child'
rule_name = "testrule"
versa_plugin.cgnat.delete_rule(client, app, org, rule_name)
```
#### File: tests/integration/test_vpn.py
```python
import base
import requests
import unittest
import versa_plugin.operations
import versa_plugin.vpn
import test_networking
import get_configuration as get_conf
requests.packages.urllib3.disable_warnings()
router = """
use_existing: false
appliance_name: $appliance_name
router:
name: $name
instance-type: virtual-router
interfaces:
- $interface
"""
limits = """
appliance_name: $appliance_name
org_name: $org_name
routes:
- $router
interfaces:
- $interface
"""
vpn_profile = """
appliance_name: $appliance_name
org_name: $org_name
profile:
name: $name
vpn-type: site-to-site
tunnel-initiate: automatic
hardware-accelerator: any
tunnel-routing-instance: $router
tunnel-interface: $interface
local-auth-info:
auth-type: psk
id-string: 1.2.3.4
id-type: ip
key: 1
peer-auth-info:
auth-type: psk
id-type: ip
key: 1
id-string: 1.2.3.5
ipsec:
fragmentation: pre-fragmentation
force-nat-t: disable
mode: tunnel
pfs-group: mod-none
anti-replay: disable
transform: esp-aes128-sha1
keepalive-timeout: 10
ike:
version: v2
group: mod2
transform: aes128-sha1
lifetime: 28800
dpd-timeout: 30
local:
inet: 172.16.58.3
peer:
inet: 172.16.17.32
"""
class ApplianceTestCase(base.BaseTest):
def add_vpn_profile(self, name, **kwargs):
""" Add VPN profile """
self.assertFalse(get_conf.vpn_profile(self.appliance, self.org, name))
versa_plugin.operations.create_vpn_profile()
self.assertTrue(get_conf.vpn_profile(self.appliance, self.org, name))
def delete_vpn_profile(self, name, **kwargs):
""" Delete VPN profile """
self.assertTrue(get_conf.vpn_profile(self.appliance, self.org, name))
versa_plugin.operations.delete_vpn_profile()
self.assertFalse(get_conf.vpn_profile(self.appliance, self.org, name))
def test_vpn_profile(self):
interface = 'tvi-0/9'
unit = '.0'
router_name = self.gen_name('router')
name = self.gen_name('vpn-profile')
networking = test_networking.Operations(self.appliance)
self.add_to_sequence(networking.add_interface,
networking.delete_interface,
test_networking.tvi_interface_with_address,
name=interface)
self.add_to_sequence(networking.add_router,
networking.delete_router,
router,
name=router_name,
interface=interface+unit)
self.add_to_sequence(networking.add_limits,
networking.delete_limits,
limits,
router=router_name,
interface=interface+unit)
self.add_to_sequence(self.add_vpn_profile,
self.delete_vpn_profile,
vpn_profile,
name=name,
router=router_name,
interface=interface+unit)
self.run_sequence()
```
#### File: frontier-versa-sdwan-poc-0117/versa_plugin/networking.py
```python
import json
from versa_plugin.versaclient import JSON
from versa_plugin import find_by_name
from requests import codes
def create_interface(client, appliance, interface):
url = '/api/config/devices/device/{}/config/interfaces'.format(appliance)
itype = interface['name'].split('-')[0]
data = {itype: interface}
client.post(url, json.dumps(data), JSON, codes.created)
def delete_interface(client, appliance, name):
itype = name.split('-')[0]
url = '/api/config/devices/device/{}'\
'/config/interfaces/{}/%22{}%22'.format(appliance, itype, name)
client.delete(url, codes.no_content)
def create_network(client, appliance, network):
url = '/api/config/devices/device/{}/config/networks'.format(appliance)
data = {"network": network}
client.post(url, json.dumps(data), JSON, codes.created)
def delete_network(client, appliance, name):
url = '/api/config/devices/device/{}'\
'/config/networks/network/{}'.format(appliance, name)
client.delete(url, codes.no_content)
def is_network_exists(client, appliance, name):
url = '/api/config/devices/device/{}/config/networks/network?deep'.\
format(appliance)
result = client.get(url, None, None, codes.ok, JSON)
return find_by_name(result, 'network', name)
def create_virtual_router(context, router):
url = '/api/config/devices/device/{}'\
'/config/routing-instances'.format(context.appliance)
data = {"routing-instance": [router]}
context.client.post(url, json.dumps(data), JSON, codes.created)
def delete_virtual_router(context, name):
url = '/api/config/devices/device/{}'\
'/config/routing-instances'\
'/routing-instance/{}'.format(context.appliance, name)
context.client.delete(url, codes.no_content)
def is_router_exists(context, name):
url = '/api/config/devices/device/{}'\
'/config/routing-instances/routing-instance?deep'.\
format(context.appliance)
result = context.client.get(url, None, None, codes.ok, JSON)
return find_by_name(result, 'routing-instance', name)
def add_network_to_router(context, name, network):
url = '/api/config/devices/device/{}'\
'/config/routing-instances/'\
'routing-instance/{}'.format(context.appliance, name)
result = context.client.get(url, None, None, codes.ok, JSON)
result["routing-instance"]["networks"].append(network)
context.client.put(url, json.dumps(result), JSON, codes.no_content)
def delete_network_from_router(context, name, network):
url = '/api/config/devices/device/{}'\
'/config/routing-instances/'\
'routing-instance/{}'.format(context.appliance, name)
result = context.client.get(url, None, None, codes.ok, JSON)
result["routing-instance"]["networks"].remove(network)
context.client.put(url, json.dumps(result), JSON, codes.no_content)
def update_zone(client, appliance, org, zone):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}'\
'/objects/zones/zone/{}'.format(appliance, org, zone['name'])
data = {
"zone": zone}
client.put(url, json.dumps(data), JSON, codes.ok)
def get_zone(client, appliance, org, zone_name):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}'\
'/objects/zones/zone'.format(appliance, org)
result = client.get(url, None, None, codes.ok, JSON)
for zone in result['zone']:
if zone['name'] == zone_name:
return zone
return None
def create_zone(client, appliance, org, zone):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}/objects/zones'.format(appliance, org)
data = {"zone": zone}
client.post(url, json.dumps(data), JSON, codes.created)
def delete_zone(client, appliance, org, zone_name):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}'\
'/objects/zones/zone/{}'.format(appliance, org, zone_name)
client.delete(url)
```
#### File: frontier-versa-sdwan-poc-0117/versa_plugin/operations.py
```python
from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
import versa_plugin
from copy import deepcopy
from versa_plugin import with_versa_client
from versa_plugin import get_mandatory
import versa_plugin.appliance
import versa_plugin.cgnat
import versa_plugin.connectors
import versa_plugin.dhcp
import versa_plugin.firewall
import versa_plugin.networking
import versa_plugin.tasks
import versa_plugin.vpn
import versa_plugin.limits
from versa_plugin.cgnat import AddressRange
from collections import namedtuple
ApplianceContext = namedtuple("ApplianceContext",
"client, appliance, organization")
def is_use_existing():
return ctx.node.properties.get('use_existing')
def reqursive_update(d, u):
for k, v in u.iteritems():
if isinstance(v, dict):
r = reqursive_update(d.get(k, {}), v)
d[k] = r
elif isinstance(v, list):
if isinstance(u[k], list):
d[k] = d.setdefault(k, []) + u[k]
else:
d[k] = d.setdefault(k, []) + [u[k]]
else:
d[k] = u[k]
return d
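# Worked example of the merge above (added for clarity):
# reqursive_update({"a": {"x": 1}, "l": [1]}, {"a": {"y": 2}, "l": [2]})
# -> {"a": {"x": 1, "y": 2}, "l": [1, 2]}  (nested dicts merged, lists concatenated)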
def _get_node_configuration(key, kwargs):
value = ctx.node.properties.get(key, {})
value.update(kwargs.get(key, {}))
if value:
return value
else:
raise cfy_exc.NonRecoverableError(
"Configuration parameter {0} is absent".format(key))
def _get_context(client):
appliance = get_mandatory(ctx.node.properties, 'appliance_name')
org = ctx.node.properties.get('org_name', None)
return ApplianceContext(client, appliance, org)
@operation
@with_versa_client
def create_resource_pool(versa_client, **kwargs):
if is_use_existing():
return
instance = _get_node_configuration('instance', kwargs)
versa_plugin.connectors.add_resource_pool(versa_client, instance)
@operation
@with_versa_client
def delete_resource_pool(versa_client, **kwargs):
if is_use_existing():
return
instance = _get_node_configuration('instance', kwargs)
name = get_mandatory(instance, 'name')
versa_plugin.connectors.delete_resource_pool(versa_client, name)
@operation
@with_versa_client
def create_cms_local_organization(versa_client, **kwargs):
if is_use_existing():
return
organization = _get_node_configuration('organization', kwargs)
versa_plugin.connectors.add_organization(versa_client, organization)
@operation
@with_versa_client
def delete_cms_local_organization(versa_client, **kwargs):
if is_use_existing():
return
organization = _get_node_configuration('organization', kwargs)
name = get_mandatory(organization, 'name')
versa_plugin.connectors.delete_organization(versa_client, name)
@operation
@with_versa_client
def create_organization(versa_client, **kwargs):
if is_use_existing():
return
organization = _get_node_configuration('organization', kwargs)
versa_plugin.appliance.add_organization(versa_client, organization)
@operation
@with_versa_client
def delete_organization(versa_client, **kwargs):
if is_use_existing():
return
organization = _get_node_configuration('organization', kwargs)
name = get_mandatory(organization, 'name')
versa_plugin.appliance.delete_organization(versa_client, name)
@operation
@with_versa_client
def create_appliance(versa_client, **kwargs):
if is_use_existing():
return
device = _get_node_configuration('device', kwargs)
management_ip = get_mandatory(device, 'mgmt-ip')
versa_plugin.appliance.wait_for_device(versa_client, management_ip, ctx)
task = versa_plugin.appliance.add_appliance(versa_client, device)
versa_plugin.tasks.wait_for_task(versa_client, task, ctx)
@operation
@with_versa_client
def delete_appliance(versa_client, **kwargs):
if is_use_existing():
return
device = _get_node_configuration('device', kwargs)
name = get_mandatory(device, 'name')
task = versa_plugin.appliance.delete_appliance(versa_client, name)
versa_plugin.tasks.wait_for_task(versa_client, task, ctx)
@operation
@with_versa_client
def associate_organization(versa_client, **kwargs):
if is_use_existing():
return
organization = _get_node_configuration('organization', kwargs)
appliance = get_mandatory(organization, 'appliance')
net_info = get_mandatory(organization, 'networking-info')
for net in net_info:
interface_name = get_mandatory(get_mandatory(net, 'network-info'),
'parent-interface')
interface = {"name": interface_name, "enable": True,
"promiscuous": False}
versa_plugin.networking.create_interface(versa_client, appliance,
interface)
task = versa_plugin.appliance.associate_organization(versa_client,
organization)
versa_plugin.tasks.wait_for_task(versa_client, task, ctx)
@operation
@with_versa_client
def create_router(versa_client, **kwargs):
if is_use_existing():
return
context = _get_context(versa_client)
router = _get_node_configuration('router', kwargs)
if versa_plugin.networking.is_router_exists(context, router):
raise cfy_exc.NonRecoverableError("Router exists")
versa_plugin.networking.create_virtual_router(context, router)
@operation
@with_versa_client
def delete_router(versa_client, **kwargs):
if is_use_existing():
return
context = _get_context(versa_client)
router = _get_node_configuration('router', kwargs)
router_name = router['name']
if versa_plugin.networking.is_router_exists(context, router_name):
versa_plugin.networking.delete_virtual_router(context, router_name)
@operation
@with_versa_client
def insert_to_router(versa_client, **kwargs):
if is_use_existing():
return
context = _get_context(versa_client)
router = _get_node_configuration('router', kwargs)
router_name = router['name']
networks = ctx.node.properties.get('networks', [])
for name in networks:
versa_plugin.networking.add_network_to_router(
context, router_name, name)
@operation
@with_versa_client
def delete_from_router(versa_client, **kwargs):
if is_use_existing():
return
context = _get_context(versa_client)
networks = ctx.node.properties.get('networks', [])
router_name = ctx.node.properties['name']
for name in networks:
versa_plugin.networking.delete_network_from_router(
context, router_name, name)
@operation
@with_versa_client
def create_cgnat(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
pool = ctx.node.properties['pool']
pool_name = pool['name']
ranges = [AddressRange(r['name'], r['low'], r['hight'])
for r in pool['ranges']]
routing_instance = pool['routing_instance']
provider_org = pool['provider_org']
versa_plugin.cgnat.create_pool(versa_client, appliance_name,
org_name, pool_name,
ranges, routing_instance,
provider_org)
rule = ctx.node.properties['rule']
rule_name = rule['name']
source_addresses = rule['addresses']
versa_plugin.cgnat.create_rule(versa_client, appliance_name,
org_name, rule_name,
source_addresses, pool_name)
@operation
@with_versa_client
def delete_cgnat(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
pool = ctx.node.properties['pool']
pool_name = pool['name']
rule = ctx.node.properties['rule']
rule_name = rule['name']
versa_plugin.cgnat.delete_rule(versa_client, appliance_name,
org_name, rule_name)
versa_plugin.cgnat.delete_pool(versa_client, appliance_name,
org_name, pool_name)
@operation
@with_versa_client
def create_zone(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
zone = ctx.node.properties['zone']
zone_name = zone['name']
zone_exsists = versa_plugin.networking.get_zone(versa_client,
appliance_name,
org_name,
zone_name)
if zone_exsists:
raise cfy_exc.NonRecoverableError(
"Zone '{}' exsists".format(zone_name))
versa_plugin.networking.create_zone(versa_client,
appliance_name,
org_name, zone)
@operation
@with_versa_client
def insert_to_zone(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
zone = ctx.node.properties['zone']
zone_name = zone['name']
zone_exsists = versa_plugin.networking.get_zone(versa_client,
appliance_name,
org_name,
zone_name)
if zone_exsists:
ctx.instance.runtime_properties[zone_name] = deepcopy(zone_exsists)
new_zone = reqursive_update(zone_exsists, zone)
versa_plugin.networking.update_zone(versa_client,
appliance_name,
org_name, new_zone)
@operation
@with_versa_client
def delete_zone(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
zone = ctx.node.properties['zone']
zone_name = zone['name']
versa_plugin.networking.delete_zone(versa_client,
appliance_name,
org_name, zone_name)
@operation
@with_versa_client
def delete_from_zone(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
zone = ctx.node.properties['zone']
zone_name = zone['name']
old_zone = ctx.instance.runtime_properties.get(zone_name, None)
if old_zone:
versa_plugin.networking.update_zone(versa_client,
appliance_name,
org_name, old_zone)
@operation
@with_versa_client
def create_firewall_policy(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
policy = ctx.node.properties['policy']
versa_plugin.firewall.add_policy(versa_client, appliance_name,
org_name, policy)
@operation
@with_versa_client
def delete_firewall_policy(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
policy = ctx.node.properties['policy']
versa_plugin.firewall.delete_policy(versa_client, appliance_name,
org_name, policy['name'])
@operation
@with_versa_client
def create_firewall_rule(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
policy_name = ctx.node.properties['policy_name']
rule = ctx.node.properties['rule']
ctx.instance.runtime_properties['rules'] = {}
ctx.instance.runtime_properties['appliance'] = appliance_name
ctx.instance.runtime_properties['org'] = org_name
ctx.instance.runtime_properties['policy'] = policy_name
name = rule['name']
ctx.instance.runtime_properties['rules'][name] = rule
versa_plugin.firewall.add_rule(versa_client, appliance_name,
org_name, policy_name, rule)
if ctx.node.properties['on_top']:
all_rules = versa_plugin.firewall.get_all_rules(versa_client,
appliance_name,
org_name, policy_name)
sorted_list = []
for rule in all_rules:
if rule['name'] == name:
sorted_list.insert(0, rule)
else:
sorted_list.append(rule)
versa_plugin.firewall.reorder_rules(versa_client, appliance_name,
org_name, policy_name, sorted_list)
@operation
@with_versa_client
def update_firewall_rule(versa_client, **kwargs):
rule = kwargs.get('rule')
if not rule:
return
name = rule.get('name')
if not name:
ctx.logger.info("Key 'name' in rule is absent.")
return
old_rule = ctx.instance.runtime_properties['rules'].get(name)
if not old_rule:
ctx.logger.info("Rule: '{}' not found.".format(name))
return
reqursive_update(rule, old_rule)
appliance_name = ctx.instance.runtime_properties['appliance']
org_name = ctx.instance.runtime_properties['org']
policy_name = ctx.instance.runtime_properties['policy']
versa_plugin.firewall.update_rule(versa_client, appliance_name,
org_name, policy_name, rule)
@operation
@with_versa_client
def get_firewall_rule(versa_client, **kwargs):
name = kwargs.get('name')
if not name:
ctx.logger.info("Key 'name' is absent.")
return
appliance_name = ctx.instance.runtime_properties['appliance']
org_name = ctx.instance.runtime_properties['org']
policy_name = ctx.instance.runtime_properties['policy']
rule = versa_plugin.firewall.get_rule(versa_client, appliance_name,
org_name, policy_name, name)
ctx.logger.info("Rule '{} is: {}".format(name, rule))
@operation
@with_versa_client
def delete_firewall_rule(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
policy_name = ctx.node.properties['policy_name']
rule = ctx.node.properties['rule']
versa_plugin.firewall.delete_rule(versa_client, appliance_name,
org_name, policy_name, rule['name'])
@operation
@with_versa_client
def create_url_filters(versa_client, **kwargs):
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
url_filters = ctx.node.properties['filters']
for url_filter in url_filters:
ctx.logger.info("Filter: {}".format(url_filter))
versa_plugin.firewall.add_url_filter(versa_client, appliance_name,
org_name, url_filter)
@operation
@with_versa_client
def delete_url_filters(versa_client, **kwargs):
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
url_filters = ctx.node.properties['filters']
for url_filter in url_filters:
ctx.logger.info("Filter: {}".format(url_filter))
versa_plugin.firewall.delete_url_filter(versa_client, appliance_name,
org_name, url_filter)
@operation
@with_versa_client
def create_dhcp_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
profile_name = ctx.node.properties['profile_name']
if versa_plugin.limits.is_dhcp_profile_exists(versa_client,
appliance_name,
profile_name):
raise cfy_exc.NonRecoverableError("Dhcp profile exists")
versa_plugin.limits.create_dhcp_profile(versa_client, appliance_name,
profile_name)
@operation
@with_versa_client
def delete_dhcp_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
profile_name = ctx.node.properties['profile_name']
if versa_plugin.limits.is_dhcp_profile_exists(versa_client,
appliance_name,
profile_name):
versa_plugin.limits.delete_dhcp_profile(versa_client,
appliance_name,
profile_name)
@operation
@with_versa_client
def create_dhcp_lease_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_name = ctx.node.properties['lease_profile']
versa_plugin.dhcp.create_lease_profile(versa_client, appliance_name,
org_name, lease_name)
@operation
@with_versa_client
def delete_dhcp_lease_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_name = ctx.node.properties['lease_profile']
if versa_plugin.dhcp.is_lease_profile_exsists(versa_client, appliance_name,
org_name, lease_name):
versa_plugin.dhcp.delete_lease_profile(versa_client, appliance_name,
org_name, lease_name)
@operation
@with_versa_client
def create_dhcp_options_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
options_name = ctx.node.properties['name']
domain = ctx.node.properties['domain']
servers = ctx.node.properties['servers']
versa_plugin.dhcp.create_options_profile(versa_client, appliance_name,
org_name, options_name,
domain, servers)
@operation
@with_versa_client
def delete_dhcp_options_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
options_name = ctx.node.properties['name']
if versa_plugin.dhcp.is_dhcp_profile_exists(versa_client, appliance_name,
org_name, options_name):
versa_plugin.dhcp.delete_options_profile(versa_client, appliance_name,
org_name, options_name)
@operation
@with_versa_client
def create_dhcp_global_configuration(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_profile = ctx.node.properties['lease_profile']
options_profile = ctx.node.properties['options_profile']
versa_plugin.dhcp.update_global_configuration(versa_client, appliance_name,
org_name, lease_profile,
options_profile)
@operation
@with_versa_client
def delete_dhcp_global_configuration(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_profile = []
options_profile = []
versa_plugin.dhcp.update_global_configuration(versa_client, appliance_name,
org_name, lease_profile,
options_profile)
@operation
@with_versa_client
def create_dhcp_pool(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_profile = ctx.node.properties['lease_profile']
options_profile = ctx.node.properties['options_profile']
pool_name = ctx.node.properties['name']
mask = ctx.node.properties['mask']
range_name = ctx.node.properties['range_name']
begin_address = ctx.node.properties['begin_address']
end_address = ctx.node.properties['end_address']
versa_plugin.dhcp.create_pool(versa_client, appliance_name, org_name,
pool_name, mask, lease_profile,
options_profile,
range_name, begin_address, end_address)
@operation
@with_versa_client
def delete_dhcp_pool(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
pool_name = ctx.node.properties['name']
if versa_plugin.dhcp.is_pool_exists(versa_client, appliance_name,
org_name, pool_name):
versa_plugin.dhcp.delete_pool(versa_client, appliance_name, org_name,
pool_name)
@operation
@with_versa_client
def create_dhcp_server(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
lease_profile = ctx.node.properties['lease_profile']
options_profile = ctx.node.properties['options_profile']
pool_name = ctx.node.properties['pool_name']
server_name = ctx.node.properties['name']
networks = ctx.node.properties['networks']
versa_plugin.dhcp.create_server(versa_client, appliance_name, org_name,
server_name, lease_profile, options_profile,
networks, pool_name)
@operation
@with_versa_client
def delete_dhcp_server(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
server_name = ctx.node.properties['name']
if versa_plugin.dhcp.is_server_exists(versa_client, appliance_name,
org_name, server_name):
versa_plugin.dhcp.delete_server(versa_client, appliance_name, org_name,
server_name)
@operation
@with_versa_client
def create_interface(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
interface = _get_node_configuration('interface', kwargs)
versa_plugin.networking.create_interface(versa_client, appliance_name,
interface)
@operation
@with_versa_client
def delete_interface(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
interface = _get_node_configuration('interface', kwargs)
name = interface['name']
versa_plugin.networking.delete_interface(versa_client, appliance_name,
name)
@operation
@with_versa_client
def create_network(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
network = _get_node_configuration('network', kwargs)
if versa_plugin.networking.is_network_exists(versa_client,
appliance_name,
network):
raise cfy_exc.NonRecoverableError("Network exists")
versa_plugin.networking.create_network(versa_client, appliance_name,
network)
@operation
@with_versa_client
def delete_network(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
network = _get_node_configuration('network', kwargs)
name = network['name']
if versa_plugin.networking.is_network_exists(versa_client,
appliance_name,
name):
versa_plugin.networking.delete_network(versa_client, appliance_name,
name)
@operation
@with_versa_client
def insert_to_limits(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
dhcp_profile = ctx.node.properties.get('dhcp_profile')
routes = ctx.node.properties.get('routes', [])
networks = ctx.node.properties.get('networks', [])
interfaces = ctx.node.properties.get('interfaces', [])
provider_orgs = ctx.node.properties.get('provider_orgs', [])
if dhcp_profile:
versa_plugin.limits.insert_dhcp_profile_to_limits(versa_client,
appliance_name,
org_name,
dhcp_profile)
for name in routes:
versa_plugin.limits.add_routing_instance(versa_client,
appliance_name,
org_name, name)
for name in networks:
versa_plugin.limits.add_traffic_identification_networks(
versa_client, appliance_name, org_name, name, 'using-networks')
for name in interfaces:
versa_plugin.limits.add_traffic_identification_networks(
versa_client, appliance_name, org_name, name, 'using')
for name in provider_orgs:
versa_plugin.limits.add_provider_organization(versa_client,
appliance_name,
org_name,
name)
@operation
@with_versa_client
def delete_from_limits(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
dhcp_profile = ctx.node.properties.get('dhcp_profile')
routes = ctx.node.properties.get('routes', [])
networks = ctx.node.properties.get('networks', [])
interfaces = ctx.node.properties.get('interfaces', [])
provider_orgs = ctx.node.properties.get('provider_orgs', [])
for name in routes:
versa_plugin.limits.delete_routing_instance(versa_client,
appliance_name,
org_name, name)
for name in networks:
versa_plugin.limits.delete_traffic_identification_networks(
versa_client, appliance_name, org_name, name, 'using-networks')
for name in interfaces:
versa_plugin.limits.delete_traffic_identification_networks(
versa_client, appliance_name, org_name, name, 'using')
for name in provider_orgs:
versa_plugin.limits.delete_provider_organization(versa_client,
appliance_name,
org_name,
name)
if dhcp_profile:
versa_plugin.limits.delete_dhcp_profile_from_limits(versa_client,
appliance_name,
org_name,
dhcp_profile)
@operation
@with_versa_client
def create_vpn_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
profile = _get_node_configuration('profile', kwargs)
name = profile['name']
if versa_plugin.vpn.is_profile_exists(versa_client,
appliance_name, org_name,
name):
raise cfy_exc.NonRecoverableError("VPN profile exists")
versa_plugin.vpn.create_profile(versa_client, appliance_name, org_name,
profile)
@operation
@with_versa_client
def delete_vpn_profile(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
profile = _get_node_configuration('profile', kwargs)
name = profile['name']
if versa_plugin.vpn.is_profile_exists(versa_client,
appliance_name, org_name,
name):
versa_plugin.vpn.delete_profile(versa_client, appliance_name, org_name,
name)
@operation
@with_versa_client
def insert_captive_portal(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
portal = _get_node_configuration('captive_portal', kwargs)
versa_plugin.firewall.update_captive_portal(versa_client, appliance_name,
org_name, portal)
@operation
@with_versa_client
def clean_captove_portal(versa_client, **kwargs):
if is_use_existing():
return
appliance_name = ctx.node.properties['appliance_name']
org_name = ctx.node.properties['org_name']
portal = {"port": "0", "track-by-host": False, "expiration-time": "30",
"custom-pages": {}}
versa_plugin.firewall.update_captive_portal(versa_client, appliance_name,
org_name, portal)
```
#### File: frontier-versa-sdwan-poc-0117/versa_plugin/versaclient.py
```python
import requests
import json
import os
from os import chmod
from xml.dom.minidom import parseString
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from cloudify import exceptions as cfy_exc
from cloudify import ctx
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
JSON = 'json'
XML = 'xml'
def _save_key_file(path, value):
path = os.path.expanduser(path)
with open(path, 'w') as content_file:
chmod(path, 0600)
content_file.write(value)
def _check_response(response, return_code, accept):
if response.status_code == requests.codes.no_content:
return None
if response.status_code != return_code:
raise cfy_exc.HttpException(response.url, response.status_code,
response.content)
if response.content:
if accept == JSON:
return json.loads(response.content)
else:
return parseString(response.content)
else:
return None
class VersaClient():
def __init__(self, config, key_file):
self.versa_url = config["versa_url"]
self.client_id = config["client_id"]
self.client_secret = config["client_secret"]
self.username = config["username"]
self.password = config["password"]
self.access_token = None
self.verify = False
self.key_file = key_file
def __enter__(self):
self.get_token()
return self
def __exit__(self, type, value, traceback):
# self.revoke_token()
pass
    def read_tokens_from_file(self):
if os.path.isfile(self.key_file):
with open(self.key_file) as file:
self.access_token = file.readline().rstrip()
return True
return False
def save_token_to_file(self):
with open(self.key_file, "w") as file:
file.write(self.access_token)
def get_token(self):
        if self.read_tokens_from_file():
return
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"username": self.username,
"password": self.password,
"grant_type": "password"}
headers = self._get_headers(JSON, JSON)
result = requests.post(self.versa_url + "/auth/token",
headers=headers, data=json.dumps(data),
verify=self.verify)
try:
result = json.loads(result.content)
self.access_token = result['access_token']
self.save_token_to_file()
except (KeyError, TypeError, ValueError):
raise cfy_exc.NonRecoverableError(
"Incorrect reply: {}".format(result))
def revoke_token(self):
headers = {"Authorization": "Bearer {}".format(self.access_token)}
requests.post(self.versa_url + "/auth/revoke",
headers=headers, verify=self.verify)
if os.path.isfile(self.key_file):
os.remove(self.key_file)
self.access_token = None
def get(self, path, data, content_type, return_code=200, accept=JSON):
return self._request(requests.get, path, data,
content_type, return_code, accept)
def post(self, path, data, content_type, return_code=201, accept=JSON):
return self._request(requests.post, path, data,
content_type, return_code, accept)
def put(self, path, data, content_type, return_code=204, accept=JSON):
return self._request(requests.put, path, data,
content_type, return_code, accept)
def delete(self, path, return_code=204, accept=JSON):
return self._request(requests.delete, path, None,
None, return_code, accept)
def _request(self, request_type, path, data, content_type, return_code,
accept):
retry = 0
ctx.logger.debug("Sending {0} request to {1} with data {2}".format(
request_type.__name__, self.versa_url+path, str(data)))
while True:
headers = self._get_headers(content_type, accept)
response = request_type(
self.versa_url + path,
headers=headers, data=data,
verify=self.verify)
if response.status_code == 401:
if retry == 1:
break
retry += 1
self.revoke_token()
self.get_token()
else:
response_str = _check_response(response, return_code, accept)
ctx.logger.debug("respose code: {0} string:{1}".format(
return_code, response_str))
return response_str
def _get_headers(self, content_type, accept):
content_dict = {'json': 'application/json', 'xml': 'application/xml'}
headers = {}
if content_type:
try:
headers['Content-type'] = content_dict[content_type]
except KeyError:
raise cfy_exc.NonRecoverableError(
"Unknown content-type: {}".format(content_type))
if self.access_token:
headers["Authorization"] = "Bearer {}".format(self.access_token)
headers['Accept'] = content_dict[accept]
return headers
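    # Illustrative example (added for clarity, not in the original client):
    # _get_headers(JSON, JSON) with a stored access token would return
    # {'Content-type': 'application/json',
    #  'Authorization': 'Bearer <token>',
    #  'Accept': 'application/json'}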
```
#### File: frontier-versa-sdwan-poc-0117/versa_plugin/vpn.py
```python
import json
from versa_plugin.versaclient import JSON
from versa_plugin import find_by_name
from requests import codes
def create_profile(client, appliance, org, profile):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}/ipsec'.format(appliance, org)
data = {"vpn-profile": profile}
client.post(url, json.dumps(data), JSON, codes.created)
def delete_profile(client, appliance, org, name):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}'\
'/ipsec/vpn-profile/{}'.format(appliance, org, name)
client.delete(url)
def is_profile_exists(client, appliance, org, name):
url = '/api/config/devices/device/{}'\
'/config/orgs/org-services/{}'\
'/ipsec/vpn-profile?deep'.format(appliance, org)
result = client.get(url, None, None, codes.ok)
return find_by_name(result, "vpn-profile", name)
``` |
{
"source": "JoshuaPiinRueyPan/StatoilCompetition",
"score": 2
} |
#### File: StatoilCompetition/src/Classifier.py
```python
import tensorflow as tf
import src.RadarImage as RadarImage
import settings.OutputSettings as outSettings
from settings.SubnetSettings import SubnetFactory
import settings.TrainSettings as trainSettings
class Classifier:
def __init__(self):
self.isTraining = tf.placeholder(tf.bool)
self.trainingStep = tf.placeholder(tf.int64)
self.inputImage = tf.placeholder(tf.float32,
[None, RadarImage.DATA_WIDTH, RadarImage.DATA_HEIGHT, RadarImage.DATA_CHANNELS])
self.inputAngle = tf.placeholder(tf.float32, [None, 1])
self.groundTruth = tf.placeholder(tf.float32, [None, outSettings.NUMBER_OF_CATEGORIES])
self.net = SubnetFactory(self.isTraining, self.trainingStep, self.inputImage, self.inputAngle, self.groundTruth)
def Build(self):
self.logits, updateSubnetOperation = self.net.Build()
self.predictions = tf.nn.softmax(self.logits, name="tf.nn.softmax")
with tf.name_scope("calculateLoss"):
crossEntropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.groundTruth,
name="tf.nn.softmax_cross_entropy_with_logits")
loss = tf.reduce_mean(crossEntropy, name="tf.reduce_mean")
accuracy = self.calculateAccuracy(self.predictions, self.groundTruth)
return loss, accuracy, updateSubnetOperation
def calculateAccuracy(self, predictions_, groundTruth_):
with tf.name_scope("calculateAccuracy"):
correctPredictions = tf.equal(tf.argmax(predictions_, 1), tf.argmax(groundTruth_, 1), name="tf.equal")
accuracy = tf.reduce_mean(tf.cast(correctPredictions, tf.float32), name="tf.reduce_mean")
return accuracy
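    # Illustrative example (added, not in the original source): with predictions
    # [[0.9, 0.1], [0.3, 0.7]] and one-hot ground truth [[1, 0], [0, 1]], the
    # row-wise argmax values match, so calculateAccuracy would evaluate to 1.0.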
```
#### File: src/net/AlexnetBatchNorm.py
```python
import tensorflow as tf
from src.net.SubnetBase import SubnetBase
from src.layers.BasicLayers import *
import settings.OutputSettings as outSettings
class AlexnetBatchNorm(SubnetBase):
def __init__(self, isTraining_, trainingStep_, inputImage_, inputAngle_, groundTruth_):
self.isTraining = isTraining_
self.trainingStep = trainingStep_
self.inputImage = inputImage_
self.inputAngle = inputAngle_
self.groundTruth = groundTruth_
self.dropoutValue = 0.5
def Build(self):
net = ConvLayer('Conv1', self.inputImage, 3, 8, stride_=1, padding_='SAME')
net, updateVariablesOp1 = BatchNormalization('BN1', net, isConvLayer_=True,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = tf.nn.relu(net)
net = MaxPoolLayer('Pool1', net, kernelSize_=2)
net = ConvLayer('Conv2', net, 3, 16, stride_=1, padding_='SAME')
net, updateVariablesOp2 = BatchNormalization('BN2', net, isConvLayer_=True,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = tf.nn.relu(net)
net = MaxPoolLayer('Pool2', net, kernelSize_=2)
net = ConvLayer('Conv3', net, 3, 16, stride_=1, padding_='SAME')
net, updateVariablesOp3 = BatchNormalization('BN3', net, isConvLayer_=True,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = tf.nn.relu(net)
net = MaxPoolLayer('Pool3', net, kernelSize_=2)
net = ConvLayer('Conv4', net, 3, 16, stride_=1, padding_='SAME')
net, updateVariablesOp4 = BatchNormalization('BN4', net, isConvLayer_=True,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = tf.nn.relu(net)
net = MaxPoolLayer('Pool4', net, kernelSize_=2)
net = FullyConnectedLayer('Fc1', net, numberOfOutputs_=128)
        net, updateVariablesOp5 = BatchNormalization('BN5', net, isConvLayer_=False,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = LeakyRELU('LeakyRELU1', net)
net = tf.cond(self.isTraining, lambda: tf.nn.dropout(net, self.dropoutValue), lambda: net)
net = FullyConnectedLayer('Fc2', net, numberOfOutputs_=128)
        net, updateVariablesOp6 = BatchNormalization('BN6', net, isConvLayer_=False,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = LeakyRELU('LeakyRELU2', net)
net = tf.cond(self.isTraining, lambda: tf.nn.dropout(net, self.dropoutValue), lambda: net)
net = FullyConnectedLayer('Fc3', net, numberOfOutputs_=outSettings.NUMBER_OF_CATEGORIES)
        # group the update ops from every BatchNormalization layer so none are dropped
        updateVariablesOperations = tf.group(updateVariablesOp1, updateVariablesOp2,
                                             updateVariablesOp3, updateVariablesOp4,
                                             updateVariablesOp5, updateVariablesOp6)
return net, updateVariablesOperations
```
#### File: src/net/ResnetFat.py
```python
import tensorflow as tf
from src.net.SubnetBase import SubnetBase
from src.layers.BasicLayers import *
from src.layers.ResidualLayers import *
import settings.OutputSettings as outSettings
class ResnetFat(SubnetBase):
def __init__(self, isTraining_, trainingStep_, inputImage_, inputAngle_, groundTruth_):
self.isTraining = isTraining_
self.trainingStep = trainingStep_
self.inputImage = inputImage_
self.inputAngle = inputAngle_
self.groundTruth = groundTruth_
def Build(self):
with tf.name_scope('Layer1'):
'''
            Conv1 with (5x5, 16) filters gives the best result.
(5x5, 8) is also good.
'''
net = ConvLayer('Conv1', self.inputImage, filterSize_=5, numberOfFilters_=16,
stride_=1, padding_='SAME')
net, updateOp1 = BatchNormalization('BN_1', net, isConvLayer_=True,
isTraining_=self.isTraining, currentStep_=self.trainingStep)
net = LeakyRELU('LeakyRELU', net)
net = MaxPoolLayer('MaxPool1', net, kernelSize=2)
# Layer 2
net, updateOp2 = ResidualLayer( "ResLayer2",
net, numberOfResidualBlocks_=3, listOfConvFilterSize_=[8, 8, 32],
isTraining_=self.isTraining, trainingStep_=self.trainingStep, activationType_="RELU")
# Layer 3
net, updateOp3 = ResidualLayer( "ResLayer3",
net, numberOfResidualBlocks_=4, listOfConvFilterSize_=[16, 16, 64],
isTraining_=self.isTraining, trainingStep_=self.trainingStep, activationType_="RELU")
'''
        MaxPool seems to improve the result a little (it lowers the loss).
'''
#net = AvgPoolLayer("AveragePool", net, kernelSize=38, padding_='VALID')
net = AvgPoolLayer("AveragePool", net, kernelSize=7, padding_='SAME')
#net = MaxPoolLayer("MaxPool", net, kernelSize=38, padding_='VALID')
#net = MaxPoolLayer("MaxPool", net, kernelSize=7, padding_='SAME')
net = FullyConnectedLayer('Fc', net, numberOfOutputs_=outSettings.NUMBER_OF_CATEGORIES)
updateOperations = tf.group(updateOp1, updateOp2, updateOp3, name="groupUpdateOps")
return net, updateOperations
```
#### File: JoshuaPiinRueyPan/StatoilCompetition/Train.py
```python
import os
import tensorflow as tf
import numpy as np
import src.RadarImage
from src.DataManager import TrainingDataManager
from src.Classifier import *
import settings.TrainSettings as trainSettings
import src.layers.LayerHelper as LayerHelper
class Solver:
def __init__(self):
# Data Manager
self.dataManager = TrainingDataManager(trainSettings.TRAINING_SET_PATH_NAME, trainSettings.VALIDATION_RATIO)
self.validation_x, self.validation_x_angle, self.validation_y = self.dataManager.GetValidationSet()
# Net
self.net = Classifier()
self.lossOp, self.accuracyOp, self.updateNetOp = self.net.Build()
# Optimizer
self.learningRate = tf.placeholder(tf.float32, shape=[])
try:
            # If there are other losses (e.g. regularization loss)
otherLossOp = tf.losses.get_total_loss(add_regularization_losses=True)
totalLossOp = self.lossOp + otherLossOp
except:
# If there's no other loss op
totalLossOp = self.lossOp
#self.optimzeOp = tf.train.AdamOptimizer(learning_rate=self.learningRate).minimize(totalLossOp)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learningRate)
gradients = optimizer.compute_gradients(totalLossOp)
self.drawGradients(gradients)
self.optimzeOp = optimizer.apply_gradients(gradients)
# Summary
self.trainSumWriter = tf.summary.FileWriter(trainSettings.PATH_TO_SAVE_MODEL+"/train")
self.validaSumWriter = tf.summary.FileWriter(trainSettings.PATH_TO_SAVE_MODEL+"/valid")
self.saver = tf.train.Saver(max_to_keep=trainSettings.MAX_TRAINING_SAVE_MODEL)
self.summaryOp = tf.summary.merge_all()
def Run(self):
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
self.validaSumWriter.add_graph(sess.graph)
self.recoverFromPretrainModelIfRequired(sess)
# Calculate Validation before Training
print("Validation before Training ======================================")
self.CalculateValidation(sess, shouldSaveSummary=False)
while self.dataManager.epoch < trainSettings.MAX_TRAINING_EPOCH:
batch_x, batch_x_angle, batch_y = self.dataManager.GetTrainingBatch(trainSettings.BATCH_SIZE)
self.trainIceNet(sess, batch_x, batch_x_angle, batch_y)
self.updateIceNet(sess, batch_x, batch_x_angle, batch_y)
if self.dataManager.isNewEpoch:
print("Epoch: " + str(self.dataManager.epoch)+" ======================================")
self.CalculateTrainingLoss(sess, batch_x, batch_x_angle, batch_y)
self.CalculateValidation(sess, shouldSaveSummary=True)
if self.dataManager.epoch >= trainSettings.EPOCHS_TO_START_SAVE_MODEL:
self.saveCheckpoint(sess)
print("Optimization finished!")
def recoverFromPretrainModelIfRequired(self, session):
if trainSettings.PRETRAIN_MODEL_PATH_NAME != "":
print("Load Pretrain model from: " + trainSettings.PRETRAIN_MODEL_PATH_NAME)
listOfAllVariables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
variablesToBeRecovered = [ eachVariable for eachVariable in listOfAllVariables \
if eachVariable.name.split('/')[0] not in \
trainSettings.NAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT ]
modelLoader = tf.train.Saver(variablesToBeRecovered)
modelLoader.restore(session, trainSettings.PRETRAIN_MODEL_PATH_NAME)
def trainIceNet(self, session, batch_x, batch_x_angle, batch_y):
currentLearningRate = trainSettings.GetLearningRate(self.dataManager.epoch)
session.run( self.optimzeOp,
feed_dict={self.net.isTraining : True,
self.net.trainingStep : self.dataManager.step,
self.net.inputImage : batch_x,
self.net.inputAngle : batch_x_angle,
self.net.groundTruth : batch_y,
self.learningRate : currentLearningRate})
def updateIceNet(self, session, batch_x, batch_x_angle, batch_y):
'''
        Some networks have variables that need to be updated after each training step
        (e.g. nets with batch normalization). The following code updates such variables.
'''
session.run( self.updateNetOp,
feed_dict={self.net.isTraining : False,
self.net.trainingStep : self.dataManager.step,
self.net.inputImage : batch_x,
self.net.inputAngle : batch_x_angle,
self.net.groundTruth : batch_y})
def CalculateTrainingLoss(self, session, batch_x, batch_x_angle, batch_y):
summaryValue, lossValue, accuValue = session.run( [self.summaryOp, self.lossOp, self.accuracyOp],
feed_dict={ self.net.isTraining : False,
self.net.trainingStep : self.dataManager.step,
self.net.inputImage : batch_x,
self.net.inputAngle : batch_x_angle,
self.net.groundTruth : batch_y})
summary = tf.Summary()
summary.ParseFromString(summaryValue)
summary.value.add(tag='loss', simple_value=lossValue)
summary.value.add(tag='accuracy', simple_value=accuValue)
self.trainSumWriter.add_summary(summary, self.dataManager.epoch)
print(" train:")
print(" loss: " + str(lossValue) + ", accuracy: " + str(accuValue) + "\n")
def CalculateValidation(self, session, shouldSaveSummary):
if trainSettings.NUMBER_OF_VALIDATION_DATA <= trainSettings.BATCH_SIZE:
self.calculateValidationBywholeBatch(session, shouldSaveSummary)
else:
self.calculateValidationOneByOne(session, shouldSaveSummary)
def calculateValidationBywholeBatch(self, session, shouldSaveSummary):
summaryValue, lossValue, accuValue = session.run( [self.summaryOp, self.lossOp, self.accuracyOp],
feed_dict={ self.net.isTraining : False,
self.net.trainingStep : self.dataManager.step,
self.net.inputImage : self.validation_x,
self.net.inputAngle : self.validation_x_angle,
self.net.groundTruth : self.validation_y})
if shouldSaveSummary:
summary = tf.Summary()
summary.ParseFromString(summaryValue)
summary.value.add(tag='loss', simple_value=lossValue)
summary.value.add(tag='accuracy', simple_value=accuValue)
self.validaSumWriter.add_summary(summary, self.dataManager.epoch)
print(" validation:")
print(" loss: " + str(lossValue) + ", accuracy: " + str(accuValue) + "\n")
def calculateValidationOneByOne(self, session, shouldSaveSummary):
'''
        When dealing with a large model, feeding the whole validation set as one batch
        is not possible. Therefore, the following feeds one validation sample at a time.
'''
arrayOfValidaLoss = np.zeros( (trainSettings.NUMBER_OF_VALIDATION_DATA) )
arrayOfValidaAccu = np.zeros( (trainSettings.NUMBER_OF_VALIDATION_DATA) )
for i in range(trainSettings.NUMBER_OF_VALIDATION_DATA):
validaImage = self.validation_x[i]
validaImage = np.reshape(validaImage,
[1, RadarImage.DATA_WIDTH, RadarImage.DATA_HEIGHT, RadarImage.DATA_CHANNELS])
validaAngle = self.validation_x_angle[i]
validaAngle = np.reshape(validaAngle, [1, 1])
validaLabel = self.validation_y[i]
validaLabel = np.reshape(validaLabel, [1, 2])
lossValue, accuValue = session.run( [ self.lossOp, self.accuracyOp],
feed_dict={ self.net.isTraining : False,
self.net.trainingStep : self.dataManager.step,
self.net.inputImage : validaImage,
self.net.inputAngle : validaAngle,
self.net.groundTruth : validaLabel})
arrayOfValidaLoss[i] = lossValue
arrayOfValidaAccu[i] = accuValue
meanLoss = np.mean(arrayOfValidaLoss)
meanAccu = np.mean(arrayOfValidaAccu)
if shouldSaveSummary:
summary = tf.Summary()
summary.value.add(tag='loss', simple_value=meanLoss)
summary.value.add(tag='accuracy', simple_value=meanAccu)
self.validaSumWriter.add_summary(summary, self.dataManager.epoch)
print(" validation:")
print(" loss: " + str(meanLoss) + ", accuracy: " + str(meanAccu) + "\n")
def saveCheckpoint(self, tf_session):
pathToSaveCheckpoint = os.path.join(trainSettings.PATH_TO_SAVE_MODEL, "save_epoch_" + str(self.dataManager.epoch) )
checkpointPathFileName = os.path.join(pathToSaveCheckpoint, "IceNet.ckpt")
self.saver.save(tf_session, checkpointPathFileName)
def drawGradients(self, gradientsInfo_):
for eachGradient, eachVariable in gradientsInfo_:
if eachGradient is not None:
tf.summary.histogram(eachVariable.op.name + '/gradient', eachGradient)
if __name__ == "__main__":
solver = Solver()
solver.Run()
``` |
{
"source": "JoshuaPK/atomicdb",
"score": 2
} |
#### File: patroni-docker/scripts/callback_role.py
```python
import json
import logging
import requests
import requests.exceptions
import os
import sys
import time
TOKEN_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/token'
CA_CERT_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
API_URL = 'https://kubernetes.default.svc.cluster.local/api/v1/namespaces/{0}/pods/{1}'
logger = logging.getLogger(__name__)
NUM_ATTEMPTS = 10
LABEL = 'patroni-role'
def change_host_role_label(new_role):
try:
with open(TOKEN_FILENAME, "r") as f:
token = f.read()
except IOError:
sys.exit("Unable to read K8S authorization token")
headers = {'Authorization': 'Bearer {0}'.format(token)}
headers['Content-Type'] = 'application/json-patch+json'
url = API_URL.format(os.environ.get('POD_NAMESPACE', 'default'),
os.environ['HOSTNAME'])
data = [{'op': 'add', 'path': '/metadata/labels/{0}'.format(LABEL), 'value': new_role}]
for i in range(NUM_ATTEMPTS):
try:
r = requests.patch(url, headers=headers, data=json.dumps(data), verify=CA_CERT_FILENAME)
if r.status_code >= 300:
logger.warning("Unable to change the role label to {0}: {1}".format(new_role, r.text))
else:
break
except requests.exceptions.RequestException as e:
logger.warning("Exception when executing POST on {0}: {1}".format(url, e))
time.sleep(1)
else:
logger.warning("Unable to set the label after {0} attempts".format(NUM_ATTEMPTS))
def record_role_change(action, new_role):
# on stop always sets the label to the replica, the load balancer
# should not direct connections to the hosts with the stopped DB.
if action == 'on_stop':
new_role = 'replica'
change_host_role_label(new_role)
logger.debug("Changing the host's role to {0}".format(new_role))
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
if len(sys.argv) == 4 and sys.argv[1] in ('on_start', 'on_stop', 'on_role_change', 'on_restart'):
record_role_change(action=sys.argv[1], new_role=sys.argv[2])
else:
sys.exit("Usage: {0} action role name".format(sys.argv[0]))
return 0
if __name__ == '__main__':
main()
``` |
{
"source": "JoshuaPMallory/lambdata-JoshuaPMallory",
"score": 3
} |
#### File: lambdata-JoshuaPMallory/lambdata_joshuapmallory/test.py
```python
import unittest
import numpy as np
from utility_functions import google_drive_useable
class function_tester(unittest.TestCase):
    '''Tests for utility_functions (currently covers google_drive_useable).'''
def test_google_drive_useable(self):
'''docstring'''
self.assertEqual(google_drive_useable('https://drive.google.com/open?id=1pEOgqOZcgxwu7gA4GnFuMwO_wVBLQ7cf')
,'https://drive.google.com/uc?id=1pEOgqOZcgxwu7gA4GnFuMwO_wVBLQ7cf'
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshuapouliot27/Land-Drone",
"score": 3
} |
#### File: joshuapouliot27/Land-Drone/Drone_Server.py
```python
import json
import logging
import math
import time
import Math
import RPi.GPIO as GPIO
from websocket_server import WebsocketServer
from Background_Thread import Background_Thread
moving_left = False
moving_right = False
moving_forward = False
moving_backward = False
# Current Value Variables
current_latitude: float = None
current_longitude: float = None
current_direction_degrees: float = None
current_distance_ahead: float = 25
#current_distance_ahead: float = 0
dir_left = False
dir_right = False
dir_forward = False
dir_backward = False
stop_everything = False
gps_lat_points = set()
gps_lon_points = set()
direction_points = set()
sonar_points = set()
current_pwm = [0, 0]
# Pin Number Variables
left_motor_direction_pin = 15
right_motor_direction_pin = 16
left_motor_pwm_speed_pin = 11
right_motor_pwm_speed_pin = 12
sonar_trig_pin = 18
sonar_echo_pin = 22
# GPIO variables
left_motor_pwm: GPIO.PWM = None
right_motor_pwm: GPIO.PWM = None
# Frequency variables
main_loop_frequency = 5000
gps_frequency = 5000
sonar_frequency = 5000
# Averaging variables
sonar_points_num_averaging = 5
gps_points_num_averaging = 5
# Misc Variables
trace = True
trace_loop = False
all_stop = False
less_turn_percent = 0.4  # fraction less that a motor should turn to steer while moving forward in automated mode
right_motor_less_pwm_percent = 0.9  # tune so both motors move at the same speed; the right motor runs at (1 - right_motor_less_pwm_percent) of the left motor's speed
max_left_pwm = 1000
max_right_pwm = 250
max_left_turn_pwm = 1500
max_right_turn_pwm = 1500
# Automated Variables
time_last_turn_start = 0
turn_min_time = 3.5 # minimum time before changing turning
automated_mode = False
was_automated = False
sonar_min_distance = 4
gps_target = [0, 0]
direction_target = 0
gps_tolerance = 4 # in meters
direction_tolerance = 5 # in degrees
current_gps_index = -1
field_gps_points = [
[44.9063300, -68.6683193],
[44.9063243, -68.6682348],
[44.9062683, -68.6681932],
[44.9062256, -68.6680524],
[44.9062740, -68.6679786],
[44.9063576, -68.6679491],
[44.9068411, -68.6680645],
[44.9069075, -68.6681315],
[44.9069505, -68.6683270],
[44.9069161, -68.6687002],
[44.9068230, -68.6688034],
[44.9066948, -68.6688343],
[44.9062085, -68.6687592],
[44.9061610, -68.6657136],
[44.9061610, -68.6686090],
[44.9062142, -68.6685312],
[44.9062977, -68.6684856],
[44.9063224, -68.6683971]
] # [[lat, lon], [lat, lon]...]
house_gps_points = [
[44.924718, -68.646828]
]  # GPS points along streets near the house (one point listed)
# gps_points = field_gps_points
gps_points = house_gps_points
finished = False
def web_socket_handler(client, server, message):
if "return" in message:
json_data = get_json_string()
server.send_message(client, json_data)
else:
set_json_variables(message)
return None
def get_json_string():
data = {
"moving_left": moving_left,
"moving_right": moving_right,
"moving_forward": moving_forward,
"moving_backward": moving_backward,
"stop_everything": stop_everything,
"automated": automated_mode
}
return json.dumps(data)
def set_json_variables(json_string):
global moving_forward, moving_backward, moving_left, moving_right, stop_everything, automated_mode
json_data = json.loads(json_string)
moving_forward = bool(json_data["moving_forward"])
moving_backward = bool(json_data["moving_backward"])
moving_right = bool(json_data["moving_right"])
moving_left = bool(json_data["moving_left"])
stop_everything = bool(json_data["stop_everything"])
automated_mode = bool(json_data["automated"])
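# Example control message (illustrative only; field names are taken from
# get_json_string/set_json_variables above). A client sends the word "return"
# to request the current state, or a JSON object such as:
# {"moving_left": false, "moving_right": false, "moving_forward": true,
#  "moving_backward": false, "stop_everything": false, "automated": false}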
# def get_position():
# global current_latitude, current_longitude, current_direction_degrees
# gps_packet = gpsd.get_current()
# if gps_packet.mode > 1:
# if len(gps_lat_points) >= gps_points_num_averaging:
# for point in gps_lat_points:
# gps_lat_points.remove(point)
# break
# if len(gps_lon_points) >= gps_points_num_averaging:
# for point in gps_lon_points:
# gps_lon_points.remove(point)
# break
# if len(direction_points) >= gps_points_num_averaging:
# for point in direction_points:
# direction_points.remove(point)
# break
# gps_lat_points.add(gps_packet.lat)
# gps_lon_points.add(gps_packet.lon)
# direction_points.add(gps_packet.track)
# current_direction_degrees = math.fsum(direction_points) / len(direction_points)
# current_longitude = math.fsum(gps_lon_points) / len(gps_lon_points)
# current_latitude = math.fsum(gps_lat_points) / len(gps_lat_points)
# logging.debug("Current Position: Latitude: {0:.2}; Longitude: {1:.2}; direction: {2:.2}"
# .format(current_latitude, current_longitude, current_direction_degrees))
def get_sonar_distance():
time_start = 0
time_end = 0
GPIO.output(sonar_trig_pin, False)
time.sleep(.05)
GPIO.output(sonar_trig_pin, True)
time.sleep(.000001)
GPIO.output(sonar_trig_pin, False)
while GPIO.input(sonar_echo_pin) == 0:
time_start = time.time()
while GPIO.input(sonar_echo_pin) == 1:
time_end = time.time()
total_time = time_end - time_start
distance = (total_time / 2) * 1125.33 # Calculated in ft/s
logging.debug("Sonar distance: {0:.2} ft".format(distance))
return distance
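# The constant 1125.33 is roughly the speed of sound in air (ft/s) near 20 C, so
# halving the round-trip echo time gives a one-way distance in feet.
# Illustrative example (added): a 2 ms echo -> (0.002 / 2) * 1125.33 ~= 1.13 ft.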
def setup_sonar():
GPIO.setup(sonar_echo_pin, GPIO.IN)
GPIO.setup(sonar_trig_pin, GPIO.OUT)
GPIO.output(sonar_trig_pin, False)
logging.info("Sonar setup!")
def setup_motor_drivers():
global left_motor_pwm, right_motor_pwm
# Left
GPIO.setup(left_motor_direction_pin, GPIO.OUT)
GPIO.output(left_motor_direction_pin, False)
GPIO.setup(right_motor_direction_pin, GPIO.OUT)
GPIO.output(right_motor_direction_pin, False)
GPIO.setup(left_motor_pwm_speed_pin, GPIO.OUT)
left_motor_pwm = GPIO.PWM(left_motor_pwm_speed_pin, 1)
# Right
GPIO.output(left_motor_direction_pin, False)
GPIO.setup(right_motor_direction_pin, GPIO.OUT)
GPIO.output(right_motor_direction_pin, False)
GPIO.setup(right_motor_pwm_speed_pin, GPIO.OUT)
right_motor_pwm = GPIO.PWM(right_motor_pwm_speed_pin, 1)
set_motor_direction(True, True)
    set_motor_direction(False, True)
logging.info("Motor drivers setup!")
def setup():
setup_logging()
GPIO.setmode(GPIO.BOARD)
# GPS
#setup_gps()
# Sonar
#setup_sonar()
# Motor Drivers
setup_motor_drivers()
logging.info("Setup complete!")
# def setup_gps():
# gpsd.connect()
# packet = gpsd.get_current()
# if packet.mode < 2:
# logging.warning("GPS does not have a fix!")
# counter = 0
# while packet.mode < 2:
# if counter > 150:
# logging.error("GPS cannot get a fix!")
# return
# packet = gpsd.get_current()
# logging.warning("GPS still does not have a fix.")
# counter += 1
# time.sleep(.2)
# logging.info("GPS has fix.")
def setup_logging():
logging.basicConfig(format='%(asctime)s; %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
filename="drone.log", level=logging.DEBUG)
def ramp_pwm(end, is_left):
global current_pwm
print("left: " + str(is_left) + ", set pwm to " + str(end))
if is_left:
beginning = current_pwm[0]
else:
beginning = current_pwm[1]
if beginning is end:
return
step_max = 1000
step_freq = (step_max / 10000)
if beginning > end:
steps = math.fabs((beginning - end) // step_max)
left_over = math.fabs((beginning - end)) - steps * step_max
new_pwm = 0
for x in range(0, int(steps)):
if is_left:
new_pwm = current_pwm[0] - step_max
else:
new_pwm = current_pwm[1] - step_max
set_pwm_freq(is_left, new_pwm)
time.sleep(step_freq)
if left_over > 0:
if is_left:
new_pwm = current_pwm[0] - left_over
else:
new_pwm = current_pwm[1] - left_over
set_pwm_freq(is_left, new_pwm)
else:
        steps = math.fabs(beginning - end) // step_max  # avoid negative floor division when ramping up
left_over = math.fabs((beginning - end)) - steps * step_max
new_pwm = 0
for x in range(0, int(steps)):
if is_left:
new_pwm = current_pwm[0] + step_max
else:
new_pwm = current_pwm[1] + step_max
set_pwm_freq(is_left, new_pwm)
time.sleep(step_freq)
if left_over > 0:
if is_left:
new_pwm = current_pwm[0] + left_over
else:
new_pwm = current_pwm[1] + left_over
set_pwm_freq(is_left, new_pwm)
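# Illustrative example (added; ramp_pwm is not called elsewhere in this file):
# with step_max = 1000 and 0.1 s between steps, ramping the left PWM down from
# 1500 to 0 steps through 1500 -> 500 -> 0.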
def set_pwm_freq(is_left, freq):
print("current pwm: "+str(freq)+", left?: "+str(is_left))
global current_pwm
if is_left:
if freq is current_pwm[0]:
return
else:
if freq is current_pwm[1]:
return
if is_left:
if (freq <= 0) and current_pwm[0] > 0:
left_motor_pwm.stop()
current_pwm[0] = 0
elif 100 <= freq <= 20000 and current_pwm[0] > 0:
left_motor_pwm.ChangeFrequency(freq)
current_pwm[0] = freq
elif 100 <= freq <= 20000 and current_pwm[0] <= 0:
left_motor_pwm.start(50)
left_motor_pwm.ChangeFrequency(freq)
current_pwm[0] = freq
else:
if (freq <= 0) and current_pwm[1] > 0:
right_motor_pwm.stop()
current_pwm[1] = 0
elif 100 <= freq <= 20000 and current_pwm[1] > 0:
right_motor_pwm.ChangeFrequency(freq)
current_pwm[1] = freq
elif 100 <= freq <= 20000 and current_pwm[1] <= 0:
right_motor_pwm.start(50)
right_motor_pwm.ChangeFrequency(freq)
current_pwm[1] = freq
def set_motor_speed(percent, emergency=False, is_left=None):
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "Set motor speed to " + str(percent*100) + "%, emergency: "+str(emergency)+" isleft: "+str(is_left))
if emergency and is_left is None:
if not dir_left and not dir_right:
set_pwm_freq(False, percent * max_right_pwm)
set_pwm_freq(True, percent * max_left_pwm)
else:
set_pwm_freq(False, percent * max_right_turn_pwm)
set_pwm_freq(True, percent * max_left_turn_pwm)
elif is_left is None:
if not dir_left and not dir_right:
set_pwm_freq(True, percent * max_left_pwm)
set_pwm_freq(False, percent * max_right_pwm)
else:
set_pwm_freq(True, percent * max_left_turn_pwm)
set_pwm_freq(False, percent * max_right_turn_pwm)
else:
if is_left:
end_freq = percent * max_left_pwm
else:
end_freq = percent * max_right_pwm
thread = Background_Thread(set_pwm_freq, (is_left, end_freq))
def set_motor_direction(is_left, forward):
if is_left:
GPIO.output(left_motor_direction_pin, forward)
else:
GPIO.output(right_motor_direction_pin, forward)
def set_proper_direction():
global dir_left, dir_backward, dir_forward, dir_right
if moving_forward and not dir_forward:
        print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "set direction to forward!")
set_motor_direction(True, True)
set_motor_direction(False, True)
dir_forward = True
dir_left = False
dir_right = False
dir_backward = False
if moving_left and not dir_left:
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "set direction to left!")
set_motor_direction(True, False)
set_motor_direction(False, True)
dir_left = True
dir_forward = False
dir_right = False
dir_backward = False
if moving_right and not dir_right:
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "set direction to right!")
set_motor_direction(True, True)
set_motor_direction(False, False)
dir_right = True
dir_left = False
dir_forward = False
dir_backward = False
if moving_backward and not dir_backward:
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "set direction to backward!")
set_motor_direction(True, False)
set_motor_direction(False, False)
dir_backward = True
dir_left = False
dir_right = False
dir_forward = False
def is_proper_direction():
if moving_forward and not dir_forward:
return False
if moving_left and not dir_left:
return False
if moving_right and not dir_right:
return False
if moving_backward and not dir_backward:
return False
return True
def only_positive_numbers(number: float):
if number > 0:
return number
else:
return 0
def sonar_loop():
while True:
if all_stop:
break
if trace_loop:
print("sonar loop")
global current_distance_ahead
if len(sonar_points) >= sonar_points_num_averaging:
for point in sonar_points:
sonar_points.remove(point)
break
sonar_points.add(get_sonar_distance())
current_distance_ahead = math.fsum(sonar_points) / len(sonar_points)
time.sleep(1 / sonar_frequency)
# def gps_loop():
# while True:
# if all_stop:
# break
# if trace_loop:
# print("gps loop")
# get_position()
# time.sleep(1 / gps_frequency)
def web_socket_loop():
server = WebsocketServer(8181, host='0.0.0.0')
server.set_fn_message_received(web_socket_handler)
server.run_forever()
def correct_automated_direction():
if math.fabs(current_direction_degrees - direction_target) < direction_tolerance:
return True
else:
return False
def pos_degree(number):
if number >= 360:
new_num = number - 360
elif number < 0:
new_num = number + 360
else:
new_num = number
return new_num
def should_turn_left():
right_turn_degrees = pos_degree(direction_target - current_direction_degrees)
left_turn_degrees = pos_degree(360 - right_turn_degrees)
if left_turn_degrees <= right_turn_degrees:
return True
else:
return False
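# Illustrative example (added): with current_direction_degrees = 350 and
# direction_target = 10, the right turn is 20 degrees and the left turn is 340
# degrees, so should_turn_left() returns False (turn right); with the headings
# swapped it returns True.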
def is_moving():
if current_pwm[0] > 0 or current_pwm[1] > 0:
return True
else:
return False
def reset_gps_target():
global current_gps_index
current_gps_index = -1
def get_next_gps_target():
global current_gps_index
current_gps_index += 1
if current_gps_index >= len(gps_points):
return None
else:
return gps_points[current_gps_index]
def main_loop():
global was_automated, automated_mode, gps_target, direction_target, finished, time_last_turn_start, \
dir_right, dir_left, dir_backward, dir_forward
while True:
if trace_loop:
print("Main loop")
if all_stop:
break
# Distance Sensor
if (stop_everything or current_distance_ahead <= sonar_min_distance) and is_moving():
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "obstacle in the way or stop pressed("+str(current_distance_ahead)+" ft), emergency stopping")
set_motor_speed(0, True)
# if direction isn't proper, then stop moving change direction and start moving
if not is_proper_direction() and not stop_everything:
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "changing proper direction")
if is_moving():
set_motor_speed(0)
set_proper_direction()
time.sleep(.1)
set_motor_speed(1)
if (stop_everything or current_distance_ahead <= sonar_min_distance) and is_moving():
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "obstacle in the way or stop pressed, emergency stopping")
set_motor_speed(0, True)
# If distance is fine and remote button isn't pressed and not moving, then start moving
if current_distance_ahead > sonar_min_distance and not is_moving() \
and (moving_right or moving_left or moving_forward or moving_backward) and not stop_everything:
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "starting motors...")
set_motor_speed(1)
if (stop_everything or current_distance_ahead <= sonar_min_distance) and is_moving():
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "obstacle in the way or stop pressed, emergency stopping")
set_motor_speed(0, True)
# if not supposed to be moving, but is moving then stop moving
if ((not moving_backward and not moving_forward and not moving_left and not moving_right) or stop_everything) \
and is_moving():
print(time.strftime('{%Y-%m-%d %H:%M:%S} ') + "stopping motion")
set_motor_speed(0)
time.sleep(1 / main_loop_frequency)
try:
setup()
print("Setup complete!")
thread = Background_Thread(web_socket_loop)
#thread3 = Background_Thread(sonar_loop)
#thread2 = Background_Thread(gps_loop)
thread4 = Background_Thread(main_loop)
while True:
time.sleep(.01)
except Exception as error:
set_pwm_freq(False, 0)
set_pwm_freq(True, 0)
print("ERROR: " + str(error))
all_stop = True
GPIO.cleanup()
print("cleaned up!")
```
#### File: joshuapouliot27/Land-Drone/Math.py
```python
import math
def two_bytes_to_number(byte_high, byte_low):
number_result = 256 * byte_high + byte_low
if number_result >= 32768:
number_result -= 65536
return number_result
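# Interprets two bytes as a signed 16-bit big-endian value, e.g.
# two_bytes_to_number(0x12, 0x34) -> 4660 and two_bytes_to_number(0xFF, 0xFF) -> -1.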
def distance_between_points(lat1, lat2, lon1, lon2):
earth_radius = 6371e3 # in meters
theta_1 = math.radians(lat1)
theta_2 = math.radians(lat2)
change_theta = math.radians(lat2 - lat1)
change_lambda = math.radians(lon2 - lon1)
square_half_chord_length = (math.sin(change_theta / 2) ** 2) \
+ ((math.cos(theta_1) * math.cos(theta_2))
* (math.sin(change_lambda / 2) ** 2))
angular_distance = 2 * math.atan2(math.sqrt(square_half_chord_length), math.sqrt(1 - square_half_chord_length))
distance = earth_radius * angular_distance
return distance
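# Quick sanity check: one degree of longitude at the equator,
# distance_between_points(0, 0, 0, 1), is roughly 1.11e5 meters (~111 km).
# Note: unlike distance_between_points above, heading_between_points feeds the
# coordinates straight into math.sin/math.cos without converting them, so its
# inputs must already be in radians for the returned bearing to be correct.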
def heading_between_points(lat1, lat2, lon1, lon2):
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = (math.cos(lat1) * math.sin(lat2)) - (math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1))
bearing = math.degrees(math.atan2(y, x))
normalized_bearing = (bearing + 360) % 360
return normalized_bearing
``` |
{
"source": "joshuapouliot27/minimu-9v5_python",
"score": 4
} |
#### File: joshuapouliot27/minimu-9v5_python/calibrate.py
```python
import sys, signal, os
import time
import math
from LIS3MDL import LIS3MDL
import datetime
def handle_ctrl_c(signal, frame):
os.system("clear")
print("magXmin = ", magXmin)
print("magYmin = ", magYmin)
print("magZmin = ", magZmin)
print("magXmax = ", magXmax)
print("magYmax = ", magYmax)
print("magZmax = ", magZmax)
sys.exit(130) # 130 is standard exit code for ctrl-c
magnetometer = LIS3MDL()
# This will capture exit when using Ctrl-C
signal.signal(signal.SIGINT, handle_ctrl_c)
a = datetime.datetime.now()
# Preload the variables used to keep track of the minimum and maximum values
magXmin = 9999999
magYmin = 9999999
magZmin = 9999999
magXmax = -9999999
magYmax = -9999999
magZmax = -9999999
try:
while True:
# Read magnetometer values
MAGx = magnetometer.get_magnetometer_data().x
MAGy = magnetometer.get_magnetometer_data().y
MAGz = magnetometer.get_magnetometer_data().z
if MAGx > magXmax:
magXmax = MAGx
if MAGy > magYmax:
magYmax = MAGy
if MAGz > magZmax:
magZmax = MAGz
if MAGx < magXmin:
magXmin = MAGx
if MAGy < magYmin:
magYmin = MAGy
if MAGz < magZmin:
magZmin = MAGz
os.system("clear")
print("X: {:5} to {:5}".format(magXmin, magXmax))
print("Y: {:5} to {:5}".format(magYmin, magYmax))
print("Z: {:5} to {:5}".format(magZmin, magZmax))
# slow program down a bit, makes the output more readable
time.sleep(1/10)
except Exception:
os.system("clear")
print("X: {:5} to {:5}".format(magXmin, magXmax))
print("Y: {:5} to {:5}".format(magYmin, magYmax))
print("Z: {:5} to {:5}".format(magZmin, magZmax))
``` |
{
"source": "joshuaprewitt/update-package-repository",
"score": 2
} |
#### File: joshuaprewitt/update-package-repository/update-package-repository.py
```python
import json
import random
import requests
import sys
import systemlink.clientconfig
import time
import urllib3
# disable https warnings
urllib3.disable_warnings()
# load SystemLink HTTP configuration
configuration = systemlink.clientconfig.get_configuration('nirepo')
host = configuration.host
def dc():
# disable cache to prevent getting stale data
return '?_dc='+str(random.randrange(10000000))
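# dc() returns a query-string fragment such as '?_dc=4821073' (random per call);
# it is appended to the GET URLs below so responses are never served from cache.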
def job_status(job_id):
# poll for the job to complete
for i in range(10):
job_url = host+'/v1/jobs'+dc()+'&id='+job_id
response = requests.get(job_url, headers=configuration.api_key, verify=False)
status = json.loads(response.text)["jobs"][0]["status"]
if status == "SUCCEEDED":
break
if status == "FAILED":
break
print(status)
time.sleep(1)
return response
def check_for_updates(feed_id):
check_url = host+'/v1/feeds/'+feed_id+'/update-check'
response = requests.post(check_url, headers=configuration.api_key, verify=False)
job_id = json.loads(response.text)["jobId"]
print("JOB ID = ",job_id)
status = job_status(job_id)
return status
def get_updates(update_id):
update_url = host+'/v1/updates/'+update_id+dc()
response = requests.get(update_url, headers=configuration.api_key, verify=False)
updates = response.text
return updates
def update_feed(feed_id, updates):
update_url = host+'/v1/feeds/'+feed_id+'/update-apply?ignoreImportErrors=false&shouldOverwrite=false'
headers = {'Content-type': 'application/json'}
headers.update(configuration.api_key)
response = requests.post(update_url, verify=False, data=updates, headers=headers)
job_id = json.loads(response.text)["jobId"]
status = job_status(job_id)
return status
def main():
# updates all of the feeds in the package repository based on the feeds they were replicated from
# get feed ids
feeds_url = host+"/v1/feeds"
response = requests.get(feeds_url, headers=configuration.api_key, verify=False)
feeds = json.loads(response.text)
feed_ids = []
for feed in feeds["feeds"]:
feed_ids.append(feed["id"])
print("FEED IDS = ",feed_ids)
# check for updates
    # todo: hard-code feed ID for now, but will ultimately iterate over every feed
feed_id = '5f518c1ba9460dbc70490640'
status = check_for_updates(feed_id)
print('CHECK STATUS = ', status.text)
update_id = json.loads(status.text)["jobs"][0]["resourceId"]
# todo: continue if there are no updates or the job failed
# get list of updates
updates = get_updates(update_id)
print('UPDATES = ', updates)
# apply updates
status = update_feed(feed_id,updates)
print('APPLY STATUS = ', status.text)
if __name__ == "__main__":
main()
``` |
{
"source": "joshuaprince/ecs193",
"score": 3
} |
#### File: contrail/crawler/crawler.py
```python
import asyncio
import datetime
import logging
import sys
from typing import List
from contrail.crawler.providers import BaseProvider, REGISTERED_PROVIDER_CLASSES, import_provider_directory
logger = logging.getLogger('contrail.crawler')
providers = [] # type: List[BaseProvider]
async def _crawl_loop(provider: BaseProvider):
"""
Endlessly crawls over a single provider, yielding between successive crawls.
    :param provider: the provider instance to crawl; loops until the surrounding task is cancelled
"""
while True:
# noinspection PyBroadException
try:
time_wait = provider.crawl() # type: datetime.timedelta
except Exception:
# If a crawl attempt had an error, print error and try again in 2 minutes
logger.exception("Caught exception while crawling {provider}".format(provider=provider.provider_name()))
logger.info("Cooling down for 2 minutes before retry.")
time_wait = datetime.timedelta(minutes=2)
await asyncio.sleep(time_wait.total_seconds())
async def _dummy_loop():
# A task to keep the event loop doing something even when it has nothing else to do, so that keyboard interrupts
# don't appear to hang until something comes through the loop.
# TODO fix this workaround
while True:
await asyncio.sleep(1)
async def _main():
"""
Main async loop for the crawler.
"""
provider_tasks = [_crawl_loop(p) for p in providers]
provider_tasks.append(_dummy_loop())
await asyncio.gather(*provider_tasks)
def create_providers():
"""
Imports all providers in the crawler/providers directory, and loads them into `providers`
"""
import_provider_directory()
for provider_class in REGISTERED_PROVIDER_CLASSES:
providers.extend(provider_class.create_providers())
def crawl():
"""
Runs the crawler until a keyboard interrupt is received.
"""
logging.basicConfig(
level=logging.INFO,
handlers=[
logging.StreamHandler(sys.stdout)
]
)
logger.info("Starting crawler.")
create_providers()
logger.info("Loaded {} providers from {} provider classes.".format(len(providers), len(REGISTERED_PROVIDER_CLASSES)))
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(_main())
except KeyboardInterrupt:
print("Crawler is shutting down.")
if __name__ == '__main__':
crawl()
```
#### File: crawler/providers/azure.py
```python
import datetime
import json
import logging
from typing import List, Dict
import requests
import time
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from contrail.configuration import config
from contrail.crawler.providers import BaseProvider, register_provider
logger = logging.getLogger('contrail.crawler.azure')
URL_TOKEN_REQUEST = 'https://login.microsoftonline.com/{tenant}/oauth2/token'
"""
URL queried to obtain an Azure management access token. Replaces {tenant} with a tenant (directory) ID.
"""
URL_RATECARD_REQUEST = "https://management.azure.com:443/subscriptions/{subscriptionId}/providers/" \
"Microsoft.Commerce/RateCard?api-version=2016-08-31-preview&$filter=" \
"OfferDurableId eq 'MS-AZR-0003P' and Currency eq 'USD' " \
"and Locale eq 'en-US' and RegionInfo eq 'US'"
"""
URL queried to list prices. Replaces {subscriptionId}.
"""
URL_CAPABILITIES_REQUEST = "https://management.azure.com/subscriptions/{subscriptionId}/" \
"providers/Microsoft.Compute/skus?api-version=2017-09-01"
"""
URL queried to get instance capabilities (mapping from size to vcpus, memory, etc). Replaces {subscriptionId}.
"""
@register_provider
class Azure(BaseProvider):
def __init__(self):
super().__init__()
self._access_token = ''
self._access_token_expire = 0
@classmethod
def create_providers(cls) -> List['Azure']:
return [cls()]
def crawl(self) -> datetime.timedelta:
ratecard = self.request_ratecard()
ratecard['Capabilities'] = self.request_capabilities()
self.store_provider_data(region='US', data=ratecard)
return datetime.timedelta(minutes=60)
def request_ratecard(self):
url = URL_RATECARD_REQUEST.format(subscriptionId=config['AZURE']['subscription_id'])
response = requests.get(url, allow_redirects=False,
headers={'Authorization': 'Bearer {}'.format(self.access_token())})
# Initial request forces a redirect. Look at response headers to get the redirect URL
redirect_url = response.headers['Location']
# Get the ratecard content by making another call to go the redirect URL
rate_card = requests.get(redirect_url)
return json.loads(rate_card.content.decode('utf-8'))
def request_capabilities(self) -> Dict[str, Dict]:
"""
Get a mapping from instance sizes to parameters such as vcpus, memory, etc.
"""
url = URL_CAPABILITIES_REQUEST.format(subscriptionId=config['AZURE']['subscription_id'])
response = requests.get(url, allow_redirects=False,
headers={'Authorization': 'Bearer {}'.format(self.access_token())})
resp_dict = json.loads(response.content.decode('utf-8'))
capabilities = {}
for instance in resp_dict['value']:
if instance.get('resourceType') != 'virtualMachines':
continue
if instance.get('size') in capabilities:
# Response contains each instance size multiple times, so don't load if we've already loaded this size
continue
size = instance['size'].replace('_', ' ')
# Response lists capabilities like this: [{"name": "vCPUS", "value": 2}, {"name": "memory", "value": 8}...]
# Convert it to a more pythonic form, like {"vCPUS": 2, "memory": 8...}
capabilities[size] = {}
for capability in instance['capabilities']:
capabilities[size][capability['name']] = capability['value']
return capabilities
def _renew_access_token(self):
"""
Retrieve an access token needed to pull pricing data.
:return: The access token.
"""
logger.info("Renewing access token.")
post_data = {
'client_id': config['AZURE']['client_id'],
'grant_type': 'client_credentials',
'client_secret': config['AZURE']['client_secret'],
'resource': 'https://management.azure.com/'
}
request = Request(URL_TOKEN_REQUEST.format(tenant=config['AZURE']['tenant_id']), urlencode(post_data).encode())
response = urlopen(request).read().decode()
resp_json = json.loads(response)
self._access_token = resp_json['access_token']
self._access_token_expire = int(resp_json['expires_on'])
def access_token(self) -> str:
"""
Get a current Access token, renewing it if it is due to expire soon.
:return:
"""
# Renew access token 1 minute before the current one expires
if self._access_token_expire < time.time() + 60:
self._renew_access_token()
return self._access_token
```
#### File: contrail/crawler/s3upload.py
```python
import gzip
import json
import logging
import shutil
import urllib.request
import boto3
from contrail.configuration import config
logger = logging.getLogger('contrail.crawler')
class S3Client:
_session = boto3.Session(
aws_access_key_id=config['AWS']['access_key_id'],
aws_secret_access_key=config['AWS']['secret']
)
_client = _session.client('s3')
def upload_file_from_url(self, url: str, destination: str):
"""
Pulls data from a certain URL, compresses it, and uploads it to S3.
:param url: URL to pull data from.
:param destination: Path within S3 to store data
:return:
"""
logger.info("Uploading file {} to {}".format(url, destination))
tmpfile = "tmp.json"
zipfile = tmpfile + ".gz"
urllib.request.urlretrieve(url, tmpfile)
# source: https://docs.python.org/3/library/gzip.html
with open(tmpfile, 'rb') as f_in:
with gzip.open(zipfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
self._client.upload_file(zipfile, config['AWS']['bucket_name'], destination + ".gz")
def upload_file_from_variable(self, data: dict, destination: str):
"""
Takes data formatted as a Python dictionary, serializes it to JSON,
compresses this JSON file, and uploads it to S3.
:param data: The dictionary/list/object to serialize to JSON.
:param destination: Path within S3 to store data.
"""
logger.info("Uploading raw data to {}".format(destination))
tmpfile = "tmp-rawdata.json"
zipfile = tmpfile + ".gz"
# source: https://stackoverflow.com/questions/49534901/is-there-a-way-to-use-json-dump-with-gzip
with gzip.open(zipfile, 'wt', encoding='ascii') as f_out:
json.dump(data, f_out, indent=2)
self._client.upload_file(zipfile, config['AWS']['bucket_name'], destination + ".gz")
```
#### File: apps/contrailsite/views.py
```python
from django.http import HttpRequest
from django.shortcuts import render
from django.urls import reverse
from django.utils.http import urlencode
from django.views.generic.base import TemplateView
from contrail.frontend.apps.contrailsite.forms import PriceForm
from contrail.frontend.field_info import FIELD_INFO
from contrail.frontend.query import *
class HomeView(TemplateView):
"""
Render Home page
"""
template_name = "home.html"
def price_view(request):
"""
Render Price page
"""
context = {'form': PriceForm({
'amazon_web_services': True,
'microsoft_azure': True,
'on_demand': True,
'reserved': True,
'spot': True,
}), 'aws_regions': list_regions('aws'),
'azure_regions': list_regions('azure')}
instance_filters = {}
if request.method == 'POST':
form = PriceForm(request.POST)
if form.is_valid():
context['form'] = form
instance_filters = {'provider__in': [], 'priceType__in': []}
if form.cleaned_data['amazon_web_services']:
instance_filters['provider__in'].append('AmazonEC2')
if form.cleaned_data['microsoft_azure']:
instance_filters['provider__in'].append('Azure')
if form.cleaned_data['on_demand']:
instance_filters['priceType__in'].append('On Demand')
if form.cleaned_data['reserved']:
instance_filters['priceType__in'].append('Reserved')
if form.cleaned_data['spot']:
instance_filters['priceType__in'].append('Spot')
if form.cleaned_data['operating_system']:
instance_filters['operatingSystem'] = form.cleaned_data['operating_system']
regions = []
if form.cleaned_data['aws_region']:
regions.extend(form.cleaned_data['aws_region'])
if form.cleaned_data['azure_region']:
regions.extend(form.cleaned_data['azure_region'])
if len(regions) > 0:
instance_filters['region__in'] = regions
if form.cleaned_data['memory_from']:
instance_filters['memory__gte'] = form.cleaned_data['memory_from']
if form.cleaned_data['memory_to']:
instance_filters['memory__lte'] = form.cleaned_data['memory_to']
if form.cleaned_data['vcpu_from']:
instance_filters['vcpu__gte'] = form.cleaned_data['vcpu_from']
if form.cleaned_data['vcpu_to']:
instance_filters['vcpu__lte'] = form.cleaned_data['vcpu_to']
if form.cleaned_data['pricehr_from']:
instance_filters['pricePerHour__gte'] = form.cleaned_data['pricehr_from']
if form.cleaned_data['pricehr_to']:
instance_filters['pricePerHour__lte'] = form.cleaned_data['pricehr_to']
if [] in instance_filters.values():
instances = []
else:
instances = list_instances(page=1, **instance_filters) # TODO properly paginate
for instance in instances:
instance['url'] = reverse('instance') + '?' + urlencode(generate_detail_link_dict(instance))
context['instances'] = instances
return render(request, 'list.html', context)
def instance_view(request: HttpRequest):
"""
Render Instance Detail page
"""
filter_parameters = dict(request.GET.items())
try:
check_instance_detail_filters(**filter_parameters)
except (AttributeError, AmbiguousTimeSeries) as e:
return render(request, 'error.html', {'error': str(e)}, status=400)
except InstanceNotFound as e:
return render(request, 'error.html', {'error': '404: ' + str(e)}, status=404)
instance_details = get_instance_details(**filter_parameters) # raw instance details from database
displayed_instance_details = [] # formatted instance details
for key, value in instance_details.items():
field_info = FIELD_INFO.get(key)
if field_info and field_info.get('exclude'):
continue
if field_info:
displayed_instance_details.append({
'key': key,
'name': field_info.get('friendlyName') or key,
'value': value,
'unit': field_info.get('unit') or '',
'hint': field_info.get('hint') or '',
'link': field_info.get('link') or ''
})
else:
displayed_instance_details.append({'key': key, 'name': key, 'value': value})
# Sort details by their order in FIELD_INFO, with fields that are not defined in FIELD_INFO last.
displayed_instance_details.sort(key=lambda detail: list(FIELD_INFO.keys()).index(detail['key']) if detail['key'] in FIELD_INFO.keys() else 999)
context = {
'rawInstanceDetails': instance_details,
'instanceDetails': displayed_instance_details,
'currentPrices': get_instance_current_prices(**filter_parameters)
}
return render(request, 'instance.html', context)
def history_graph_view(request: HttpRequest):
"""
Render price history graph asynchronously
"""
filter_parameters = dict(request.GET.items())
try:
check_instance_detail_filters(**filter_parameters)
except (AttributeError, AmbiguousTimeSeries) as e:
return render(request, 'error.html', {'error': str(e)}, status=400)
except InstanceNotFound as e:
return render(request, 'error.html', {'error': '404: ' + str(e)}, status=404)
context = {
'priceHistory': get_instance_price_history(**filter_parameters)
}
return render(request, 'history_graph.html', context)
class HelpView(TemplateView):
"""
Render Help page
"""
template_name = "help.html"
def storage_view(request: HttpRequest):
all_instances = list_storage()
for instance_dict in all_instances:
instance_dict['header'] = instance_dict.get('storageMedia') + ' ' + instance_dict.get('volumeType')
all_instances.sort(key=lambda i: (i['region'], i['header']))
headers = []
for inst in all_instances:
if inst['header'] not in headers:
headers.append(inst['header'])
context = {
'headers': headers,
'allInstances': all_instances,
}
return render(request, 'storage/storage.html', context)
class AboutUs(TemplateView):
"""
Render About Us page
"""
template_name = "about.html"
```
#### File: contrail/frontend/query.py
```python
from typing import Dict, List
from cachetools import cached, TTLCache
from infi.clickhouse_orm.database import Database
from contrail.configuration import config
from contrail.loader.warehouse import InstanceData, InstanceDataLastPointView, InstanceDataLastPointViewAllReserved, InstanceDataHourlyPriceView, InstanceDataDailyPriceView, InstanceDataMonthlyPriceView
db = Database(config['CLICKHOUSE']['db_name'], db_url=config['CLICKHOUSE']['db_url'], readonly=True)
LIST_QUERY_SIZE = 1000
"""Number of instances to list on a single page"""
DISCRIMINATORS = {
'AmazonEC2': [
'provider',
'instanceType',
'region',
'operatingSystem'
],
'Azure': [
'provider',
'instanceType',
'region',
'operatingSystem'
]
}
"""
A Dict that maps each provider to the set of fields needed to uniquely identify an instance of that provider. For
example, Amazon instances have dimensions of instance type, region, and operating system, so uniquely identifying a time
series requires that we filter by at least those fields.
"""
PRICE_HISTORY_PARAMS = ['crawlTime', 'priceType', 'pricePerHour', 'priceUpfront', 'leaseContractLength', 'purchaseOption']
"""
The set of fields that vary over an instance's time series, and therefore should be included in price history data and
excluded from instance details.
"""
def generate_detail_link_dict(instance: Dict) -> Dict:
"""
Generate a dictionary that consists of this instance's provider discriminators, so that we can convert it to a link
to the instance detail page.
:param instance: A dict representing an instance, that contains `provider` keys and keys for all discriminators of
that provider.
:return: e.g. {
'provider': 'AmazonEC2',
'instanceType': 'c4.xlarge',
'operatingSystem': 'Linux',
'region': 'apeast1'
}
"""
provider = instance['provider']
details = {discriminator: instance[discriminator] for discriminator in DISCRIMINATORS[provider]}
return details
@cached(cache=TTLCache(maxsize=10, ttl=86400))
def list_regions(provider) -> List[str]:
"""
List all regions found in the `region` column of InstanceDataLastPointView.
"""
if provider == 'aws':
return list(map(lambda i: i.region, InstanceData.objects_in(db).filter(provider='AmazonEC2').distinct().only('region').order_by('region')))
else:
return list(map(lambda i: i.region, InstanceData.objects_in(db).filter(provider='Azure').distinct().only('region').order_by('region')))
def list_instances(page, **kwargs) -> List[Dict]:
"""
List known instances satisfying the filter provided.
:param kwargs: A set of filters to search for instances. These should follow Infi ORM query parameters:
https://github.com/Infinidat/infi.clickhouse_orm/blob/develop/docs/querysets.md#filtering
:return: List of instance dicts
"""
# Only query for the fields we need on an instance list
fields = ['crawlTime', 'provider', 'instanceType', 'region', 'operatingSystem', 'vcpu', 'memory', 'priceType',
'pricePerHour', 'priceUpfront', 'gpu', 'location']
instances = InstanceDataLastPointView.objects_in(db).filter(productFamily='VM', **kwargs).only(*fields).paginate(page, LIST_QUERY_SIZE)[0]
return [{k: v for k, v in instance.to_dict().items() if v} for instance in instances]
def get_instance_details(**kwargs) -> Dict:
"""
Get details about a single instance described by `kwargs`.
:param kwargs: A set of filters used to identify the desired instance. Must at least consist of the fields specified
by this provider's DISCRIMINATORS.
:return: All known, most up-to-date details about the instance being queried.
"""
query = InstanceDataLastPointView.objects_in(db).distinct().filter(**kwargs)
instance_details = {}
# Collect all details from all versions of this instance, because the "On Demand" record might know more details
# than the "Spot" record, for example.
for record in query:
for k, v in record.to_dict().items():
# Filter out null fields and price-related fields from the instance dict
if k not in PRICE_HISTORY_PARAMS and bool(v):
instance_details[k] = v
return instance_details
def get_instance_current_prices(**kwargs) -> Dict[str, Dict]:
"""
Get a dict of current pricing modes for this instance.
:param kwargs: A set of filters used to identify the desired instance. Must at least consist of the fields specified
by this provider's DISCRIMINATORS.
:return: A dict mapping a pricing mode (i.e. 'onDemand' or 'reserved1yrFullUpfront') to a price dict that consists
of crawlTime and pricePerHour, and priceUpfront if nonzero.
"""
query = InstanceDataLastPointViewAllReserved.objects_in(db).filter(**kwargs)\
.distinct().only(*PRICE_HISTORY_PARAMS).order_by('crawlTime')
price_modes = {}
for record in query: # type: InstanceData
price_dict = {'crawlTime': record.crawlTime, 'pricePerHour': record.pricePerHour}
if record.priceType == 'On Demand':
price_modes['onDemand'] = price_dict
elif record.priceType == 'Spot':
price_modes['spot'] = price_dict
else:
# Reserved price
if record.purchaseOption == 'All Upfront':
price_dict['priceUpfront'] = record.priceUpfront
price_modes['reserved{}FullUpfront'.format(record.leaseContractLength)] = price_dict
elif record.purchaseOption == 'Partial Upfront':
price_dict['priceUpfront'] = record.priceUpfront
price_modes['reserved{}PartialUpfront'.format(record.leaseContractLength)] = price_dict
elif record.purchaseOption == 'No Upfront':
price_dict['priceUpfront'] = record.priceUpfront
price_modes['reserved{}NoUpfront'.format(record.leaseContractLength)] = price_dict
return price_modes
def get_instance_price_history(record_count=100, **kwargs) -> Dict[str, List[Dict]]:
"""
Get a set of time series, each containing price history points for a given instance and its pricing mode.
:param record_count: Number of history points to retrieve per pricing mode.
:param kwargs: A set of filters used to identify the desired instance. Must at least consist of the fields specified
by this provider's DISCRIMINATORS.
:return: A dict mapping a pricing mode (i.e. 'onDemand' or 'reserved1yrFullUpfront') to a List of "history points",
where each history point is a dictionary consisting of crawlTime, priceType, and optionally pricePerHour,
priceUpfront, and leaseContractLength.
"""
# hourly_base_query = InstanceDataHourlyPriceView.objects_in(db).filter(**kwargs).only(*PRICE_HISTORY_PARAMS).order_by('-crawlTime')
daily_base_query = InstanceDataDailyPriceView.objects_in(db).filter(**kwargs).only(*PRICE_HISTORY_PARAMS).order_by('-crawlTime')
monthly_base_query = InstanceDataMonthlyPriceView.objects_in(db).filter(**kwargs).only(*PRICE_HISTORY_PARAMS).order_by('-crawlTime')
# base_query = InstanceData.objects_in(db).filter(**kwargs).distinct().only(*PRICE_HISTORY_PARAMS).order_by('-crawlTime')
# Get a time series from the last several entries in the database that match this filter
price_history = {
# 'hourlyOnDemand': list(hourly_base_query.filter(priceType='On Demand')[:record_count]),
# 'hourlySpot': list(hourly_base_query.filter(priceType='Spot')[:record_count]),
# 'hourlyReserved1yrNoUpfront': list(hourly_base_query.filter(priceType='Reserved', offeringClass='standard', leaseContractLength='1yr', purchaseOption='No Upfront')[:record_count]),
'dailyOnDemand': list(daily_base_query.filter(priceType='On Demand')[:record_count]),
'dailySpot': list(daily_base_query.filter(priceType='Spot')[:record_count]),
'dailyReserved1yrNoUpfront': list(daily_base_query.filter(priceType='Reserved', offeringClass='standard', leaseContractLength='1yr', purchaseOption='No Upfront')[:record_count]),
'monthlyOnDemand': list(monthly_base_query.filter(priceType='On Demand')[:record_count]),
'monthlySpot': list(monthly_base_query.filter(priceType='Spot')[:record_count]),
'monthlyReserved1yrNoUpfront': list(monthly_base_query.filter(priceType='Reserved', offeringClass='standard', leaseContractLength='1yr', purchaseOption='No Upfront')[:record_count]),
}
# Build our own list of "price history point" dicts, since we don't want to include null or zero fields
price_history_points = {k: [] for k, v in price_history.items() if v}
for price_mode in price_history.keys():
for inst in price_history[price_mode]:
current_inst = {}
for param in PRICE_HISTORY_PARAMS:
if getattr(inst, param) is not None:
current_inst[param] = getattr(inst, param)
price_history_points[price_mode].append(current_inst)
return price_history_points
def check_instance_detail_filters(**kwargs):
"""
Ensure that the filters used to query a single instance return **exactly one unique instance**, raising an exception
otherwise.
:param kwargs: Set of filters used to identify the desired instance.
:except AttributeError: if a provided field does not exist in the instance data.
:except AmbiguousTimeSeries: if the filters provided are not enough to distinctly identify one instance.
:except InstanceNotFound: if there are no instances in the database that match the provided filters.
"""
if 'provider' not in kwargs.keys():
raise AmbiguousTimeSeries("provider must be specified")
if kwargs['provider'] not in DISCRIMINATORS.keys():
raise InstanceNotFound("Unknown provider: " + kwargs['provider'])
# Queries must specify all discriminators, else returned price history will be ambiguous.
for discriminator in DISCRIMINATORS[kwargs['provider']]:
if discriminator not in kwargs.keys():
raise AmbiguousTimeSeries("Missing discriminator: " + discriminator)
try:
InstanceDataLastPointView.objects_in(db).filter(**kwargs).order_by('-crawlTime')[0]
except AttributeError:
raise
except StopIteration:
raise InstanceNotFound("No instance found matching query.")
class AmbiguousTimeSeries(LookupError):
"""
Error raised if attempting to query a single instance, but the filters provided may return two or more instances.
"""
pass
class InstanceNotFound(LookupError):
"""
Error raised if attempting to query a single instance, but no such instance exists.
"""
pass
def list_storage():
"""
List all current storage options and their prices.
:return:
"""
fields = ['crawlTime', 'provider', 'region', 'pricePerHour', 'maxThroughputVolume', 'storageMedia', 'volumeType']
instances = InstanceDataLastPointView.objects_in(db).filter(productFamily='Storage').only(*fields).distinct()
instance_dicts = []
for instance in instances:
instance_dict = {}
for k, v in instance.to_dict().items():
if k == 'pricePerHour':
k = 'pricePerGbMonth'
if v:
instance_dict[k] = v
instance_dicts.append(instance_dict)
return instance_dicts
```
#### File: loader/loaders/aws_ec2_spot.py
```python
import collections
import logging
from infi.clickhouse_orm.database import Database
from contrail.crawler.providers.aws_ec2_spot import AmazonEC2Spot
from contrail.loader.warehouse import InstanceData
from contrail.loader.loaders import BaseLoader, register_loader
from contrail.loader.normalizers import normalizeData
logger = logging.getLogger('contrail.loader.aws_ec2')
def nested_dict_iter(nested):
lower = lambda s: s[:1].lower() + s[1:] if s else ''
for key, value in nested.items():
if isinstance(value, collections.abc.Mapping):
yield from nested_dict_iter(value)
else:
yield lower(key), value
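# Flattens nested dicts into (key, value) pairs with a lower-cased first letter,
# e.g. {"Attributes": {"InstanceType": "m3.large"}} yields ("instanceType", "m3.large").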
def getSpotData(d, last_modified, region):
attributes = []
data = []
for instance in d:
values = []
for key, value in dict(nested_dict_iter(instance)).items():
try:
                try:
                    if normalizeData(key, value) is None:
                        pass
                    else:
                        key, value = normalizeData(key, value)
                except ValueError:
                    values.extend(normalizeData(key, value))
if isinstance(value, tuple):
values.extend((key, value))
else:
values.append((key, value))
except(KeyError, AttributeError):
pass
k, v = normalizeData('region', region)
values += [('priceType', 'Spot'), (k, v), ['provider', 'AmazonEC2']]
instanceType = [val for val in values if val[0] == 'instanceType'][0][1]
data.append(values)
return data
@register_loader(provider=AmazonEC2Spot)
class AmazonEC2SpotLoader(BaseLoader):
@classmethod
def load(cls, filename: str, json: dict, last_modified: str, db: Database):
logger.info("Loading {} into ClickHouse.".format(filename.split('/')[-1]))
region = "{}".format(filename.split('/')[1])
spot_data = getSpotData(json, last_modified, region)
instances = []
for item in spot_data:
instance = InstanceData()
for i in item:
setattr(instance, i[0], i[1])
instances.append(instance)
insertables = [instances[i * 1000:(i + 1) * 1000] for i in range((len(instances) + 1000 - 1) // 1000 )]
for inst in insertables:
db.insert(inst)
```
#### File: ecs193/contrail/main.py
```python
import argparse
import os
import sys
from contrail.configuration import config, CFG_FILE
def check_config(*required_sections):
for section in required_sections:
for key in config[section]:
if not config[section][key]:
print("Couldn't find required configuration setting '{}'. Please set one in {}".format(key, CFG_FILE))
exit(1)
def run_crawler(args):
check_config('AWS', 'AZURE')
from contrail.crawler import crawler
crawler.crawl()
def run_loader(args):
check_config('AWS', 'CLICKHOUSE')
from contrail.loader import loader
loader.load()
def run_initdb(args):
from contrail.loader.warehouse import create_contrail_table
create_contrail_table(True)
def run_fixdb(args):
from contrail.loader.warehouse import fix_aggregated_data
fix_aggregated_data()
def run_frontend(args):
check_config('CLICKHOUSE', 'WEBSITE')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'contrail.frontend.settings.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv[1:])
def run_tests(args):
import unittest
suite = unittest.TestLoader().discover('test', top_level_dir='.')
unittest.TextTestRunner().run(suite)
def main():
print("Starting CONTRAIL with configuration file {}".format(CFG_FILE))
parser = argparse.ArgumentParser(description='Run one of Contrail\'s components.')
subparsers = parser.add_subparsers(dest="component")
subparsers.required = True
parser_crawler = subparsers.add_parser('crawler', aliases=['c'])
parser_crawler.set_defaults(func=run_crawler)
parser_loader = subparsers.add_parser('loader', aliases=['l'])
parser_loader.set_defaults(func=run_loader)
parser_frontend = subparsers.add_parser('frontend', aliases=['f', 'manage.py', 'manage', 'm'])
parser_frontend.set_defaults(func=run_frontend)
parser_test = subparsers.add_parser('test', aliases=['t'])
parser_test.set_defaults(func=run_tests)
parser_init = subparsers.add_parser('initdb')
parser_init.set_defaults(func=run_initdb)
parser_fix = subparsers.add_parser('fixdb')
parser_fix.set_defaults(func=run_fixdb)
args, unknown = parser.parse_known_args()
args.func(args)
if __name__ == '__main__':
main()
```
#### File: crawler/providers/test_aws_ec2.py
```python
import json
import unittest
import requests
from contrail.crawler.providers.aws_ec2 import AmazonEC2, URL_REGION_VERSION
class AmazonEC2TestCase(unittest.TestCase):
def setUp(self):
self.instance = AmazonEC2.create_providers()[0]
def test_load_regions(self):
"""
Check that the EC2 loader has correctly loaded AWS regions.
"""
self.instance.load_regions()
# Check for a well-known region to be in the region queue
self.assertIn('us-west-1', self.instance.region_queue, "Couldn't find us-west-1 region.")
# Check that get_next_region() returns at least one region before the region queue is empty.
loaded_region = self.instance.get_next_region()
self.assertIsNotNone(loaded_region, "No regions were loaded into AmazonEC2.regions.")
# Loop over all regions in queue and check that currentVersionUrl is a JSON URL.
while loaded_region is not None:
self.assertRegex(
loaded_region['currentVersionUrl'], r'^/.*\.json$',
"Region {} had an invalid currentVersionUrl.".format(loaded_region)
)
loaded_region = self.instance.get_next_region()
def test_file_structure(self):
"""
Check several structural components of the raw data to ensure it is in the proper format to be loaded.
"""
try:
self.instance.load_regions()
# Get the us-west-1 region to test on
region = self.instance.get_next_region()
while region['regionCode'] != 'us-west-1':
self.assertTrue(self.instance.region_queued(), "Couldn't find region 'us-west-1'.")
region = self.instance.get_next_region()
region_url_postfix = region['currentVersionUrl']
response_obj = requests.get(URL_REGION_VERSION.format(currentVersionUrl=region_url_postfix))
response_json = response_obj.content.decode('utf-8')
response_dict = json.loads(response_json)
self.assertEqual(response_dict['formatVersion'], 'v1.0', "formatVersion mismatch: at /formatVersion")
self.assertEqual(response_dict['offerCode'], 'AmazonEC2', "offerCode mismatch: at /offerCode")
self.assertIn('version', response_dict, "version missing: at /version")
self.assertIn('products', response_dict, "products list missing: at /products/")
for name, product in response_dict['products'].items():
if product['productFamily'] == 'Data Transfer':
# For now, ignore data transfer products, which are not compute instances and don't match format
continue
self.assertIn('sku', product, "sku missing: at /products/{0}/sku".format(name))
self.assertIn('attributes', product, "attributes missing: at /products/{0}/attributes/".format(name))
self.assertIn('location', product['attributes'],
"location missing: at /products/{0}/attributes/location".format(name))
self.assertIn('terms', response_dict, "terms missing: at /terms/")
for paymentType in ('OnDemand', 'Reserved'):
self.assertIn(paymentType, response_dict['terms'], "{0} missing: at /terms/{0}/".format(paymentType))
for name, product in response_dict['terms'][paymentType].items():
offer = list(product.values())[0]
self.assertIn('offerTermCode', offer,
"offerTermCode missing: at /terms/{0}/{1}/offerTermCode/".format(paymentType, name))
except AssertionError:
with open('ec2_erroneous.json', 'w') as outfile:
json.dump(response_dict, outfile, indent=2)
print("Encountered a structural error with the JSON retrieved from AWS.")
print("Writing the erroneous data to 'ec2_erroneous.json'")
print("The location of the error within the JSON hierarchy can be found in the error message below.")
raise
```
#### File: crawler/providers/test_aws_ec2_spot.py
```python
import unittest
import unittest.mock
import copyingmock
from contrail.crawler.providers.aws_ec2_spot import AmazonEC2Spot
class AmazonEC2SpotTestCase(unittest.TestCase):
def setUp(self):
self.instances = AmazonEC2Spot.create_providers()
def test_crawl(self):
"""
Check that some number of crawl iterations result in uploading data to S3.
"""
mocked_instance = self.instances[0]
mocked_instance.store_provider_data = copyingmock.CopyingMock()
# The crawl procedure may take several iterations before attempting to upload data.
# Continue calling crawl() until it does call store_provider_data().
while not mocked_instance.store_provider_data.called:
mocked_instance.crawl()
uploaded_data = mocked_instance.store_provider_data.call_args[1]['data']
# Check that at least one instance was found
self.assertGreater(len(uploaded_data), 0)
# Check for common fields on each instance
for instance in uploaded_data:
self.assertIn('InstanceType', instance)
self.assertIn('SpotPrice', instance)
self.assertIn('vcpu', instance)
self.assertIn('memory', instance)
self.assertIn('Timestamp', instance)
def test_load_instance_type_details(self):
"""
        Ensure that we can load details about instance types (i.e. a mapping from a type such as 't2.xlarge' to its vCPU count, memory, etc.)
"""
instance_types = AmazonEC2Spot.get_instance_type_details()
# Make sure that one (arbitrarily selected) instance type can be indexed in this details dict
self.assertIn('m3.large', instance_types)
# Make sure that each instance type's value contains some general information about that instance type
for instance_type in iter(instance_types.values()):
self.assertIn('instanceType', instance_type)
self.assertIn('vcpu', instance_type)
self.assertIn('memory', instance_type)
self.assertIn('storage', instance_type)
```
#### File: crawler/providers/test_base_provider.py
```python
import datetime
import unittest
from contrail.crawler.providers import REGISTERED_PROVIDER_CLASSES, import_provider_directory, BaseProvider, register_provider
class BaseProviderTestCase(unittest.TestCase):
def setUp(self):
pass
def test_register_provider(self):
"""
Test the @register_provider decorator.
"""
@register_provider
class TestProvider(BaseProvider):
@classmethod
def create_providers(cls):
return [cls()]
@classmethod
def provider_name(cls):
return "Test Provider"
def crawl(self):
return datetime.timedelta(seconds=5)
self.assertIn(TestProvider, REGISTERED_PROVIDER_CLASSES,
"@register_provider failed to add a class to REGISTERED_PROVIDER_CLASSES.")
def test_import_provider_directory(self):
"""
Test that we can import the provider directory, and that it discovers at least one provider.
"""
import_provider_directory()
self.assertTrue(len(REGISTERED_PROVIDER_CLASSES) > 0,
"Failed to register any provider classes. The crawler won't do anything.")
def test_all_providers_extend_base(self):
"""
Test that all registered providers extend from BaseProvider.
"""
import_provider_directory()
for provider in REGISTERED_PROVIDER_CLASSES:
self.assertTrue(issubclass(provider, BaseProvider),
"All classes decorated with @register_provider must inherit from BaseProvider.")
```
#### File: loaders/aws_ec2/test_load_aws_ec2.py
```python
import json
import unittest
from typing import List
from infi.clickhouse_orm.database import Database
from contrail.configuration import config
from contrail.loader.loaders.aws_ec2 import AmazonEC2Loader
from contrail.loader.warehouse import InstanceData
class LoadAmazonEC2TestCase(unittest.TestCase):
def setUp(self):
self.test_db = Database(db_name='contrail_test', db_url=config['CLICKHOUSE']['db_url'])
# Make sure we have a clean database to test on
if self.test_db.db_exists:
self.test_db.drop_database()
self.test_db.create_database()
self.test_db.create_table(InstanceData)
def tearDown(self):
self.test_db.drop_database()
pass
def test_load_file(self):
"""
Basic tests loading the sample file.
Sample file is formatted to the following specs:
- 1 product ('VG6Z') with no instanceSKU
- 2 On Demand terms; one for G5FF, one for VG6Z
- 12 Reserved terms for VG6Z
- All instances share the same hardware and software configurations
"""
with open('test/loader/loaders/aws_ec2/ec2_limited.json', 'r') as js_file:
data_dict = json.load(js_file)
AmazonEC2Loader.load(
filename="AmazonEC2/ap-northeast-1/2019-04-15T03:55:18.174709.json.gz",
json=data_dict,
last_modified="2019-04-15T03:55:18.174709",
db=self.test_db
)
instances = list(InstanceData.objects_in(self.test_db)) # type: List[InstanceData]
self.assertEqual(len(instances), 13) # 1 on demand + 12 reserved
# Check for correct hardware/software parameters common to all offers
for inst in instances:
self.assertEqual(inst.instanceType, 'c4.4xlarge')
self.assertEqual(inst.operatingSystem, 'Linux')
self.assertEqual(inst.vcpu, 16)
self.assertEqual(inst.clockSpeed, 2.9)
self.assertEqual(inst.memory, 30)
# Check for several different pricing types to be loaded correctly
# On Demand price
self.assertTrue(any(inst.pricePerHour == 2.928 for inst in instances))
# Reserved price: all up front
self.assertTrue(any(inst.priceUpfront == 63106 and inst.leaseContractLength == '3yr' for inst in instances))
self.assertTrue(any(inst.priceUpfront == 22726 and inst.leaseContractLength == '1yr' for inst in instances))
# Reserved price: half up front/half hourly
self.assertTrue(any(inst.priceUpfront == 32966 and inst.pricePerHour == 1.254 for inst in instances))
# Reserved price: all hourly
self.assertTrue(any(inst.pricePerHour == 2.751 for inst in instances))
``` |
{
"source": "joshuapwalton/cloud-reset",
"score": 2
} |
#### File: lib/modules/aws_ec2.py
```python
from ..BaseResource import BaseResource
import boto3
from pprint import pprint
class Resource(BaseResource):
name = 'aws_ec2'
type = 'ec2'
client = None
dry_run = True
ids = []
resources = []
def __init__(self):
self.client = boto3.client(self.type)
def get_resources(self):
client = self.client
paginator = client.get_paginator('describe_instances')
for page in paginator.paginate():
for reservation in page['Reservations']:
for instance in reservation["Instances"]:
if instance.get('InstanceId'):
self.ids.append(instance['InstanceId'])
self.resources.append({
"Id": instance['InstanceId'],
"Tags": instance.get('Tags'),
})
return self.ids
def list_resources(self):
self.get_resources()
pprint(self.ids)
pass
def delete_resources(self, ids):
""" delete resources specified by ids list"""
client = self.client
if self.dry_run:
print('dry_run flag set')
try:
response = client.terminate_instances(
InstanceIds=ids,
DryRun=self.dry_run
)
return True
except Exception as error:
if error.response.get('Error').get('Code') == 'DryRunOperation':
pass
else:
raise
``` |
{
"source": "JoshuaQChurch/EarthSim",
"score": 3
} |
#### File: EarthSim/earthsim/io.py
```python
import numpy as np
import pandas as pd
import fiona
import geoviews as gv
import dask.dataframe as dd
import xarray as xr
import cartopy.crs as ccrs
from osgeo import gdal, osr
def get_sampling(bounds, shape):
"""
Generates x/y coordinates from bounds and shape.
Parameters
----------
bounds - tuple(float, float, float, float)
north, south, east, west
shape - tuple(int, int)
rows, cols
"""
rows, cols = shape
y1, y0, x1, x0 = bounds
xunit = (x1-x0)/cols/2.
yunit = (y1-y0)/rows/2.
xs = np.linspace(x0+xunit, x1-xunit, cols)
ys = np.linspace(y0+yunit, y1-yunit, rows)
return xs, ys
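# Example: get_sampling((2.0, 0.0, 4.0, 0.0), (2, 4)) places samples at cell
# centres, giving xs = [0.5, 1.5, 2.5, 3.5] and ys = [0.5, 1.5].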
def open_gssha(filename):
"""
Reads various filetypes produced by GSSHA
"""
# Read metadata
ftype = filename.split('.')[-1]
if ftype in ['fgd', 'asc']:
f = open(filename, 'r')
c, r, xlc, ylc, gsize, nanval = [
t(f.readline().split(' ')[-1].split('\n')[0])
for t in [int, int, float, float, float, float]
]
xs = np.linspace(xlc+gsize/2., xlc+c*gsize-gsize/2., c+1)
ys = np.linspace(ylc+gsize/2., ylc+r*gsize-gsize/2., r)
else:
header_df = pd.read_table(filename, engine='python',
names=['meta_key', 'meta_val'],
sep=' ', nrows=6)
bounds = header_df.loc[:3, 'meta_val'].values.astype(float)
r, c = header_df.loc[4:6, 'meta_val'].values.astype(int)
xs, ys = get_sampling(bounds, (r, c))
# Read data using dask
ddf = dd.read_csv(filename, skiprows=6, header=None,
sep=' ')
darr = ddf.values.compute()
if ftype == 'fgd':
darr[darr==nanval] = np.NaN
return xr.DataArray(darr[::-1], coords={'x': xs, 'y': ys},
name='z', dims=['y', 'x'])
def get_ccrs(filename):
"""
Loads WKT projection string from file and return
cartopy coordinate reference system.
"""
inproj = osr.SpatialReference()
proj = open(filename, 'r').readline()
inproj.ImportFromWkt(proj)
projcs = inproj.GetAuthorityCode('PROJCS')
return ccrs.epsg(projcs)
def read_3dm_mesh(fpath, skiprows=1):
"""
Reads a 3DM mesh file and returns the simplices and vertices as dataframes
Parameters
----------
fpath: str
Path to 3dm file
Returns
-------
tris: DataFrame
Simplexes of the mesh
verts: DataFrame
Vertices of the mesh
"""
all_df = pd.read_table(fpath, delim_whitespace=True, header=None, skiprows=skiprows,
names=('row_type', 'cmp1', 'cmp2', 'cmp3', 'val'), index_col=1)
conns = all_df[all_df['row_type'].str.lower() == 'e3t'][['cmp1', 'cmp2', 'cmp3']].values.astype(int) - 1
pts = all_df[all_df['row_type'].str.lower() == 'nd'][['cmp1', 'cmp2', 'cmp3']].values.astype(float)
pts[:, 2] *= -1
verts = pd.DataFrame(pts, columns=['x', 'y', 'z'])
tris = pd.DataFrame(conns, columns=['v0', 'v1', 'v2'])
return tris, verts
def read_mesh2d(fpath):
"""
Loads a .dat file containing mesh2d data corresponding to a 3dm mesh.
Parameters
----------
fpath: str
Path to .dat file
Returns
-------
dfs: dict(int: DataFrame)
A dictionary of dataframes indexed by time.
"""
attrs = {}
with open(fpath, 'r') as f:
dataset = f.readline()
if not dataset.startswith('DATASET'):
raise ValueError('Expected DATASET file, cannot read data.')
objtype = f.readline()
if not objtype.startswith('OBJTYPE "mesh2d"'):
raise ValueError('Expected "mesh2d" OBJTYPE, cannot read data.')
_ = f.readline()
nd, nc = f.readline(), f.readline()
name = f.readline()[6:-2]
unit = f.readline()
df = pd.read_table(fpath, delim_whitespace=True,
header=None, skiprows=7, names=[0, 1, 2]).iloc[:-1]
ts_index = np.where(df[0]=='TS')[0]
indexes = [df.iloc[idx, 2] for idx in ts_index]
dfs = {}
for time, tdf in zip(indexes, np.split(df, ts_index)[1:]):
tdf = tdf.iloc[1:].astype(np.float64).dropna(axis=1, how='all')
if len(tdf.columns) == 1:
tdf.columns = [name]
else:
tdf.columns = [name+'_%d' % c for c in range(len(tdf.columns))]
dfs[time] = tdf.reset_index(drop=True)
return dfs
def save_shapefile(cdsdata, path, template):
"""
Accepts bokeh ColumnDataSource data and saves it as a shapefile,
using an existing template to determine the required schema.
"""
collection = fiona.open(template)
arrays = [np.column_stack([xs, ys]) for xs, ys in zip(cdsdata['xs'], cdsdata['ys'])]
polys = gv.Polygons(arrays, crs=ccrs.GOOGLE_MERCATOR)
projected = gv.operation.project_path(polys, projection=ccrs.PlateCarree())
data = [list(map(tuple, arr)) for arr in projected.split(datatype='array')]
shape_data = list(collection.items())[0][1]
shape_data['geometry']['coordinates'] = data
with fiona.open(path, 'w', collection.driver, collection.schema, collection.crs) as c:
c.write(shape_data)
``` |
{
"source": "joshuar500/poem-scraper",
"score": 3
} |
#### File: poem-scraper/poemcrawler/models.py
```python
from sqlalchemy import create_engine, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.engine.url import URL
import settings
DeclarativeBase = declarative_base()
def db_connect():
"""
Performs database connection using database settings from settings.py.
Returns sqlalchemy engine instance
"""
return create_engine(URL(**settings.DATABASE))
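# settings.DATABASE is expected to be a dict of sqlalchemy URL keyword arguments;
# a hypothetical example (placeholder values, not taken from this repo):
# DATABASE = {'drivername': 'postgresql', 'username': 'user', 'password': 'pass',
#             'host': 'localhost', 'port': '5432', 'database': 'poems'}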
def create_poetry_table(engine):
DeclarativeBase.metadata.create_all(engine)
class Poem(DeclarativeBase):
__tablename__ = "poem"
title = Column(String(250), nullable=False)
id = Column(Integer, primary_key=True)
poem = Column(Text(), nullable=False)
author = Column(String(250), nullable=False)
url = Column(String(350), nullable=False)
# Some stuff for PostgreSQL
# insert into Authors (author)
# select distinct author
# from Poems;
# ALTER TABLE poem
# ADD COLUMN author_id integer,
# ADD FOREIGN KEY (author_id) REFERENCES author (id);
# update poem
# set author_id = author.id
# from author
# where author.name = poem.author;
```
#### File: poemcrawler/spiders/poem_spider.py
```python
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from poemcrawler.items import PoemItem
class PoetrySpider(CrawlSpider):
name = "poemhunter"
allowed_domains = ["poemhunter.com"]
start_urls = [
"http://www.poemhunter.com/classics/"
]
rules = (
Rule(LinkExtractor(
allow="classics/(.*?)",
# We never want any 'members' profiles
deny=["members", "poets"],
restrict_xpaths="//div[contains(@class, 'pagination')]/ul/li[contains(@class, 'next')]"),
follow=True),
Rule(LinkExtractor(
deny=["members", "poets"],
restrict_xpaths="//a[contains(@class, 'name')]"),
follow=True),
Rule(LinkExtractor(
# After first follow, exclude anything that doesn't contain "poems"
allow="(.*?)/poems",
deny=["members", "/(.*?)/"],
restrict_xpaths="//a[contains(@class, 'name')]/poems"),
follow=True),
Rule(LinkExtractor(
allow="/poem/(.*?)",
deny=["members", "/(.*?)/poems", "/(.*?)/comments"],
restrict_xpaths=["//a",]),
callback='parse_item',
follow=True),
)
    # These are the exact XPaths to the required text.
def parse_item(self, response):
for sel in response.xpath("//body"):
item = PoemItem()
item['author'] = sel.xpath("//div/div[contains(@id, 'content')]/h2/text()").extract()[0].strip()
item['title'] = sel.xpath("///div/div/div/div/div/div/div/div/a/span[contains(@itemprop, 'title')]/text()").extract()[0].strip()
item['poem'] = [x.strip() for x in sel.xpath("//div[contains(@class, 'poem-detail')]/div/div[contains(@class, 'KonaBody')]/p/text()").extract() if x.strip()]
item['url'] = response.request.url
yield item
``` |
{
"source": "joshuar500/swiss-tournament-db",
"score": 3
} |
#### File: vagrant/tournament/tournament.py
```python
import psycopg2
import bleach
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
try:
return psycopg2.connect("dbname=tournament")
    except Exception as e:
        raise e
def commitAndCloseConnect(c):
c.commit()
c.close()
def deleteMatches():
"""Remove all the match records from the database."""
connection = connect()
c = connection.cursor()
c.execute("DELETE FROM result;")
c.execute("DELETE FROM match;")
commitAndCloseConnect(connection)
def deletePlayers():
"""Remove all the player records from the database."""
connection = connect()
c = connection.cursor()
c.execute("DELETE FROM player;")
commitAndCloseConnect(connection)
def deleteTournaments():
"""Remove all tournament records from the database."""
connection = connect()
c = connection.cursor()
c.execute("DELETE FROM tournament;")
commitAndCloseConnect(connection)
def countPlayers():
"""Returns the number of players currently registered."""
connection = connect()
c = connection.cursor()
c.execute("SELECT COUNT(*) from player;")
players = c.fetchone()[0]
connection.close()
return players
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
connection = connect()
c = connection.cursor()
c.execute("INSERT INTO player (name) values (%s);", (bleach.clean(name),))
commitAndCloseConnect(connection)
def createTournament(name):
"""Adds a tournament to the tournament database.
The database assigns a unique serial id number for the tournament.
Args:
name: the name of the tournament (need not be unique)
Return:
ID of the created tournament
"""
connection = connect()
cursor = connection.cursor()
cursor.execute("""insert into tournament (name)
values(%s) returning id;""", (bleach.clean(name),))
tournament_id = cursor.fetchone()[0]
commitAndCloseConnect(connection)
return tournament_id
def playerStandings(tournament):
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player
in first place, or a player tied for first place
if there is currently a tie.
Args:
tournament: the id of the tournament
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
connection = connect()
c = connection.cursor()
c.execute("""SELECT player_id, player_name, wins, ties, matches
FROM standings where tournament_id=%s""", (bleach.clean(tournament),))
standings = c.fetchall()
connection.close()
return standings
def reportMatch(t, p1, p2, w):
"""Records the outcome of a single match between two players.
Args:
t: the id of the tournament
p1: the id of player1
p2: the id of player2
w: the id number of the player who won
"""
connection = connect()
c = connection.cursor()
c.execute(
"""
insert into match (tournament_id, player1, player2)
values(%(tournament)s, %(player1)s, %(player2)s)
returning id;
""",
{'tournament': bleach.clean(t), 'player1': bleach.clean(p1), 'player2': bleach.clean(p2)})
match_id = c.fetchone()[0]
if w:
c.execute("""
insert into result (match_id, winner)
values(%(match_id)s, %(winner)s);
""", {'match_id': bleach.clean(match_id), 'winner': bleach.clean(w)})
commitAndCloseConnect(connection)
def swissPairings(tournament):
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Args:
tournament: the id of the tournament
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
connection = connect()
c = connection.cursor()
#
# Get player id, player name, and player byes
# from player standings and opponent standings
# where tournament is referenced in both
# player standings and opponent standings
# and player id is equal to opponent id
# store into a variable 'standings'
c.execute(
"""
select p.player_id, p.player_name, p.byes
from standings p, opponent_standings o
where p.tournament_id=%s
and p.tournament_id=o.tournament_id
and p.player_id=o.player_id
order by p.wins, p.losses desc, o.wins, o.losses desc
""",
(tournament,))
standings = c.fetchall()
match = ()
matches = []
    # A bye is only needed when an odd number of players are registered,
    # i.e. when the standings list cannot be split evenly into pairs.
    bye_needed = len(standings) % 2 != 0
for player in standings:
if bye_needed and player[2] == 0:
matches.append(player[0:2] + (None, None))
else:
match = match + player[0:2]
if len(match) == 4:
matches.append(match)
match = ()
connection.close()
return list(set(matches))
``` |
{
"source": "Joshua-Ren/better_supervisory_signal",
"score": 2
} |
#### File: Joshua-Ren/better_supervisory_signal/main_gen_teacher.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.utils.data as Data
import torchvision
import torchvision.transforms as T
import pandas as pd
import numpy as np
import os
import copy
import argparse
import random
from utils import *
save_list = [4, 50, 78, 118, 158, 178, 199]
def parse():
parser = argparse.ArgumentParser(description='Generate learning path for CIFAR10/100')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--scheduler',default='cosine',type=str,help='cosine or multi')
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--net',default='resnet18',type=str,
help='resnet18, resnet50, efficientb3 (with pre-train), mobile, vgg')
parser.add_argument('--dataset', default='cifar10', type=str, help='cifar10, cifar100')
parser.add_argument('--ny_ratio', default=0.,type=float)
parser.add_argument('--batch_size',default=250, type=int)
parser.add_argument('--seed',default=10086,type=int)
parser.add_argument('--proj_name',default='Gen_teacher', type=str)
parser.add_argument('--run_name',default=None, type=str)
parser.add_argument('--num_work',default=4, type=int)
parser.add_argument('--smoothing',default=0.05, type=float,help='smoothing factor in FilterBAN')
parser.add_argument('--filt_tau',default=1., type=float,help='temperature in FilterBAN')
args = parser.parse_args()
if args.dataset=='cifar10':
args.k_clas = 10
elif args.dataset=='cifar100':
args.k_clas = 100
return args
# =========== Record the paths ================
def _Update_Teach_Table(store_table, pred_batch, idx_batch):
pred_items = pred_batch.cpu().detach()
batch_size = idx_batch.shape[0]
for i in range(batch_size):
idx_key = idx_batch[i].item()
tmp_pred = pred_items[i]
if store_table[idx_key, :].sum()==0:
store_table[idx_key, :] = tmp_pred
else:
store_table[idx_key, :] = (1-args.smoothing)*store_table[idx_key, :] + args.smoothing*tmp_pred
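# Worked example of the exponential moving average above (numbers are
# illustrative): with args.smoothing = 0.05, an existing row [0.7, 0.3] and a
# new prediction [0.1, 0.9] becomes 0.95*[0.7, 0.3] + 0.05*[0.1, 0.9]
# = [0.67, 0.33], so the stored soft label tracks the teacher's predictions
# along its whole training path rather than only its final state.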
def get_validation(model, data_loader):
model.eval()
b_cnt, correct = 0, 0
valid_loss, pb_table, tf_table = [],[],[]
batch_size = data_loader.batch_size
for x, _, ny, idx in data_loader:
b_cnt += 1
x,ny = x.float().cuda(), ny.long().cuda()
with torch.no_grad():
hid = model(x)
loss = nn.CrossEntropyLoss()(hid, ny.squeeze())
valid_loss.append(loss.item())
hid = hid.detach()
pred_idx = hid.data.max(1, keepdim=True)[1]
prob = torch.gather(nn.Softmax(1)(hid),dim=1, index=pred_idx)
pb_table.append(prob)
tf_table.append(pred_idx.eq(ny.data.view_as(pred_idx)))
model.train()
pb_table = torch.stack(pb_table).reshape(-1,1)
tf_table = torch.stack(tf_table).reshape(-1,1)
ECE = cal_ECE(pb_table, tf_table)
B_NUM = batch_size*b_cnt
correct = tf_table.sum()
return correct/B_NUM, np.mean(valid_loss), ECE
def train(model, optimizer, scheduler, loss_type='from_oht', teacher=None, teach_table=None, tau=1):
results = {'tacc':[], 'vacc':[], 'tloss':[],'vloss':[],'tECE':[],'vECE':[], 'bestg_ac':[],'bestg_lo':[]}
store_table = torch.zeros((50000, args.k_clas))
update_cnt = 0
vacc_max, vloss_min = 0, 50
bestg_ac, bestg_lo = 0, 0
ES_Model = copy.deepcopy(model)
if teach_table is not None:
teach_table = teach_table.cuda()
for g in range(args.epochs):
for x, _, ny, idx in train_loader:
model.train()
x,ny = x.float().cuda(), ny.long().cuda()
optimizer.zero_grad()
hid = model(x)
pred_batch = F.softmax(hid/tau, 1)
_Update_Teach_Table(store_table, pred_batch, idx)
if teacher!=None:
teacher.eval()
hid_teach = teacher(x)
hid_teach = hid_teach.detach()
if loss_type=='from_oht':
loss = nn.CrossEntropyLoss()(hid, ny.squeeze())
elif loss_type=='normal_kd':
loss = distil_loss(hid, ny, hid_teach, T=tau, alpha=1)
elif loss_type=='from_teach_table':
teacher_score = teach_table[idx]
loss = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(hid/tau, 1), teacher_score)*(tau*tau * 2.0)
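                # The KL term is scaled by tau^2 so its gradient magnitude stays comparable
                # to a hard-label loss when the logits are softened by 1/tau (standard
                # knowledge-distillation practice); the extra constant 2.0 is simply part
                # of this implementation.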
loss.backward()
optimizer.step()
wandb.log({'loss':loss.item()})
# ----- At the end of each epoch --------
scheduler.step()
wandb.log({'learning_rate':optimizer.param_groups[0]['lr']})
tacc, tloss, tECE = get_validation(model, data_loader=train_loader)
vacc, vloss, vECE = get_validation(model, data_loader=valid_loader)
if vloss<vloss_min:
vloss_min = vloss
bestg_lo = g
file_name = 'best_loss'
save_checkpoint(model,save_path,file_name)
save_storetable(store_table,save_path,file_name)
if vacc>vacc_max:
vacc_max = vacc
bestg_ac = g
file_name = 'best_acc'
save_checkpoint(model,save_path,file_name)
save_storetable(store_table,save_path,file_name)
if g in save_list:
file_name = 'epoch_'+str(g)
save_checkpoint(model,save_path,file_name)
save_storetable(store_table,save_path,file_name)
results['tacc'].append(tacc.item())
results['vacc'].append(vacc.item())
results['tloss'].append(tloss)
results['vloss'].append(vloss)
results['tECE'].append(tECE)
results['vECE'].append(vECE)
results['bestg_ac'].append(bestg_ac)
results['bestg_lo'].append(bestg_lo)
wandb_record_results(results, g)
def main():
global args, device, train_loader, valid_loader, BATCH_X
global PATH_TRAIN_COARSE, PATH_BATCH_COARSE, PATH_BATCH_FINE
global save_path
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args = parse()
rnd_seed(args.seed)
# -------- Initialize wandb
run_name = wandb_init(proj_name=args.proj_name, run_name=args.run_name, config_args=args)
#run_name = 'add'
save_path = './results/'+args.proj_name+'/'+args.net+run_name
if not os.path.exists(save_path):
os.makedirs(save_path)
# -------- Prepare loader, model, optimizer, etc.
train_loader, valid_loader = data_gen(args, valid_split=False)
net = get_init_net(args, args.net)
net = net.to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr,momentum=0.9, weight_decay=5e-4)
if args.scheduler=='cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-5)
elif args.scheduler=='multi':
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80, 120, 160, 180], gamma=0.1)
# -------- Train the model and record the path
train(net,optimizer,scheduler,'from_oht')
if __name__ == '__main__':
main()
``` |
{
"source": "Joshua-Ren/IL_for_SSL",
"score": 2
} |
#### File: Joshua-Ren/IL_for_SSL/data_loader_DALI.py
```python
import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import warnings
warnings.filterwarnings('ignore')
#LOCAL_PATH = 'E:\DATASET\tiny-imagenet-200'
@pipeline_def
def create_dali_pipeline(dataset, crop, size, shard_id, num_shards, dali_cpu=False, is_training=True):
if dataset.lower()=='imagenet':
DATA_PATH = '/home/sg955/rds/rds-nlp-cdt-VR7brx3H4V8/datasets/ImageNet/'
if is_training:
data_dir = os.path.join(DATA_PATH, 'train_caffe')
else:
data_dir = os.path.join(DATA_PATH, 'val_caffe')
images, labels = fn.readers.caffe(path=data_dir, shard_id=shard_id, num_shards=num_shards,
pad_last_batch=True, name="Reader")
elif dataset.lower()=='tiny':
DATA_PATH = '/home/sg955/rds/hpc-work/tiny-imagenet-200/'
if is_training:
data_dir = os.path.join(DATA_PATH, 'train')
else:
data_dir = os.path.join(DATA_PATH, 'val')
images, labels = fn.readers.file(file_root=data_dir, shard_id=shard_id, num_shards=num_shards,
random_shuffle=is_training, pad_last_batch=True, name="Reader")
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
if is_training:
#images = fn.decoders.image_random_crop(images, device=decoder_device, output_type=types.RGB, random_aspect_ratio=[0.8, 1.25],
# random_area=[0.1, 1.0], num_attempts=100)
images = fn.decoders.image(images, device=decoder_device, output_type=types.RGB)
images = fn.resize(images, device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
mirror = fn.random.coin_flip(probability=0.5)
else:
images = fn.decoders.image(images, device=decoder_device, output_type=types.RGB)
images = fn.resize(images, device=dali_device, resize_x=crop, resize_y=crop, mode="not_smaller", interp_type=types.INTERP_TRIANGULAR)
#images = fn.resize(images, device=dali_device, size=size, mode="not_smaller", interp_type=types.INTERP_TRIANGULAR)
mirror = False
images = fn.crop_mirror_normalize(images.gpu(), dtype=types.FLOAT, output_layout="CHW",
crop=(crop, crop),mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255], mirror=mirror)
labels = labels.gpu()
return images, labels
if __name__ == '__main__':
    # iteration of PyTorch dataloader
    # NOTE: point IMG_DIR at an ImageFolder-style ImageNet directory before running;
    # the path below is only an assumed placeholder.
    IMG_DIR = '/home/sg955/rds/rds-nlp-cdt-VR7brx3H4V8/datasets/ImageNet/train'
    transform_train = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.08, 1.25)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_dst = datasets.ImageFolder(IMG_DIR, transform_train)
train_loader = torch.utils.data.DataLoader(train_dst, batch_size=2048, shuffle=True, pin_memory=True, num_workers=8)
transform_val = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
val_dst = datasets.ImageFolder(IMG_DIR, transform_val)
val_loader = torch.utils.data.DataLoader(val_dst, batch_size=2000, shuffle=False, pin_memory=True, num_workers=8)
print('[PyTorch] start iterate test dataloader')
start = time.time()
for i, (x,y) in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = x.cuda(non_blocking=True)
labels = y.cuda(non_blocking=True)
end = time.time()
test_time = end-start
print('[PyTorch] end test dataloader iteration')
# print('[PyTorch] iteration time: %fs [train], %fs [test]' % (train_time, test_time))
print('[PyTorch] iteration time: %fs [test]' % (test_time))
    pipe = create_dali_pipeline(batch_size=2048, num_threads=8, device_id=0, seed=12, dataset='imagenet',
                                crop=224, size=256, dali_cpu=False, shard_id=0, num_shards=1, is_training=True)
pipe.build()
train_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
    pipe = create_dali_pipeline(batch_size=2000, num_threads=8, device_id=0, seed=12, dataset='imagenet',
                                crop=256, size=256, dali_cpu=True, shard_id=0, num_shards=1, is_training=False)
pipe.build()
val_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
print('[DALI-GPU] start iterate train dataloader')
start = time.time()
for i, data in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-GPU] iteration time: %fs [test]' % (test_time))
print('[DALI-cpu] start iterate val dataloader')
start = time.time()
for i, data in enumerate(val_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-cpu] iteration time: %fs [test]' % (test_time))
```
#### File: Joshua-Ren/IL_for_SSL/ImgNet_finetune_multiGPU.py
```python
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.utils.data as Data
import torchvision
import torchvision.transforms as T
import pandas as pd
import numpy as np
from data_loader_lmdb import ImageFolderLMDB
import os
import argparse
import random
from utils import *
import copy
from vit_pytorch import ViT
from my_MAE import my_MAE
from einops import rearrange, repeat
import torch.distributed as dist
def parse():
parser = argparse.ArgumentParser(description='ImageNet-Finetune')
parser.add_argument('--scratch',action='store_true',help='train from scratch')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=0.05, type=float)
parser.add_argument('--batch_size',default=256, type=int)
parser.add_argument('--seed',default=10086,type=int)
parser.add_argument('--proj_path',default='Finetune_ImgNet', type=str)
parser.add_argument('--epochs',default=100, type=int)
parser.add_argument('--accfreq',default=10, type=int, help='every xx iteration, update acc')
parser.add_argument('--run_name',default=None,type=str)
parser.add_argument('--enable_amp',action='store_true')
parser.add_argument('--sync_bn', action='store_true')
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--workers',default=4, type=int)
parser.add_argument('--dataset',type=str,default='tiny',help='can be imagenet, tiny')
parser.add_argument('--modelsize',type=str,default='tiny',help='ViT model size, must be tiny, small or base')
parser.add_argument('--loadrun',type=str,default='basetry_4GPU_1kbs')
parser.add_argument('--loadep',type=str,default='ep0')
args = parser.parse_args()
#/home/sg955/GitWS/IL_for_SSL/results/Interact_MAE/base/basetry_4GPU_1kbs/checkpoint
# For example ../Interact_MAE/tiny/tinytry_4GPU/checkpoint/encoder_ep0.pt
base_folder = '/home/sg955/GitWS/IL_for_SSL/'
base_path = base_folder + 'results/Interact_MAE/'
base_file = 'encoder_'+args.loadep+'.pt'
args.load_ckpt_path = os.path.join(base_path, args.modelsize.lower(),
args.loadrun,'checkpoint', base_file)
args.run_name = args.dataset+'_'+args.modelsize+'_'+ args.loadep+'__'+args.run_name
if args.modelsize.lower()=='tiny':
enc_params = [192, 12, 3, 512] # dim, depth, heads, mlp_dim
dec_params = [512, 6] # dec_dim, dec_depth
elif args.modelsize.lower()=='small':
enc_params = [384, 12, 6, 1024] # dim, depth, heads, mlp_dim
dec_params = [512, 6] #[1024, 2] # dec_dim, dec_depth
elif args.modelsize.lower()=='base':
enc_params = [768, 12, 12, 2048] # dim, depth, heads, mlp_dim
dec_params = [512, 6] #[2048, 4] # dec_dim, dec_depth
else:
print('ViT model size must be tiny, small, or base')
[args.enc_dim, args.enc_depth, args.enc_heads, args.enc_mlp] = enc_params
[args.dec_dim, args.dec_depth] = dec_params
if args.dataset.lower()=='imagenet':
tmp_kfp=[1000, 256, 256, 16, 4] # k_clas, fill_size, fig_size, patch_size, ds_ratio
elif args.dataset.lower()=='tiny':
tmp_kfp=[200, 64, 64, 8, 1]
else:
print('dataset must be imagenet or tiny')
[args.k_clas, args.fill_size, args.fig_size, args.patch_size, args.ds_ratio] = tmp_kfp
args.patch_num=int(args.fig_size/args.patch_size)
return args
# =================== Some utils functions ==========================
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
def adjust_learning_rate(args, optimizer, epoch):
"""
    warm up linearly to lr over the first 5 epochs, then cosine-decay to lr_min
"""
warmup_ep = 5
lr_min = 1e-6
lr_start = args.lr
if epoch<warmup_ep:
lr_current = lr_min+(lr_start-lr_min)*(epoch)/warmup_ep
else:
degree = (epoch-warmup_ep)/(args.epochs-warmup_ep)*np.pi
lr_current = lr_min+0.5*(lr_start-lr_min)*(1+np.cos(degree))
for param_group in optimizer.param_groups:
param_group['lr'] = lr_current
return lr_current
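# Worked example of the schedule above (assuming args.lr = 1e-3 and
# args.epochs = 100): epoch 0 starts near lr_min, epoch 5 reaches ~1e-3 at the
# end of the linear warm-up, roughly epoch 52 sits at ~5e-4 (the midpoint of
# the cosine), and the final epochs decay back towards lr_min = 1e-6.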
# ======================== Main and Train ==========================
def main():
global args
args = parse()
rnd_seed(args.seed)
# ================= Prepare for distributed training =====
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.enable_amp or args.distributed or args.sync_bn:
global DDP, amp, optimizers, parallel
from apex.parallel import DistributedDataParallel as DDP
from apex import amp, optimizers, parallel
cudnn.benchmark = True
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.total_batch_size = args.world_size * args.batch_size
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# ================== Create the model: mae ==================
    # Here we build the full MAE but only its encoder is finetuned (the checkpoint was saved for the whole MAE)
encoder = ViT(image_size=args.fig_size, patch_size=args.patch_size, num_classes=args.k_clas,
dim=args.enc_dim, depth=args.enc_depth, heads=args.enc_heads, mlp_dim=args.enc_mlp)
mae = my_MAE(encoder=encoder, masking_ratio=0.75, decoder_dim=args.dec_dim, decoder_depth=args.dec_depth)
if args.sync_bn:
print("using apex synced BN")
mae = parallel.convert_syncbn_model(mae)
if not args.scratch:
ckp = ckp_converter(torch.load(args.load_ckpt_path))
mae.load_state_dict(ckp)
# Scale learning rate based on global batch size
encoder.cuda()
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
optimizer = optim.AdamW(encoder.parameters(), lr=args.lr, betas=(0.9, 0.95),
weight_decay=args.weight_decay)
if args.enable_amp:
encoder, optimizer = amp.initialize(encoder, optimizer, opt_level="O1")
if args.distributed:
encoder = DDP(encoder, delay_allreduce=True)
# ================== Prepare for the dataloader ===============
normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
traindir = os.path.join('/home/sg955/rds/rds-nlp-cdt-VR7brx3H4V8/datasets/ImageNet/', 'train.lmdb')
valdir = os.path.join('/home/sg955/rds/rds-nlp-cdt-VR7brx3H4V8/datasets/ImageNet/', 'val.lmdb')
train_set = ImageFolderLMDB(
traindir, T.Compose([T.RandomResizedCrop(args.fig_size), T.RandomHorizontalFlip(),
T.ToTensor(), normalize, ]))
val_set = ImageFolderLMDB(
valdir, T.Compose([ T.Resize(args.fill_size), T.CenterCrop(args.fig_size),
T.ToTensor(),normalize, ]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_set, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# =================== Initialize wandb ========================
if args.local_rank==0:
run_name = wandb_init(proj_name=args.proj_path, run_name=args.run_name, config_args=args)
#run_name = 'add'
        save_path = '/home/sg955/GitWS/IL_for_SSL/results/'+args.proj_path+'/'+run_name
if not os.path.exists(save_path):
os.makedirs(save_path)
# ================= Train the model ===========================
for g in range(args.epochs):
train(train_loader, encoder, optimizer, g)
_accuracy_validate(val_loader, encoder)
        torch.cuda.synchronize() # sync GPUs when the val_loader is also used; the pure interaction loop does not need it
def train(train_loader, encoder, optimizer, g):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
encoder.train()
for i, (x, y) in enumerate(train_loader):
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True)
# compute output, for encoder, we need cls token to get hid
hid = encoder(x)
loss = nn.CrossEntropyLoss()(hid, y)
optimizer.zero_grad()
if args.enable_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if i%args.accfreq == 0:
prec1, prec5 = accuracy(hid.data, y, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), x.size(0))
top1.update(prec1.item(), x.size(0))
top5.update(prec5.item(), x.size(0))
torch.cuda.synchronize()
if args.local_rank==0:
wandb.log({'loss':loss.item()})
if args.local_rank==0:
curr_lr = adjust_learning_rate(args, optimizer, g)
wandb.log({'epoch':g})
wandb.log({'train_loss':losses.avg})
wandb.log({'train_top1':top1.avg})
wandb.log({'train_top5':top5.avg})
wandb.log({'learn_rate':curr_lr})
def _accuracy_validate(val_loader, encoder):
'''
Calculate validation accuracy, support multi-GPU
'''
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
encoder.eval()
for i, (x, y) in enumerate(val_loader):
x = x.cuda(args.gpu, non_blocking=True)
y = y.cuda(args.gpu, non_blocking=True)
# compute output
with torch.no_grad():
hid = encoder(x)
loss = nn.CrossEntropyLoss()(hid, y)
# measure accuracy and record loss
prec1, prec5 = accuracy(hid.data, y, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), x.size(0))
top1.update(prec1.item(), x.size(0))
top5.update(prec5.item(), x.size(0))
if args.local_rank==0:
wandb.log({'valid_loss':losses.avg})
wandb.log({'valid_top1':top1.avg})
wandb.log({'valid_top5':top5.avg})
if __name__ == '__main__':
main()
``` |
{
"source": "Joshua-Ren/maddpg-again",
"score": 3
} |
#### File: maddpg-again/utils/networks.py
```python
import torch.nn as nn
import torch.nn.functional as F
class MLPNetwork(nn.Module):
"""
MLP network (can be used as value or policy)
"""
def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
constrain_out=False, norm_in=True, discrete_action=True):
"""
Inputs:
input_dim (int): Number of dimensions in input
out_dim (int): Number of dimensions in output
hidden_dim (int): Number of hidden dimensions
nonlin (PyTorch function): Nonlinearity to apply to hidden layers
"""
super(MLPNetwork, self).__init__()
if norm_in: # normalize inputs
self.in_fn = nn.BatchNorm1d(input_dim)
self.in_fn.weight.data.fill_(1)
self.in_fn.bias.data.fill_(0)
else:
self.in_fn = lambda x: x
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, out_dim)
self.nonlin = nonlin
if constrain_out and not discrete_action:
# initialize small to prevent saturation
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
self.out_fn = nn.Tanh()
else: # logits for discrete action (will softmax later)
self.out_fn = lambda x: x
def forward(self, X):
"""
Inputs:
X (PyTorch Matrix): Batch of observations
Outputs:
out (PyTorch Matrix): Output of network (actions, values, etc)
"""
h1 = self.nonlin(self.fc1(self.in_fn(X)))
h2 = self.nonlin(self.fc2(h1))
out = self.out_fn(self.fc3(h2))
return out
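# A minimal usage sketch (shapes are illustrative; assumes `import torch`):
#
#   policy = MLPNetwork(input_dim=18, out_dim=5, hidden_dim=64)
#   obs = torch.randn(32, 18)     # batch of 32 observations
#   logits = policy(obs)          # (32, 5) action logits, softmaxed by the caller
#
# Note that with the default norm_in=True the input BatchNorm expects batches
# of more than one sample while the module is in training mode.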
``` |
{
"source": "Joshua-Ren/Neural_Iterated_Learning",
"score": 3
} |
#### File: Neural_Iterated_Learning/utils/data_gen.py
```python
import sys
sys.path.append("..")
import numpy as np
from utils.conf import *
def valid_list_gen(low, high, num):
'''
    Randomly generate `num` distinct integers in the range [low, high).
'''
s = []
while(len(s)<num):
x = np.random.randint(low, high)
if x not in s:
s.append(x)
return s
def gen_distinct_candidates(tgt_list, sel_list, candi_size = SEL_CANDID):
'''
    tgt_list may share some of its elements with sel_list.
    Returns (data_candidates, sel_idx): each row of data_candidates holds the
    target plus distinct distractors drawn from sel_list, and sel_idx gives the
    column where the target is placed.
'''
batch_size = len(tgt_list)
data_candidates = np.zeros((batch_size, candi_size))
sel_idx = []
for i in range(batch_size):
tmp_idx = np.random.randint(0, candi_size)
sel_idx.append(tmp_idx)
for j in range(candi_size):
if j == 0:
data_candidates[i,j]=tgt_list[i]
continue
rand_candi = random.choice(sel_list)
while (rand_candi in data_candidates[i,:]):
rand_candi = random.choice(sel_list)
data_candidates[i, j] = rand_candi
data_candidates[i, 0] = data_candidates[i, tmp_idx]
data_candidates[i, tmp_idx] = tgt_list[i]
return data_candidates, np.asarray(sel_idx)
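# Illustrative example (values are made up): with tgt_list = [3, 7] and
# candi_size = 4, each row of data_candidates contains the target plus three
# distinct distractors from sel_list, and sel_idx records the column where the
# target ended up, e.g.
#   data_candidates = [[12,  3, 45,  8],       sel_idx = [1, 3]
#                      [60, 21,  5,  7]]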
def gen_candidates(low, high, valid_list, batch = BATCH_SIZE, candi = SEL_CANDID, train=True):
if train == True:
s = []
num = batch*candi
while (len(s)<num):
x = np.random.randint(low, high)
while (x in valid_list):
x = np.random.randint(low, high)
s.append(x)
return np.asarray(s).reshape((batch, candi))
elif train == False:
s = []
valid_num = len(valid_list)
while (len(s)<valid_num*candi):
x = np.random.randint(0,valid_num)
s.append(valid_list[x])
return np.asarray(s).reshape((valid_num, candi))
def valid_data_gen():
sel_idx_val = np.random.randint(0,SEL_CANDID, (len(valid_list),))
valid_candidates = gen_candidates(0, NUM_SYSTEM**ATTRI_SIZE, valid_list, train=False)
valid_full = np.zeros((valid_num,))
for i in range(valid_num):
valid_full[i] = valid_candidates[i, sel_idx_val[i]]
return valid_full, valid_candidates, sel_idx_val
def batch_data_gen():
    num_batches = int(len(all_list)/BATCH_SIZE) # assumes BATCH_SIZE divides the dataset size evenly
random.shuffle(all_list)
batch_list = []
for i in range(num_batches):
one_batch = {}
tmp_list = all_list[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
train_candidates, sel_idx_train = gen_distinct_candidates(tmp_list, all_list)
for i in range(BATCH_SIZE):
train_candidates[i,sel_idx_train[i]] = tmp_list[i]
one_batch['sel_idx'] = sel_idx_train
one_batch['candidates'] = train_candidates
one_batch['data'] = np.asarray(tmp_list)
batch_list.append(one_batch)
return batch_list
def batch_data_gen_valid(train_list, valid_list):
'''
Only one batch in batch_list
'''
train_batch_list = []
valid_batch_list = []
train_batch = {}
valid_batch = {}
random.shuffle(train_list)
random.shuffle(valid_list)
train_candidates, sel_idx_train = gen_distinct_candidates(train_list, train_list)
valid_candidates, sel_idx_valid = gen_distinct_candidates(valid_list, all_list)
for i in range(len(train_list)):
train_candidates[i,sel_idx_train[i]] = train_list[i]
for j in range(len(valid_list)):
valid_candidates[j,sel_idx_valid[j]] = valid_list[j]
train_batch['sel_idx'] = sel_idx_train
train_batch['candidates'] = train_candidates
train_batch['data'] = np.asarray(train_list)
valid_batch['sel_idx'] = sel_idx_valid
valid_batch['candidates'] = valid_candidates
valid_batch['data'] = np.asarray(valid_list)
train_batch_list.append(train_batch)
valid_batch_list.append(valid_batch)
return train_batch_list, valid_batch_list
'''
tl,vl = batch_data_gen_valid(train_list, valid_list)
for i in range(56):
for j in range(15):
if tl[0]['candidates'][i,j] in valid_list:
print('@@@@')
'''
def shuffle_batch(batch_list):
'''
Shuffle the order of data in the same batch.
'''
shuf_batch_list = []
for j in range(len(batch_list)):
tmp_batch = {}
train_batch, train_candidates, sel_idx_train = batch_list[j]['data'], batch_list[j]['candidates'], batch_list[j]['sel_idx']
tmp = np.concatenate((train_batch.reshape((-1,1)),
train_candidates,
sel_idx_train.reshape((-1,1))),axis=1)
np.random.shuffle(tmp)
tmp_batch['data'] = tmp[:,0]
tmp_batch['candidates'] = tmp[:,1:-1]
tmp_batch['sel_idx'] = tmp[:,-1]
shuf_batch_list.append(tmp_batch)
return shuf_batch_list
def pair_gen(data_list, phA_rnds = 100, degnerate='none', sub_batch_size = 1):
'''
Given the list of x-y pairs generated by speaker(t), we shuffle the mappings
and yield a pair set of number
degnerate could be 'none', 'mix', 'full'
'''
all_data = []
all_msgs = []
cnt_samples = 0
for i in range(len(data_list)):
for j in range(data_list[i]['data'].shape[0]):
cnt_samples += 1
all_data.append(data_list[i]['data'][j])
if degnerate=='full':
all_msgs.append(data_list[0]['msg'].transpose(0,1)[0])
elif degnerate=='mix':
all_msgs.append(data_list[i]['msg'].transpose(0,1)[j])
all_data.append(data_list[i]['data'][j])
all_msgs.append(data_list[0]['msg'].transpose(0,1)[0])
else:
all_msgs.append(data_list[i]['msg'].transpose(0,1)[j])
phA_data_list = []
for i in range(phA_rnds):
phA_data_for_spk = {}
phA_data = []
phA_msgs = []
for j in range(sub_batch_size):
ridx = np.random.randint(0, cnt_samples)
phA_data.append(all_data[ridx])
phA_msgs.append(all_msgs[ridx])
phA_data_for_spk['data'] = np.asarray(phA_data)
phA_data_for_spk['msg'] = torch.stack(phA_msgs).transpose(0,1)
phA_data_list.append(phA_data_for_spk)
return phA_data_list
valid_num = VALID_NUM
train_num = NUM_SYSTEM**ATTRI_SIZE - VALID_NUM
all_list = [i for i in range(NUM_SYSTEM**ATTRI_SIZE)]
valid_list = valid_list_gen(0, NUM_SYSTEM**ATTRI_SIZE, valid_num)
train_list = list(set([i for i in range(NUM_SYSTEM**ATTRI_SIZE)]) ^ set(valid_list))
'''
batch_list = batch_data_gen()
shuf_batch_list = shuffle_batch(batch_list)
batch_list = batch_data_gen()
'''
```
#### File: Neural_Iterated_Learning/utils/manual_language_gen.py
```python
import sys
sys.path.append("..")
import numpy as np
from utils.conf import *
from utils.data_gen import *
from utils.result_record import *
import matplotlib.pyplot as plt
vocab_table_full = [chr(97+int(v)) for v in range(26)]
char_mapping = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
'1','2','3','4','5','6','7','8','9','0','~','!','@','#','$','%','^','&','*','(',')','_','+','<','>','?']
#random.shuffle(char_mapping)
def value_to_onehot(value, char_mapping):
'''
Map value to one-hot tensor. Shape is [ATTRI_SIZE, MSG_VOCSIZE]
'''
msg_onehot = torch.zeros((ATTRI_SIZE, MSG_VOCSIZE))
tmp_idx = 0
for i in range(len(value)):
tmp_idx = char_mapping.index(value[i])
msg_onehot[i,tmp_idx] = 1
return msg_onehot
def key_to_value(key, char_mapping,comp = True):
'''
Generate value based on key. Now only for NUM_SYSTEM=10, ATTRI_SIZE=2
'''
tmp = ''.join([s for s in key])
int_key = int(tmp)
dig_0 = int(key[0])
dig_1 = int(key[1])
#dig_2 = np.mod(int(int_key/NUM_SYSTEM**2), NUM_SYSTEM)
value = []
if comp == True:
#value.append(char_mapping[dig_2])
value.append(char_mapping[dig_1])
value.append(char_mapping[dig_0])
else:
#value.append(char_mapping[np.random.randint(0,len(char_mapping))])
value.append(char_mapping[np.random.randint(0,len(char_mapping))])
value.append(char_mapping[np.random.randint(0,len(char_mapping))])
return ''.join(value)
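# Illustrative example (assuming the unshuffled char_mapping above and
# NUM_SYSTEM = 10): for key ('3', '7') and comp=True the value is
# char_mapping[7] + char_mapping[3] = 'hd', i.e. each attribute is named by its
# own character, which is what makes the language compositional; with
# comp=False both characters are instead drawn at random.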
# ========== Degenerate language ===================
deg_all = {}
deg_train = {}
deg_valid = {}
deg_spk_train = {} # Data for spk training, 'data' should be decimal, 'msg' one hot
data_list = []
msg_list = []
for i in range(NUM_SYSTEM**ATTRI_SIZE):
# ===== For dictionary version
key = num_to_tup(i)
value = 'aa'
deg_all[key] = value
if i in valid_list:
deg_valid[key] = value
elif i in all_list:
deg_train[key] = value
# ==== For spk training version
msg_list.append(value_to_onehot(value, char_mapping))
data_list.append(i)
deg_spk_train['data'] = np.asarray(data_list)
deg_spk_train['msg'] = torch.stack(msg_list).transpose(0,1)
#compos_cal(deg_all) # Should be approximately 0
# ========== Compositional language ===================
comp_all = {}
comp_spk_train = {} # Data for spk training, 'data' should be decimal, 'msg' one hot
data_list = []
msg_list = []
for i in range(NUM_SYSTEM**ATTRI_SIZE):
# ===== For dictionary version
key = num_to_tup(i)
value = key_to_value(key, char_mapping, True)
comp_all[key] = value
# ==== For spk training version
msg_list.append(value_to_onehot(value, char_mapping))
data_list.append(i)
comp_spk_train['data'] = np.asarray(data_list)
comp_spk_train['msg'] = torch.stack(msg_list).transpose(0,1)
print('Comp comp is: '+ str(compos_cal(comp_all)))
#compos_cal(comp_all) # Should be approximately 1.
# ========== Holistic language ===================
holi_spk_train = {}
new_idx = torch.randperm(64)
holi_spk_train['data'] = comp_spk_train['data']
holi_spk_train['msg'] = comp_spk_train['msg'][:,new_idx,:]
comp, _, _ = compos_cal_inner(holi_spk_train['msg'],holi_spk_train['data'])
print('Holi comp is: '+ str(comp))
# ========== Holistic language2 ===================
PERM2 = 20#50
holi_spk_train2 = {}
new_idx2 = comp_spk_train['data']
perm = torch.randperm(PERM2)
new_idx2 = torch.cat((perm, torch.tensor(new_idx2[PERM2:])),0)
holi_spk_train2['data'] = comp_spk_train['data']
holi_spk_train2['msg'] = comp_spk_train['msg'][:,new_idx2,:]
comp, _, _ = compos_cal_inner(holi_spk_train2['msg'],holi_spk_train2['data'])
print('Holi2 comp is: '+ str(comp))
# ========== Holistic language3 ===================
PERM3 = 10#35
holi_spk_train3 = {}
new_idx3 = comp_spk_train['data']
perm = torch.randperm(PERM3)
new_idx3 = torch.cat((perm, torch.tensor(new_idx3[PERM3:])),0)
holi_spk_train3['data'] = comp_spk_train['data']
holi_spk_train3['msg'] = comp_spk_train['msg'][:,new_idx3,:]
comp, _, _ = compos_cal_inner(holi_spk_train3['msg'],holi_spk_train3['data'])
print('Holi3 comp is: '+ str(comp))
'''
# ========== Read language from txt ===================
path = 'exp_results/test_both_spk_and_lis/msg_all.txt'
read_spk_train = {}
cnt = 0
all_messages = []
msg_list = []
with open(path,'r') as f:
for lines in f:
for i in range(8):
cnt += 1
if cnt > 8:
#all_messages.append(lines.split()[i+1])
msg_list.append(value_to_onehot(lines.split()[i+1], char_mapping))
read_spk_train['data'] = comp_spk_train['data']
read_spk_train['msg'] = torch.stack(msg_list).transpose(0,1)
comp, _, _ = compos_cal_inner(read_spk_train['msg'],read_spk_train['data'])
print('Txt comp is: '+ str(comp))
'''
# =================== Manual Language For the listener ========================
def get_lis_curve_msg(lis_curve_batch_ls, language_train):
'''
    Input is lis_curve_batch [N_B,1]; language_train should be one of the *_train languages.
    The output has the same structure as *_train.
    The function only adds the lis_train['msg'] part.
'''
lis_train = lis_curve_batch_ls[0]
tmp_data = lis_train['data']
msg_table = language_train['msg'].transpose(0,1)
msg_list = []
for i in range(tmp_data.shape[0]):
tmp_msg = msg_table[tmp_data[i]]
msg_list.append(tmp_msg)
lis_train['msg'] = torch.stack(msg_list).transpose(0,1)
return lis_train
#comp_p,_, all_msg = compos_cal_inner(comp_spk_train['msg'],comp_spk_train['data'])
'''
test_msg = {}
for i in range(100):
tmp = []
key = num_to_tup(i,2)
dig_0 = np.mod(i, 10)
dig_1 = np.mod(int(i*0.1),10)
tmp = [char_mapping[dig_0], char_mapping[dig_1]]
value = ''.join(tmp)
test_msg[key] = value
compos_cal(test_msg)
simple_msg = {}
simple_msg['0','0'] = 'aa'
simple_msg['0','1'] = 'ab'
simple_msg['1','0'] = 'ba'
simple_msg['1','1'] = 'bb'
compos_cal(simple_msg)
msg = {}
msg['green','box'] = 'aa'
msg['blue','box'] = 'ba'
msg['green','circle'] = 'ab'
msg['blue','circle'] = 'bb'
compos_cal(msg)
'''
``` |
{
"source": "joshuar/ha_arpansa_uv",
"score": 2
} |
#### File: custom_components/arpansa_uv/__init__.py
```python
from __future__ import annotations
from datetime import timedelta
import logging
import async_timeout
import asyncio
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.core import Config, HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.exceptions import ConfigEntryNotReady
from custom_components.arpansa_uv.pyarpansa import Arpansa, ApiError
from .const import (
DOMAIN,
PLATFORMS,
DEFAULT_SCAN_INTERVAL,
STARTUP_MESSAGE
)
SCAN_INTERVAL = timedelta(minutes=DEFAULT_SCAN_INTERVAL)
_LOGGER: logging.Logger = logging.getLogger(__package__)
async def async_setup(hass: HomeAssistant, config: Config):
"""Set up this integration using YAML is not supported."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up this integration using UI."""
if hass.data.get(DOMAIN) is None:
hass.data.setdefault(DOMAIN, {})
_LOGGER.info(STARTUP_MESSAGE)
session = async_get_clientsession(hass)
client = Arpansa(session)
coordinator = ArpansaDataUpdateCoordinator(hass, client=client)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = coordinator
for platform in PLATFORMS:
if entry.options.get(platform, True):
coordinator.platforms.append(platform)
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
class ArpansaDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching data from the API."""
def __init__(
self, hass: HomeAssistant, client: Arpansa
) -> None:
"""Initialize."""
self.api = client
self.platforms = []
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)
async def _async_update_data(self):
"""Update data via library."""
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
await self.api.fetchLatestMeasurements()
return self.api
except ApiError as err:
raise UpdateFailed from err
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
unloaded = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
if platform in coordinator.platforms
]
)
)
if unloaded:
hass.data[DOMAIN].pop(entry.entry_id)
return unloaded
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload config entry."""
await async_unload_entry(hass, entry)
locations = entry.data.get("locations")
_LOGGER.debug(f"Reloading config with options {locations}")
await async_setup_entry(hass, entry)
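# A minimal sketch of how a sensor platform could consume this coordinator
# (the class below and getMeasurement() are assumptions for illustration, not
# part of this file or of pyarpansa's documented API):
#
#   from homeassistant.components.sensor import SensorEntity
#   from homeassistant.helpers.update_coordinator import CoordinatorEntity
#
#   class ArpansaSensor(CoordinatorEntity, SensorEntity):
#       def __init__(self, coordinator, location):
#           super().__init__(coordinator)
#           self._location = location
#
#       @property
#       def native_value(self):
#           # coordinator.data is the refreshed Arpansa client returned above
#           return self.coordinator.data.getMeasurement(self._location)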
``` |
{
"source": "JoshuaRiddell/botslib",
"score": 2
} |
#### File: machine/flash/main.py
```python
import sys
import networking
def main(wlan):
global calibrate_ws
global controller_ws
# init the bot
bot = bots.Bot(use_cbots=True)
bot.set_wlan(wlan)
# don't go any further if the user button is pressed
# use this if you locked up the device using subsequent code
if bot.user_sw.pressed():
return None, None
# init spider controller
sp = spider.Spider(bot, use_cspider=True)
# setup web server
calibrate_ws = socket_handlers.Calibrate(bot)
controller_ws = socket_handlers.Controller(sp)
networking.setup_web_server(accept_socket_cb)
# stand up
sp.xyz(0, 0, 40)
return [bot, sp]
def accept_socket_cb(webSocket, httpClient):
global calibrate_ws
global controller_ws
if (httpClient.GetRequestPath() == "/calibrate"):
webSocket.RecvTextCallback = calibrate_ws.socket_text_recv_cb
if (httpClient.GetRequestPath() == "/controller"):
webSocket.RecvTextCallback = controller_ws.socket_text_recv_cb
if __name__ == '__main__':
wlan = networking.setup_wlan()
# do all of this inside a try except
# in this way if there are errors then the ftp server doesn't crash
try:
import bots
import spider
import socket_handlers
[bot, sp] = main(wlan)
except Exception as e:
sys.print_exception(e)
bot.deinit()
``` |
{
"source": "JoshuaRiddell/traffic_lights_workshop",
"score": 4
} |
#### File: traffic_lights_workshop/examples/02_single_light_function.py
```python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# define some functions to be used later
def red_light_on():
# turn the light on
GPIO.output(light_red, True)
def red_light_off():
# turn the light off
GPIO.output(light_red, False)
# setup the light pin
light_red = 26
GPIO.setup(light_red, GPIO.OUT)
# use our functions
red_light_on()
time.sleep(3)
red_light_off()
```
#### File: traffic_lights_workshop/examples/04_light_change.py
```python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
def red_light_on():
# turn the light on
GPIO.output(light_red, True)
def red_light_off():
# turn the light off
GPIO.output(light_red, False)
def yellow_light_on():
# turn the light on
GPIO.output(light_yellow, True)
def yellow_light_off():
# turn the light off
GPIO.output(light_yellow, False)
def green_light_on():
# turn the light on
GPIO.output(light_green, True)
def green_light_off():
# turn the light off
GPIO.output(light_green, False)
# set which pin number we used for the lights
light_red = 26
light_yellow = 19
light_green = 13
# setup the pins as an output
GPIO.setup(light_red, GPIO.OUT)
GPIO.setup(light_yellow, GPIO.OUT)
GPIO.setup(light_green, GPIO.OUT)
# turn on the green light for go
red_light_off()
yellow_light_off()
green_light_on()
# wait for 3 seconds
time.sleep(3)
# switch to amber light
red_light_off()
yellow_light_on()
green_light_off()
# wait for 3 seconds
time.sleep(3)
# switch to red light for stop
red_light_on()
yellow_light_off()
green_light_off()
# wait for 5 seconds
time.sleep(5)
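# Optional tidy-up (not in the original script): releasing the GPIO pins when
# the program finishes avoids "channel already in use" warnings on the next run.
#   GPIO.cleanup()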
``` |
{
"source": "JoshuaRiddell/uqrobotics-drc",
"score": 3
} |
#### File: computer/imaging/unityCamera.py
```python
import socket
import numpy as np
import cv2
import struct
from pathFinder import *
IP_ADDR = '127.0.0.1'
PORT = 8000
BUFFER_SIZE = 600000
MESSAGE = "hi"
STX = "QUTQUT"
ETX = "BBLBBL"
telemetryStruct = struct.Struct('<ffff')
class UnityCamera(object):
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((IP_ADDR, PORT))
self.sock.send(MESSAGE)
self.imageReceived = False
self.buffer = ""
self.imageString = ""
def receive(self):
try:
data = self.sock.recv(BUFFER_SIZE)
self.buffer += data
except:
print "Connection closed by remote host"
exit()
#check if there is an STX AND ETX in the buffer
stxPos = self.buffer.find(STX)
etxPos = self.buffer.find(ETX)
if (stxPos != -1 and etxPos != -1 and stxPos < etxPos):
self.imageString = self.buffer[stxPos+len(STX):etxPos]
self.buffer = self.buffer[etxPos+len(ETX):]
self.imageReceived = True
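        # Frame layout on the wire, as implied by the parsing above:
        #   ... QUTQUT <16 bytes = four little-endian floats of telemetry> <JPEG bytes> BBLBBL ...
        # Everything between one STX/ETX pair is buffered here and handed to
        # recv_image() below, which strips the 16-byte telemetry header before
        # decoding the JPEG.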
def recv_image(self):
self.imageReceived = False
telemData = self.imageString[0:16]
imageData = self.imageString[16:]
telemetry = telemetryStruct.unpack(telemData)
imageArray = np.fromstring(imageData, np.uint8)
img = cv2.imdecode(imageArray, cv2.IMREAD_COLOR)
return img, telemetry
def close(self):
self.sock.close()
if __name__ == "__main__":
# Save images from unity
SAVE_IMAGES = True
cam = UnityCamera()
directoryStr = "unityTestImages2\\"
baseImageStr = directoryStr + "img_%04d.jpg"
# telemetryFile = open(directoryStr + "telmetry.csv", "w")
# telemetryFile.write("i, Theta, X, Y, Z\n")
i = 0
while(1):
cam.receive()
if cam.imageReceived:
img, telem = cam.recv_image()
# cv2.imshow('image', img)
# print telem
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
radius, image = find_path(img)
# image = perspecitve_warp(image, True)
cv2.namedWindow('Image1', cv2.WINDOW_NORMAL)
cv2.imshow('Image1', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
exit()
```
#### File: car/computer/main.py
```python
USE_SERVER = False
import vision as vi
import hardware as hi
from threading import Thread, Lock
from time import sleep
from math import pi
if USE_SERVER:
import server
class Main(Thread):
"""Main control thread.
"""
def __init__(self):
"""Initialise the vision interface and hardware interface (arduino and
camera).
"""
super(Main, self).__init__()
self.daemon = not USE_SERVER
self.vision = vi.VisionInterface()
self.hardware = hi.HardwareInterface()
def run(self):
"""Main control loop.
"""
ON = 8
OFF = 5
while True:
if self.hardware.get_state() == hi.AUTO:
angle = self.vision.read_frame()
if angle != 0:
angle = angle - pi/2
self.hardware.add_to_pwm_queue(hi.SERVO, angle * 80)
self.hardware.add_to_pwm_queue(hi.THROT, ON)
print "frame processed {}".format(angle)
if __name__ == "__main__":
# make the main thread and start it
main = Main()
if USE_SERVER:
# make the frame streaming server. This gets main control flow.
main.start()
server.register_vision(main.vision)
server = server.WebServer(('0.0.0.0', 5000), server.CamHandler)
server.serve_forever()
else:
main.run()
```
#### File: car/computer/server.py
```python
import cv2
import Image
import threading
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from SocketServer import ThreadingMixIn
import StringIO
import time
# vision interface obeject
# TODO get rid of this dirty global
vision = None
# maximum dimension of sent frame (smaller is faster)
MAX_DIM = 400
class CamHandler(BaseHTTPRequestHandler):
"""Handles http request for camera frames.
"""
def do_GET(self):
"""Http get request callback. Replies with the relevant frame if a
frame was requested.
"""
# if it ends in a digit then we'll reply with a frame
if self.path[-1].isdigit():
# send a header
self.send_response(200)
self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
# video loop
while True:
try:
# get a frame from the vision stream
img = vision.get_frame(int(self.path[-1]))
if img is None:
continue
try:
# scale the frame to save memory
scale = MAX_DIM / float(max([img.shape[0], img.shape[1]]))
img = cv2.resize(img,
(int(img.shape[1]*scale), int(img.shape[0]*scale)),
interpolation=cv2.cv.CV_INTER_AREA)
if not (img[0][0][0] == img[0][0][1] == img[0][0][2]):
# try converting to rgb, won't work for grayscale images
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
except:
pass
if img is None:
continue
# make a jpeg and send it
jpg = Image.fromarray(img)
tmpFile = StringIO.StringIO()
jpg.save(tmpFile,'JPEG')
self.wfile.write("--jpgboundary")
self.send_header('Content-type','image/jpeg')
self.send_header('Content-length',str(tmpFile.len))
self.end_headers()
jpg.save(self.wfile,'JPEG')
# small delay so we have reasonable frame rate
time.sleep(0.1)
except KeyboardInterrupt:
# I think this is suppose to let me keyboard interrupt
# from terminal - doesn't seem to work.
break
return
class WebServer(ThreadingMixIn, HTTPServer):
"""Make a threaded http server.
"""
pass
def register_vision(vision_interface):
"""Register a the vision interface object in the library.
"""
global vision
vision = vision_interface
```
#### File: QUTDroidRacing/PythonTesting/constuctMap.py
```python
import cv2
import numpy as np
from imageManipulation import *
import csv
MAP_SIZE = (1000, 1000)
def construct_map(im1, courseMap, theta, xCentre, yCentre):
rows, cols, channels = im1.shape
#xCentre += 500/2
#yCentre += 300/2
print "theta, x , y: ", theta, xCentre, yCentre
angleToCentre = np.rad2deg(np.arctan((rows/2)*1.0/(cols/2))) #angle from corner to centre of image
radius = np.sqrt(cols*cols/4 + rows * rows / 4) # radius from corner to centre of image
tX = xCentre - radius * np.cos(np.deg2rad(-theta + angleToCentre)) #coords of top left corner
tY = yCentre - radius * np.sin(np.deg2rad(-theta + angleToCentre))
M = cv2.getRotationMatrix2D((cols/2,rows/2),theta, 1) # rotation point, rotation angle, scale
X_OFFSET = 300
Y_OFFSET = 550
M[0,2] = tX + X_OFFSET
M[1,2] = tY + Y_OFFSET
print M
transformedImage = cv2.warpAffine(im1,M,dsize = MAP_SIZE)
registeredImage = transformedImage.astype('float32')
# alpha = 0.5
# beta = ( 1.0 - alpha );
# imgToShow = cv2.addWeighted( registeredImage, alpha, courseMap, beta, 0.0)
courseMapNew = courseMap + registeredImage
courseMapNew[np.logical_and(registeredImage != 0, courseMap != 0)] /= 2
return courseMapNew
if __name__ == "__main__":
courseMap = np.zeros( (MAP_SIZE[0], MAP_SIZE[1], 3))
with open('ImageTransformResultsLong2.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
lastTheta = 0
lastX = 0
lastY = 0
mapInit = 0
for row in reader:
imageNumber = int(row[0])
if mapInit == 0:
im1Path = 'unityTestImages/img_%04d.jpg' % (imageNumber - 1)
img = cv2.imread(im1Path, cv2.IMREAD_COLOR).astype('float32')
img = perspecitve_warp(img)
courseMap = construct_map(img/255, courseMap, 0, 500/2, 300/2)
mapInit = 1
im1Path = 'unityTestImages/img_%04d.jpg' % (imageNumber)
img = cv2.imread(im1Path, cv2.IMREAD_COLOR).astype('float32')
img = perspecitve_warp(img)
theta = -float(row[1]) + lastTheta# + 12
xCentre = -float(row[2]) + lastX# + 12
yCentre = -float(row[3]) + lastY# - 42
courseMap = construct_map(img/255, courseMap, theta, xCentre + 500/2, yCentre + 300/2)
lastX = xCentre
lastY = yCentre
lastTheta = theta
# cv2.namedWindow('RegisteredImages', cv2.WINDOW_NORMAL)
cv2.imshow('RegisteredImages', courseMap)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
#### File: QUTDroidRacing/PythonTesting/imageManipulation.py
```python
import cv2
import numpy as np
from matplotlib import pyplot as plt
from unityCamera import UnityCamera
sigma = 0.001
SHOW_FINAL = True
LOG_RESULTS = True
RUN_OPTIMIZATION = True
SHOW_ALL_PLOTS = False
SHOW_DIFF = True
SHOW_THRESHOLDED_IMAGES = False
def evaluate_estimate(theta, xCentre, yCentre, newImage, targetImage):
rows, cols, channels = newImage.shape
angleToCentre = np.rad2deg(np.arctan((rows/2)*1.0/(cols/2))) #angle from corner to centre of image
radius = np.sqrt(cols*cols/4 + rows * rows / 4) # radius from corner to centre of image
tX = xCentre - radius * np.cos(np.deg2rad(-theta + angleToCentre)) #coords of top left corner
tY = yCentre - radius * np.sin(np.deg2rad(-theta + angleToCentre))
M = cv2.getRotationMatrix2D((cols/2,rows/2),theta, 1) # rotation point, rotation angle, scale
M[0,2] = tX
M[1,2] = tY
transformedImage = cv2.warpAffine(newImage,M,dsize = (cols,rows))
registeredImage = transformedImage.astype('float32')
# cv2.namedWindow('RegisteredImages', cv2.WINDOW_NORMAL)
# cv2.imshow('RegisteredImages',registeredImage)
# cv2.namedWindow('target', cv2.WINDOW_NORMAL)
# cv2.imshow('target',targetImage)
diffBig = np.abs(targetImage/255.0 - registeredImage/255.0) # TODO: Work out a way to ignore the black bits that come from the transformation. This will have all channels == 0
# print np.sum(diffBig == 0) + np.sum(diffBig == 1)
# cv2.namedWindow('diff', cv2.WINDOW_NORMAL)
# cv2.imshow('diff',diffBig)
pdf = np.exp(-diffBig / (2*sigma*sigma))
pdf = np.ones(pdf.shape)
pdf[transformedImage == 0] = 0 # THIS COULD BE A SOLUTION, BUT THIS DOES IT FOR EACH CHANNEL. WANT TO ONLY 0 WHEN ALL CHANNELS ARE 0
pdf[targetImage == 0] = 0 # THIS COULD BE A SOLUTION, BUT THIS DOES IT FOR EACH CHANNEL. WANT TO ONLY 0 WHEN ALL CHANNELS ARE 0
total = np.sum(pdf)
# print total
# cv2.namedWindow('pdf', cv2.WINDOW_NORMAL)
# cv2.imshow('pdf', pdf)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# exit()
return total, registeredImage, pdf
def estimate_pose(newImage, targetImage):
thetaStart = 0
xCentreStart = 0
yCentreStart = 0
if LOG_RESULTS:
logFile = open("ResultsUnity2.csv", "w")
# logFile.write("Theta, X, Y, Score\n")
bestTheta = None
bestX = None
bestY = None
bestScore = None
nThetaTrials = 10
nXTrials = 10
nYTrials = 10
thetaLimits = (-10,10)
xLimits = (-50, 50)
yLimits = (-50, 50)
for dTheta in np.linspace(thetaLimits[0],thetaLimits[1],nThetaTrials):
print dTheta
for dX in np.linspace(xLimits[0], xLimits[1],nXTrials):#range(-20, 20, 5):
for dY in np.linspace(yLimits[0], yLimits[1],nYTrials):#range(-40, 40, 3):
theta = thetaStart + dTheta
xCentre = cols/2 + xCentreStart + dX
yCentre = rows/2 + yCentreStart + dY
total, a, b = evaluate_estimate(theta, xCentre, yCentre, newImage, targetImage)
if bestScore is None or total > bestScore:
bestTheta = theta
bestX = xCentre
bestY = yCentre
bestScore = total
results = "%f, %f, %f, %f\n" % (theta, xCentre, yCentre, total)
if LOG_RESULTS:
logFile.write(results)
# print results
theta = bestTheta
xCentre = bestX
yCentre = bestY
print "theta: ", theta
dX = xCentre - cols/2
print "dX: ", dX
dY = yCentre - rows/2
print "dY: ", dY
print "Optimal probability of %f with theta = %f, X = %f, Y = %f\n" % (bestScore, theta, xCentre, yCentre)
return bestTheta, dX, dY, bestScore
def estimate_pose_2(newImage, targetImage):
sz = newImage.shape
im1_gray = cv2.cvtColor(newImage,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(targetImage,cv2.COLOR_BGR2GRAY)
# cv2.imshow("Image 1r", newImage[:,:,0])
# cv2.imshow("Image 1g", newImage[:,:,1])
# cv2.imshow("Image 1b", newImage[:,:,2])
# cv2.imshow("Image 2", im2_gray)
# cv2.waitKey(0)
# Define the motion model
warp_mode = cv2.MOTION_EUCLIDEAN
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 500;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-10;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray,im2_gray,warp_matrix, warp_mode, criteria)
im2_aligned = cv2.warpAffine(targetImage, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP);
# cv2.imshow("Image 1", newImage)
# cv2.imshow("Image 2", targetImage)
# im2_aligned += newImage
# im2_aligned /= 2
# cv2.imshow("Aligned Image 2", im2_aligned)
# cv2.waitKey(0)
return warp_matrix
def perspecitve_warp(image):
# pts0 = np.float32([[367, 864], [1585, 864], [1234, 592], [700, 592]])
pts0 = np.float32([[145, 703], [1773, 703], [1258, 499], [660, 499]]) #square with side length 10 in unity
cX = 250
cY = 270
    L = 70 / 2 # Therefore 40 pixels = 10 unity units. Converting from pixels to unity is / 4
pts1 = np.float32([[cX - L , cY + L], [cX + L, cY + L], [cX + L, cY - L], [cX - L, cY - L]])
M = cv2.getPerspectiveTransform(pts0,pts1)
return cv2.warpPerspective(image,M, (500, 300))
def stitch_images(im1, courseMap, theta, xCentre, yCentre):
rows, cols, channels = im1.shape
print "Theta, x, y, :", theta, xCentre, yCentre
angleToCentre = np.rad2deg(np.arctan((rows/2)*1.0/(cols/2))) #angle from corner to centre of image
radius = np.sqrt(cols*cols/4 + rows * rows / 4) # radius from corner to centre of image
tX = xCentre - radius * np.cos(np.deg2rad(-theta + angleToCentre)) #coords of top left corner
tY = yCentre - radius * np.sin(np.deg2rad(-theta + angleToCentre))
M = cv2.getRotationMatrix2D((cols/2,rows/2),theta, 1) # rotation point, rotation angle, scale
X_OFFSET = 500
Y_OFFSET = 500
OUTPUT_SIZE = (1000,1000)
M[0,2] = tX + X_OFFSET
M[1,2] = tY + Y_OFFSET
print M
transformedImage = cv2.warpAffine(im1,M,dsize = OUTPUT_SIZE)
registeredImage = transformedImage.astype('float32')
# alpha = 0.5
# beta = ( 1.0 - alpha );
# imgToShow = cv2.addWeighted( registeredImage, alpha, courseMap, beta, 0.0)
courseMap = courseMap + registeredImage
courseMap[registeredImage != 0] /= 2
imgToShow = courseMap
cv2.namedWindow('RegisteredImages', cv2.WINDOW_NORMAL)
cv2.imshow('RegisteredImages',imgToShow)
cv2.waitKey(0)
cv2.destroyAllWindows()
# exit()
if __name__ == "__main__":
resultsFile = open("ImageTransformResultsLong2.csv", "w", 0)
# resultsFile.write("Image, theta, x, y, score\n")
for imageCounter in range(48, 66):
#img = cv2.imread('star20_100_100.jpg', cv2.IMREAD_COLOR)
im1Path = 'unityTestImages/img_%04d.jpg' % (imageCounter)
im2Path = 'unityTestImages/img_%04d.jpg' % (imageCounter + 1)
print im1Path
img = cv2.imread(im1Path, cv2.IMREAD_COLOR)
# img = cv2.resize(img, (0,0), fx=0.2, fy=0.2)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img = perspecitve_warp(img)
imgOrig = img.copy().astype('float32')
# cv2.imwrite('afterHomography.jpg', img)
# cv2.namedWindow('Image 1', cv2.WINDOW_NORMAL)
# cv2.imshow('Image 1', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# exit()
# a, img = cv2.threshold(img[:,:,0],110,255,cv2.THRESH_BINARY)
redChan = img[:,:,0]
blueChan = img[:,:,1]
greenChan = img[:,:,2]
        greenChan[greenChan < 200] = 0 #passed by reference so should auto-update img
blueChan[blueChan < 200] = 0
redChan[redChan < 200] = 0
# img = redChan
if SHOW_THRESHOLDED_IMAGES:
cv2.namedWindow('Image 1', cv2.WINDOW_NORMAL)
cv2.imshow('Image 1', img)
img = img.astype('float32')
rows,cols,chans = img.shape
# img2 = cv2.imread('star.jpg', cv2.IMREAD_COLOR)
img2 = cv2.imread(im2Path, cv2.IMREAD_COLOR)
# img = cv2.resize(img, (0,0), fx=0.2, fy=0.2)
# img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)
img2 = perspecitve_warp(img2)
img2Orig = img2.copy().astype('float32')
redChan = img2[:,:,0]
blueChan = img2[:,:,1]
greenChan = img2[:,:,2]
        greenChan[greenChan < 200] = 0 #passed by reference so should auto-update img
blueChan[blueChan < 200] = 0
redChan[redChan < 200] = 0
# cv2.imwrite('afterHomography.jpg', img)
a, img2 = cv2.threshold(img2,100,150,cv2.THRESH_BINARY)
if SHOW_THRESHOLDED_IMAGES:
cv2.namedWindow('Image 2', cv2.WINDOW_NORMAL)
cv2.imshow('Image 2', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
exit()
img2 = img2.astype('float32')
try:
warp_matrix = estimate_pose_2(img, img2)
except:
print "Error: could not converge"
continue
print warp_matrix
targetImage = img2Orig/255
registeredImage = cv2.warpAffine(imgOrig/255,warp_matrix,dsize = (cols,rows))
cv2.namedWindow('Image reg', cv2.WINDOW_NORMAL)
cv2.imshow('Image reg', registeredImage)
cv2.namedWindow('Image un', cv2.WINDOW_NORMAL)
cv2.imshow('Image un', targetImage)
combined = targetImage.copy()
combined = (combined + registeredImage) / 2
cv2.namedWindow('Image com', cv2.WINDOW_NORMAL)
cv2.imshow('Image com', combined)
cv2.waitKey(0)
cv2.destroyAllWindows()
tX = warp_matrix[0][2] - 500
tY = warp_matrix[1][2] - 500
theta = -np.rad2deg(np.arccos(warp_matrix[0][0]))
rows, cols, channels = combined.shape
angleToCentre = np.rad2deg(np.arctan((rows/2)*1.0/(cols/2))) #angle from corner to centre of image
radius = np.sqrt(cols*cols/4 + rows * rows / 4) # radius from corner to centre of image
xCentre = tX + radius * np.cos(np.deg2rad(-theta + angleToCentre)) #coords of top left corner
yCentre = tY + radius * np.sin(np.deg2rad(-theta + angleToCentre))
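        # The lines above invert the corner-to-centre geometry used in stitch_images():
        # a Euclidean warp [[cos t, -sin t, tx], [sin t, cos t, ty]] carries the rotation
        # in its first column and the top-left-corner translation in its last column, so
        # theta comes from arccos(warp_matrix[0][0]) and the centre position is the corner
        # translation plus the (radius, angleToCentre) offset of the image centre. The
        # fixed 500 subtracted from each translation appears to undo the X_OFFSET/Y_OFFSET
        # applied in stitch_images(); note that arccos alone cannot recover the sign of
        # the rotation.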
score = 0
print "theta, x, y, ", theta, xCentre, yCentre
resultsFile.write("%d, %f, %f, %f, %f\n" % (imageCounter + 1, theta, xCentre, yCentre, score))
# continue
# exit()
# print img.shape
# if RUN_OPTIMIZATION:
# theta, xCentre, yCentre, score = estimate_pose(img, img2)
# resultsFile.write("%d, %f, %f, %f, %f\n" % (imageCounter + 1, theta, xCentre, yCentre, score))
# continue
# else:
# theta = -3.33
# xCentre = cols/2 + 5.555
# yCentre = rows/2 + 38.8888
if SHOW_FINAL:
# # total, dst, diff = evaluate_estimate(theta, xCentre - cols/2, yCentre - rows/2, imgOrig/255, img2Orig/255)
# # total, b, diff = evaluate_estimate(theta, xCentre-cols/2, yCentre-rows/2, img, img2)
# print "Score: ", total
print rows, cols
courseMap = np.zeros((1000,1000,3))
M2 = cv2.getRotationMatrix2D((cols/2,rows/2),0, 1)
M2[0,2] = 0 + 500
M2[1,2] = 0 + 500
courseMap += cv2.warpAffine(img2Orig/255,M2,dsize =(1000,1000))
stitch_images(imgOrig/255, courseMap, theta, xCentre + cols/2, yCentre+rows/2)
continue
# needed_multi_channel_img = np.zeros((img.shape[0], img.shape[1], 3))
# needed_multi_channel_img [:,:,0] = (dst[:,:,0]*1.0/255/3 + dst[:,:,1]*1.0/255/3 + dst[:,:,2]*1.0/255/3)
# needed_multi_channel_img [:,:,2] = (img2[:,:,0]*1.0/255/3 + img2[:,:,1]*1.0/255/3 + img2[:,:,2]*1.0/255/3)
# imgToShow = needed_multi_channel_img
alpha = 0.5
beta = 1.0 - alpha
imgToShow = cv2.addWeighted( dst, alpha, img2Orig/255, beta, 0.0)
if SHOW_ALL_PLOTS:
plt.subplot(131),plt.imshow(img2.astype('uint8'),'gray'),plt.title('Target')
plt.axis('off')
plt.subplot(132),plt.imshow(img.astype('uint8'),'gray'),plt.title('New')
plt.axis('off')
plt.subplot(133),plt.imshow(imgToShow,'gray'),plt.title('Registered')
plt.axis('off')
plt.tight_layout()
plt.show()
elif SHOW_DIFF:
cv2.namedWindow('Diff', cv2.WINDOW_NORMAL)
cv2.imshow('Diff',diff)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
cv2.namedWindow('Orig1', cv2.WINDOW_NORMAL)
cv2.imshow('Orig1', imgOrig/255)
cv2.namedWindow('Orig2', cv2.WINDOW_NORMAL)
cv2.imshow('Orig2', img2Orig/255)
cv2.namedWindow('RegisteredImages', cv2.WINDOW_NORMAL)
cv2.imshow('RegisteredImages',imgToShow)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.namedWindow('newImage', cv2.WINDOW_NORMAL)
# cv2.imshow('newImage',img.astype('uint8'))
# cv2.namedWindow('targetImage', cv2.WINDOW_NORMAL)
# cv2.imshow('targetImage',img2.astype('uint8'))
``` |
{
"source": "joshuariveramnltech/projectIE",
"score": 2
} |
#### File: projectIE/account/views.py
```python
from django.shortcuts import render, reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.contrib.auth import update_session_auth_hash
from .forms import PersonalFacultyForm, PersonalStaffForm, PersonalUserForm, PersonalStudentForm
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'dashboard.html', {})
@login_required
def change_password(request):
context = {}
if request.method == 'GET':
change_password_form = PasswordChangeForm(user=request.user)
elif request.method == 'POST':
change_password_form = PasswordChangeForm(
user=request.user, data=request.POST)
if change_password_form.is_valid():
user = change_password_form.save()  # PasswordChangeForm.save() returns the updated user
update_session_auth_hash(request, user)
messages.success(request, 'Password Change Successful!')
return HttpResponseRedirect(reverse('account:change_password'))
context['change_password_form'] = change_password_form
return render(request, 'change_password.html', context)
@login_required
def view_update_profile(request):
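# Show and update the profile form that matches the requesting user's role (faculty, staff or student).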
context = {'request': request}
if request.method == 'GET':
user_form = PersonalUserForm(instance=request.user)
if request.user.is_faculty:
personal_profile_form = PersonalFacultyForm(
instance=request.user.faculty_profile)
elif request.user.is_staff:
personal_profile_form = PersonalStaffForm(
instance=request.user.staff_profile)
elif request.user.is_student:
personal_profile_form = PersonalStudentForm(
instance=request.user.student_profile)
elif request.method == 'POST':
user_form = PersonalUserForm(
data=request.POST, files=request.FILES, instance=request.user)
if request.user.is_faculty:
personal_profile_form = PersonalFacultyForm(
data=request.POST, instance=request.user.faculty_profile)
elif request.user.is_staff:
personal_profile_form = PersonalStaffForm(
data=request.POST, instance=request.user.staff_profile)
elif request.user.is_student:
personal_profile_form = PersonalStudentForm(
data=request.POST, instance=request.user.student_profile)
if user_form.is_valid() and personal_profile_form.is_valid():
user_form.save()
personal_profile_form.save()
messages.success(request, 'Profile Updated Successfully.')
return HttpResponseRedirect(reverse('account:view_update_profile'))
context.update(
{'user_form': user_form, 'personal_profile_form': personal_profile_form})
return render(request, 'view_update_profile.html', context)
```
#### File: projectIE/grading_system/views.py
```python
from django.shortcuts import render, reverse
from .models import SubjectInstance, SubjectGrade
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.exceptions import PermissionDenied
from .models import SemesterFinalGrade
from .forms import UpdateSubjectGrade
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from datetime import datetime
from django.contrib.auth import get_user_model
from django.template.loader import render_to_string
import weasyprint
from django.conf import settings
# Create your views here.
User = get_user_model()
# student only
@login_required
def view_all_grades(request):
if not request.user.is_student:
raise PermissionDenied
grades = SemesterFinalGrade.objects.filter(
student=request.user.student_profile
).order_by('-school_year', '-semester', '-date_created')
context = {'grades': grades}
return render(request, 'view_all_grades.html', context)
# student only
@login_required
def view_schedule_student(request):
if not request.user.is_student:
raise PermissionDenied
latest_semester_grade = SemesterFinalGrade.objects.all().order_by(
'-school_year',
'-semester',
'-date_created'
).first()
if latest_semester_grade: # checks if there's a result in the prior query
latest_semester_grade = latest_semester_grade.subject_grades.all()
context = {'latest_semester_grade': latest_semester_grade}
return render(request, 'view_schedule_student.html', context)
# faculty view
@login_required
def view_assigned_subjects(request):
if not request.user.is_faculty:
raise PermissionDenied
context = {}
assigned_subject_list = SubjectInstance.objects.filter(
instructor=request.user.faculty_profile).order_by('-date_created')
assigned_subject_query = request.GET.get('assigned_subject_query')
if assigned_subject_query:
assigned_subject_list = assigned_subject_list.filter(
Q(subject__subject_code__icontains=assigned_subject_query) |
Q(subject__description__icontains=assigned_subject_query) |
Q(school_year__icontains=assigned_subject_query)
).distinct()
assigned_subject_paginator = Paginator(assigned_subject_list, 10)
assigned_subject_page = request.GET.get('assigned_subject_page')
try:
assigned_subjects = assigned_subject_paginator.page(
assigned_subject_page)
except PageNotAnInteger:
assigned_subjects = assigned_subject_paginator.page(1)
except EmptyPage:
assigned_subjects = assigned_subject_paginator.page(
assigned_subject_paginator.num_pages)
context['assigned_subjects'] = assigned_subjects
return render(request, 'view_assigned_subjects.html', context)
# for faculty only
@login_required
def view_students_per_subject(request, subject_instance_id, subject_code):
if not request.user.is_faculty:
raise PermissionDenied
context = {}
subject_instance = SubjectInstance.objects.get(id=subject_instance_id)
subject_grades = SubjectGrade.objects.filter(
subject_instance=subject_instance)
context.update({'subject_grades': subject_grades,
'subject_instance': subject_instance})
return render(request, 'view_students_per_subject.html', context)
# for faculty only
@login_required
def view_update_grade(request, subject_grade_id):
if not request.user.is_faculty:
raise PermissionDenied
subject_grade = SubjectGrade.objects.get(id=subject_grade_id)
if subject_grade.is_finalized:
return render(request, 'request_error.html', {})
if request.method == 'GET':
update_subject_grade_form = UpdateSubjectGrade(instance=subject_grade)
elif request.method == 'POST':
update_subject_grade_form = UpdateSubjectGrade(
instance=subject_grade, data=request.POST)
if update_subject_grade_form.is_valid():
instance = update_subject_grade_form.save(commit=False)
update_subject_grade_form.save()
messages.success(request, 'Grade Updated Successfully')
if instance.is_finalized:
return HttpResponseRedirect(
reverse(
'grading_system:view_students_per_subject',
args=[subject_grade.subject_instance.id,
subject_grade.subject_instance.subject.subject_code]
)
)
return HttpResponseRedirect(reverse('grading_system:view_update_grade', args=[subject_grade_id, ]))
context = {'subject_grade': subject_grade,
'update_subject_grade_form': update_subject_grade_form
}
return render(request, 'view_update_grade.html', context)
# student only
@login_required
def student_registration(request):
if not request.user.is_student:
raise PermissionDenied
current = SubjectInstance.objects.all().values(
'school_year', 'semester').distinct().order_by('-semester', '-school_year').first()
subject_list = SubjectInstance.objects.filter(
year_and_section=request.user.student_profile.year_and_section,
school_year=current['school_year'],
semester=current['semester']
).order_by('-semester')
enrolled_subjects = SubjectGrade.objects.filter(
student=request.user.student_profile,
school_year=current['school_year'],
semester=current['semester']
).order_by('-subject_instance__semester')
if request.method == "POST":
selected_subjects = request.POST.getlist('selected_subjects')
if selected_subjects:
for each in selected_subjects:
subject = SubjectInstance.objects.get(id=int(each))
SubjectGrade.objects.create(
student=request.user.student_profile,
subject_instance=subject,
semester=subject.semester,
school_year=subject.school_year,
) # create subject grade instance
subject_grade = SubjectGrade.objects.get(
student=request.user.student_profile,
subject_instance=subject,
semester=subject.semester,
school_year=subject.school_year,
) # retrieve created subject grade instance
try:
semester_grade = SemesterFinalGrade.objects.get(
student=request.user.student_profile,
semester=subject.semester,
school_year=subject.school_year,
) # retrieve semester final grade instance
except SemesterFinalGrade.DoesNotExist:
SemesterFinalGrade.objects.create(
student=request.user.student_profile,
semester=subject.semester,
school_year=subject.school_year,
) # create semester final grade instance
semester_grade = SemesterFinalGrade.objects.get(
student=request.user.student_profile,
semester=subject.semester,
school_year=subject.school_year,
) # retrieve semester grade instance
semester_grade.subject_grades.add(subject_grade)
return HttpResponseRedirect(reverse('grading_system:student_registration'))
context = {'subject_list': subject_list,
'enrolled_subjects': enrolled_subjects,
'current_semester' : current['semester'],
'current_school_year': current['school_year']}
return render(request, 'student_registration.html', context)
# for chairperson only
@login_required
def view_all_students_chairperson(request):
if not request.user.faculty_profile.is_chairperson:
raise PermissionDenied
student_list = User.objects.filter(
is_student=True, ).order_by('-date_joined')
if request.user.faculty_profile.department == "Department of Industrial Engineering":
student_list = student_list.filter(student_profile__course='BSIE')
student_query = request.GET.get('student_query')
if student_query:
student_list = student_list.filter(
Q(first_name__icontains=student_query) |
Q(last_name__icontains=student_query) |
Q(username__icontains=student_query) |
Q(email__icontains=student_query)
).distinct()
student_paginator = Paginator(student_list, 20)
student_page = request.GET.get('student_page')
try:
students = student_paginator.page(student_page)
except PageNotAnInteger:
students = student_paginator.page(1)
except EmptyPage:
students = student_paginator.page(student_paginator.num_pages)
context = {'students': students}
return render(request, 'view_all_students_chairperson.html', context)
# chairperson only
@login_required
def student_tagging(request, student_id, student_username):
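# Chairperson view: search/paginate current subject instances and the student's records,
# and enroll ("tag") the student into any selected subject instances on POST.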
if not request.user.faculty_profile.is_chairperson:
raise PermissionDenied
student = User.objects.get(id=student_id)
current = SubjectInstance.objects.all().values(
'school_year', 'semester').distinct().order_by('-semester', '-school_year').first()
student_subject_list = SubjectGrade.objects.filter(
student=student.student_profile).order_by('-semester', '-school_year', '-date_created')
subject_list = SubjectInstance.objects.filter(
school_year=current['school_year'],
semester=current['semester']
).order_by('-date_created')
subject_query = request.GET.get('subject_query')
record_query = request.GET.get('record_query')
if subject_query:
subject_list = subject_list.filter(
Q(subject__subject_code__icontains=subject_query) |
Q(subject__description__icontains=subject_query) |
Q(school_year__icontains=subject_query) |
Q(semester__icontains=subject_query)
).distinct()
if record_query:
student_subject_list = student_subject_list.filter(
Q(subject_instance__subject__subject_code__icontains=record_query) |
Q(subject_instance__subject__description__icontains=record_query) |
Q(subject_instance__school_year__icontains=record_query) |
Q(subject_instance__semester__icontains=record_query)
).distinct()
subject_paginator = Paginator(subject_list, 10)
subject_page = request.GET.get('subject_page')
try:
subjects = subject_paginator.page(subject_page)
except PageNotAnInteger:
subjects = subject_paginator.page(1)
except EmptyPage:
subjects = subject_paginator.page(subject_paginator.num_pages)
student_subject_paginator = Paginator(student_subject_list, 10)
student_subject_page = request.GET.get('student_subject_page')
try:
student_subjects = student_subject_paginator.page(student_subject_page)
except PageNotAnInteger:
student_subjects = student_subject_paginator.page(1)
except EmptyPage:
student_subjects = student_subject_paginator.page(
student_subject_paginator.num_pages)
if request.method == "POST":
sub_list = request.POST.getlist('selected_subjects')
for each in sub_list:
subject = SubjectInstance.objects.get(id=int(each))
SubjectGrade.objects.create(
student=student.student_profile,
subject_instance=subject,
semester=subject.semester,
school_year=subject.school_year,
) # create subject grade instance
subject_grade = SubjectGrade.objects.get(
student=student.student_profile,
subject_instance=subject,
semester=subject.semester,
school_year=subject.school_year,
) # retrieve created subject grade instance
try:
semester_grade = SemesterFinalGrade.objects.get(
student=student.student_profile,
semester=subject.semester,
school_year=subject.school_year,
)
except SemesterFinalGrade.DoesNotExist:
SemesterFinalGrade.objects.create(
student=student.student_profile,
semester=subject.semester,
school_year=subject.school_year,
)
semester_grade = SemesterFinalGrade.objects.get(
student=student.student_profile,
semester=subject.semester,
school_year=subject.school_year,
)
semester_grade.subject_grades.add(subject_grade)
return HttpResponseRedirect(reverse('grading_system:student_tagging', args=[student_id, student_username]))
context = {'student': student, 'subjects': subjects,
'student_subjects': student_subjects}
return render(request, 'student_tagging.html', context)
# for chairperson only
@login_required
def remove_subject_chairperson(request, subject_grade_id, student_id, student_username):
if not request.user.faculty_profile.is_chairperson:
raise PermissionDenied
instance = SubjectGrade.objects.get(id=subject_grade_id)
instance.delete()
return HttpResponseRedirect(reverse('grading_system:student_tagging', args=[student_id, student_username]))
# for faculty only
@login_required
def class_list_pdf(request, subject_instance_id, subject_code):
if not request.user.is_faculty:
raise PermissionDenied
current_date_time = str(datetime.now().strftime('%h %d %Y %H:%M'))
subject_instance = SubjectInstance.objects.get(id=subject_instance_id)
protocol = request.build_absolute_uri().split(':')[0]
subject_students = User.objects.filter(
student_profile__student_grade__subject_instance=subject_instance)
context = {
'subject_instance': subject_instance,
'protocol': protocol,
'subject_students': subject_students,
'current_date_time': current_date_time
}
html = render_to_string('class_list_pdf.html', context)
response = HttpResponse(content_type='application/pdf')
response["Content-Disposition"] = "filename='class_list{}_{}.pdf'".format(
subject_instance.subject.description, subject_instance.year_and_section)
weasyprint.HTML(string=html).write_pdf(response, stylesheets=[
weasyprint.CSS(settings.STATIC_ROOT + '/main.css'), ])
return response
# for faculty only
@login_required
def print_schedule_pdf(request):
if not request.user.is_faculty:
raise PermissionDenied
current_date_time = str(datetime.now().strftime('%h %d %Y %H:%M'))
current = SubjectInstance.objects.all().values(
'school_year', 'semester').distinct().order_by('-semester', '-school_year').first()
assigned_subjects_per_year = SubjectInstance.objects.filter(
instructor=request.user.faculty_profile,
school_year=current['school_year'],
semester=current['semester']
).order_by('-semester', '-date_created')
context = {'assigned_subjects_per_year': assigned_subjects_per_year,
'current_date_time': current_date_time, 'current_school_year': current['school_year']}
html = render_to_string('print_schedule_pdf.html', context)
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = "filename='schedule_{}.pdf'".format(
current['school_year'])
weasyprint.HTML(string=html).write_pdf(response, stylesheets=[
weasyprint.CSS(settings.STATIC_ROOT + '/main.css'), ])
return response
``` |
{
"source": "joshuarli/get",
"score": 3
} |
#### File: get/src/util.py
```python
import re
import unicodedata
_chunk = re.compile(r"([0-9]+)")
_sub1 = re.compile(r"[^\w\s-]")
_sub2 = re.compile(r"[-\s]+")
# Based on <NAME>'s simplification of <NAME>'s alphanum algorithm,
# but I'm not sure if this is 100% correct.
# isdigit is faster than branching off handling a ValueError.
def alphanum_key(s: str):
return [int(chunk) if chunk.isdigit() else chunk for chunk in _chunk.split(s)]
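# e.g. sorted(["img10", "img2"], key=alphanum_key) -> ["img2", "img10"]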
# Adapted from Django.
def slugify(value: str, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
return _sub2.sub("-", _sub1.sub("", value.lower())).strip("-_")
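# e.g. slugify("Hello, World!") -> "hello-world"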
``` |
{
"source": "JoshuaRM/ImPyParser",
"score": 2
} |
#### File: ImPyParser/src/file.py
```python
import twelve
from two import four
from .hello import world
def randomFunction():
x = 27
```
#### File: ImPyParser/src/relative.py
```python
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
class MyRawChecker(BaseChecker):
# Checks each line for absolute imports and warns about them
__implements__ = IRawChecker
name = 'relative-checker'
msgs = {'W0043': (
'Use only relative imports for keras classes and functions',
'absolute-import',
'Refer to relative versus absolute imports'
),
}
options = ()
def process_module(self, node):
"""process a module
the module's content is accessible via node.stream() function
"""
ignore = ['__future__', 'collections', 'random', 'six', 'cPickle', 'scipy', 'hashlib',
'io', 'contextlib', 'unittest', 'types', 'h5py', 'inspect', 'tarfile', 'yaml',
'copy', 'marshal', 'requests', 'functools', 'gzip', 're', 'Queue', 'queue',
'os', 'pickle', 'importlib', 'mock', 'threading', 'codecs', 'tempfile', 'time',
'binascii', 'pydot', 'zipfile', 'json', 'shutil', 'abc', 'sys', 'csv', 'cntk',
'warnings', 'numpy', 'skimage', 'multiprocessing', 'distutils', 'tensorflow',
'theano', 'keras_applications', "keras_preprocessing"]
comment = False
with node.stream() as stream:
for (lineno, line) in enumerate(stream):
line = line.decode("utf-8").strip()
#Ignore lines within multi-line comments
if '\"\"\"' in line:
comment = not comment
#Empty line or comment line
if line == "" or comment == True or '#' in line:
continue
else:
split_line = line.split()
#Import
if split_line[0] == 'import':
module_split = split_line[1].split('.')
#Check if module is an ignored library
if module_split[0] in ignore:
continue
else:
self.add_message('W0043', line=lineno)
#ImportFrom
elif split_line[0] == 'from' and len(split_line) >= 3:
#Check if module is an ignored library or the line doesn't contain an import
if split_line[1] in ignore or split_line[2] != 'import':
continue
#Check if import is absolute or relative
elif split_line[1].startswith('.'):
pass
else:
module_split = split_line[1].split('.')
if module_split[0] in ignore:
continue
else:
self.add_message('W0043', line=lineno)
else:
continue
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(MyRawChecker(linter))
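# Usage sketch (assumed invocation, not taken from this repo's docs): with this module importable,
# run `pylint --load-plugins=relative your_package` so register() hooks MyRawChecker into the linter.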
``` |
{
"source": "JoshuaRM/keras-bot",
"score": 3
} |
#### File: enforce_codeowners/keras_bot/pull_requests.py
```python
import os
import github
codeowners_url = ('https://raw.githubusercontent.com/'
'keras-team/keras-contrib/master/CODEOWNERS')
repo_id = 'keras-team/keras-contrib'
import urllib.request
def parse_codeowners():
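# Each non-comment line of the CODEOWNERS file is expected to look like "<path> ... @<owner>";
# non-example paths are also mirrored under tests/ because the tests share the same owner.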
response = urllib.request.urlopen(codeowners_url)
data = response.read()
text = data.decode('utf-8')
map_path_owner = []
for line in text.splitlines():
line = line.strip()
if line.startswith('#') or line == '':
continue
x = line.split(' ')
path = x[0]
owner = x[-1]
owner = owner[1:] # removes '@'
map_path_owner.append((path, owner))
if not path.startswith('examples'): # examples don't have tests.
map_path_owner.append(('tests/' + path, owner))
return map_path_owner
def send_message(pull_request, owner, files_changed):
message = """Hello, I'm a bot! I can make mistakes, notify gabrieldemarmiesse if I made one.
I see that you modified the following file{plural}:
{files_changed}
The owner of those file is @{owner}
@{owner} could you please take a look at it whenever
you have the time and add a review? Thank you in advance for the help.
"""
files_changed_formatted = '\n'.join(f'* `{x}`' for x in files_changed)
plural = 's' if len(files_changed) > 1 else ''
message = message.format(files_changed=files_changed_formatted,
owner=owner,
plural=plural)
print(f'Would send message to PR {pull_request.title}:\n')
print(message)
print('-----------------------------------------------------')
if os.environ.get('DRY_RUN', '1') == '0':
pull_request.create_issue_comment(message)
print('Sent.')
def already_notified_owner(pull_request):
for comment in pull_request.get_issue_comments():
if comment.user.login != 'bot-of-gabrieldemarmiesse':
continue
if 'owner' in comment.body and 'review' in comment.body:
print(f'Already notified owner in {pull_request.title}')
return True
return False
def examine_single_pull_request(pull_request, map_path_owner):
if 'adam' in pull_request.title:
pass
owners_to_notify = []
for file_changed in pull_request.get_files():
for file_owned, owner in map_path_owner:
if file_changed.filename == file_owned and owner != pull_request.user.login:
owners_to_notify.append((file_changed.filename, owner))
if len(set(x[1] for x in owners_to_notify)) > 1:
# let's not notify multiple people, otherwise it's going to turn into
# a mess for big PRs with multiple files.
return
# let's avoid sending a message if we already sent one.
if already_notified_owner(pull_request):
return
if owners_to_notify:
owner_to_notify = owners_to_notify[0][1]
files_changed = [x[0] for x in owners_to_notify]
send_message(pull_request, owner_to_notify, files_changed)
def examine_pull_requests():
map_path_owner = parse_codeowners()
client = github.Github(os.environ['GITHUB_TOKEN'])
repo = client.get_repo(repo_id)
for pull_request in repo.get_pulls():
examine_single_pull_request(pull_request, map_path_owner)
if __name__ == '__main__':
examine_pull_requests()
``` |
{
"source": "josh-ua/Robinhood",
"score": 2
} |
#### File: Robinhood/tests/test_getdata.py
```python
from os import path
from datetime import datetime
import six
import pytest
from flaky import flaky
import requests
from Robinhood import Robinhood
import helpers
if six.PY2:
from Robinhood import RH_exception
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
CONFIG_FILENAME = path.join(HERE, 'test_config.cfg')
CONFIG = helpers.get_config(CONFIG_FILENAME)
TEST_QUOTE = {}
TESTABLE_KEYS = [
'previous_close_date',
'symbol',
'trading_halted',
'previous_close',
'last_trade_price_source',
'instrument'
]
@pytest.mark.incremental
class TestQuoteHelpers:
"""wrapper to test quote architecture in order"""
test_ticker = CONFIG.get('FETCH', 'test_ticker')
fake_ticker = CONFIG.get('FETCH', 'fake_ticker')
rh_obj = Robinhood()
def test_quote_endpoint(self, config=CONFIG):
"""get raw data from Robinhood to test against"""
global TEST_QUOTE
TEST_QUOTE = helpers.fetch_REST_directly(
'quotes',
self.test_ticker,
config
)
def test_validate_quote(self):
"""validate fetcher"""
data = self.rh_obj.quote_data(self.test_ticker)
if data['updated_at'] == TEST_QUOTE['updated_at']:
assert data == TEST_QUOTE
else:
for key in data.keys(): #SKIP PRICE DATA
if key in TESTABLE_KEYS:
assert data[key] == TEST_QUOTE[key]
def test_validate_fail_quote(self):
"""validate bad-path exception"""
with pytest.raises(NameError):
data = self.rh_obj.quote_data(self.fake_ticker)
def test_validate_get_quote(self):
"""validate `get_quote` call"""
data = self.rh_obj.get_quote(self.test_ticker)
assert data == TEST_QUOTE['symbol']
@flaky
def test_validate_ask_price(self):
"""validate `ask_price` call"""
data = self.rh_obj.ask_price(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['ask_price']
@flaky
def test_validate_ask_size(self):
"""validate `ask_size` call"""
data = self.rh_obj.ask_size(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['ask_size']
@flaky
def test_validate_bid_price(self):
"""validate `bid_price` call"""
data = self.rh_obj.bid_price(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['bid_price']
@flaky
def test_validate_bid_size(self):
"""validate `bid_size` call"""
data = self.rh_obj.bid_size(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['bid_size']
@flaky
def test_validate_last_trade_price(self):
"""validate `last_trade_price` call"""
data = self.rh_obj.last_trade_price(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['last_trade_price']
def test_validate_previous_close(self):
"""validate `previous_close` call"""
data = self.rh_obj.previous_close(self.test_ticker)
assert data == TEST_QUOTE['previous_close']
def test_validate_previous_close_date(self):
"""validate `previous_close_date` call"""
data = self.rh_obj.previous_close_date(self.test_ticker)
assert data == TEST_QUOTE['previous_close_date']
def test_validate_adjusted_previous_close(self):
"""validate `adjusted_previous_close` call"""
data = self.rh_obj.adjusted_previous_close(self.test_ticker)
assert data == TEST_QUOTE['adjusted_previous_close']
def test_validate_symbol(self):
"""validate `symbol` call"""
data = self.rh_obj.symbol(self.test_ticker)
assert data == TEST_QUOTE['symbol']
@flaky
def test_validate_last_updated_at(self):
"""validate `last_updated_at` call"""
data = self.rh_obj.last_updated_at(self.test_ticker)
quote = self.rh_obj.quote_data(self.test_ticker)
assert data == quote['updated_at']
TEST_FUNDAMENTAL = {}
@pytest.mark.incremental
class TestFundamentalsHelpers:
"""wrapper to test fundamental architecture in order"""
test_ticker = CONFIG.get('FETCH', 'test_ticker')
fake_ticker = CONFIG.get('FETCH', 'fake_ticker')
rh_obj = Robinhood()
def test_fundamental_endpoint(self, config=CONFIG):
"""get raw data from Robinhood to test against"""
global TEST_FUNDAMENTAL
TEST_FUNDAMENTAL = helpers.fetch_REST_directly(
'fundamentals',
self.test_ticker,
config
)
TEST_FUNDAMENTAL['volume'] = 'OVERWRITE' #flaky value
@flaky
def test_validate_fundamental(self):
"""validate fetcher"""
data = self.rh_obj.get_fundamentals(self.test_ticker)
data['volume'] = 'OVERWRITE' #flaky value
assert data == TEST_FUNDAMENTAL
def test_validate_fail_fundamental(self):
"""validate bad-path exception"""
with pytest.raises(NameError):
data = self.rh_obj.get_fundamentals(self.fake_ticker)
@flaky
def test_validate_fundamental_wrapper(self):
main_data = self.rh_obj.fundamentals(self.test_ticker)
wrapped_data = self.rh_obj.fundamentals(self.test_ticker)
main_data['volume'] = 'OVERWRITE' #flaky value
wrapped_data['volume'] = 'OVERWRITE' #flaky value
assert wrapped_data == main_data
assert wrapped_data == TEST_FUNDAMENTAL
TEST_URL_RESULT = {}
@pytest.mark.incremental
class TestURLWrapper:
"""make sure get_url returns expected behavior"""
base_url = 'https://api.robinhood.com/'
rh_obj = Robinhood()
def test_url_endpoint(self):
"""fetch url directly"""
global TEST_URL_RESULT
req = requests.get(self.base_url)
req.raise_for_status()
TEST_URL_RESULT = req.json()
def test_get_url(self):
"""fetch url with get_url"""
data = self.rh_obj.get_url(self.base_url)
assert data == TEST_URL_RESULT
def test_get_news(config=CONFIG):
"""test `get_news` endpoint"""
test_ticker = CONFIG.get('FETCH', 'test_ticker')
raw_news = helpers.fetch_REST_directly(
'news',
test_ticker,
config
)
get_news = Robinhood().get_news(test_ticker)
assert get_news == raw_news
def test_instruments(config=CONFIG):
"""test `instruments` endpoint"""
#TODO: this test is bad, just repeat of code inside endpoint
params = {
'query': CONFIG.get('FETCH', 'test_ticker')
}
headers = {
'User-Agent': CONFIG.get('FETCH', 'user_agent')
}
address = Robinhood().endpoints['instruments']
res = requests.get(
address,
headers=headers,
params=params
)
res.raise_for_status()
hard_data = res.json()['results']
data = Robinhood().instruments(CONFIG.get('FETCH', 'test_ticker'))
assert data == hard_data
``` |
{
"source": "Joshua-Robison/NNFS",
"score": 4
} |
#### File: NNFS/src/helper.py
```python
import numpy as np
def shuffle(X, y):
"""This function shuffles the input features (X) and labels (y)."""
perm = np.random.permutation(X.shape[0])
return X[perm], y[perm]
def mae(y_true, y_pred):
"""This function computes the mean absolute error for a neural network."""
return np.mean(np.abs(y_true - y_pred))
def rmse(y_true, y_pred):
"""This function computes the root mean squared error for a neural network."""
return np.sqrt(np.mean(np.power(y_true - y_pred, 2)))
def evaluate(model, X_test, y_test):
"""This function computes the mae and rmse for a neural network."""
preds = model.forward(X_test)
preds = preds.reshape(-1, 1)
print('Mean absolute error: {:.2f}'.format(mae(preds, y_test)))
print('Root mean squared error: {:.2f}\n'.format(rmse(preds, y_test)))
if __name__ == '__main__':
pass
```
#### File: NNFS/src/neuralnetwork.py
```python
import numpy as np
from typing import List
from block import Layer, Dense
from linear import Linear
from loss import Loss, MeanSquaredError
class NeuralNetwork:
"""This class defines a neural network."""
def __init__(self, layers: List[Layer], loss: Loss, seed: int=1):
self.layers = layers
self.loss = loss
self.seed = seed
if seed:
for layer in self.layers:
setattr(layer, "seed", self.seed)
def forward(self, x_batch: np.ndarray) -> np.ndarray:
out = x_batch
for layer in self.layers:
out = layer.forward(out)
return out
def backward(self, loss_grad: np.ndarray):
grad = loss_grad
for layer in reversed(self.layers):
grad = layer.backward(grad)
def train_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> float:
predictions = self.forward(x_batch)
loss = self.loss.forward(predictions, y_batch)
self.backward(self.loss.backward())
return loss
def params(self):
for layer in self.layers:
yield from layer.params
def param_grads(self):
for layer in self.layers:
yield from layer.param_grads
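# Usage sketch (the Dense/MeanSquaredError constructor arguments are assumptions, not checked here):
#   model = NeuralNetwork(layers=[Dense(...)], loss=MeanSquaredError(), seed=1)
#   batch_loss = model.train_batch(X_batch, y_batch)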
if __name__ == '__main__':
pass
```
#### File: NNFS/src/weights.py
```python
import numpy as np
from operation import ParamOperation
class WeightMultiply(ParamOperation):
"""This class defines a weight multiplication operation for a neural network."""
def __init__(self, W: np.ndarray):
super().__init__(W)
def _output(self) -> np.ndarray:
return np.dot(self.input_, self.param)
def _input_grad(self, output_grad: np.ndarray) -> np.ndarray:
return np.dot(output_grad, np.transpose(self.param, (1, 0)))
def _param_grad(self, output_grad: np.ndarray) -> np.ndarray:
return np.dot(np.transpose(self.input_, (1, 0)), output_grad)
if __name__ == '__main__':
pass
``` |
{
"source": "Joshua-Robison/SupervisedLearning",
"score": 4
} |
#### File: SupervisedLearning/src/perceptron.py
```python
import numpy as np
class Perceptron(object):
def __init__(self, lr: float = 0.01, max_iters: int = 1000) -> None:
self.lr = lr
self.iters = max_iters
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
D = X.shape[1]
self.w = np.random.randn(D)
self.b = 0
N = len(y)
costs = []
for _ in range(self.iters):
y_hat = self.predict(X)
errors = np.nonzero(y != y_hat)[0]
if len(errors) == 0:
break
i = np.random.choice(errors)
self.w += self.lr * y[i] * X[i]
self.b += self.lr * y[i]
c = len(errors) / N
costs.append(c)
def predict(self, X: np.ndarray) -> np.ndarray:
return np.sign(X.dot(self.w) + self.b)
def score(self, X: np.ndarray, y: np.ndarray) -> float:
p = self.predict(X)
return np.mean(p == y)
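# Usage sketch: labels appear to be expected in {-1, +1}, since predictions come from np.sign.
#   X = np.array([[2.0, 1.0], [-1.0, -2.0]]); y = np.array([1, -1])
#   clf = Perceptron(lr=0.1); clf.fit(X, y); print(clf.score(X, y))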
if __name__ == "__main__":
pass
``` |
{
"source": "joshuaroot/chaostoolkit-lib",
"score": 3
} |
#### File: chaostoolkit-lib/chaoslib/rollback.py
```python
from concurrent.futures import ThreadPoolExecutor
from typing import Iterator
from logzero import logger
from chaoslib.activity import execute_activity
from chaoslib.types import Configuration, Experiment, Run, Secrets
__all__ = ["run_rollbacks"]
def run_rollbacks(experiment: Experiment, configuration: Configuration,
secrets: Secrets, pool: ThreadPoolExecutor,
dry: bool = False) -> Iterator[Run]:
"""
Run all rollbacks declared in the experiment in their order. Wait for
each rollback activity to complete before to the next unless the activity
is declared with the `background` flag.
"""
rollbacks = experiment.get("rollbacks", [])
if not rollbacks:
logger.info("No declared rollbacks, let's move on.")
for activity in rollbacks:
logger.info("Rollback: {t}".format(t=activity.get("name")))
if activity.get("background"):
logger.debug("rollback activity will run in the background")
yield pool.submit(execute_activity, experiment=experiment,
activity=activity, configuration=configuration,
secrets=secrets, dry=dry)
else:
yield execute_activity(experiment, activity,
configuration=configuration,
secrets=secrets, dry=dry)
```
#### File: fixtures/controls/dummy_with_secrets.py
```python
from typing import Any, Dict, List
from chaoslib.types import Activity, Configuration, \
Experiment, Hypothesis, Journal, Run, Secrets, Settings
def configure_control(experiment: Experiment, configuration: Configuration,
secrets: Secrets, settings: Settings):
experiment["configure_control_secrets"] = secrets
def cleanup_control():
pass
def before_experiment_control(context: Experiment, secrets: Secrets, **kwargs):
context["before_experiment_control_secrets"] = secrets
def after_experiment_control(context: Experiment, state: Journal, secrets: Secrets, **kwargs):
context["after_experiment_control_secrets"] = secrets
def before_hypothesis_control(context: Hypothesis, experiment: Experiment, secrets: Secrets, **kwargs):
experiment["before_hypothesis_control_secrets"] = secrets
def after_hypothesis_control(context: Hypothesis, experiment: Experiment,
state: Dict[str, Any], secrets: Secrets, **kwargs):
experiment["after_hypothesis_control_secrets"] = secrets
def before_method_control(context: Experiment, secrets: Secrets, **kwargs):
context["before_method_control_secrets"] = secrets
def after_method_control(context: Experiment, state: List[Run], secrets: Secrets, **kwargs):
context["after_method_control_secrets"] = secrets
def before_rollback_control(context: Experiment, secrets: Secrets, **kwargs):
context["before_rollback_control_secrets"] = secrets
def after_rollback_control(context: Experiment, state: List[Run], secrets: Secrets, **kwargs):
context["after_rollback_control_secrets"] = secrets
def before_activity_control(context: Activity, experiment: Experiment, secrets: Secrets, **kwargs):
experiment["before_activity_control_secrets"] = secrets
def after_activity_control(context: Activity, experiment: Experiment, state: Run, secrets: Secrets, **kwargs):
experiment["after_activity_control_secrets"] = secrets
```
#### File: fixtures/controls/interrupter.py
```python
from chaoslib.exceptions import InterruptExecution
from chaoslib.types import Activity, Run
def before_activity_control(context: Activity, **kwargs):
raise InterruptExecution("let's blow this up")
```
#### File: chaostoolkit-lib/tests/test_action.py
```python
import sys
import pytest
import requests_mock
from chaoslib.exceptions import InvalidActivity
from chaoslib.activity import ensure_activity_is_valid
from chaoslib.types import Action
from fixtures import actions
def test_empty_action_is_invalid():
with pytest.raises(InvalidActivity) as exc:
ensure_activity_is_valid(actions.EmptyAction)
assert "empty activity is no activity" in str(exc.value)
```
#### File: chaostoolkit-lib/tests/test_extension.py
```python
import pytest
from chaoslib.exceptions import InvalidExperiment
from chaoslib.extension import get_extension, has_extension, merge_extension, \
remove_extension, set_extension, validate_extensions
from fixtures import experiments
def test_extensions_must_have_name():
with pytest.raises(InvalidExperiment):
exp = experiments.Experiment.copy()
set_extension(exp, {"somekey": "blah"})
validate_extensions(exp)
def test_get_extension_returns_nothing_when_not_extensions_block():
assert get_extension(experiments.Experiment, "myext") is None
def test_get_extension_returns_nothing_when_missing():
ext = experiments.Experiment.copy()
set_extension(ext, {
"name": "myotherext",
"somekey": "blah"
})
assert get_extension(ext, "myext") is None
def test_get_extension():
exp = experiments.Experiment.copy()
set_extension(exp, {
"name": "myext",
"somekey": "blah"
})
ext = get_extension(exp, "myext")
assert ext is not None
assert ext["somekey"] == "blah"
def test_remove_extension():
exp = experiments.Experiment.copy()
set_extension(exp, {
"name": "myext",
"somekey": "blah"
})
assert get_extension(exp, "myext") is not None
remove_extension(exp, "myext")
assert get_extension(exp, "myext") is None
def test_merge_extension():
exp = experiments.Experiment.copy()
set_extension(exp, {
"name": "myext",
"somekey": "blah"
})
ext = get_extension(exp, "myext")
assert ext is not None
assert ext["somekey"] == "blah"
merge_extension(exp, {
"name": "myext",
"somekey": "burp",
"otherkey": "oneday"
})
ext = get_extension(exp, "myext")
assert ext is not None
assert ext["somekey"] == "burp"
assert ext["otherkey"] == "oneday"
``` |
{
"source": "joshua-r/touch-test",
"score": 3
} |
#### File: joshua-r/touch-test/touch.py
```python
import argparse
import datetime
import evdev
REQUIRED_MULTITOUCH_CODES = [
evdev.ecodes.ABS_MT_SLOT, evdev.ecodes.ABS_MT_POSITION_X,
evdev.ecodes.ABS_MT_POSITION_Y, evdev.ecodes.ABS_MT_TRACKING_ID
]
class TouchPoint:
def __init__(self, id):
self.id = id
self.start_time = datetime.datetime.now()
self.end_time = None
self.x = None
self.y = None
def update_x(self, x):
self.x = x
def update_y(self, y):
self.y = y
def finish(self):
self.end_time = datetime.datetime.now()
def is_multitouch_device(device):
capabilities = device.capabilities()
if not evdev.ecodes.EV_ABS in capabilities:
return False
abs_codes = [cap[0] for cap in capabilities[evdev.ecodes.EV_ABS]]
return all([code in abs_codes for code in REQUIRED_MULTITOUCH_CODES])
def get_abs_value_range(device, code):
for caps_code, abs_info in device.capabilities()[evdev.ecodes.EV_ABS]:
if caps_code == code:
return (abs_info.min, abs_info.max)
return None
def main():
parser = argparse.ArgumentParser(description="Test touch input.")
parser.add_argument("--device", type=str)
args = parser.parse_args()
device = evdev.InputDevice(args.device)
if not is_multitouch_device(device):
raise RuntimeError("Device '{}' is not a multitouch device!".format(
device.path))
x_min, x_max = get_abs_value_range(device, evdev.ecodes.ABS_MT_POSITION_X)
x_range = x_max - x_min
y_min, y_max = get_abs_value_range(device, evdev.ecodes.ABS_MT_POSITION_Y)
y_range = y_max - y_min
finished_touch_points = []
touch_points = [None] * 100
slot = 0
now = datetime.datetime.now()
with open("log-{}.csv".format(now.isoformat()), "w") as log_file:
for event in device.read_loop():
if event.code == evdev.ecodes.ABS_MT_SLOT:
slot = event.value
elif event.code == evdev.ecodes.ABS_MT_TRACKING_ID:
if event.value >= 0:
# new touch point
touch_points[slot] = TouchPoint(id=event.value)
else:
# touch contact was lifted
touch_point = touch_points[slot]
touch_point.finish()
finished_touch_points.append(touch_point)
touch_points[slot] = None
duration_ms = (touch_point.end_time - touch_point.start_time).total_seconds() * 1000  # total_seconds() so contacts longer than 1 s aren't truncated
print("{id},{x},{y},{time},{duration}".format(
id=touch_point.id,
x=touch_point.x,
y=touch_point.y,
time=datetime.datetime.now(),
duration=duration_ms),
file=log_file)
elif event.code == evdev.ecodes.ABS_MT_POSITION_X:
touch_points[slot].update_x(x=(event.value - x_min) / x_range)
elif event.code == evdev.ecodes.ABS_MT_POSITION_Y:
touch_points[slot].update_y(y=(event.value - y_min) / y_range)
if __name__ == "__main__":
main()
``` |
{
"source": "JoshuaS3/s3-bsync",
"score": 2
} |
#### File: src/classes/sync_managed_bucket.py
```python
from . import sync_directory_map, sync_fileobject
__all__ = ["sync_managed_bucket"]
class sync_managed_bucket:
bucket_name = ""
directory_maps = []
fileobjects = []
def __init__(self, bucket_name):
self.bucket_name = bucket_name
def create_dirmap(
self,
local_path,
s3_prefix,
gz_compress=0,
recursive=True,
gpg_enabled=False,
gpg_email="",
):
# NOTE: `classes` is not imported in this module, so the constructor call below is unresolved as written
dirmap = classes.sync_dirmap()
dirmap.local_path = local_path
dirmap.s3_prefix = s3_prefix
dirmap.gz_compress = gz_compress
dirmap.recursive = recursive
dirmap.gpg_enabled = gpg_enabled
dirmap.gpg_email = gpg_email
self.directory_maps.append(dirmap)
def create_fileobject(self, key, modified, etag, size):
fileobject = classes.sync_fileobject()
fileobject.key = key
fileobject.modified = modified
fileobject.etag = etag
fileobject.size = size
self.fileobjects.append(fileobject)
```
#### File: s3-bsync/src/__main__.py
```python
import sys
import logging
import pprint
from . import *
logger = logging.getLogger(__name__)
def main():
args = command_parse.command_parse(sys.argv[1:])
logLevel = logging.INFO
if args.debug:
logLevel = logging.DEBUG
logging.basicConfig(
format="\x1b[0;37m[ \x1b[0;35m%(relativeCreated)04d \x1b[0;37m/ \x1b[0;33m%(name)s\x1b[0;37m:\x1b[1;36m%(funcName)s \x1b[0;37m/ \x1b[0;34m%(levelname)s \x1b[0;37m] \x1b[0m%(message)s",
datefmt="%H:%M:%S",
level=logLevel,
)
logger.debug(f"Parsed input arguments:\n{pprint.pformat(vars(args))}")
logger.debug("Sanitizing input arguments")
settings = command_parse.sanitize_arguments(args)
logger.debug(f"Interpreted settings:\n{pprint.pformat(vars(settings))}")
run(settings)
return 0
if __name__ == "__main__":
sys.exit(main() or 0)
``` |
{
"source": "Joshua-Samjaya/elmo-oie",
"score": 3
} |
#### File: elmo-oie/src/utils.py
```python
import re
import random
import numpy as np
import nltk
from nltk.tree import Tree
import sys, os
def get_vocab_datum(data, threshold=1, log=False):
vocab = {}
for datum in data:
words = datum['text'].split()
for word in words:
vocab[word] = vocab.get(word, 0)+1
selected = set()
for word in vocab.keys():
if vocab[word] >= threshold:
selected.add(word)
if log:
print(len(vocab)-len(selected), 'words were filtered out because their counts are less than', threshold)
return selected
def filter_data_by_vocab(data, vocab, token='<unk>'):
vocab.add(token)
for datum in data:
datum['text'] = ' '.join([i if i in vocab else token for i in datum['text'].split()])
return data
def wordvec_add_unknown_vocab(wordvec, vocab, var=1, length=50):
words = wordvec.keys()
word = next(iter(words))
length = len(wordvec[word])
count = 0
f = open('log.txt', 'w')
for i in vocab:
if i not in words:
f.write(i+'\n')
count += 1
wordvec[i] = np.random.uniform(-var, var, length)
f.close()
print(count,"words are not in trained embeddings")
return wordvec
def get_lookup(wordvec):
# get the lookup table from word vector
words = list(wordvec.keys())
element = words[0]
dim = len(wordvec[element])
print("Embedding dim = "+str(dim))
word_map = {}
W = np.zeros((len(words)+1, dim), dtype=np.float32)
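# Row 0 is left as all zeros (usable as a padding index); vocabulary words occupy rows 1..len(words).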
for i in range(len(words)):
word = words[i]
W[i+1] = wordvec[word]
word_map[word] = i+1
return W, word_map
def get_label_map(data):
y_map = set()
for datum in data:
y_map.add(datum['y'])
label_map = {}
for i, y in enumerate(y_map):
label_map[y] = i
return label_map
def get_maxlen(data_list):
maxlen = 0
for i in data_list:
maxleni = max(map(lambda x: x['num_words'], i))
maxlen = max(maxlen, maxleni)
return maxlen
def sample_from_data(data, ratio=0.1):
num_samples = int(ratio*len(data))
sample = np.random.choice(data, num_samples, replace=False)
return sample
def sample_from_numpy(X, y, ratio=0.1):
num_samples = int(ratio*X.shape[0])
sample = np.random.choice(X.shape[0], num_samples, replace=False)
return X[sample], y[sample]
def consolidate_labels(labels):
"""
Return a consolidated list of labels, e.g., O-A1 -> O, A1-I -> A
"""
return [consolidate_label(label) for label in labels]
def consolidate_label(label):
"""
Return a consolidated label, e.g., O-A1 -> O, A1-I -> A
"""
return label.split("-")[0] if label.startswith("O") else label
``` |
{
"source": "JoshuaSBrown/InterMol",
"score": 4
} |
#### File: InterMol/intermol/atom.py
```python
class Atom(object):
""" """
def __init__(self, index, name=None, residue_index=-1, residue_name=None):
"""Create an Atom object
Args:
index (int): index of atom in the molecule
name (str): name of the atom (eg., N, CH)
residue_index (int): index of residue in the molecule
residue_name (str): name of the residue (eg., THR, CYS)
"""
self.index = index
self.name = name
self.residue_index = residue_index
self.residue_name = residue_name
self._position = list()
self._velocity = list()
self._force = list()
self._atomtype = dict()
self.bondingtype = None
self.atomic_number = None
self.cgnr = None
self._mass = dict()
self._charge = dict()
self.ptype = "A"
self._sigma = dict()
self._epsilon = dict()
@property
def atomtype(self):
return self._atomtype
@atomtype.setter
def atomtype(self, index_atomtype):
"""Sets the atomtype
Args:
index_atomtype (tuple): A or B state and atomtype
"""
try:
idx, val = index_atomtype
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._atomtype[idx] = val
@property
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, index_sigma):
"""Sets the sigma
Args:
index_sigma (tuple): A or B state and sigma
"""
try:
idx, val = index_sigma
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._sigma[idx] = val
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, index_epsilon):
"""Sets the epsilon
Args:
index_epsilon (tuple): A or B state and epsilon
"""
try:
idx, val = index_epsilon
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._epsilon[idx] = val
@property
def mass(self):
return self._mass
@mass.setter
def mass(self, index_mass):
"""Sets the mass
Args:
index_mass (tuple): A or B state and mass
"""
try:
idx, val = index_mass
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._mass[idx] = val
@property
def charge(self):
return self._charge
@charge.setter
def charge(self, index_charge):
"""Sets the charge
Args:
index_charge (tuple): A or B state and charge
"""
try:
idx, val = index_charge
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._charge[idx] = val
@property
def position(self):
"""Return the cartesian coordinates of the atom """
return self._position
@position.setter
def position(self, xyz):
"""Sets the position of the atom
Args:
xyz (list, float): x, y and z coordinates
"""
self._position = xyz
@property
def velocity(self):
"""Return the velocity of the atom"""
return self._velocity
@velocity.setter
def velocity(self, vxyz):
"""Sets the velocity of the atom
Args:
vxyz (list, float): x-, y- and z-directed velocity
"""
self._velocity = vxyz
@property
def force(self):
"""Return the force on the atom """
return self._force
@force.setter
def force(self, fxyz):
"""Sets the force on the atom
Args:
fxyz (list, float): x-, y- and z-directed force
"""
self._force = fxyz
def __repr__(self):
return 'Atom{0}({1}, {2})'.format(id(self), self.index, self.name)
def __str__(self):
return 'Atom({0}, {1})'.format(self.index, self.name)
```
#### File: intermol/desmond/desmond_parser.py
```python
import logging
from warnings import warn
import math
import numpy as np
import parmed.unit as units
from intermol.atom import Atom
from intermol.forces import *
import intermol.forces.forcefunctions as ff
from intermol.exceptions import (UnimplementedFunctional, UnsupportedFunctional,
UnimplementedSetting, UnsupportedSetting,
DesmondError, InterMolError)
from intermol.molecule import Molecule
from intermol.moleculetype import MoleculeType
from intermol.system import System
from intermol.desmond import cmap_parameters
#MRS for old desmond functionality
import re
import copy
logger = logging.getLogger('InterMolLog')
ENGINE = 'desmond'
# driver helper functions
def load(cms_file):
"""Load a DESMOND input file into a 'System'
Args:
cms_file:
include_dir:
Returns:
system:
"""
parser = DesmondParser(cms_file)
return parser.read()
def save(cms_file, system):
"""Unpacks a 'System' into a DESMOND input file
Args:
cms_file:
system:
"""
parser = DesmondParser(cms_file, system)
return parser.write()
# parser helper functions
def end_header_section(blank_section, header, header_lines):
if blank_section:
header_lines = list()
header_lines.append(header)
header_lines.append(' :::\n')
else:
header_lines[0] = header
return header_lines
def split_with_quotes(line):
line = list(line)
in_quotes = False
for i, char in enumerate(line):
if char == '"':
in_quotes = not in_quotes
if char == ' ' and in_quotes:
line[i] = '_'
space_split = "".join(line).split()
for i, sub in enumerate(space_split):
sub = sub.replace('"', '')
space_split[i] = sub.replace('_', ' ')
return space_split
def create_lookup(forward_dict):
return dict((v, k) for k, v in forward_dict.items())
def create_type(forward_dict):
return dict((k, eval(v.__name__ + 'Type')) for k, v in forward_dict.items())
class DesmondParser(object):
"""
A class containing methods required to read in a Desmond CMS File
"""
# 'lookup_*' is the inverse dictionary typically used for writing
desmond_combination_rules = {'1': 'Multiply-C6C12',
'2': 'Lorentz-Berthelot',
'3': 'Multiply-Sigeps'
}
lookup_desmond_combination_rules = create_lookup(desmond_combination_rules)
desmond_pairs = {'LJ12_6_SIG_EPSILON': LjSigepsPair,
'LJ': LjDefaultPair,
'COULOMB': LjDefaultPair
}
lookup_desmond_pairs = create_lookup(desmond_pairs) # not unique
desmond_pair_types = create_type(desmond_pairs)
desmond_bonds = {'HARM_CONSTRAINED': HarmonicBond,
'HARM': HarmonicBond
}
lookup_desmond_bonds = create_lookup(desmond_bonds) # not unique
desmond_bond_types = create_type(desmond_bonds)
def canonical_bond(self, bond, params, direction='into', name=None):
if direction == 'into':
canonical_force_scale = self.canonical_force_scale_into
phase = 'Read'
else:
try:
name = self.lookup_desmond_bonds[bond.__class__] # check to make sure this OK given the c
except:
raise UnsupportedFunctional(bond, ENGINE)
canonical_force_scale = self.canonical_force_scale_from
phase = 'Write'
names = []
paramlists = []
if bond.__class__ in [HarmonicBond, HarmonicPotentialBond]:
if direction == 'into':
bond.k *= canonical_force_scale
if name == 'HARM_CONSTRAINED':
bond.c = True
elif name == 'HARM':
bond.c = False
else:
warn("ReadError: Found unsupported bond in Desmond %s" % name)
return bond
else:
params['k'] *= canonical_force_scale
# harmonic potentials in Gromacs should be constrained (??: check what this means)
name = 'HARM'
if hasattr(bond,'c'):
if getattr(bond,'c') and not isinstance(bond, HarmonicPotentialBond):
name = 'HARM_CONSTRAINED'
names.append(name)
paramlists.append(params)
return names, paramlists
desmond_angles = {'HARM_CONSTRAINED': HarmonicAngle,
'HARM': HarmonicAngle,
'UB': UreyBradleyNoharmAngle
}
lookup_desmond_angles = create_lookup(desmond_angles)
desmond_angle_types = create_type(desmond_angles)
def canonical_angle(self, angle, params, direction='into', name=None,
molecule_type=None):
"""
Args:
name:
kwds:
angle:
direction: 'into' means into the canonical form, 'from' means from the
canonical form into Desmond
current molecule type (would like to be able to get rid of this, but need it to search angles for now
Returns:
modified list of keywords and names
"""
if direction == 'into':
canonical_force_scale = self.canonical_force_scale_into
else:
# we'd like to automate this, but currently have to state explicitly.
if angle.__class__ not in [HarmonicAngle, UreyBradleyAngle]:
raise UnsupportedFunctional(angle, ENGINE)
canonical_force_scale = self.canonical_force_scale_from
phase = 'Write'
names = []
paramlists = []
if angle.__class__ in [HarmonicAngle, UreyBradleyAngle, UreyBradleyNoharmAngle]:
if direction == 'into':
if angle.__class__ in [UreyBradleyAngle, UreyBradleyNoharmAngle]:
angle.kUB *= canonical_force_scale
if angle.__class__ in [UreyBradleyAngle, HarmonicAngle]:
angle.k *= canonical_force_scale
if name == 'HARM_CONSTRAINED': # this needs to go first because HARM is a substring
angle.c = True
elif name == 'HARM':
angle.c = False
else:
params['k'] = canonical_force_scale * params['k']
name = 'HARM'
if hasattr(angle,'c'):
if getattr(angle,'c'):
name = 'HARM_CONSTRAINED'
if direction == 'into' and angle.__class__ in [UreyBradleyNoharmAngle,HarmonicAngle]:
if angle.__class__ == UreyBradleyNoharmAngle:
# Urey-Bradley is implemented in DESMOND differently, with the
# terms implemented in a new angle term independent of the harmonic term.
# Instead, we will add everything together afterwards into a single term
angle = self.create_forcetype(UreyBradleyAngle,[angle.atom1,angle.atom2,angle.atom3],
[0,0,angle.r._value,angle.kUB._value]) # this seems kludgy
# next, find if we already have this angle somewhere
matched_angle = molecule_type.match_angles(angle)
if matched_angle: # we found one, if false, we haven't seen it yet, we'll add later
if matched_angle.__class__ == HarmonicAngle:
angle.k = matched_angle.k
angle.theta = matched_angle.theta
molecule_type.angle_forces.remove(matched_angle)
elif angle.__class__ == HarmonicAngle:
matched_angle = molecule_type.match_angles(angle)
if matched_angle and matched_angle.__class__ == UreyBradleyAngle:
# just copy over the information into the old angle.
matched_angle.k = angle.k
matched_angle.theta = angle.theta
angle = None
elif direction == 'from' and angle.__class__ in [UreyBradleyAngle]:
params_harmpart = {k:v for (k,v) in params.items() if k in ['theta','k','c'] }
names.append(name)
paramlists.append(params_harmpart)
name = 'UB'
params['kUB'] *= canonical_force_scale
params_ubpart = {k:v for (k,v) in params.items() if k in ['r','kUB'] }
names.append(name)
paramlists.append(params_ubpart)
else:
if direction == 'from':
names.append(name)
paramlists.append(params)
if direction == 'into':
return angle
elif direction == 'from':
return names, paramlists
else:
raise UnsupportedFunctional(angle, ENGINE)
desmond_dihedrals = {'IMPROPER_HARM': ImproperHarmonicDihedral,
'PROPER_TRIG': TrigDihedral,
'IMPROPER_TRIG': TrigDihedral,
'OPLS_PROPER': TrigDihedral,
'OPLS_IMPROPER': TrigDihedral
}
lookup_desmond_dihedrals = {TrigDihedral: 'PROPER_TRIG',
ImproperHarmonicDihedral: 'IMPROPER_HARM'
}
lookup_desmond_dihedral = create_lookup(desmond_dihedrals)
desmond_dihedral_types = create_type(desmond_dihedrals)
def canonical_dihedral(self, dihedral, params, direction = 'into', name = None, molecule_type = None):
if direction == 'into':
canonical_force_scale = self.canonical_force_scale_into
phase = 'Read'
else:
try:
name = self.lookup_desmond_dihedrals[dihedral.__class__]
except:
raise UnsupportedFunctional(dihedral, ENGINE)
canonical_force_scale = self.canonical_force_scale_from
phase = 'Write'
if dihedral.__class__ in [ImproperHarmonicDihedral, TrigDihedral]:
if direction == 'into':
#Improper Dihedral 2 ---NOT SURE ABOUT MULTIPLICITY
if name == "IMPROPER_HARM":
dihedral.improper = True
elif name == "PROPER_TRIG" or name == "IMPROPER_TRIG":
if name == "IMPROPER_TRIG":
dihedral.improper = True
else:
dihedral.improper = False
elif name == "OPLS_PROPER" or name == "OPLS_IMPROPER":
# OPLS_IMPROPER actually isn't any different from OPLS_PROPER
dihedral.improper = False
try:
# we can have multiple parameters with DESMOND, and append if we do
dihedralmatch = molecule_type.match_dihedrals(dihedral)
# this will fail if it's the wrong type of dihedral
if dihedralmatch:
dihedralmatch.sum_parameters(dihedral)
except Exception as e:
logger.exception(e)
return dihedral
else:
names = []
paramlists = []
if dihedral.__class__ in [ImproperHarmonicDihedral]:
params['k'] = params['k'] * canonical_force_scale
name = 'IMPROPER_HARM'
elif dihedral.__class__ in [TrigDihedral]:
name = 'PROPER_TRIG'
if hasattr(dihedral,'improper'):
if getattr(dihedral,'improper'):
name = 'IMPROPER_TRIG'
names.append(name)
paramlists.append(params)
return names, paramlists
def __init__(self, cms_file, system=None):
"""
Initializes a DesmondParse object which serves to read in a CMS file
into the abstract representation.
        Args:
            cms_file: path to the Desmond CMS file to read from or write to.
            system: optional System object to populate; a new System is
                created if none is supplied.
        """
self.cms_file = cms_file
if not system:
system = System()
self.system = system
self.vdwtypes = []
self.vdwtypeskeys = []
self.viparr = 1
self.fmct_blockpos = []
self.atom_blockpos = []
self.bond_blockpos = []
self.ffio_blockpos = []
self.paramlist = ff.build_paramlist('desmond')
self.unitvars = ff.build_unitvars('desmond', self.paramlist)
self.canonical_force_scale_into = 2.0
self.canonical_force_scale_from = 0.5
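        # Note (added for clarity, not in the original source): the 2.0/0.5 pair
        # presumably reflects that Desmond writes harmonic force constants for the
        # form k*(x - x0)**2, while the canonical internal form uses 0.5*k*(x - x0)**2,
        # so k is doubled on read ('into') and halved on write ('from').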
self.atom_col_vars = ['i_m_mmod_type',
'r_m_x_coord',
'r_m_y_coord',
'r_m_z_coord',
'i_m_residue_number',
's_m_pdb_residue_name',
'i_m_atomic_number',
's_m_pdb_atom_name',
's_m_atom_name',
'r_ffio_x_vel',
'r_ffio_y_vel',
'r_ffio_z_vel'
]
self.atom_box_vars = ['r_chorus_box_ax',
'r_chorus_box_ay',
'r_chorus_box_az',
'r_chorus_box_bx',
'r_chorus_box_by',
'r_chorus_box_bz',
'r_chorus_box_cx',
'r_chorus_box_cy',
'r_chorus_box_cz'
]
def get_parameter_list_from_kwds(self, force, kwds):
return ff.get_parameter_list_from_kwds(force, kwds, self.paramlist)
def get_parameter_list_from_force(self, force):
return ff.get_parameter_list_from_force(force, self.paramlist)
def get_parameter_kwds_from_force(self, force):
return ff.get_parameter_kwds_from_force(force, self.get_parameter_list_from_force, self.paramlist)
def create_kwd_dict(self, forcetype_object, values, optvalues = None):
kwd = ff.create_kwd_dict(self.unitvars, self.paramlist, forcetype_object, values, optvalues = optvalues)
return kwd
def create_forcetype(self, forcetype_object, paramlist, values, optvalues = None):
return forcetype_object(*paramlist, **self.create_kwd_dict(forcetype_object, values, optvalues))
#LOAD FFIO BLOCKS IN FIRST (CONTAINS TOPOLOGY)
def parse_ffio_block(self,start,end):
# read in a ffio_block that isn't ffio_ff and split it into the
# commands and the values.
# lots of room for additional error checking here, such as whether
# each entry has the correct number of data values, whether they are the correct type, etc.
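        # For reference (illustrative sketch, not taken verbatim from a file), an
        # ffio block generally looks like:
        #   ffio_bonds[2] {
        #     i_ffio_ai
        #     i_ffio_aj
        #     s_ffio_funct
        #     r_ffio_c1
        #     r_ffio_c2
        #     :::
        #     1 1 2 HARM 1.0 300.0
        #     2 2 3 HARM 1.0 300.0
        #     :::
        #   }
        # i.e. a header naming the columns, ':::', the data rows, ':::', then '}'.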
# scroll to the next ffio entry
while not 'ffio_' in self.lines[start]:
# this is not an ffio block! or, we have reached the end of the file
if ('ffio_' not in self.lines[start]):
start+=1
if start >= end:
return 'Done with ffio', 0, 0, 0, start
self.lines[start].split()[1]
components = re.split('\W', self.lines[start].split()[0]) # get rid of whitespace, split on nonword
ff_type = components[0]
ff_number = int(components[1])
i = start+1
entry_data = []
while not ':::' in self.lines[i]:
entry_data.append(self.lines[i].split()[0])
i+=1
i+=1 # skip the separator we just found
entry_values = []
while not ':::' in self.lines[i]:
if self.lines[i].strip(): # skip the blank spaces.
entry_values.append(self.lines[i])
i+=1
while '}' not in self.lines[i]: # wait until we hit an end to the block
i+=1
i+=1 # step past the end of the block
return ff_type,ff_number,entry_data,entry_values,i
def store_ffio_data(self, ff_type, ff_number, entry_data, entry_values):
self.stored_ffio_data[ff_type] = dict()
self.stored_ffio_data[ff_type]['ff_type'] = ff_type
self.stored_ffio_data[ff_type]['ff_number'] = ff_number
self.stored_ffio_data[ff_type]['entry_data'] = entry_data
self.stored_ffio_data[ff_type]['entry_values'] = entry_values
def retrive_ffio_data(self, ff_type):
return [self.stored_ffio_data[ff_type]['ff_number'],
self.stored_ffio_data[ff_type]['entry_data'],
self.stored_ffio_data[ff_type]['entry_values']
]
def parse_vdwtypes(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
# molecule name is at sites, but vdwtypes come
# before sites. So we store info in vdwtypes and
# edit it later at sites. Eventually, we should
# probably move to a model where we store sections
# we can't use yet, and then process them in the
# order we want.
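        # Each vdwtypes entry is expected to look roughly like (illustrative values only):
        #   "1 C3 LJ12_6_sig_epsilon 3.39967 0.10940"
        # so split()[1] is the vdw type name and split()[3:] holds [sigma, epsilon].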
logger.debug("Parsing [ vdwtypes ] ...")
for j in range(ff_number):
self.vdwtypes.append(entry_values[j].split()[3:]) #THIS IS ASSUMING ALL VDWTYPES ARE STORED AS LJ12_6_SIG_EPSILON
self.vdwtypeskeys.append(entry_values[j].split()[1])
def parse_sites(self, type, molname, i, start):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
#correlate with atomtypes and atoms in GROMACS
logger.debug("Parsing [ sites ] ...")
#set indices to avoid continually calling list functions.
ivdwtype = entry_data.index('s_ffio_vdwtype')+1
icharge = entry_data.index('r_ffio_charge')+1
imass = entry_data.index('r_ffio_mass')+1
stemp = None
etemp = None
if 'i_ffio_resnr' in entry_data:
iresnum = entry_data.index('i_ffio_resnr')+1
iresidue = entry_data.index('s_ffio_residue')+1
cgnr = 0
        # create the atom type container for the data
current_molecule_type = MoleculeType(name=molname)
current_molecule_type.nrexcl = 0 #PLACEHOLDER FOR NREXCL...WE NEED TO FIND OUT WHERE IT IS
#MRS: basically, we have to figure out the furthest number of bonds out
# to exclude OR explicitly set gromacs exclusions. Either should work.
# for now, we'll go with the latter
self.system.add_molecule_type(current_molecule_type)
        current_molecule = Molecule(name=molname) # should this be the same molname as several lines up?
for j in range(ff_number):
split = entry_values[j].split()
if split[1] == "atom":
if ('i_ffio_resnr' in entry_data):
atom = Atom(int(split[0]), split[ivdwtype],
int(split[iresnum]),
split[iresidue])
else:
# No residuenr, means we will have identical atoms sharing this.
atom = Atom(int(split[0]), split[ivdwtype])
atom.atomtype = (0, split[ivdwtype])
atom.charge = (0, float(split[icharge])*units.elementary_charge)
atom.mass = (0, float(split[imass]) * units.amu)
stemp = float(self.vdwtypes[self.vdwtypeskeys.index(split[ivdwtype])][0]) * units.angstroms #was in angstroms
etemp = float(self.vdwtypes[self.vdwtypeskeys.index(split[ivdwtype])][1]) * units.kilocalorie_per_mole #was in kilocal per mol
atom.sigma = (0, stemp)
atom.epsilon = (0, etemp)
atom.cgnr = cgnr
cgnr+=1
newAtomType = None
current_molecule.add_atom(atom)
if not self.system._atomtypes.get(AbstractAtomType(atom.atomtype.get(0))): #if atomtype not in self.system, add it
if self.system.combination_rule == 'Multiply-C6C12':
sigma = (etemp/stemp)**(1/6)
epsilon = (stemp)/(4*sigma**6)
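                        # Clarifying note (not in the original source): assuming stemp and
                        # etemp hold C6 and C12 here, this inverts C6 = 4*eps*sig**6 and
                        # C12 = 4*eps*sig**12, giving sig = (C12/C6)**(1/6) and
                        # eps = C6/(4*sig**6).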
                        newAtomType = AtomCType(split[ivdwtype], #atomtype/name
split[ivdwtype], #bondtype
-1, #atomic_number
float(split[imass]) * units.amu, #mass
float(split[icharge]) * units.elementary_charge, #charge--NEED TO CONVERT TO ACTUAL UNIT
'A', #pcharge...saw this in top--NEED TO CONVERT TO ACTUAL UNITS
                                                sigma * units.kilocalorie_per_mole * units.angstroms**(6),
                                                epsilon * units.kilocalorie_per_mole * units.angstroms**(12))
elif (self.system.combination_rule == 'Lorentz-Berthelot') or (self.system.combination_rule == 'Multiply-Sigeps'):
newAtomType = AtomSigepsType(split[ivdwtype], #atomtype/name
split[ivdwtype], #bondtype
-1, #atomic_number
float(split[imass]) * units.amu, #mass--NEED TO CONVERT TO ACTUAL UNITS
float(split[icharge]) * units.elementary_charge, #charge--NEED TO CONVERT TO ACTUAL UNIT
'A', #pcharge...saw this in top--NEED TO CONVERT TO ACTUAL UNITS
stemp,
etemp)
self.system.add_atomtype(newAtomType)
if len(self.atom_blockpos) > 1: #LOADING M_ATOMS
if self.atom_blockpos[0] < start:
                # generate the new molecules for this block; the number of molecules
                # depends on the number of entries in ffio_sites (ff_number)
new_molecules = self.loadMAtoms(self.lines, self.atom_blockpos[0], i, current_molecule, ff_number)
self.atom_blockpos.pop(0)
index = 0
for molecule in new_molecules:
self.system.add_molecule(molecule)
# now construct an atomlist with all the atoms
for atom in molecule.atoms:
# does this need to be a deep copy?
# tmpatom = copy.deepcopy(atom)
# tmpatom.index = index
self.atomlist.append(atom)
index +=1
return self.system._molecule_types[molname]
def parse_bonds(self, type, current_molecule_type, i, start):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
if len(self.bond_blockpos) > 1: #LOADING M_BONDS
if self.bond_blockpos[0] < start:
for molecule in iter(current_molecule_type.molecules):
npermol = len(molecule.atoms)
break
# of the parsers, this is the only one that uses 'lines'. Can we remove?
current_molecule_type.bond_forces = self.loadMBonds(self.lines, self.bond_blockpos[0], i, npermol)
self.bond_blockpos.pop(0)
logger.debug("Parsing [ bonds ]...")
for j in range(ff_number):
entries = entry_values[j].split()
key = entries[3].upper()
atoms = [int(x) for x in entries[1:3]]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
atoms.extend(bondingtypes)
params = [float(x) for x in entries[4:6]]
new_bond = self.create_forcetype(self.desmond_bonds[key], atoms, params)
kwds = self.get_parameter_kwds_from_force(new_bond)
new_bond = self.canonical_bond(new_bond, kwds, direction = 'into', name = key)
# removing the placeholder from matoms (should be a better way to do this?)
if new_bond:
old_bond = current_molecule_type.match_bonds(new_bond)
if old_bond:
new_bond.order = old_bond.order
current_molecule_type.bond_forces.remove(old_bond)
current_molecule_type.bond_forces.add(new_bond)
def parse_pairs(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ pairs ] ...")
for j in range(ff_number):
ljcorr = False
coulcorr = False
new_pair = None
split = entry_values[j].split()
atoms = [int(x) for x in split[1:3]]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
params = atoms + bondingtypes
key = split[3].upper()
if key == "LJ12_6_SIG_EPSILON":
new_pair = self.create_forcetype(LjSigepsPair, params, [float(x) for x in split[4:6]])
elif key == "LJ" or key == "COULOMB":
# I think we just need LjSigepsPair, not LjPair?
new_pair = self.create_forcetype(LjDefaultPair, params, [0, 0])
if key == "LJ":
ljcorr = float(split[4])
new_pair.scaleLJ = ljcorr
elif key == "COULOMB":
coulcorr = float(split[4])
new_pair.scaleQQ = coulcorr
else:
warn("ReadError: didn't recognize type %s in line %s", split[3], entry_values[j])
# now, we catch the matches and read them into a single potential
pair_match = current_molecule_type.match_pairs(new_pair)
if pair_match: # we found a pair with the same atoms; let's insert or delete information as needed.
remove_old = False
remove_new = False
if isinstance(new_pair, LjSigepsPair) and isinstance(pair_match, LjDefaultPair) and pair_match.scaleQQ:
#Need to add old scaleQQ to this new pair
new_pair.scaleQQ = pair_match.scaleQQ
remove_old = True
elif isinstance(pair_match, LjSigepsPair) and isinstance(new_pair, LjDefaultPair) and new_pair.scaleQQ:
#Need to add the scaleQQ to the old pair
pair_match.scaleQQ = new_pair.scaleQQ
remove_new = True
elif isinstance(new_pair,LjDefaultPair) and isinstance(pair_match,LjDefaultPair):
if pair_match.scaleQQ and not new_pair.scaleQQ:
new_pair.scaleQQ = pair_match.scaleQQ
remove_old = True
elif not pair_match.scaleQQ and new_pair.scaleQQ:
pair_match.scaleQQ = new_pair.scaleQQ
remove_new = True
if pair_match.scaleLJ and not new_pair.scaleLJ:
new_pair.scaleLJ = pair_match.scaleLJ
remove_new = True
elif not pair_match.scaleLJ and new_pair.scaleLJ:
pair_match.scaleLJ = new_pair.scaleLJ
remove_old = True
if remove_old:
current_molecule_type.pair_forces.remove(pair_match)
if remove_new:
new_pair = None
if coulcorr:
self.system.coulomb_correction = coulcorr # need this for gromacs to have the global declared
#If we have difference between global and local, catch in gromacs.
if ljcorr:
self.system.lj_correction = ljcorr # need this for gromacs to have the global declared
#If we have difference between global and local, catch in gromacs.
if new_pair:
current_molecule_type.pair_forces.add(new_pair)
# IMPORTANT: we are going to assume that all pairs are both LJ and COUL.
# if COUL is not included, then it is because the charges are zero, and they will give the
# same energy. This could eventually be improved by checking versus the sites.
def parse_angles(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ angles ] ...")
for j in range(ff_number):
split = entry_values[j].split()
key = split[4].upper()
atoms = [int(x) for x in split[1:4]]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
atoms.extend(bondingtypes)
kwds = [float(x) for x in split[5:7]]
new_angle = self.create_forcetype(self.desmond_angles[key], atoms, kwds)
kwds = self.get_parameter_kwds_from_force(new_angle)
new_angle = self.canonical_angle(new_angle, kwds, direction = 'into', name = key,
molecule_type = current_molecule_type)
if new_angle:
current_molecule_type.angle_forces.add(new_angle)
def parse_dihedrals(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ dihedrals ] ...")
for j in range(ff_number):
split = entry_values[j].split()
new_dihedral = None
dihedral_type = None
atoms = [int(x) for x in split[1:5]]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
key = split[5].upper()
atoms.extend(bondingtypes)
# not sure how to put the following lines in canonical, since it expects keywords,
# not strings of variable length. will have to fix later.
if key == "IMPROPER_HARM":
kwds = [float(split[6]), 2*float(split[7])]
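                # Clarifying note (not in the original source): the factor of 2 on the force
                # constant appears to apply the Desmond->canonical scaling inline (cf.
                # canonical_force_scale_into), since canonical_dihedral does not rescale
                # IMPROPER_HARM parameters on read.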
elif key == "PROPER_TRIG" or key == "IMPROPER_TRIG":
kwds = [float(x) for x in split[6:14]]
elif key == "OPLS_PROPER" or key == "OPLS_IMPROPER":
# next 3 lines definitely not the right way to do it.
opls_kwds = {key: value for key, value in zip("c1 c2 c3 c4".split(), [units.kilocalorie_per_mole * float(s) for s in split[7:11]])}
opls_kwds = convert_dihedral_from_fourier_to_trig(opls_kwds)
kwds = np.zeros(8) # will fill this in later.
new_dihedral = self.create_forcetype(self.desmond_dihedrals[key], atoms, kwds)
# really should be some way to get rid of this code below
if key == "OPLS_PROPER" or key == "OPLS_IMPROPER":
for key in opls_kwds.keys():
setattr(new_dihedral,key,opls_kwds[key])
# really should be some way to get rid of this code above
kwds = self.get_parameter_kwds_from_force(new_dihedral)
new_dihedral = self.canonical_dihedral(new_dihedral, kwds, direction = 'into', name = key,
molecule_type = current_molecule_type)
if new_dihedral:
current_molecule_type.dihedral_forces.add(new_dihedral)
def parse_torsion_torsion(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ torsion-torsion ] ...")
for j in range(ff_number):
split = entry_values[j].split()
new_torsiontorsion = None
key = split[9].upper()
if key == "CMAP":
# we shouldn't need to try/accept because there are no units.
new_torsiontorsion = TorsionTorsionCMAP(int(split[1]),
int(split[2]),
int(split[3]),
int(split[4]),
int(split[5]),
int(split[6]),
int(split[7]),
int(split[8]),
'cmap',
int(split[10]))
else:
warn("ReadError: found unsupported torsion-torsion type in: %s" % str(line[i]))
if new_torsiontorsion:
current_molecule_type.torsiontorsion_forces.add(new_torsiontorsion)
def parse_exclusions(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ exclusions ] ...")
for j in range(ff_number):
temp = entry_values[j].split()
temp.remove(temp[0])
current_molecule_type.exclusions.add(tuple([int(x) for x in temp]))
def parse_restraints(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Warning: Parsing [ restraints] not yet implemented")
def parse_constraints(self, type, current_molecule_type):
ff_number, entry_data, entry_values = self.retrive_ffio_data(type)
logger.debug("Parsing [ constraints ] ...")
ctype = 1
funct_pos = 0
atompos = [] #position of atoms in constraints; spread all over the place
lenpos = [] #position of atom length; spread all over the place
tempatom = []
templength = []
templen = 0
for j in range(len(entry_data)):
if entry_data[j] == 's_ffio_funct':
funct_pos = ctype
elif 'i_ffio' in entry_data[j]:
atompos.append(ctype)
elif 'r_ffio' in entry_data[j]:
lenpos.append(ctype)
ctype+=1
for j in range(ff_number):
# water constraints actually get written to rigidwater (i.e. settles) constraints.
if 'HOH' in entry_values[j] or 'AH' in entry_values[j]:
split = entry_values[j].split()
tempatom = []
templength = []
for a in atompos:
if not '<>' in split[a]:
tempatom.append(int(split[a]))
else:
tempatom.append(None)
for l in lenpos:
if not '<>' in split[l]:
if 'AH' in entry_values[j]:
templength.append(float(split[l])*units.angstroms) # Check units?
else:
templength.append(None*units.angstroms)
constr_type = split[funct_pos]
if 'HOH' in constr_type:
dOH = float(split[lenpos[1]])
if dOH != float(split[lenpos[2]]):
logger.debug("Warning: second length in a rigid water specification (%s) is not the same as the first (%s)" % (split[lenpos[1]],split[lenpos[2]]))
angle = float(split[lenpos[0]])/(180/math.pi)
dHH = 2*dOH*math.sin(angle/2)
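                    # Worked example (illustrative, not from the source): for a TIP3P-like
                    # water with dOH = 0.9572 A and an H-O-H angle of 104.52 degrees,
                    # dHH = 2 * 0.9572 * sin(104.52/2 deg) ~= 1.514 A.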
                    params = [tempatom[0], tempatom[1], tempatom[2], dOH*units.angstroms, dHH*units.angstroms]
new_rigidwater = RigidWater(*params)
if new_rigidwater:
current_molecule_type.rigidwaters.add(new_rigidwater)
elif 'AH' in constr_type:
templen = int(list(constr_type)[-1])
params = [tempatom[0], tempatom[1], templength[0], constr_type]
for t in range(2,templen+1):
params.extend([tempatom[t],templength[t-1]])
new_constraint = Constraint(*params)
if new_constraint:
current_molecule_type.constraints.add(new_constraint)
else:
warn("ReadError: found unsupported constraint type %s" % (entry_values[j]))
def load_ffio_block(self, molname, start, end):
# Loading in ffio blocks from Desmond format
# Args:
# molname: name of current molecule
# start: beginning of where ffio_ff starts for each molecule
# end: ending of where ffio_ff ends for each molecule
i = start
j = start
self.stored_ffio_types = [] # a list of stored ffio_type to keep track
# of the ordering later
self.stored_ffio_data = {} # dictionary of stored ffio_entries
split = []
constraints = []
temp = []
current_molecule_type = None
#There are several sections which require sites information to
#process. We keep a flag for this so that we are aware when
#we have seen the sites
bPreambleRead = False
namecol = 0
combrcol = 0
vdwtypercol = 0
#DEFAULT VALUES WHEN CONVERTING TO GROMACS
self.system.nonbonded_function = 1
self.system.genpairs = 'yes'
logger.debug('Parsing [ molecule %s ]'%(molname))
logger.debug('Parsing [ ffio ]')
while i < end:
if not bPreambleRead:
# read the first section for the forces field info
while not (':::' in self.lines[i]):
if 's_ffio_name' in self.lines[i]:
namecol = i-start-1
elif 's_ffio_comb_rule' in self.lines[i]:
combrcol = i-start-1
elif 's_ffio_vdw_func' in self.lines[i]:
vdwtypercol = i-start-1
i+=1
i+=1 # skip the ':::'
# figure out combination rule
combrule = self.lines[i+combrcol].upper()
if "ARITHMETIC/GEOMETRIC" in combrule:
self.system.combination_rule = 'Lorentz-Berthelot'
elif "GEOMETRIC" in combrule:
self.system.combination_rule = 'Multiply-Sigeps'
elif "LJ12_6_C6C12" in combrule:
self.system.combination_rule = 'Multiply-C6C12'
if (vdwtypercol > 0):
vdwrule = self.lines[i+vdwtypercol]
# MISSING: need to identify vdw rule here -- currently assuming LJ12_6_sig_epsilon!
# skip to the next ffio entry
while not ("ffio" in self.lines[i]):
i+=1
bPreambleRead = True
ff_type, ff_number, entry_data, entry_values, i = self.parse_ffio_block(i, end)
self.stored_ffio_types.append(ff_type)
self.store_ffio_data(ff_type,ff_number, entry_data, entry_values)
# Reorder so 'vdwtypes' is first, then 'sites'. Could eventually get some simplification
# by putting sites first, but too much rewriting for now.
self.stored_ffio_types.insert(0, self.stored_ffio_types.pop(self.stored_ffio_types.index('ffio_sites')))
self.stored_ffio_types.insert(0, self.stored_ffio_types.pop(self.stored_ffio_types.index('ffio_vdwtypes')))
# now process all the data
for type in self.stored_ffio_types:
if type in self.sysDirective:
params = [type]
if type == 'ffio_sites':
params += [molname, i, start]
else:
params += [current_molecule_type]
if type == 'ffio_bonds':
params += [i, start]
if type == 'ffio_sites':
current_molecule_type = self.sysDirective[type](*params)
else:
self.sysDirective[type](*params)
elif type == 'Done with ffio':
continue
else:
while '}' not in self.lines[i]:
i+=1 # not the most robust if there is nesting in a particular pattern
def loadMBonds(self, lines, start, end, npermol): #adds new bonds for each molecule in System
# Loading in m_bonds in Desmond format
# Args:
# lines: list of all data in CMS format
# start: beginning of where m_bonds starts for each molecule
# end: ending of where m_bondsends for each molecule
logger.debug("Parsing [ m_bonds ] ...")
bg = False
newbond_force = None
split = []
i = start
bonds = set()
while i < end:
if ':::' in lines[i]:
if bg:
break
else:
bg = True
i+=1
if bg:
split = lines[i].split()
atomi = int(split[1])
atomj = int(split[2])
bondingtypei = self.atomlist[atomi-1].name
bondingtypej = self.atomlist[atomj-1].name
params = [atomi, atomj, bondingtypei, bondingtypej]
if atomi > npermol: # we've collected the number of atoms per molecule. Exit.
break
order = int(split[3])
kwd = [0, 0]
optkwd = {'order': order, 'c': False}
new_bond = self.create_forcetype(HarmonicBond, params, kwd, optkwd)
bonds.add(new_bond)
i+=1
return bonds
def loadMAtoms(self, lines, start, end, currentMolecule, slength): #adds positions and such to atoms in each molecule in System
# Loading in m_atoms from Desmond format
# Args:
# lines: list of all data in CMS format
# start: beginning of where m_atoms starts for each molecule
# end: ending of where m_atoms ends for each molecule
# currentMolecule
# slength: number of unique atoms in m_atoms, used to calculate repetitions
logger.debug("Parsing [ m_atom ] ...")
i = start
bg = False
pdbaname = ""
aname = ""
        mult = int(re.split('\W',lines[start].split()[0])[1])//slength
cols = dict()
while i < end:
if ':::' in lines[i]:
i+=1
break
else:
if 'First column' in lines[i]:
start += 1
for c in self.atom_col_vars:
if c in lines[i]:
logger.debug(" Parsing [ %s ] ..." % c)
cols[c] = i - start
break
i+=1
atom = None
newMoleculeAtoms = []
j = 0
logger.debug(" Parsing atoms...")
molecules = []
while j < mult:
newMolecule = copy.deepcopy(currentMolecule)
for atom in newMolecule.atoms:
if ':::' in lines[i]:
break
else:
aline = split_with_quotes(lines[i])
atom.residue_index = int(aline[cols['i_m_residue_number']])
atom.residue_name = aline[cols['s_m_pdb_residue_name']].strip()
try:
atom.atomic_number = int(aline[cols['i_m_atomic_number']])
except Exception as e:
                        logger.exception(e) # EDZ: was just a pass statement before; the exception is now recorded but suppressed
atom.position = [float(aline[cols['r_m_x_coord']]) * units.angstroms,
float(aline[cols['r_m_y_coord']]) * units.angstroms,
float(aline[cols['r_m_z_coord']]) * units.angstroms]
atom.velocity = [0.0 * units.angstroms * units.picoseconds**(-1),
0.0 * units.angstroms * units.picoseconds**(-1),
0.0 * units.angstroms * units.picoseconds**(-1)]
if 'r_ffio_x_vel' in cols:
atom.velocity[0] = float(aline[cols['r_ffio_x_vel']]) * units.angstroms * units.picoseconds**(-1)
if 'r_ffio_y_vel' in cols:
atom.velocity[1] = float(aline[cols['r_ffio_y_vel']]) * units.angstroms * units.picoseconds**(-1)
if 'r_ffio_z_vel' in cols:
atom.velocity[2] = float(aline[cols['r_ffio_z_vel']]) * units.angstroms * units.picoseconds**(-1)
if 's_m_pdb_atom_name' in cols:
pdbaname = aline[cols['s_m_pdb_atom_name']].strip()
if 's_m_atom_name' in cols:
aname = aline[cols['s_m_atom_name']].strip()
if re.match('$^',pdbaname) and not re.match('$^',aname):
atom.name = aname
elif re.match('$^',aname) and not re.match('$^',pdbaname):
atom.name = pdbaname
elif re.search("\d+",pdbaname) and not re.search("\d+",aname):
if re.search("\D+",pdbaname) and re.search("\w+",pdbaname):
atom.name = pdbaname
else:
atom.name = aname
elif re.search("\d+",aname) and not re.search("\d+",pdbaname):
if re.search("\D+",aname) and re.search("\w+",aname):
atom.name = aname
else:
atom.name = pdbaname
elif re.match('$^',pdbaname) and re.match('$^',aname):
atom.name = "None"
else:
atom.name = aname #doesn't matter which we choose, so we'll go with atom name instead of pdb
i+=1
molecules.append(newMolecule)
j+=1
return molecules
def load_box_vector(self, lines, start, end):
# Loading Box Vector
# Create a Box Vector to load into the System
# Args:
# lines: all the lines of the file stored in an array
# start: starting position
# end: ending position
v = np.zeros([3, 3]) * units.angstroms
for i, line in enumerate(lines[start:end]):
if self.atom_box_vars[0] in line:
startboxlabel = i
if ':::' in line:
endlabel = i + start
break
startbox = startboxlabel + endlabel
for nvec, line in enumerate(lines[startbox:startbox + 9]):
j = nvec // 3
k = nvec % 3
v[j, k] = float(line.strip()) * units.angstrom
self.system.box_vector = v
def read(self):
        # Load in data from the CMS file given at construction (self.cms_file),
        # parse it in Desmond format, and return the populated System.
molnames = []
with open(self.cms_file, 'r') as fl:
self.lines = list(fl)
i=0
j=0
self.atomtypes = dict()
self.atomlist = []
# figure out on which lines the different blocks begin and end.
for line in self.lines:
if 'f_m_ct' in line:
if j > 0:
self.fmct_blockpos.append(i)
j+=1
if 'm_atom' in line and not (('i_m' in line) or ('s_m' in line)):
if j > 1:
self.atom_blockpos.append(i)
j+=1
if 'm_bond' in line:
if j > 2:
self.bond_blockpos.append(i)
j+=1
if 'ffio_ff' in line:
if j > 2:
self.ffio_blockpos.append(i)
j+=1
i+=1
i-=1
self.fmct_blockpos.append(i)
self.atom_blockpos.append(i)
self.bond_blockpos.append(i)
self.ffio_blockpos.append(i)
self.sysDirective = {'ffio_vdwtypes': self.parse_vdwtypes,
'ffio_sites': self.parse_sites,
'ffio_bonds': self.parse_bonds,
'ffio_pairs': self.parse_pairs,
'ffio_angles': self.parse_angles,
'ffio_dihedrals': self.parse_dihedrals,
'ffio_torsion_torsion': self.parse_torsion_torsion,
'ffio_constraints': self.parse_constraints,
'ffio_exclusions': self.parse_exclusions,
'ffio_restraints': self.parse_restraints
}
#LOADING Ffio blocks
logger.debug("Reading ffio block...")
#MRS: warning -- currently no check to avoid duplicated molecule names. Investigate.
i = 0
j = 0
while i < (len(self.ffio_blockpos)-1):
j = self.fmct_blockpos[i]
while ':::' not in self.lines[j]:
j+=1
# make sure we have reasonable molecular names.
molname = self.lines[j+1].strip()
molname = molname.replace("\"","") # get rid of quotation marks so we can find unique names
if molname == "":
molname = "Molecule_"+str(len(molnames)+1)
molnames.append(molname)
self.load_ffio_block(molname, self.ffio_blockpos[i], self.fmct_blockpos[i+1]-1)
i+=1
i = 0
#LOAD RAW BOX VECTOR-Same throughout cms
logger.debug("Reading Box Vector...")
self.load_box_vector(self.lines, self.fmct_blockpos[0], self.atom_blockpos[0])
return self.system
def write_vdwtypes_and_sites(self, molecule):
#-ADDING VDWTYPES AND SITES
i = 0
sites = []
vdwtypes = []
sig = None
ep = None
stemp = None
etemp = None
combrule = self.system.combination_rule
for atom in molecule.atoms:
i+=1
if atom.residue_index:
sites.append(' %3d %5s %9.8f %9.8f %2s %1d %4s\n' % (
i, 'atom',
atom._charge[0].value_in_unit(units.elementary_charge),
atom._mass[0].value_in_unit(units.atomic_mass_unit),
atom.atomtype[0], atom.residue_index, atom.residue_name))
else:
sites.append(' %3d %5s %9.8f %9.8f %2s\n' % (
i, 'atom',
atom._charge[0].value_in_unit(units.elementary_charge),
atom._mass[0].value_in_unit(units.atomic_mass_unit),
atom.atomtype[0]))
sig = float(atom.sigma[0].value_in_unit(units.angstroms))
ep = float(atom.epsilon[0].value_in_unit(units.kilocalorie_per_mole))
if combrule == 'Multiply-C6C12': #MRS: seems like this should be automated more?
stemp = ep * (4 * (sig**6))
etemp = stemp * (sig**6)
elif combrule in ['Lorentz-Berthelot','Multiply-Sigeps']:
stemp = sig
etemp = ep
if ' %2s %18s %8.8f %8.8f\n' % (atom.atomtype[0], "LJ12_6_sig_epsilon", float(stemp), float(etemp)) not in vdwtypes:
vdwtypes.append(' %2s %18s %8.8f %8.8f\n' % (atom.atomtype[0], "LJ12_6_sig_epsilon", float(stemp), float(etemp)))
lines = []
logger.debug(" -Writing vdwtypes...")
lines.append(" ffio_vdwtypes[%d] {\n"%(len(vdwtypes)))
lines.append(" s_ffio_name\n")
lines.append(" s_ffio_funct\n")
lines.append(" r_ffio_c1\n")
lines.append(" r_ffio_c2\n")
lines.append(" :::\n")
i = 0
for v in vdwtypes:
i+=1
lines.append(' %d%2s'%(i,v))
lines.append(" :::\n")
lines.append(" }\n")
logger.debug(" -Writing sites...")
lines.append(" ffio_sites[%d] {\n"%(len(sites)))
lines.append(" s_ffio_type\n")
lines.append(" r_ffio_charge\n")
lines.append(" r_ffio_mass\n")
lines.append(" s_ffio_vdwtype\n")
if len(sites[0].split()) > 5: # fix this to explicitly ask if resnr is in here rather than length
lines.append(" i_ffio_resnr\n")
lines.append(" s_ffio_residue\n")
lines.append(" :::\n")
for s in sites:
lines.append(' %s'%(s))
lines.append(" :::\n")
lines.append(" }\n")
return lines
def write_bonds(self, moleculetype):
#-ADDING BONDS
logger.debug(" -Writing bonds...")
dlines = list()
hlines = list()
hlines.append('ffio_bonds_placeholder\n')
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" s_ffio_funct\n")
hlines.append(" r_ffio_c1\n")
hlines.append(" r_ffio_c2\n")
hlines.append(" :::\n")
i = 0
bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1, x.atom2))
for bond in bondlist:
atoms = [bond.atom1,bond.atom2]
kwds = self.get_parameter_kwds_from_force(bond)
names, paramlists = self.canonical_bond(bond, kwds, direction = 'from')
# could in general return multiple types and paramlists
for nbond, name in enumerate(names):
i += 1
converted_bond = self.desmond_bonds[name](*atoms, **paramlists[nbond])
line = ' %d %d %d %s' %(i, atoms[0], atoms[1], name)
bond_params = self.get_parameter_list_from_force(converted_bond)
param_units = self.unitvars[converted_bond.__class__.__name__]
for param, param_unit in zip(bond_params, param_units):
line += " %15.8f" % (param.value_in_unit(param_unit))
line += '\n'
dlines.append(line)
header = " ffio_bonds[%d] {\n" % (i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_angles(self, moleculetype):
#-ADDING ANGLES
logger.debug(" -Writing angles...")
dlines = list()
hlines = list()
hlines.append(" ffio_angles_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" i_ffio_ak\n")
hlines.append(" s_ffio_funct\n")
hlines.append(" r_ffio_c1\n")
hlines.append(" r_ffio_c2\n")
hlines.append(" :::\n")
i = 0
anglelist = sorted(list(moleculetype.angle_forces), key=lambda x: (x.atom1,x.atom2,x.atom3))
for angle in anglelist:
atoms = [angle.atom1,angle.atom2,angle.atom3]
kwds = self.get_parameter_kwds_from_force(angle)
names, paramlists = self.canonical_angle(angle, kwds, direction = 'from')
# could return multiple names and kwd lists
for nangle, name in enumerate(names):
i+=1
converted_angle = self.desmond_angles[name](*atoms, **paramlists[nangle])
line = ' %d %d %d %d %s' % (i, atoms[0], atoms[1], atoms[2], name)
angle_params = self.get_parameter_list_from_force(converted_angle)
param_units = self.unitvars[converted_angle.__class__.__name__]
for param, param_unit in zip(angle_params, param_units):
line += " %15.8f" % (param.value_in_unit(param_unit))
line += '\n'
dlines.append(line)
header = " ffio_angles[%d] {\n" % (i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_dihedrals(self, moleculetype):
#-ADDING DIHEDRALS
logger.debug(" -Writing dihedrals...")
dlines = list()
hlines = list()
hlines.append(" ffio_dihedrals_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" i_ffio_ak\n")
hlines.append(" i_ffio_al\n")
hlines.append(" s_ffio_funct\n")
        # assume a maximum of 8 dihedral parameter columns (r_ffio_c0..c7) to simplify things for now
        hmax = 8
for ih in range(hmax):
hlines.append(" r_ffio_c%d\n" %(ih))
hlines.append(" :::\n")
i = 0
#sorting by first index
dihedrallist = sorted(list(moleculetype.dihedral_forces), key=lambda x: (x.atom1, x.atom2, x.atom3, x.atom4))
# first, identify the number of terms we will print
for dihedral in dihedrallist:
atoms = [dihedral.atom1,dihedral.atom2,dihedral.atom3,dihedral.atom4]
kwds = self.get_parameter_kwds_from_force(dihedral)
names, paramlists = self.canonical_dihedral(dihedral, kwds, direction = 'from')
for ndihedrals, name in enumerate(names):
i+=1
line = ' %d %d %d %d %d %s' %(i, atoms[0], atoms[1], atoms[2], atoms[3], name)
converted_dihedral= self.desmond_dihedrals[name](*atoms,**paramlists[ndihedrals])
dihedral_params = self.get_parameter_list_from_force(converted_dihedral)
param_units = self.unitvars[converted_dihedral.__class__.__name__]
for param, param_unit in zip(dihedral_params, param_units):
line += " %15.8f" % (param.value_in_unit(param_unit))
for j in range(8-len(dihedral_params)):
line += " %6.3f" % (0.0)
line += '\n'
dlines.append(line)
header = " ffio_dihedrals[%d] {\n" % (i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_torsion_torsion(self, moleculetype):
# adding TORSION-TORSION terms
logger.debug(" -Writing torsion-torsions...")
hlines = list()
dlines = list()
hlines.append(" ffio_torsion_torsion_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" i_ffio_ak\n")
hlines.append(" i_ffio_al\n")
hlines.append(" i_ffio_am\n")
hlines.append(" i_ffio_an\n")
hlines.append(" i_ffio_ao\n")
hlines.append(" i_ffio_ap\n")
hlines.append(" s_ffio_func\n")
hlines.append(" i_ffio_c1\n")
hlines.append(" :::\n")
i = 0
for torsiontorsion in moleculetype.torsiontorsion_forces:
i+=1
# only type of torsion/torsion is CMAP currently
dlines.append(' %d %d %d %d %d %d %d %d %d %s %d\n' % (
i,
int(torsiontorsion.atom1), int(torsiontorsion.atom2),
int(torsiontorsion.atom3), int(torsiontorsion.atom4),
int(torsiontorsion.atom5), int(torsiontorsion.atom6),
int(torsiontorsion.atom7), int(torsiontorsion.atom8),
'cmap', torsiontorsion.chart))
header = " ffio_torsion_torsion[%d] {\n"%(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
dlines = list()
# write out the cmap terms: for now, write out all the
# charts. Later, we can scan through and only print out the ones we use
# and only include the relevant charts
if (i > 0): # only include cmap_charts if we need to
cmap_charts = cmap_parameters.get_cmap_charts()
for chart in cmap_charts:
chartlines = chart.split('\n')
for line in chartlines:
dlines.append(line + '\n')
hlines.extend(dlines)
return hlines
def write_exclusions(self, moleculetype):
#ADDING EXCLUSIONS
i = 0
logger.debug(" -Writing exclusions...")
hlines = list()
dlines = list()
hlines.append(" ffio_exclusions_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" :::\n")
if moleculetype.nrexcl == 0:
# Should probably be determined entirely by the bonds,
# since settles now adds bonds. For now, leave this in
# for Desmond to Desmond conversion, where nrexcl is not
# determined. Probably should switch eventually.
exclusionlist = sorted(list(moleculetype.exclusions), key=lambda x: (x[0], x[1]))
for exclusion in moleculetype.exclusions:
i+=1
dlines.append(' %d %d %d\n'%(i, int(exclusion[0]), int(exclusion[1])))
else:
if moleculetype.nrexcl > 4:
warn("Can't handle more than excluding 1-4 interactions right now!")
fullbondlist = []
fullbondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1, x.atom2))
# exclude HarmonicPotential types, which do not have exclusions.
bondlist = [bond for bond in fullbondlist if (not isinstance(bond, HarmonicPotentialBond))]
# first, figure out the first appearance of each atom in the bondlist
currentatom = 0
atompos = []
bondindex = 0
for molecule in moleculetype.molecules:
nsize = len(molecule.atoms)+1
break # only need the first
atombonds = np.zeros([nsize,8],int) # assume max of 8 for now
natombonds = np.zeros(nsize,int)
for bond in bondlist:
atombonds[bond.atom1,natombonds[bond.atom1]] = bond.atom2
natombonds[bond.atom1] += 1
atombonds[bond.atom2,natombonds[bond.atom2]] = bond.atom1
natombonds[bond.atom2] += 1
for atom in range(1,nsize):
atomexclude = set() # will be a unique set
# need to make this recursive! And there must be a better algorithm
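                # Illustrative example (not in the original source): for a linear chain
                # 1-2-3-4-5 with nrexcl = 3, atom 1 ends up excluding atoms 2 (1 bond away),
                # 3 (2 bonds) and 4 (3 bonds), but not atom 5.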
for j1 in range(natombonds[atom]):
toatom1 = atombonds[atom,j1];
atomexclude.add(toatom1)
if moleculetype.nrexcl > 1:
for j2 in range(natombonds[toatom1]):
toatom2 = atombonds[toatom1,j2]
atomexclude.add(toatom2)
if moleculetype.nrexcl > 2:
for j3 in range(natombonds[toatom2]):
toatom3 = atombonds[toatom2,j3]
atomexclude.add(toatom3)
if moleculetype.nrexcl > 3:
for j4 in range(natombonds[toatom3]):
                                            toatom4 = atombonds[toatom3,j4]
atomexclude.add(toatom4)
uniqueexclude = set(atomexclude)
for a in atomexclude:
if (a > atom):
i+=1
dlines.append(' %d %d %d\n' % (i, atom, a))
header = " ffio_exclusions[%d] {\n"%(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_pairs(self, moleculetype):
#-ADDING PAIRS
logger.debug(" -Writing pairs...")
dlines = list()
hlines = list()
hlines.append("ffio_pairs_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" s_ffio_funct\n")
hlines.append(" r_ffio_c1\n")
hlines.append(" r_ffio_c2\n")
hlines.append(" :::\n")
i = 0
for pair in sorted(list(moleculetype.pair_forces), key=lambda x: (x.atom1, x.atom2)):
atoms = ' %d %d ' % (pair.atom1, pair.atom2)
# first, the COUL part.
if pair.__class__ in (LjDefaultPair, LjqDefaultPair, LjSigepsPair, LjCPair):
# the first two appear to be duplicates: consider merging.
if pair.scaleQQ:
scaleQQ = pair.scaleQQ
else:
scaleQQ = self.system.coulomb_correction
i += 1
                dlines.append(' %d %s Coulomb %10.8f <>\n' % (i, atoms, scaleQQ))
            elif pair.__class__ in (LjqSigepsPair, LjqCPair):
                warn("Desmond does not support pairtype %s!" % pair.__class__.__name__) # may not be true?
else:
warn("Unknown pair type %s!",pair.__class__.__name__ )
# now the LJ part.
if pair.__class__ in (LjDefaultPair,LjqDefaultPair):
if pair.scaleLJ:
scaleLJ = pair.scaleLJ
else:
scaleLJ = self.system.lj_correction
i += 1
                dlines.append(' %d %s LJ %10.8f <>\n' % (i, atoms, scaleLJ))
elif pair.__class__ in (LjSigepsPair, LjqSigepsPair, LjCPair, LjqCPair):
# Check logic here -- not clear that we can correctly determine which type it is.
# Basically, I think it's whether scaleLJ is defined or not.
if pair.__class__ in (LjCPair, LjqCPair):
epsilon = 0.25 * (pair.C6**2) / pair.C12 # (16*eps^2*sig^12 / 4 eps*sig^12) = 4 eps
sigma = (0.25 * pair.C6 / epsilon)**(1.0/6.0) # (0.25 * 4 eps sig^6 / eps)^(1/6)
                elif pair.__class__ in (LjSigepsPair, LjqSigepsPair):
epsilon = pair.epsilon
sigma = pair.sigma
i += 1
                dlines.append(' %d %s LJ12_6_sig_epsilon %10.8f %10.8f\n' % (i, atoms,
                              sigma.value_in_unit(units.angstroms),
                              epsilon.value_in_unit(units.kilocalorie_per_mole)))
else:
warn("Unknown pair type %s!",pair.__class__.__name__ )
header = " ffio_pairs[%d] {\n"%(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_constraints(self, moleculetype):
#ADDING CONSTRAINTS
logger.debug(" -Writing constraints...")
isHOH = False
if len(moleculetype.rigidwaters) > 0:
alen = 3
clen = 3
else:
alen = 0
clen = 0
alen_max = alen
clen_max = clen
for constraint in moleculetype.constraints:
if constraint.type[0:2] == 'AH':
alen = constraint.n+1
clen = alen-1
if alen_max < alen:
alen_max = alen
clen_max = clen
# we now know the maximum length of all constraint types
# not sure we need to sort these, but makes it easier to debug
i = 0
constraintlist = sorted(list(moleculetype.constraints),key=lambda x: x.atom1)
dlines = list()
hlines = list()
for constraint in constraintlist: #calculate the max number of atoms in constraint
i+=1
if constraint.type == 'HOH':
cline = ' %d %d %d %d ' % (i,int(constraint.atom1),int(constraint.atom2),int(constraint.atom3))
for j in range(alen_max-3):
cline += '0 '
cline += constraint.type
cline += ' %10.8f' % (float(constraint.length1.value_in_unit(units.degrees)))
cline += ' %10.8f' % (float(constraint.length2.value_in_unit(units.angstroms)))
cline += ' %10.8f' % (float(constraint.length2.value_in_unit(units.angstroms)))
for j in range(clen_max-3):
cline += ' <>'
elif constraint.type[0:2] == 'AH':
alen = constraint.n+1
clen = alen-1
catoms = [constraint.atom1]
clengths = []
for j in range(1,alen+1):
atomname = 'atom'+str(j+1)
lengthname = 'length'+str(j)
if hasattr(constraint,atomname):
catoms.append(getattr(constraint,atomname))
clengths.append(getattr(constraint,lengthname))
cline = ' %d ' % i
for j in range(alen):
cline += ' %d ' % int(catoms[j])
for j in range(alen,alen_max):
cline += ' <> '
cline += constraint.type
for j in range(clen):
cline += ' %10.8f' % (float(clengths[j].value_in_unit(units.angstroms)))
for j in range(clen,clen_max):
cline += ' <>'
cline += '\n'
dlines.append(cline)
# now need to add the constraints specified through settles. Only one settles per molecule
for rigidwater in moleculetype.rigidwaters:
i += 1
# Assumes the water arrangement O, H, H, which might not always be the case. Consider adding detection.
cline = ' %d %d %d %d ' % (i, rigidwater.atom1, rigidwater.atom2, rigidwater.atom3)
for j in range(alen_max-3):
cline += '0 '
cline += ' HOH '
dOH = rigidwater.dOH.value_in_unit(units.angstroms)
dHH = rigidwater.dHH.value_in_unit(units.angstroms)
angle = 2.0*math.asin(0.5*dHH/dOH)*(180/math.pi) # could automate conversion. . .
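            # Clarifying note (not in the original source): this is the inverse of the
            # dHH = 2*dOH*sin(angle/2) relation used when reading constraints; with
            # dOH ~= 0.9572 A and dHH ~= 1.514 A it recovers roughly 104.5 degrees.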
cline += " %.8f %.8f %.8f" % (angle,dOH,dOH)
            for j in range(alen,alen_max):
                cline += ' 0.0'
            cline += '\n'
dlines.append(cline)
hlines.append(" ffio_constraints[%d] {\n"%(i))
if (i==0):
hlines.append(" :::\n")
else:
letters = ['i','j','k','l','m','n','o','p','q']
for j in range(alen_max):
hlines.append(' i_ffio_a%s\n'%letters[j])
hlines.append(' s_ffio_funct\n')
for j in range(clen_max):
hlines.append(' r_ffio_c%d\n' %(j+1))
hlines.append(" :::\n")
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write(self):
        # Write out this topology in Desmond CMS format to the file given at
        # construction (self.cms_file); this method takes no arguments.
lines = list()
pos = 0
name = ''
logger.warning("MacroModel atom type is not defined in other files, is set to 1 for all cases as it must be validly defined for desmond files to run. However, it does not affect the energies.")
# for all CMS files
lines.append('{\n')
lines.append(' s_m_m2io_version\n')
lines.append(' :::\n')
lines.append(' 2.0.0\n')
lines.append('}\n')
#FIRST F_M_CT BLOCK
logger.debug("Writing first f_m_ct...")
lines.append('f_m_ct {\n')
lines.append(' s_m_title\n')
for c in self.atom_box_vars:
lines.append(' %s\n' % c)
lines.append(' s_ffio_ct_type\n')
lines.append(' :::\n')
#box vector
bv = self.system.box_vector
lines.append(' "Desmond file converted by InterMol"\n')
for bi in range(3):
for bj in range(3):
lines.append('%22s\n' % float(bv[bi][bj].value_in_unit(units.angstroms)))
lines.append(' full_system\n')
#M_ATOM
        apos = len(lines) #pos of where m_atom will be; will need to overwrite later based on the number of atoms
lines.append('m_atom\n')
lines.append(' # First column is atom index #\n')
for vars in self.atom_col_vars:
if '_pdb_atom' not in vars:
lines.append(' %s\n' % vars)
lines.append(' :::\n')
i = 0
nmol = 0
totalatoms = []
totalatoms.append(0)
for moleculetype in self.system._molecule_types.values():
for molecule in moleculetype.molecules:
for atom in molecule.atoms:
i += 1
line = ' %d %d' % (i,1) #HAVE TO PUT THE 1 HERE OR ELSE DESMOND DIES, EVEN THOUGH IT DOESN'T USE IT
for j in range(3):
line += " %10.8f" % (float(atom._position[j].value_in_unit(units.angstroms)))
line += " %2d %4s %2d %2s" % (
atom.residue_index,
'"%s"'%atom.residue_name,
atom.atomic_number,
'"%s"'%atom.name)
if np.any(atom._velocity):
for j in range(3):
line += " %10.8f" % (float(atom._velocity[j].value_in_unit(units.angstroms / units.picoseconds)))
else:
for j in range(3):
line += " %10.8f" % (0)
lines.append(line + '\n')
totalatoms.append(i)
lines[apos] = ' m_atom[%d] {\n'%(i)
lines.append(' :::\n')
lines.append(' }\n')
bpos = len(lines)
i = 0
#M_BOND
hlines = list()
dlines = list()
hlines.append(' m_bond_placeholder\n')
hlines.append(' i_m_from\n')
hlines.append(' i_m_to\n')
hlines.append(' i_m_order\n')
hlines.append(' i_m_from_rep\n')
hlines.append(' i_m_to_rep\n')
hlines.append(' :::\n')
i = 0
nonecnt = 0
for moleculetype in self.system._molecule_types.values():
# sort the bondlist because Desmond requires the first time a bond is listed to have
# the atoms in ascending order
repeatmol = len(moleculetype.molecules)
            #MRS: needs to be fixed; gromacs loads in one set of bonds per molecule; desmond loads in all
# OrderedSet isn't indexable so get the first molecule by iterating.
for molecule in moleculetype.molecules:
atoms_per_molecule = len(molecule.atoms)
# all should have the same, once we have info from one, break.
break
bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1,x.atom2))
for n in range(repeatmol):
for bond in bondlist:
if bond and bond.order:
i += 1
dlines.append(' %d %d %d %d %d %d\n'
%(i,
bond.atom1 + n*atoms_per_molecule + totalatoms[nmol],
bond.atom2 + n*atoms_per_molecule + totalatoms[nmol],
int(bond.order),
1,
1))
elif not bond:
nonecnt+=1
if nonecnt > 0:
logger.debug('FOUND %d BONDS THAT DO NOT EXIST' % nonecnt)
nmol +=1
hlines[0] = ' m_bond[%d] {\n' % i
if (i > 0):
lines.extend(hlines)
lines.extend(dlines)
lines.append(' :::\n')
lines.append(' }\n')
lines.append('}\n')
#WRITE OUT ALL FFIO AND F_M_CT BLOCKS
for molecule_name, moleculetype in self.system.molecule_types.items():
logger.debug('Writing molecule block %s...' % (molecule_name))
#BEGINNING BLOCK
logger.debug(" Writing f_m_ct...")
lines.append('f_m_ct {\n')
lines.append(' s_m_title\n')
for c in self.atom_box_vars:
lines.append(' %s\n' % c)
lines.append(' s_ffio_ct_type\n')
lines.append(' :::\n')
lines.append(' "' + molecule_name + '"\n')
for bi in range(3):
for bj in range(3):
lines.append('%22s\n' % float(bv[bi][bj].value_in_unit(units.angstroms)))
lines.append(' solute\n')
#M_ATOMS
logger.debug(" Writing m_atoms...")
            apos = len(lines) #pos of where m_atom will be; will need to overwrite later based on the number of atoms
lines.append('m_atom\n')
lines.append(' # First column is atom index #\n')
for vars in self.atom_col_vars:
if '_pdb_atom' not in vars: # kludge, have better filter
lines.append(' %s\n' % vars)
lines.append(' :::\n')
i = 0
for molecule in moleculetype.molecules:
for atom in molecule.atoms:
i += 1
#NOT SURE WHAT TO PUT FOR MMOD TYPE; 1 is currently used.
#This can't be determined currently from the information provided,
                    # unless it was stored previously; nor is it used by desmond
line = ' %d %d' % (i,1)
for j in range(3):
line += " %10.8f" % (float(atom._position[j].value_in_unit(units.angstroms)))
line += " %2d %4s %2d %2s" % (
atom.residue_index,
'"%s"'%atom.residue_name,
atom.atomic_number,
'"%s"'%atom.name)
if np.any(atom._velocity):
for j in range(3):
line += " %10.8f" % (float(atom._velocity[j].value_in_unit(units.angstroms / units.picoseconds)))
else:
for j in range(3):
line += " %10.8f" % (0)
lines.append(line + '\n')
lines[apos] = ' m_atom[%d] {\n'%(i)
lines.append(' :::\n')
lines.append(' }\n')
#M_BONDS
logger.debug(" Writing m_bonds...")
hlines = list()
dlines = list()
hlines.append('m_bond_placeholder\n')
hlines.append(' i_m_from\n')
hlines.append(' i_m_to\n')
hlines.append(' i_m_order\n')
hlines.append(' i_m_from_rep\n')
hlines.append(' i_m_to_rep\n')
hlines.append(' :::\n')
i = 0
nonecnt = 0
repeatmol = len(moleculetype.molecules)
for molecule in moleculetype.molecules:
atoms_per_molecule = len(molecule.atoms)
break
bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: x.atom1)
for n in range(repeatmol):
for bond in bondlist:
if bond and bond.order:
i += 1
dlines.append(' %d %d %d %d %d %d\n'
%(i,
bond.atom1 + n*atoms_per_molecule,
bond.atom2 + n*atoms_per_molecule,
int(bond.order),
1,
1))
else:
nonecnt+=1
if nonecnt > 0:
logger.debug('FOUND %d BONDS THAT DO NOT EXIST' % nonecnt)
header = ' m_bond[%d] {\n'%i
if (i>0):
hlines = end_header_section(False,header,hlines)
lines.extend(hlines)
lines.extend(dlines)
lines.append(' :::\n')
lines.append(' }\n')
#FFIO
# only need the first molecule
molecule = next(iter(moleculetype.molecules))
logger.debug(" Writing ffio...")
lines.append(' ffio_ff {\n')
lines.append(' s_ffio_name\n')
lines.append(' s_ffio_comb_rule\n')
lines.append(' i_ffio_version\n')
lines.append(' :::\n')
#Adding Molecule Name
if "Viparr" in molecule_name:
lines.append(' Generated by Viparr\n')
else:
lines.append(' "%s"\n' % molecule_name)
#Adding Combination Rule
if self.system.combination_rule == 'Multiply-C6C12':
lines.append(' C6C12\n') # this may not exist in DESMOND, or if so, need to be corrected
elif self.system.combination_rule == 'Lorentz-Berthelot':
lines.append(' ARITHMETIC/GEOMETRIC\n')
elif self.system.combination_rule == 'Multiply-Sigeps':
lines.append(' GEOMETRIC\n')
#Adding Version
lines.append(' 1.0.0\n') #All files had this, check if version is 1.0.0
lines += self.write_vdwtypes_and_sites(molecule)
lines += self.write_bonds(moleculetype)
lines += self.write_angles(moleculetype)
lines += self.write_dihedrals(moleculetype)
lines += self.write_torsion_torsion(moleculetype)
lines += self.write_exclusions(moleculetype)
lines += self.write_pairs(moleculetype)
lines += self.write_constraints(moleculetype)
#STILL NEED TO ADD RESTRAINTS
lines.append(" }\n")
lines.append("}\n")
with open(self.cms_file, 'w') as fout:
for line in lines:
fout.write(line)
```
#### File: intermol/forces/abstract_type.py
```python
class AbstractType(object):
def __repr__(self):
"""Print the object and all of its non-magic attributes. """
attributes = ["{0}={1}".format(x, getattr(self, x)) for x in dir(self)
if not (x.startswith('__') or x.endswith('__'))]
printable_attributes = ', '.join(attributes)
return "{0}({1})".format(self.__class__.__name__, printable_attributes)
```
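For context, here is a minimal sketch (not part of the repository) of what the `__repr__` defined above produces for a small subclass; the `ExampleType` name and its attributes are hypothetical:
```python
# Hypothetical subclass used only to illustrate AbstractType.__repr__ above.
class ExampleType(AbstractType):
    def __init__(self, k, r0):
        self.k = k
        self.r0 = r0

print(ExampleType(k=1.5, r0=0.1))
# -> ExampleType(k=1.5, r0=0.1)
```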
#### File: intermol/forces/bending_torsion_dihedral_type.py
```python
import parmed.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_dihedral_type import AbstractDihedralType
class BendingTorsionDihedralType(AbstractDihedralType):
__slots__ = ['a0', 'a1', 'a2', 'a3', 'a4', 'improper']
@accepts_compatible_units(None, None, None, None,
a0=units.kilojoules_per_mole,
a1=units.kilojoules_per_mole,
a2=units.kilojoules_per_mole,
a3=units.kilojoules_per_mole,
a4=units.kilojoules_per_mole,
improper=None)
def __init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4,
a0=0.0 * units.kilojoules_per_mole,
a1=0.0 * units.kilojoules_per_mole,
a2=0.0 * units.kilojoules_per_mole,
a3=0.0 * units.kilojoules_per_mole,
a4=0.0 * units.kilojoules_per_mole,
improper=False):
AbstractDihedralType.__init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4, improper)
self.a0 = a0
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.a4 = a4
class BendingTorsionDihedral(BendingTorsionDihedralType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, atom3, atom4, bondingtype1=None, bondingtype2=None, bondingtype3=None, bondingtype4=None,
a0=0.0 * units.kilojoules_per_mole,
a1=0.0 * units.kilojoules_per_mole,
a2=0.0 * units.kilojoules_per_mole,
a3=0.0 * units.kilojoules_per_mole,
a4=0.0 * units.kilojoules_per_mole,
improper=False):
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
self.atom4 = atom4
BendingTorsionDihedralType.__init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4,
a0=a0,
a1=a1,
a2=a2,
a3=a3,
a4=a4,
improper=improper)
```
#### File: intermol/forces/connection_bond_type.py
```python
import parmed.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class ConnectionBondType(AbstractBondType):
__slots__ = ['order', 'c']
@accepts_compatible_units(None, None,
order=None,
c=None)
def __init__(self, bondingtype1, bondingtype2,
order=1, c=False):
AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
class ConnectionBond(ConnectionBondType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
order=1, c=False):
self.atom1 = atom1
self.atom2 = atom2
ConnectionBondType.__init__(self, bondingtype1, bondingtype2,
order=order, c=c)
```
#### File: intermol/lammps/__init__.py
```python
from collections import OrderedDict
import logging
import os
from subprocess import Popen, PIPE
import warnings
import parmed.unit as units
from intermol.utils import run_subprocess, which
from intermol.lammps.lammps_parser import load, save
# Python 2/3 compatibility.
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
logger = logging.getLogger('InterMolLog')
to_canonical = {
'Bond': ['bond'],
'Angle': ['angle'],
'Proper Dih.': ['dihedral', 'proper'],
'Improper': ['dihedral', 'improper'],
'Dispersive': ['vdw total'],
'Disper. corr.': ['vdw total', 'vdw (LR)'],
'Electrostatic': ['coulomb total'],
'Coul.recip.': ['coulomb total','coulomb (LR)'],
'Non-bonded': ['nonbonded'],
'Potential': ['potential']
}
for exe in ['lammps', 'lmp_mpi', 'lmp_serial', 'lmp_openmpi',
'lmp_mac_mpi', '/home/mish4610/software/lammps/src/lmp_serial']:
if which(exe):
LMP_PATH = exe
break
else:
LMP_PATH = None
def energies(input_file, lmp_path=None):
"""Evaluate energies of LAMMPS files
Args:
input_file = path to input file (expects data file in same folder)
lmp_path = path to LAMMPS binaries
"""
if lmp_path is None and LMP_PATH is not None:
lmp_path = LMP_PATH
    elif lmp_path is None:
raise IOError('Unable to find LAMMPS executables.')
logger.info('Evaluating energy of {0}'.format(input_file))
directory, input_file = os.path.split(os.path.abspath(input_file))
stdout_path = os.path.join(directory, 'lammps_stdout.txt')
stderr_path = os.path.join(directory, 'lammps_stderr.txt')
# TODO: Read energy info from stdout in memory instead of from log files.
try:
os.remove(stdout_path)
except FileNotFoundError:
pass
try:
os.remove(stderr_path)
except FileNotFoundError:
pass
# Step into the directory.
saved_path = os.getcwd()
os.chdir(directory)
cmd = [lmp_path, '-in', input_file]
proc = run_subprocess(cmd, 'lammps', stdout_path, stderr_path)
if proc.returncode != 0:
logger.error('LAMMPS failed. See %s/lammps_stderr.txt' % directory)
# Step back out.
os.chdir(saved_path)
return _group_energy_terms(stdout_path)
def _group_energy_terms(stdout_path):
"""Parse LAMMPS stdout to extract and group the energy terms in a dict. """
proc = Popen(["awk '/E_bond/{getline; print}' %s" % stdout_path], stdout=PIPE, shell=True)
energies, err = proc.communicate()
if not energies:
raise Exception('Unable to read LAMMPS energy output')
energy_values = [float(x) * units.kilocalories_per_mole for x in energies.split()]
energy_types = ['Bond', 'Angle', 'Proper Dih.', 'Improper', 'Non-bonded',
'Dispersive', 'Electrostatic', 'Coul. recip.',
'Disper. corr.', 'Potential']
e_out = OrderedDict(zip(energy_types, energy_values))
return e_out, stdout_path
``` |
{
"source": "JoshuaSBrown/PyCGADExample2",
"score": 3
} |
#### File: PyCGADExample2/bin/upload_download.py
```python
import argparse
from uploaddownload.upload_download_app import UploadDownloadApp
def main(**kwargs):
app = UploadDownloadApp(kwargs["verbose"])
app.initialize(pem_file=kwargs["permissions"])
if __name__ == "__main__":
message = "Example implementation of Py-CGAD, is authenticated to upload"
message += " and download files to the repository and its wiki"
    parser = argparse.ArgumentParser(description=message)
desc = "Permissions file, allows us to interact with the github repository"
parser.add_argument(
"--permissions", "-p", type=str, nargs=1, required=True, help=desc
)
desc = "Vebosity of output."
parser.add_argument("--verbose", "-v", type=int, nargs=1, default=0, help=desc)
args = parser.parse_args()
main(**vars(args))
```
#### File: PyCGADExample2/tests/test_upload_download.py
```python
import pytest
import os
import json
import sys
from uploaddownload.upload_download_app import UploadDownloadApp
@pytest.fixture
def test_app():
"""Sets up a class with authentication to the PyCGADExample2 repository."""
app = UploadDownloadApp()
current_path, _ = os.path.split(os.path.abspath(__file__))
repo_path = os.path.normpath(os.path.join(current_path, "../"))
for file_name in os.listdir(repo_path):
if file_name.lower().endswith(".pem"):
if "uploaddownloadapp" in file_name:
print("Found pem file {}".format(file_name))
pem_file_path = os.path.join(repo_path, file_name)
break
app.initialize(pem_file=pem_file_path, path_to_repo=app.generateCandidateRepoPath())
return app
def test_branches(test_app):
branches = test_app.branches
found_master = False
for branch in branches:
if branch == "master":
found_master = True
break
assert found_master
def test_get_branch_tree(test_app):
branch_tree = test_app.getBranchTree("master")
assert branch_tree.exists("bin")
assert branch_tree.exists("README.md")
assert branch_tree.type("bin") == "dir"
assert branch_tree.type("README.md") == "file"
rel_paths = branch_tree.getRelativePaths("test_upload_download.py")
assert len(rel_paths) == 1
assert rel_paths[0] == "./tests/test_upload_download.py"
def test_get_contents(test_app):
branch_content = test_app.getContents("master")
assert "./bin" in branch_content
assert "./README.md" in branch_content
def test_upload_remove_file(test_app):
test_branch = "test_upload_remove_file_python"
test_branch += str(sys.version_info[0]) + "_" + str(sys.version_info[1])
print("Test branch is {}".format(test_branch))
# Check that test_upload_remove_file branch exists
branches = test_app.branches
found_test_branch = False
default_branch = None
for branch in branches:
print("Cycling existing branches {}".format(branch))
if branch == test_branch:
print("Found branch {}".format(test_branch))
found_test_branch = True
elif branch == "master":
print("Found master branch setting default to master")
default_branch = "master"
elif branch == "main":
print("Found main branch setting default to main")
default_branch = "main"
# If it doesn't exist create a new branch by splitting off of whatever
# default is available "master | main"
if not found_test_branch:
test_app.createBranch(test_branch, default_branch)
# Next check to see if a sample test file exists on the branch
sample_file = "sample_file.txt"
sample_file_path = "./" + sample_file
branch_tree = test_app.getBranchTree(test_branch)
# If for some reason the file exists we will delete it
if branch_tree.exists(sample_file_path):
test_app.remove(sample_file_path, test_branch)
# Now we are going to verify that the file no longer exists on the github
# repository, we will update our branch tree cache in our app
test_app.refreshBranchTreeCache(test_branch)
# Refresh the actual branch tree object
branch_tree = test_app.getBranchTree(test_branch)
# At this point the sample file should not exist
assert branch_tree.exists(sample_file_path) == False
# Now we are going to create the sample file at the top of our repo
local_repo_path = test_app.generateCandidateRepoPath()
local_sample_file_path = local_repo_path + "/" + sample_file
    with open(local_sample_file_path, "w") as f:
        f.write("This is a sample file!")
# Now we want to try to upload the file to the test branch
test_app.upload(local_sample_file_path, test_branch)
# At this point the file should have been uploaded to the github repository
# on the specified test branch, so we will once again refresh our local
# branch tree to synchronize the contents
test_app.refreshBranchTreeCache(test_branch)
branch_tree = test_app.getBranchTree(test_branch)
# Now we should be able to verify that the file has been uploaded
assert branch_tree.exists(sample_file_path)
```
#### File: PyCGADExample2/uploaddownload/upload_download_app.py
```python
from py_cgad.githubapp import GitHubApp
import os
class UploadDownloadApp(GitHubApp):
def __init__(self, verbosity_in=0):
"""Upload Download app can upload and download files from repo and its wiki."""
if isinstance(verbosity_in, list):
verbosity_in = verbosity_in[0]
super().__init__(
120492,
"UploadDownloadApp",
"JoshuaSBrown",
"PyCGADExample2",
os.path.abspath(__file__),
verbosity=verbosity_in,
)
``` |
{
"source": "JoshuaSBrown/Py-CGAD_Example",
"score": 2
} |
#### File: Py-CGAD_Example/status/status_app.py
```python
from py_cgad.githubapp import GitHubApp
import os
class StatusApp(GitHubApp):
def __init__(self, verbosity_in=0):
"""Status app uploads, status for a commit, can also retrieve"""
if isinstance(verbosity_in, list):
verbosity_in = verbosity_in[0]
super().__init__(
117711,
"StatusApp",
"JoshuaSBrown",
"PyCGADExample",
os.path.abspath(__file__),
verbosity=verbosity_in,
)
``` |
{
"source": "joshuaschoep/crosswalk-sim",
"score": 3
} |
#### File: joshuaschoep/crosswalk-sim/main.py
```python
from events.event_queue import EventQueue
from events.event import Event, EventType
from variates import exponential_u, exponential_1, uniform_ab
import pedestrians as peds
import signal_state_machine as ssm
event_queue = EventQueue()
autos_left = 0
peds_left = 0
traffic_signal = ssm.TrafficSignal(event_queue)
autos_generator = None
peds_generator = None
button_generator = None
def start( N: int, autos_g, pedestrians_g, button_g ):
global event_queue, autos_left, peds_left
global autos_generator, peds_generator, button_generator
    # Initialize end conditions
autos_left = N - 1
peds_left = N - 1
#Init all generators
autos_generator = autos_g
peds_generator = pedestrians_g
button_generator = button_g
#Initialize all beginning arrival events
event_queue.push(Event(
EventType.PED_ARRIVAL,
exponential_u(float(next(peds_generator)), 20),
{
"Index": 0,
"Direction": "EAST",
"Speed": uniform_ab(next(peds_generator), 2.6, 4.1)
}
))
'''event_queue.push(
Event(EventType.PED_ARRIVAL,
exponential_1(float(next(peds_generator))),
{
"Index": 1,
"Direction": "WEST",
"Speed": uniform_ab(next(peds_generator), 2.6, 4.1)
}
))'''
#event_queue.push(Event(EventType.AUTO_ARRIVAL, exponential_1(float(next(autos_generator))), {"Direction": "EAST"}))
#event_queue.push(Event(EventType.AUTO_ARRIVAL, exponential_1(float(next(autos_generator))), {"Direction": "WEST"}))
#EVENT LOOP
while not event_queue.empty():
nextEvent = next(event_queue)
processEvent(nextEvent)
def processEvent(event: Event):
print(event)
if event.type == EventType.PED_ARRIVAL:
handlePedArrival(event)
elif event.type == EventType.PED_AT_BUTTON:
handlePedAtButton(event)
elif event.type == EventType.PED_IMPATIENT:
handlePedImpatient(event)
elif event.type == EventType.AUTO_ARRIVAL:
handleCarArrival(event)
elif event.type == EventType.GREEN_EXPIRES:
traffic_signal.handle_event(event)
elif event.type == EventType.YELLOW_EXPIRES:
walk()
traffic_signal.handle_event(event)
elif event.type == EventType.RED_EXPIRES:
traffic_signal.handle_event(event)
## PEDESTRIAN HELPERS
def ped_at_crosswalk(ped: Event):
print(peds.crosswalk_peds)
for element in peds.crosswalk_peds:
if ped.metadata["Index"] == element.metadata["Index"]:
return True
return False
def can_walk(ped: Event):
if traffic_signal.current_state != ssm.SignalState.RED:
return False
elif traffic_signal.red_expire < (46 / ped.metadata["Speed"]) + ped.at:
return False
return True
def walk():
n = 20
i = 0
while n > 0 and len(peds.crosswalk_peds) > i:
if can_walk(peds.crosswalk_peds[i]):
n -= 1
print("PEDESTRIAN", peds.crosswalk_peds[i].__repr__(), "WALKS")
peds.crosswalk_peds.pop(i)
else:
i += 1
##EVENT PROCESSING
def handlePedArrival(event: Event):
global peds_left
if peds_left > 0:
at = exponential_u(float(next(peds_generator)), 20) + event.at
direction = event.metadata["Direction"]
meta = {
"Index": event.metadata["Index"] + 2,
"Direction": direction,
"Speed": uniform_ab(next(peds_generator), 2.6, 4.1)
}
newPed = Event(EventType.PED_ARRIVAL, at, meta)
event_queue.push(newPed)
peds_left -= 1
##Pedestrian gets to crosswalk
arrivalAtCrosswalk = Event(
EventType.PED_AT_BUTTON,
event.at + peds.DISTANCE_TO_CROSSWALK / event.metadata["Speed"],
event.metadata
)
event_queue.push(arrivalAtCrosswalk)
def handlePedAtButton(event: Event):
if len(peds.crosswalk_peds) == 0:
if float(next(button_generator)) < (15/16):
traffic_signal.press_button(event.at)
elif float(next(button_generator)) < 1 / (len(peds.crosswalk_peds) + 1):
traffic_signal.press_button(event.at)
peds.crosswalk_peds.append(event)
## PED_IMPATIENT handles whether the ped would actually press the button, so we don't have to here
## If they don't walk immediately, they plan to press the button in a minute.
if traffic_signal.current_state != ssm.SignalState.RED:
event_queue.push(Event(
EventType.PED_IMPATIENT,
event.at + 60,
event.metadata
))
def handlePedImpatient(event: Event):
if not ped_at_crosswalk(event):
print("PEDESTRIAN NO LONGER AT CROSSWALK: EVENT DROPPED")
return
else:
traffic_signal.press_button(event.at)
def handleCarArrival(event: Event):
global autos_left
if autos_left > 0:
at = exponential_1(float(next(autos_generator))) + event.at
direction = event.metadata["Direction"]
newAuto = Event(EventType.AUTO_ARRIVAL, at, {"Direction": direction})
event_queue.push(newAuto)
autos_left -= 1
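# Example (hedged sketch): driving the simulation with simple pseudo-random
# streams. The generator below is a hypothetical stand-in; the real runs
# presumably feed the intended uniform(0, 1) input streams.
#
# if __name__ == "__main__":
#     import random
#     rng = random.Random(0)
#     def uniform_stream():
#         while True:
#             yield rng.random()
#     start(10, uniform_stream(), uniform_stream(), uniform_stream())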
``` |
{
"source": "Joshua-Schroijen/deepproblog",
"score": 2
} |
#### File: Joshua-Schroijen/deepproblog/generate_seeds.py
```python
import fire
import os
def generate_seeds(no_seeds):
for _ in range(no_seeds):
print(int.from_bytes(os.urandom(32), 'big'))
if __name__ == "__main__":
fire.Fire(generate_seeds)
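# Example usage (hedged): `python generate_seeds.py 5` prints five independent
# 256-bit integers suitable as RNG seeds.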
```
#### File: examples/CLUTRR/clutrr.py
```python
import fire
import sys
from json import dumps
from deepproblog.engines import ApproximateEngine, ExactEngine
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.network import Network
from deepproblog.calibrated_network import TemperatureScalingNetwork, NetworkECECollector
from deepproblog.model import Model
from deepproblog.dataset import DataLoader
from deepproblog.examples.CLUTRR.architecture import Encoder, RelNet, GenderNet
from deepproblog.examples.CLUTRR.data import CLUTRR, dataset_names
from deepproblog.heuristics import *
from deepproblog.train import TrainObject
from deepproblog.utils import get_configuration, config_to_string, format_time_precise, split_dataset
from deepproblog.utils.stop_condition import Threshold, StopOnPlateau
def main(
i = 0,
calibrate = False,
calibrate_after_each_train_iteration = False,
logging = False
):
dsets = ["sys_gen_{}".format(i) for i in range(3)] + ["noise_{}".format(i) for i in range(4)]
configurations = {"method": ["gm"], "dataset": dsets, "run": range(5)}
configuration = get_configuration(configurations, i)
name = "clutrr_" + config_to_string(configuration) + "_" + format_time_precise()
print(name)
torch.manual_seed(configuration["run"])
clutrr = CLUTRR(configuration["dataset"])
dataset = clutrr.get_dataset(".*train", gender = True, type = "split")
train_dataset, val_dataset = split_dataset(dataset)
test_datasets = clutrr.get_dataset(".*test", gender = True, type = "split", separate = True)
print(dataset_names[configuration["dataset"]])
loader = DataLoader(train_dataset, 4)
val_loader = DataLoader(val_dataset, 4)
embed_size = 32
lstm = Encoder(clutrr.get_vocabulary(), embed_size, p_drop = 0.0)
networks_evolution_collectors = {}
if calibrate == True:
lstm_net = TemperatureScalingNetwork(
lstm, "encoder", val_loader, optimizer = torch.optim.Adam(lstm.parameters(), lr = 1e-2)
)
rel_net = Network(RelNet(embed_size, 2 * embed_size), "rel_extract", val_loader)
gender_net = GenderNet(clutrr.get_vocabulary(), embed_size)
gender_net = Network(
gender_net,
"gender_net",
val_loader,
optimizer = torch.optim.Adam(gender_net.parameters(), lr = 1e-2),
)
networks_evolution_collectors["calibration_collector"] = NetworkECECollector()
else:
lstm_net = Network(
lstm, "encoder", optimizer = torch.optim.Adam(lstm.parameters(), lr = 1e-2)
)
rel_net = Network(RelNet(embed_size, 2 * embed_size), "rel_extract")
gender_net = GenderNet(clutrr.get_vocabulary(), embed_size)
gender_net = Network(
gender_net,
"gender_net",
optimizer=torch.optim.Adam(gender_net.parameters(), lr=1e-2),
)
rel_net.optimizer = torch.optim.Adam(rel_net.parameters(), lr = 1e-2)
model_filename = "model_forward.pl"
model = Model(model_filename, [rel_net, lstm_net, gender_net])
heuristic = GeometricMean()
if configuration["method"] == "exact":
raise Exception('The CLUTRR experiment is currently not supported in the Exact Engine')
# model.set_engine(ExactEngine(model))
elif configuration["method"] == "gm":
model.set_engine(ApproximateEngine(model, 1, heuristic, exploration = True))
train_log = TrainObject(model)
train_log.train(
loader,
Threshold("Accuracy", 1.0) + StopOnPlateau("Accuracy", patience=5, warm_up = 10),
initial_test = False,
test = lambda x: [
(
"Accuracy",
get_confusion_matrix(x, val_dataset, verbose = 0).accuracy(),
)
],
log_iter = 50,
test_iter = 250,
)
model.save_state("models/" + name + ".pth")
for dataset in test_datasets:
cm = get_confusion_matrix(model, test_datasets[dataset], verbose = 0)
final_acc = cm.accuracy()
if logging == True:
train_log.logger.comment("{}\t{}".format(dataset, final_acc))
if logging == True:
train_log.logger.comment(dumps(model.get_hyperparameters()))
train_log.write_to_file("log/" + name)
if __name__ == "__main__":
fire.Fire(main)
```
#### File: examples/Coins/coins.py
```python
import fire
import torch
from torch.utils.data import DataLoader as TorchDataLoader
from deepproblog.dataset import DataLoader
from deepproblog.engines import ExactEngine
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.examples.Coins.data.dataset import train_dataset, test_dataset, RawCoinsNet1ValidationDataset, RawCoinsNet2ValidationDataset
from deepproblog.model import Model
from deepproblog.network import Network
from deepproblog.calibrated_network import TemperatureScalingNetwork, NetworkECECollector
from deepproblog.train import train_model
from deepproblog.utils import split_dataset
from deepproblog.utils.standard_networks import smallnet
from deepproblog.utils.stop_condition import Threshold, StopOnPlateau
def main(
calibrate = False,
calibrate_after_each_train_iteration = False
):
batch_size = 5
if calibrate == True:
rest_train_set, validation_set = split_dataset(train_dataset)
train_loader = DataLoader(rest_train_set, batch_size)
calibration_net1_valid_loader = TorchDataLoader(RawCoinsNet1ValidationDataset(validation_set), batch_size)
calibration_net2_valid_loader = TorchDataLoader(RawCoinsNet2ValidationDataset(validation_set), batch_size)
else:
train_loader = DataLoader(train_dataset, batch_size)
lr = 1e-4
networks_evolution_collectors = {}
coin_network1 = smallnet(num_classes = 2, pretrained = True)
coin_network2 = smallnet(num_classes = 2, pretrained = True)
if calibrate == True:
coin_net1 = TemperatureScalingNetwork(coin_network1, "net1", calibration_net1_valid_loader, batching = True, calibrate_after_each_train_iteration = calibrate_after_each_train_iteration)
coin_net2 = TemperatureScalingNetwork(coin_network2, "net2", calibration_net2_valid_loader, batching = True, calibrate_after_each_train_iteration = calibrate_after_each_train_iteration)
networks_evolution_collectors["calibration_collector"] = NetworkECECollector()
else:
coin_net1 = Network(coin_network1, "net1", batching = True)
coin_net2 = Network(coin_network2, "net2", batching = True)
coin_net1.optimizer = torch.optim.Adam(coin_network1.parameters(), lr = lr)
coin_net2.optimizer = torch.optim.Adam(coin_network2.parameters(), lr = lr)
model = Model("model.pl", [coin_net1, coin_net2])
if calibrate == True:
model.add_tensor_source("train", rest_train_set)
else:
model.add_tensor_source("train", train_dataset)
model.add_tensor_source("test", test_dataset)
model.set_engine(ExactEngine(model), cache = True)
train_obj = train_model(
model,
train_loader,
StopOnPlateau("Accuracy", warm_up = 10, patience = 10) | Threshold("Accuracy", 1.0, duration = 2),
networks_evolution_collectors,
log_iter = 100 // batch_size,
test_iter = 100 // batch_size,
test = lambda x: [("Accuracy", get_confusion_matrix(x, test_dataset).accuracy())],
infoloss = 0.25
)
if calibrate:
coin_net1.calibrate()
coin_net2.calibrate()
return [train_obj, get_confusion_matrix(model, test_dataset, verbose = 0)]
if __name__ == "__main__":
fire.Fire(main)
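# Example invocation (hedged): `python coins.py --calibrate=True` trains on the
# reduced train split and temperature-calibrates both coin networks afterwards.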
```
#### File: Add/data/for_calibration.py
```python
from abc import ABC, abstractmethod
import ast
from pathlib import Path
import sqlite3
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
class RawAddDatasetDatabase:
def initialize(self):
self.connection = sqlite3.connect(Path(__file__).parent / 'raw_add_dataset.sqlite')
self.cursor = self.connection.cursor()
if not self._is_add_samples_db_ready():
self.cursor.execute("CREATE TABLE add_neural1_raw_data ( I1 integer, I2 integer, Carry integer, O integer)")
self.cursor.execute("CREATE TABLE add_neural2_raw_data ( I1 integer, I2 integer, Carry integer, NewCarry integer)")
self.cursor.execute("CREATE TABLE add_raw_data_lengths ( predicate text, length integer )")
with open(Path(__file__).parent / "neural1.txt", "r") as neural1_file:
neural1 = [ast.literal_eval(s.strip()) for s in neural1_file.readlines()]
with open(Path(__file__).parent / "neural2.txt", "r") as neural2_file:
neural2 = [ast.literal_eval(s.strip()) for s in neural2_file.readlines()]
with self.connection:
for sample in neural1:
I1, I2, Carry = sample
O = self._get_neural1_label(I1, I2, Carry)
self.cursor.execute("INSERT INTO add_neural1_raw_data VALUES (:I1, :I2, :Carry, :O)", {'I1': I1, 'I2': I2, 'Carry': Carry, 'O': 0})
for sample in neural2:
I1, I2, Carry = sample
NewCarry = self._get_neural2_label(I1, I2, Carry)
self.cursor.execute("INSERT INTO add_neural2_raw_data VALUES (:I1, :I2, :Carry, :NewCarry)", {'I1': I1, 'I2': I2, 'Carry': Carry, 'NewCarry': NewCarry})
self.cursor.execute("INSERT INTO add_raw_data_lengths VALUES (:predicate, :length)", {'predicate': 'neural1', 'length': len(neural1)})
self.cursor.execute("INSERT INTO add_raw_data_lengths VALUES (:predicate, :length)", {'predicate': 'neural2', 'length': len(neural2)})
def get_neural1_sample(self, i):
self.cursor.execute(f"SELECT * FROM add_neural1_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_neural2_sample(self, i):
self.cursor.execute(f"SELECT * FROM add_neural2_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_length_neural1(self):
self.cursor.execute("SELECT length FROM add_raw_data_lengths WHERE predicate = 'neural1'")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def get_length_neural2(self):
self.cursor.execute("SELECT length FROM add_raw_data_lengths WHERE predicate = 'neural2'")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def _is_add_samples_db_ready(self):
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'add_neural1_raw_data';")
add_neural1_raw_data_table_exists = (self.cursor.fetchall() != [])
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'add_neural2_raw_data';")
add_neural2_raw_data_table_exists = (self.cursor.fetchall() != [])
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'add_raw_data_lengths';")
add_raw_data_lengths_table_exists = (self.cursor.fetchall() != [])
return (add_neural1_raw_data_table_exists and \
add_neural2_raw_data_table_exists and \
add_raw_data_lengths_table_exists)
def _get_neural1_label(self, I1, I2, Carry):
return ((I1 + I2 + Carry) % 10)
def _get_neural2_label(self, I1, I2, Carry):
return ((I1 + I2 + Carry) // 10)
class RawAddValidationDataset(Dataset, ABC):
def __init__(self):
super(Dataset, self).__init__()
self.dataset_db = RawAddDatasetDatabase()
self.dataset_db.initialize()
@abstractmethod
def __len__(self):
pass
@abstractmethod
def __getitem__(self, idx):
pass
class RawAddNeural1ValidationDataset(RawAddValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_neural1()
def __getitem__(self, idx):
I1, I2, Carry, O = self.dataset_db.get_neural1_sample(idx)
return (I1, I2, Carry), self._encode_label(O)
def _encode_label(self, label):
return F.one_hot(torch.tensor(label), num_classes = 10).type(torch.FloatTensor)
class RawAddNeural2ValidationDataset(RawAddValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_neural2()
def __getitem__(self, idx):
I1, I2, Carry, NewCarry = self.dataset_db.get_neural2_sample(idx)
        return (I1, I2, Carry), self._encode_label(NewCarry)
def _encode_label(self, label):
return F.one_hot(torch.tensor(label), num_classes = 2).type(torch.FloatTensor)
```
#### File: WAP/data/for_calibration.py
```python
from abc import ABC, abstractmethod
import ast
import csv
from enum import Enum
import math
from pathlib import Path
import random
import re
import sqlite3
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
import problog.logic
from deepproblog.utils import bytes_to_tensor, tensor_to_bytes
class RawWAPDatasetDatabase:
def initialize(self):
self.connection = sqlite3.connect(Path(__file__).parent / 'raw_wap_dataset.sqlite', detect_types = sqlite3.PARSE_DECLTYPES)
sqlite3.register_adapter(bool, int)
sqlite3.register_converter("BOOLEAN", lambda v: bool(int(v)))
self.cursor = self.connection.cursor()
if not self._is_WAP_samples_db_ready():
self.cursor.execute("CREATE TABLE wap_op1_raw_data ( WAP text, embedding blob, operator integer )")
self.cursor.execute("CREATE TABLE wap_op2_raw_data ( WAP text, embedding blob, operator integer )")
self.cursor.execute("CREATE TABLE wap_permute_raw_data ( WAP text, embedding blob, permutation integer )")
self.cursor.execute("CREATE TABLE wap_swap_raw_data ( WAP text, embedding blob, swap boolean )")
samples = self._read_samples_from_file()
with self.connection:
for sample in samples:
zeros_embedding = tensor_to_bytes(torch.zeros(4096))
WAP, op1, op2, permutation, swapped = sample
self.cursor.execute("INSERT INTO wap_op1_raw_data VALUES (:WAP, :embedding, :operator)", {'WAP': WAP, 'embedding': zeros_embedding, 'operator': op1})
self.cursor.execute("INSERT INTO wap_op2_raw_data VALUES (:WAP, :embedding, :operator)", {'WAP': WAP, 'embedding': zeros_embedding, 'operator': op2})
self.cursor.execute("INSERT INTO wap_permute_raw_data VALUES (:WAP, :embedding, :permutation)", {'WAP': WAP, 'embedding': zeros_embedding, 'permutation': permutation})
self.cursor.execute("INSERT INTO wap_swap_raw_data VALUES (:WAP, :embedding, :swap)", {'WAP': WAP, 'embedding': zeros_embedding, 'swap': swapped})
def get_sample_op1(self, i):
self.cursor.execute(f"SELECT * FROM wap_op1_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_sample_op2(self, i):
self.cursor.execute(f"SELECT * FROM wap_op2_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_sample_permute(self, i):
self.cursor.execute(f"SELECT * FROM wap_permute_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_sample_swap(self, i):
self.cursor.execute(f"SELECT * FROM wap_swap_raw_data LIMIT 1 OFFSET {i};")
result = self.cursor.fetchone()
        if result is not None:
return (*result,)
else:
return None
def get_length_op1(self):
self.cursor.execute("SELECT COUNT(*) FROM wap_op1_raw_data")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def get_length_op2(self):
self.cursor.execute("SELECT COUNT(*) FROM wap_op2_raw_data")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def get_length_permute(self):
self.cursor.execute("SELECT COUNT(*) FROM wap_permute_raw_data")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def get_length_swap(self):
self.cursor.execute("SELECT COUNT(*) FROM wap_swap_raw_data")
result = self.cursor.fetchone()
        if result is not None:
return result[0]
else:
return None
def update_embedding_op1(self, WAP, rnn):
new_embedding = tensor_to_bytes(rnn.forward(problog.logic.Constant(WAP)))
with self.connection:
self.cursor.execute("UPDATE wap_op1_raw_data SET embedding = ? WHERE WAP = ?;", [new_embedding, WAP])
def update_embeddings_op1(self, rnn):
self.cursor.execute(f"SELECT WAP FROM wap_op1_raw_data;")
results = self.cursor.fetchall()
for result in results:
WAP, = result
self.update_embedding_op1(WAP, rnn)
def update_embedding_op2(self, WAP, rnn):
new_embedding = tensor_to_bytes(rnn.forward(problog.logic.Constant(WAP)))
with self.connection:
self.cursor.execute("UPDATE wap_op2_raw_data SET embedding = ? WHERE WAP = ?;", [new_embedding, WAP])
def update_embeddings_op2(self, rnn):
self.cursor.execute(f"SELECT WAP FROM wap_op2_raw_data;")
results = self.cursor.fetchall()
for result in results:
WAP, = result
self.update_embedding_op2(WAP, rnn)
def update_embedding_permute(self, WAP, rnn):
new_embedding = tensor_to_bytes(rnn.forward(problog.logic.Constant(WAP)))
with self.connection:
self.cursor.execute("UPDATE wap_permute_raw_data SET embedding = ? WHERE WAP = ?;", [new_embedding, WAP])
def update_embeddings_permute(self, rnn):
self.cursor.execute(f"SELECT WAP FROM wap_permute_raw_data;")
results = self.cursor.fetchall()
for result in results:
WAP, = result
self.update_embedding_permute(WAP, rnn)
def update_embedding_swap(self, WAP, rnn):
new_embedding = tensor_to_bytes(rnn.forward(problog.logic.Constant(WAP)))
with self.connection:
self.cursor.execute("UPDATE wap_swap_raw_data SET embedding = ? WHERE WAP = ?;", [new_embedding, WAP])
def update_embeddings_swap(self, rnn):
self.cursor.execute(f"SELECT WAP FROM wap_swap_raw_data;")
results = self.cursor.fetchall()
for result in results:
WAP, = result
self.update_embedding_swap(WAP, rnn)
def _is_WAP_samples_db_ready(self):
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'wap_op1_raw_data';")
wap_op1_raw_data_table_exists = (self.cursor.fetchall() != [])
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'wap_op2_raw_data';")
wap_op2_raw_data_table_exists = (self.cursor.fetchall() != [])
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'wap_permute_raw_data';")
wap_permute_raw_data_table_exists = (self.cursor.fetchall() != [])
self.cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table' AND tbl_name = 'wap_swap_raw_data';")
wap_swap_raw_data_table_exists = (self.cursor.fetchall() != [])
return (wap_op1_raw_data_table_exists and \
wap_op2_raw_data_table_exists and \
wap_permute_raw_data_table_exists and
wap_swap_raw_data_table_exists)
def _read_samples_from_file(self):
inputs = []
with open(Path(__file__).parent / 'dev_formulas.csv', 'r') as dev_formulas_file:
dev_formulas_rows = list(csv.reader(dev_formulas_file, delimiter = ','))
for dev_formulas_row in dev_formulas_rows:
WAP, formula, _ = dev_formulas_row
formula = formula.strip()
operators = re.findall("([\+\-\*/])", formula)
numbers = self._get_numbers(WAP)
numbers_permuted = self._get_numbers(formula)
permutation = self._get_permutation(numbers, numbers_permuted)
swapped = self._get_swapped(formula)
inputs.append([WAP, *operators, permutation, swapped])
return inputs
def _get_numbers(self, s):
return [int(r) for r in re.findall(r"\b(\d+)\b", s)]
def _get_permutation(self, numbers, permutation):
if numbers[0] == permutation[0] and \
numbers[1] == permutation[1]:
return 0
elif numbers[0] == permutation[0] and \
numbers[1] == permutation[2]:
return 1
elif numbers[0] == permutation[1] and \
numbers[1] == permutation[0]:
return 2
elif numbers[0] == permutation[2] and \
numbers[1] == permutation[0]:
return 3
elif numbers[0] == permutation[1] and \
numbers[1] == permutation[2]:
return 4
elif numbers[0] == permutation[2] and \
numbers[1] == permutation[1]:
return 5
def _get_swapped(self, formula):
return re.match(".*[\+\-\*/]\(.*", formula) != None
class RawWAPValidationDataset(Dataset, ABC):
def __init__(self):
super(Dataset, self).__init__()
self.dataset_db = RawWAPDatasetDatabase()
self.dataset_db.initialize()
@abstractmethod
def __len__(self):
pass
@abstractmethod
def __getitem__(self, idx):
pass
@abstractmethod
def update_embedding(self, WAP, rnn):
pass
@abstractmethod
def update_embeddings(self, rnn):
pass
class RawWAPOpValidationDataset(RawWAPValidationDataset):
mapping = {
"+": 0,
"-": 1,
"*": 2,
"/": 3
}
def __init__(self):
super().__init__()
def _encode_operator(self, operator):
return F.one_hot(torch.tensor(self.mapping[operator]), num_classes = 4).type(torch.FloatTensor)
class RawWAPOp1ValidationDataset(RawWAPOpValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_op1()
def __getitem__(self, idx):
_, embedding, operator = self.dataset_db.get_sample_op1(idx)
return bytes_to_tensor(embedding), self._encode_operator(operator)
def update_embedding(self, WAP, rnn):
self.dataset_db.update_embedding_op1(WAP, rnn)
def update_embeddings(self, rnn):
self.dataset_db.update_embeddings_op1(rnn)
class RawWAPOp2ValidationDataset(RawWAPOpValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_op2()
def __getitem__(self, idx):
_, embedding, operator = self.dataset_db.get_sample_op2(idx)
return bytes_to_tensor(embedding), self._encode_operator(operator)
def update_embedding(self, WAP, rnn):
self.dataset_db.update_embedding_op2(WAP, rnn)
def update_embeddings(self, rnn):
self.dataset_db.update_embeddings_op2(rnn)
class RawWAPPermuteValidationDataset(RawWAPValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_permute()
def __getitem__(self, idx):
_, embedding, permutation = self.dataset_db.get_sample_permute(idx)
return bytes_to_tensor(embedding), self._encode_permutation(permutation)
def update_embedding(self, WAP, rnn):
self.dataset_db.update_embedding_permute(WAP, rnn)
def update_embeddings(self, rnn):
self.dataset_db.update_embeddings_permute(rnn)
def _encode_permutation(self, permutation_number):
return F.one_hot(torch.tensor(permutation_number), num_classes = 6).type(torch.FloatTensor)
class RawWAPSwapValidationDataset(RawWAPValidationDataset):
def __init__(self):
super().__init__()
def __len__(self):
return self.dataset_db.get_length_swap()
def __getitem__(self, idx):
_, embedding, swap = self.dataset_db.get_sample_swap(idx)
return bytes_to_tensor(embedding), self._encode_swap(swap)
def update_embedding(self, WAP, rnn):
self.dataset_db.update_embedding_swap(WAP, rnn)
def update_embeddings(self, rnn):
self.dataset_db.update_embeddings_swap(rnn)
def _encode_swap(self, swapped):
return F.one_hot(torch.tensor(int(swapped)), num_classes = 2).type(torch.FloatTensor)
```
#### File: examples/HWF/hwf.py
```python
import fire
from json import dumps
from sys import argv
from torch.optim import Adam
from torch.utils.data import DataLoader as TorchDataLoader
from deepproblog.engines import ApproximateEngine, ExactEngine
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.model import Model
from deepproblog.network import Network
from deepproblog.calibrated_network import TemperatureScalingNetwork, NetworkECECollector
from deepproblog.dataset import DataLoader
from deepproblog.train import train_model
from deepproblog.examples.HWF.data import HWFDataset, hwf_images
from deepproblog.examples.HWF.data.for_calibration import RawHWFDatasetDatabase, RawHWFNumbersValidationDataset, RawHWFOperatorsValidationDataset
from deepproblog.examples.HWF.network import SymbolEncoder, SymbolClassifier
from deepproblog.heuristics import *
from deepproblog.utils import format_time_precise, get_configuration, config_to_string
def main(
i = 0,
calibrate = False,
calibrate_after_each_train_iteration = False,
logging = False
):
configurations = {
"method": ["exact"],
"curriculum": [False],
"N": [1, 3],
"run": range(5),
}
configuration = get_configuration(configurations, i)
name = "hwf_" + config_to_string(configuration) + "_" + format_time_precise()
torch.manual_seed(configuration["run"])
N = configuration["N"]
if configuration["method"] == "exact":
if N > 3:
exit()
curriculum = configuration["curriculum"]
print("Training HWF with N={} and curriculum={}".format(N, curriculum))
if curriculum:
dataset_filter = lambda x: x <= N
dataset = HWFDataset("train2", dataset_filter)
val_dataset = HWFDataset("val", dataset_filter)
test_dataset = HWFDataset("test", dataset_filter)
else:
dataset_filter = lambda x: x == N
dataset = HWFDataset("train2", dataset_filter)
val_dataset = HWFDataset("val", dataset_filter)
test_dataset = HWFDataset("test", dataset_filter)
loader = DataLoader(dataset, 32, shuffle = True)
encoder = SymbolEncoder()
network1 = SymbolClassifier(encoder, 10)
network2 = SymbolClassifier(encoder, 4)
networks_evolution_collectors = {}
if calibrate == True:
raw_hwf_dataset_database = RawHWFDatasetDatabase()
raw_hwf_dataset_database.initialize(dataset_filter)
raw_hwf_numbers_validation_dataset = RawHWFNumbersValidationDataset()
raw_hwf_operators_validation_dataset = RawHWFOperatorsValidationDataset()
net1_valid_loader = TorchDataLoader(raw_hwf_numbers_validation_dataset, 32, shuffle = True)
net2_valid_loader = TorchDataLoader(raw_hwf_operators_validation_dataset, 32, shuffle = True)
net1 = TemperatureScalingNetwork(network1, "net1", net1_valid_loader, Adam(network1.parameters(), lr = 3e-3), batching = True, calibrate_after_each_train_iteration = calibrate_after_each_train_iteration)
net2 = TemperatureScalingNetwork(network2, "net2", net2_valid_loader, Adam(network2.parameters(), lr = 3e-3), batching = True, calibrate_after_each_train_iteration = calibrate_after_each_train_iteration)
networks_evolution_collectors["calibration_collector"] = NetworkECECollector()
else:
net1 = Network(network1, "net1", Adam(network1.parameters(), lr = 3e-3), batching = True)
net2 = Network(network2, "net2", Adam(network2.parameters(), lr = 3e-3), batching = True)
model = Model("model.pl", [net1, net2])
model.add_tensor_source("hwf", hwf_images)
heuristic = GeometricMean()
if configuration["method"] == "exact":
model.set_engine(ExactEngine(model), cache = True)
elif configuration["method"] == "approximate":
model.set_engine(
ApproximateEngine(
model, 1, heuristic, timeout = 30, ignore_timeout = True, exploration = True
)
)
print("Training on size {}".format(N))
train_log = train_model(
model,
loader,
50,
networks_evolution_collectors,
log_iter = 50,
        initial_test = False,
test_iter = 100,
test = lambda x: [
("Val_accuracy", get_confusion_matrix(x, val_dataset, eps = 1e-6).accuracy()),
("Test_accuracy", get_confusion_matrix(x, test_dataset, eps = 1e-6).accuracy()),
],
)
if calibrate == True:
net1.calibrate()
net2.calibrate()
model.save_state("models/" + name + ".pth")
cm = get_confusion_matrix(model, test_dataset, eps = 1e-6, verbose = 0)
final_acc = cm.accuracy()
if logging == True:
train_log.logger.comment("Accuracy {}".format(final_acc))
train_log.logger.comment(dumps(model.get_hyperparameters()))
train_log.write_to_file("log/" + name)
return [train_log, cm]
if __name__ == "__main__":
fire.Fire(main)
```
#### File: src/deepproblog/train.py
```python
import signal
import time
from typing import Dict, List, Callable, Union
from deepproblog.dataset import DataLoader
from deepproblog.model import Model
from deepproblog.query import Query
from deepproblog.utils import load_list
from deepproblog.utils.logger import Logger
from deepproblog.utils.stop_condition import EpochStop
from deepproblog.utils.stop_condition import StopCondition
from .networks_evolution_collector import NetworksEvolutionCollector
class TrainObject(object):
"""
An object that performs the training of the model and keeps track of the state of the training.
"""
def __init__(self, model: Model, networks_evolution_collectors: Dict[str, NetworksEvolutionCollector] = {}):
self.model = model
self.networks_evolution_collectors = networks_evolution_collectors
self.logger = Logger()
self.accumulated_loss = 0
self.i = 1
self.start = 0
self.loss_history = []
self.prev_iter_time = 0
self.epoch = 0
self.previous_handler = None
self.interrupt = False
self.hooks = []
self.timing = [0, 0, 0]
def get_loss(self, batch: List[Query], backpropagate_loss: Callable) -> float:
"""
Calculates and propagates the loss for a given batch of queries and loss function.
:param batch: The batch of queries.
:param backpropagate_loss: The loss function. It should also perform the backpropagation.
:return: The average loss over the batch
"""
total_loss = 0
result = self.model.solve(batch)
for r in result:
self.timing[0] += r.ground_time / len(batch)
self.timing[1] += r.compile_time / len(batch)
self.timing[2] += r.eval_time / len(batch)
result = [
(result[i], batch[i]) for i in range(len(batch)) if len(result[i]) > 0
]
for r, q in result:
total_loss += backpropagate_loss(
r, q.p, weight=1 / len(result), q=q.substitute().query
)
return total_loss
def get_loss_with_negatives(
self, batch: List[Query], backpropagate_loss: Callable
) -> float:
"""
Calculates and propagates the loss for a given batch of queries and loss function.
This includes negative examples. Negative examples are found by using the query.replace_var method.
:param batch: The batch of queries.
:param backpropagate_loss: The loss function. It should also perform the backpropagation.
:return: The average loss over the batch
"""
total_loss = 0
result = self.model.solve([q.variable_output() for q in batch])
result = [(result[i], batch[i]) for i in range(len(batch))]
for r, q in result:
expected = q.substitute().query
try:
total_loss += backpropagate_loss(
r, q.p, weight=1 / len(result), q=expected
)
except KeyError:
self.get_loss([q], backpropagate_loss)
neg_proofs = [x for x in r if x != expected]
for neg in neg_proofs:
# print('penalizing wrong answer {} vs {}'.format(q.substitute().query, k))
total_loss += backpropagate_loss(
r, 0, weight=1 / (len(result) * len(neg_proofs)), q=neg
)
return total_loss
def train(
self,
loader: DataLoader,
stop_criterion: Union[int, StopCondition],
verbose: int = 1,
loss_function_name: str = "cross_entropy",
with_negatives: bool = False,
log_iter: int = 100,
initial_test: bool = True,
**kwargs
) -> Logger:
self.previous_handler = signal.getsignal(signal.SIGINT)
loss_function = getattr(self.model.solver.semiring, loss_function_name)
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_before_training(self.model.networks)
self.accumulated_loss = 0
self.timing = [0, 0, 0]
self.epoch = 0
self.start = time.time()
self.prev_iter_time = time.time()
epoch_size = len(loader)
if "test" in kwargs and initial_test:
value = kwargs["test"](self.model)
self.logger.log_list(self.i, value)
print("Test: ", value)
if type(stop_criterion) is int:
stop_criterion = EpochStop(stop_criterion)
print("Training ", stop_criterion)
while not (stop_criterion.is_stop(self) or self.interrupt):
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_before_epoch(self.model.networks)
epoch_start = time.time()
self.model.optimizer.step_epoch()
if verbose and epoch_size > log_iter:
print("Epoch", self.epoch + 1)
index_loader = load_list(loader.indices, loader.batch_size)
for batch in loader:
batch_indices = next(index_loader)
if self.interrupt:
break
self.i += 1
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_before_iteration(self.model.networks)
self.model.train()
self.model.optimizer.zero_grad()
if with_negatives:
loss = self.get_loss_with_negatives(batch, loss_function)
else:
loss = self.get_loss(batch, loss_function)
if self.i % log_iter == 0:
self.loss_history.append(loss)
self.accumulated_loss += loss
self.model.optimizer.step()
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_after_iteration(self.model.networks)
self.log(verbose=verbose, log_iter=log_iter, **kwargs)
for j, hook in self.hooks:
if self.i % j == 0:
hook(self)
if stop_criterion.is_stop(self):
break
if verbose and epoch_size > log_iter:
print("Epoch time: ", time.time() - epoch_start)
self.epoch += 1
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_after_epoch(self.model.networks)
if "snapshot_name" in kwargs:
filename = "{}_final.mdl".format(kwargs["snapshot_name"])
print("Writing snapshot to " + filename)
self.model.save_state(filename)
for networks_evolution_collector in self.networks_evolution_collectors.values():
networks_evolution_collector.collect_after_training(self.model.networks)
signal.signal(signal.SIGINT, self.previous_handler)
return self.logger
def log(
self, snapshot_iter=None, log_iter=100, test_iter=1000, verbose=1, **kwargs
):
iter_time = time.time()
if (
"snapshot_name" in kwargs
and snapshot_iter is not None
and self.i % snapshot_iter == 0
):
filename = "{}_iter_{}.mdl".format(kwargs["snapshot_name"], self.i)
print("Writing snapshot to " + filename)
self.model.save_state(filename)
if verbose and self.i % log_iter == 0:
print(
"Iteration: ",
self.i,
"\ts:%.4f" % (iter_time - self.prev_iter_time),
"\tAverage Loss: ",
self.accumulated_loss / log_iter,
)
if len(self.model.parameters):
print("\t".join(str(parameter) for parameter in self.model.parameters))
self.logger.log("time", self.i, iter_time - self.start)
self.logger.log("loss", self.i, self.accumulated_loss / log_iter)
self.logger.log("ground_time", self.i, self.timing[0] / log_iter)
self.logger.log("compile_time", self.i, self.timing[1] / log_iter)
self.logger.log("eval_time", self.i, self.timing[2] / log_iter)
# for k in self.model.parameters:
# self.logger.log(str(k), self.i, self.model.parameters[k])
# print(str(k), self.model.parameters[k])
self.accumulated_loss = 0
self.timing = [0, 0, 0]
self.prev_iter_time = iter_time
if "test" in kwargs and self.i % test_iter == 0:
value = kwargs["test"](self.model)
self.logger.log_list(self.i, value)
print("Test: ", value)
def write_to_file(self, *args, **kwargs):
self.logger.write_to_file(*args, **kwargs)
def train_model(
model: Model,
loader: DataLoader,
stop_condition: Union[int, StopCondition],
networks_evolution_collectors: Dict[str, NetworksEvolutionCollector] = {},
**kwargs
) -> TrainObject:
train_object = TrainObject(model, networks_evolution_collectors)
train_object.train(loader, stop_condition, **kwargs)
return train_object
```
#### File: deepproblog/utils/__init__.py
```python
import io
import os
import random
import sys
from configparser import ConfigParser
from datetime import datetime
from itertools import islice
from pathlib import Path
from statistics import mean, stdev
from time import strftime
from typing import Union, Any, Dict, List
import torch
from torch.utils.data import Dataset
import problog
from problog.logic import Term
parser = problog.parser.PrologParser(problog.program.ExtendedPrologFactory())
cred = "\033[91m"
cend = "\033[0m"
cgreen = "\033[92m"
def log_exists(location: Union[str, os.PathLike], name: str):
return Path(location).glob(name + "*")
def check_path(path: Union[str, os.PathLike]):
path_dir = os.path.dirname(str(path))
if not os.path.exists(path_dir):
os.makedirs(path_dir)
def get_top_path(pattern: str, reverse=True):
paths = sorted(Path(".").glob(pattern), reverse=reverse)
if len(paths) > 0:
return paths[0]
return None
def split(text: str, splitchar: str, lb="(", rb=")"):
depth = 0
splits = [""]
for c in text:
if c in splitchar and depth == 0:
splits.append("")
continue
if c in lb:
depth += 1
if c in rb:
depth -= 1
splits[-1] += c
return splits
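# Example: split() only breaks at separators outside brackets, e.g.
# split("a(1,2),b", ",") == ["a(1,2)", "b"]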
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value"""
return next(islice(iterable, n, None), default)
def get_configuration(parameters: dict, i: int) -> dict:
config = dict()
for k in parameters:
l = len(parameters[k])
j, i = i % l, i // l
config[k] = parameters[k][j]
return config
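# Example: the index is decoded like a mixed-radix number over the value lists,
# e.g. get_configuration({"a": [1, 2], "b": ["x", "y", "z"]}, 3) == {"a": 2, "b": "y"}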
def parse(string: str) -> Term:
parsed = parser.parseString(string)
for term in parsed:
return term
class Table(object):
class Dimension(object):
def __init__(self, name):
self.name = name
self.type = "categorical"
self.categories = set()
def add_value(self, val):
self.categories.add(val)
def __repr__(self):
return self.name
def __init__(self, *dimensions):
self.data = []
self.dimensions = [Table.Dimension(d) for d in dimensions]
def store(self, *args, **kwargs):
if len(args) == 0:
data = tuple(kwargs[str(d)] for d in self.dimensions)
else:
if len(args) != len(self.dimensions):
raise ValueError(
"Data dimensions {} not equal to table dimensions {}".format(
len(args), len(self.dimensions)
)
)
data = args
self.data.append(data)
for i, d in enumerate(self.dimensions):
d.add_value(data[i])
def get_dimension_index(self, dim):
for i, d in enumerate(self.dimensions):
if hash(dim) == hash(d):
return i
raise ValueError("{} not in dimensions".format(dim))
def aggregate(self, l):
if l is None or len(l) == 0:
return ""
mu, sig = mean(l), stdev(l)
return "{:.5f} ± {:.5f}".format(mu, sig)
def format(self, x, y, val, **kwargs):
categories = tuple(kwargs.items())
categories = tuple((self.get_dimension_index(c[0]), c[1]) for c in categories)
x = self.get_dimension_index(x)
y = self.get_dimension_index(y)
val = self.get_dimension_index(val)
x_cats = list(self.dimensions[x].categories)
y_cats = list(self.dimensions[y].categories)
data = [[None] * len(x_cats) for _ in y_cats]
for d in self.data:
j = x_cats.index(d[x])
i = y_cats.index(d[y])
correct_categories = True
for k, v in categories:
if d[k] != v:
correct_categories = False
break
if correct_categories:
if data[i][j] is None:
data[i][j] = []
data[i][j].append(d[val])
data = [[self.aggregate(d) for d in row] for row in data]
return TabularFormatter.format(data, x_cats, y_cats)
class TabularFormatter(object):
@staticmethod
def format(data, x=None, y=None):
if y is not None:
data = [[y[i]] + data[i] for i in range(len(data))]
if x is not None:
data = [[""] + x] + data
else:
if x is not None:
                data = [x] + data
nr_columns = len(data[0])
column_widths = [0] * nr_columns
for row in data:
for i, value in enumerate(row):
column_widths[i] = max(column_widths[i], len(str(value)))
padded_rows = [
"\t".join(
[
" " * (column_widths[i] - len(str(v))) + str(v)
for i, v in enumerate(row)
]
)
for row in data
]
return "\n".join(padded_rows)
def format_time():
return strftime("_%y%m%d_%H%M")
def format_time_precise():
return datetime.utcnow().strftime("%y%m%d_%H%M%S%f")
class NoConfigException(Exception):
def __str__(self):
return "No config file specified as an argument."
def load_config(filename: str = None):
"""
Loads a config file.
:param filename: Filename of configuration file to load. If None, it uses the first commandline argument as filename.
:return: None
"""
try:
if filename is None:
filename = sys.argv[1]
config = ConfigParser()
config.read(filename)
return config["Default"]
except IndexError:
raise NoConfigException()
def term2list2(term: Term):
result = []
while (
not problog.logic.is_variable(term) and term.functor == "." and term.arity == 2
):
result.append(term.args[0])
term = term.args[1]
if not term == problog.logic.Term("[]"):
raise ValueError("Expected fixed list.")
return result
def config_to_string(configuration: Dict[str, Any]) -> str:
return "_".join(
"{}_{}".format(parameter, configuration[parameter])
for parameter in configuration
)
def tensor_to_bytes(tensor):
buffer = io.BytesIO()
torch.save(tensor, buffer)
buffer.seek(0)
return buffer.read()
def bytes_to_tensor(blob):
buffer = io.BytesIO()
buffer.write(blob)
buffer.seek(0)
return torch.load(buffer)
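# Example: the two helpers round-trip tensors through bytes blobs (used to
# store embeddings in sqlite), e.g.
# torch.equal(bytes_to_tensor(tensor_to_bytes(torch.ones(3))), torch.ones(3))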
class MutatingRawDataset(Dataset):
def __init__(self, inner_raw_dataset, mutator, p, seed = None):
super(Dataset, self).__init__()
self.inner_raw_dataset = inner_raw_dataset
self.mutator = mutator
self.p = p
if seed is None:
seed = random.SystemRandom().randint(0, 2 ** 64)
self.seed = seed
def __len__(self):
return len(self.inner_raw_dataset)
def __getitem__(self, idx):
rng = random.Random(self.seed ^ idx)
if rng.random() < self.p:
return self.mutator(idx, self.inner_raw_dataset[idx])
else:
return self.inner_raw_dataset[idx]
def split_dataset(dataset: Dataset, split_ratio: float = 0.8):
dataset_length = len(dataset)
dataset_part_1 = dataset.subset(round(split_ratio * dataset_length))
dataset_part_2 = dataset.subset(round(split_ratio * dataset_length), dataset_length)
return [dataset_part_1, dataset_part_2]
def load_list(the_list: List[Any], batch_size: int):
if batch_size < 1:
return []
start = 0
end = batch_size
while start < len(the_list):
yield the_list[start:end]
start += batch_size
end += batch_size
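# Example: list(load_list([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]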
```
#### File: deepproblog/src/test.py
```python
import fire
def main(infile_name):
with open(infile_name, "r") as f:
for l in f:
print(l)
if __name__ == "__main__":
fire.Fire(main)
```
#### File: deepproblog/tests/test_dataset.py
```python
import copy
from deepproblog.dataset import (
DataLoader,
QueryDataset,
MutatingDataset,
NumericIncorrectOutputMutator,
NoiseMutatorDecorator,
)
from deepproblog.query import Query
from deepproblog.utils import parse
from problog.logic import Term, Constant
def test_query_dataset(tmpdir):
with tmpdir.join("queries.txt").open(mode="wt") as tmpfile:
tmpfile.write(
"a(2,3,5).\nsubs(a(3,3,b),[[b,123]]).\n0.5 :: subs(a(3,3,c),[[c,123]]).\n0.7 :: b(1,2,3)."
)
dataset_file = QueryDataset(tmpdir.join("queries.txt"))
dataset_list = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
Query(parse("a(3,3,c)."), substitution={Term("c"): Constant(123)}, p=0.5),
Query(parse("b(1,2,3)."), p=0.7),
]
)
assert len(dataset_file) == 4
assert len(dataset_list) == 4
assert dataset_file.queries == dataset_list.queries
assert dataset_list.to_queries() == dataset_list.queries
def test_dataset_write_to_file(tmpdir):
dataset_list = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
]
)
with tmpdir.join("queries_out.txt").open(mode="wt") as tmpfile:
dataset_list.write_to_file(tmpfile)
# Test that we can reload it.
dataset_reloaded = QueryDataset(tmpdir.join("queries_out.txt"))
assert dataset_reloaded.queries == dataset_list.queries
def test_subset():
dataset = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
]
)
assert dataset.subset(1).to_queries() == [dataset.queries[0]]
def test_extension():
dataset1 = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
]
)
dataset2 = QueryDataset([Query(parse("a(1,2,3)."))])
assert (dataset1 + dataset2).to_queries() == dataset1.queries + dataset2.queries
def test_mutating_dataset():
dataset1 = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
]
)
def mutator(i: int, q: Query):
q2 = copy.copy(q)
q2.visited = True
return q2
mutated = MutatingDataset(dataset1, mutator)
assert all(hasattr(e, "visited") for e in mutated.to_queries())
def test_noise_mutator():
"""Test that the noise is approximately correct"""
hit_count = 0
def inner_mutator(i: int, q: Query):
nonlocal hit_count
hit_count += 1
return q
mutator = NoiseMutatorDecorator(p=0.75, inner_mutator=inner_mutator, seed=123)
total_count = 2000
for i in range(total_count):
mutator(i, Query(Term("dummy")))
assert 0.7 < hit_count / total_count < 0.8
# Check we get the same result twice
hit_count1 = hit_count
hit_count = 0
for i in range(total_count):
mutator(i, Query(Term("dummy")))
assert hit_count == hit_count1
def test_numeric_incorrect_output_mutator():
mutator = NumericIncorrectOutputMutator(domain=list(range(10)), seed=123)
r1 = mutator(1, Query(parse("a(1,2,3).")))
r2 = mutator(1, Query(parse("a(1,2,3).")))
r3 = mutator(2, Query(parse("a(1,2,3).")))
r4 = mutator(2, Query(parse("a(1,2,3).")))
assert r1 == r2
assert r3 == r4
assert r1 != r3
assert r1.query.args[-1].value != 3
assert r3.query.args[-1].value != 3
def test_dataloader():
dataset = QueryDataset(
[
Query(parse("a(2,3,5).")),
Query(parse("a(1,2,3).")),
Query(parse("a(3,3,b)."), substitution={Term("b"): Constant(123)}),
]
)
loader = DataLoader(dataset, 2, False)
one_epoch = list(loader)
assert one_epoch[0] + one_epoch[1] == dataset.queries
```
#### File: deepproblog/tests/test_engine_utils.py
```python
import pytest
import deepproblog.engines.engine
import deepproblog.engines.utils
from deepproblog.engines import ApproximateEngine
from deepproblog.engines.prolog_engine.swi_program import SWIProgram
from deepproblog.heuristics import geometric_mean
from deepproblog.model import Model
from deepproblog.query import Query
from deepproblog.utils import parse
initial_program = """
parent(ann,steve).
parent(ann,amy).
parent(amy,amelia).
"""
def _create_model() -> Model:
"""Setup code: Load a program minimally"""
model = Model(initial_program, [], load=False)
engine = ApproximateEngine(model, 1, geometric_mean)
model.set_engine(engine)
return model
def test_with_terms():
"""Test to ensure the context manager correctly registers and un-registers clauses"""
model = _create_model()
program: SWIProgram = model.solver.program
pl_query_term = parse("a(2,2).")
pl_query_term2 = parse("b(2,3).")
def _verify_not_registered():
"""Test to verify that the atoms were not registered"""
assert len(program.query("fa(_,_,a(2,2),none).")) == 0
assert len(program.query("cl(_,b(_,_),_).")) == 0
r = model.solve([Query(pl_query_term), Query(pl_query_term2)])
assert len(r) == 2
assert len(r[0].result) == 0
assert len(r[1].result) == 0
_verify_not_registered()
with deepproblog.engines.utils.with_terms(
model, [parse("a(2, 2)."), parse("b(X, Y) :- Y is X + 1.")]
):
assert len(program.query("fa(_,_,a(2,2),none).")) == 1
assert len(program.query("cl(_,b(_,_),_).")) == 1
r = model.solve([Query(pl_query_term), Query(pl_query_term2)])
assert len(r) == 2
assert len(r[0].result) == 1
assert pytest.approx(1.0) == r[0].result[pl_query_term]
assert len(r[1].result) == 1
        assert pytest.approx(1.0) == r[1].result[pl_query_term2]
_verify_not_registered()
def test_with_terms_grandparent():
model = _create_model()
program: SWIProgram = model.solver.program
# The first statement is provable, the second is not.
pl_query_term = parse("grandparent(ann,amelia).")
pl_query_term2 = parse("grandparent(ann,steve).")
def _verify_not_registered():
"""Test to verify that the atoms were not registered"""
assert len(program.query("cl(_,grandparent(_,_),_).")) == 0
r = model.solve([Query(pl_query_term), Query(pl_query_term2)])
assert len(r) == 2
assert len(r[0].result) == 0
assert len(r[1].result) == 0
_verify_not_registered()
with deepproblog.engines.utils.with_terms(
model, [parse("grandparent(X, Y) :- parent(X,Z), parent(Z,Y).")]
):
assert len(program.query("cl(_,grandparent(_,_),_).")) == 1
r = model.solve([Query(pl_query_term), Query(pl_query_term2)])
assert len(r) == 2
assert len(r[0].result) == 1
assert pytest.approx(1.0) == r[0].result[pl_query_term]
assert len(r[1].result) == 0
_verify_not_registered()
``` |
{
"source": "joshua-s/django-channels-handlers",
"score": 2
} |
#### File: django-channels-handlers/channels_handlers/consumers.py
```python
import json
class ConsumerHandlerMixin:
"""
Integrates one or more handlers with a Django Channels JsonWebsocketConsumer
"""
handler_classes = []
models = []
def __init__(self, *args, **kwargs):
self._initialize_handlers()
super().__init__(*args, **kwargs)
def _initialize_handlers(self):
"""
Instantiates any provided handlers with the proper context
"""
self.handlers = [handler(self) for handler in self.handler_classes]
def handle_message(self, message):
for handler in self.handlers:
handler.handle_message(message)
def serialize_message(self, message):
return message.json()
def deserialize_message(self, pickled_message):
return json.loads(pickled_message)
@classmethod
def encode_json(cls, content):
try:
return content.json()
except AttributeError:
return super().encode_json(content)
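# Example (hedged sketch): mixing the handler support into a Channels consumer.
# "ChatHandler" is a hypothetical MessageHandler subclass and channels must be
# installed for the import to resolve.
#
# from channels.generic.websocket import JsonWebsocketConsumer
#
# class ChatConsumer(ConsumerHandlerMixin, JsonWebsocketConsumer):
#     handler_classes = [ChatHandler]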
class AsyncConsumerHandlerMixin(ConsumerHandlerMixin):
"""
Asynchronous version of ConsumerHandlerMixin for usage with
AsyncJsonWebsocketConsumer
"""
async def handle_message(self, message):
for handler in self.handlers:
await handler.handle_message(message)
@classmethod
async def encode_json(cls, content):
try:
return content.json()
except AttributeError:
return await super().encode_json(content)
```
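For context, here is a minimal sketch of how this mixin is typically wired into a Channels consumer. The `ChatHandler`/`ChatConsumer` names and the `chat.echo` message type are hypothetical; only `ConsumerHandlerMixin`, `handler_classes` and `MessageHandler` come from the code in this repository.
```python
# Hypothetical usage sketch (class and message-type names are made up).
from channels.generic.websocket import JsonWebsocketConsumer

from channels_handlers.consumers import ConsumerHandlerMixin
from channels_handlers.handlers import MessageHandler


class ChatHandler(MessageHandler):
    namespace = "chat"

    def echo(self, message):
        # Called for messages of type "chat.echo"; with no pydantic model
        # registered, message is the raw dict minus its "type" key.
        self.consumer.send_json({"type": "chat.echo_reply", "data": message})


class ChatConsumer(ConsumerHandlerMixin, JsonWebsocketConsumer):
    # The mixin instantiates each handler class with this consumer as context.
    handler_classes = [ChatHandler]
```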
#### File: django-channels-handlers/channels_handlers/handlers.py
```python
import pydantic
class MessageHandler:
"""
Executes different actions based on message type
"""
namespace = "request"
handled_types = {}
models = {}
def __init__(self, consumer):
self.consumer = consumer
def _get_handler_function(self, message):
# Check for type
if "type" not in message:
raise ValueError("Message must have a type")
# Get handler function name
try:
# Check if a custom handler is defined
func_name = self.handled_types[message["type"]]
except KeyError:
message_namespace, message_function = message["type"].split(".")
if message_namespace == self.namespace and callable(
getattr(self, message_function, None)
):
# We can automatically determine the name
func_name = message_function
else:
# Silently exit if the handler does not recognize the message type
return
return getattr(self, func_name)
def _run_message_validation(self, message):
try:
return self.construct_message(message.pop("type"), message)
except KeyError:
return message
def construct_message(self, message_type, data):
# Raises KeyError when no model is registered for this type; the
# caller falls back to the raw message dict in that case.
model_class = self.models[message_type]
return model_class(**data)
def handle_message(self, message):
"""
Handles the given message
:param message: The message to handle
:returns: The results of the handling function or None if the handler does not
recognize the message type
"""
# Get handler function
handler_function = self._get_handler_function(message)
# Bail out, returning None, if this handler does not recognize the type
if handler_function is None:
return None
# Validate message
message = self.validate_message(message)
# Fire handler actions
return handler_function(message)
def receive_json(self, json):
# Execute any parent logic first
super().receive_json(json)
# Handle message
self.handle_message(json)
def validate_message(self, message):
try:
return self._run_message_validation(message)
except pydantic.ValidationError as e:
self.consumer.send_json(
content={
"type": f"{self.namespace}.invalid_message",
"errors": e.errors(),
},
close=4002,
)
class AsyncMessageHandler(MessageHandler):
async def handle_message(self, message):
"""
Handles the given message
:param message: The message to handle
:returns: The results of the handling function or None if the handler does not
recognize the message type
"""
# Get handler function
handler_function = self._get_handler_function(message)
# Bail out, returning None, if this handler does not recognize the type
if handler_function is None:
return None
# Validate message
message = await self.validate_message(message)
# Fire handler actions
return await handler_function(message)
async def receive_json(self, json):
# Execute any parent logic first
await super().receive_json(json)
# Handle message
await self.handle_message(json)
async def validate_message(self, message):
try:
return self._run_message_validation(message)
except pydantic.ValidationError as e:
await self.consumer.send_json(
content={
"type": f"{self.namespace}.invalid_message",
"errors": e.errors(),
},
close=4002,
)
``` |
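To make the pydantic validation path concrete, here is a small hypothetical handler with a registered model. The `TodoItem` model and the `todo.create` message type are invented for illustration; the lookup by message type in `models` and the automatic dispatch to `create()` follow the handler code above.
```python
# Hypothetical sketch: validating "todo.create" messages with pydantic
# before they reach the handler method.
import pydantic

from channels_handlers.handlers import MessageHandler


class TodoItem(pydantic.BaseModel):
    title: str
    done: bool = False


class TodoHandler(MessageHandler):
    namespace = "todo"
    # validate_message() constructs the registered model from the payload.
    models = {"todo.create": TodoItem}

    def create(self, message):
        # message is a validated TodoItem instance here, not a raw dict.
        return {"type": "todo.created", "title": message.title}
```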
{
"source": "joshua-s/fjord",
"score": 2
} |
#### File: fjord/analytics/views.py
```python
import json
from datetime import datetime, timedelta
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from elasticutils.contrib.django import F, es_required_or_50x
from funfactory.urlresolvers import reverse
from mobility.decorators import mobile_template
from tower import ugettext as _
from fjord.analytics.tools import (
JSONDatetimeEncoder,
generate_query_parsed,
counts_to_options,
zero_fill)
from fjord.base.helpers import locale_name
from fjord.base.util import (
check_new_user,
smart_int,
smart_date,
Atom1FeedWithRelatedLinks)
from fjord.feedback.models import Response, ResponseMappingType
@check_new_user
@mobile_template('analytics/{mobile/}response.html')
def response_view(request, responseid, template):
response = get_object_or_404(Response, id=responseid)
# We don't pass the response directly to the template and instead
# do some data tweaks here to make it more palatable for viewing.
return render(request, template, {
'response': response,
})
def generate_json_feed(request, search):
"""Generates JSON feed for first 100 results"""
search_query = request.GET.get('q', None)
responses = search.values_dict()[:100]
json_data = {
'total': len(responses),
'results': list(responses),
'query': search_query
}
return HttpResponse(
json.dumps(json_data, cls=JSONDatetimeEncoder),
mimetype='application/json')
def generate_atom_feed(request, search):
"""Generates ATOM feed for first 100 results"""
search_query = request.GET.get('q', None)
if search_query:
title = _(u'Firefox Input: {query}').format(query=search_query)
else:
title = _(u'Firefox Input')
# Build the non-atom dashboard url and maintain all the
# querystring stuff we have
dashboard_url = request.build_absolute_uri()
dashboard_url = dashboard_url.replace('format=atom', '')
dashboard_url = dashboard_url.replace('&&', '&')
if dashboard_url.endswith(('?', '&')):
dashboard_url = dashboard_url[:-1]
feed = Atom1FeedWithRelatedLinks(
title=title,
link=dashboard_url,
description=_('Search Results From Firefox Input'),
author_name=_('Firefox Input'),
)
for response in search[:100]:
categories = {
'sentiment': _('Happy') if response.happy else _('Sad'),
'platform': response.platform,
'locale': response.locale
}
categories = (':'.join(item) for item in categories.items())
link_url = reverse('response_view', args=(response.id,))
link_url = request.build_absolute_uri(link_url)
feed.add_item(
title=_('Response id: {id}').format(id=response.id),
description=response.description,
link=link_url,
pubdate=response.created,
categories=categories,
link_related=response.url_domain,
)
return HttpResponse(
feed.writeString('utf-8'), mimetype='application/atom+xml')
def generate_dashboard_url(request, output_format='atom',
viewname='dashboard'):
"""For a given request, generates the dashboard url for the given format"""
qd = request.GET.copy()
# Remove anything from the querystring that isn't good for a feed:
# page, start_date, end_date, etc.
for mem in qd.keys():
if mem not in ('happy', 'locale', 'platform', 'product',
'version', 'q'):
del qd[mem]
qd['format'] = output_format
return reverse(viewname) + '?' + qd.urlencode()
@check_new_user
@es_required_or_50x(error_template='analytics/es_down.html')
def dashboard(request):
template = 'analytics/dashboard.html'
output_format = request.GET.get('format', None)
page = smart_int(request.GET.get('page', 1), 1)
# Note: If we add additional querystring fields, we need to add
# them to generate_dashboard_url.
search_happy = request.GET.get('happy', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_product = request.GET.get('product', None)
search_version = request.GET.get('version', None)
search_query = request.GET.get('q', None)
search_date_start = smart_date(
request.GET.get('date_start', None), fallback=None)
search_date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
search_bigram = request.GET.get('bigram', None)
selected = request.GET.get('selected', None)
filter_data = []
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
def unknown_to_empty(text):
"""Convert "Unknown" to "" to support old links"""
return u'' if text.lower() == u'unknown' else text
if search_platform is not None:
f &= F(platform=unknown_to_empty(search_platform))
current_search['platform'] = search_platform
if search_locale is not None:
f &= F(locale=unknown_to_empty(search_locale))
current_search['locale'] = search_locale
if search_product is not None:
f &= F(product=unknown_to_empty(search_product))
current_search['product'] = search_product
if search_version is not None:
# Note: We only filter on version if we're filtering on
# product.
f &= F(version=unknown_to_empty(search_version))
current_search['version'] = search_version
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = datetime.now()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
# Add one day, so that the search range includes the entire day.
end = search_date_end + timedelta(days=1)
# Note 'less than', not 'less than or equal', because of the added
# day above.
f &= F(created__lt=end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
current_search['q'] = search_query
es_query = generate_query_parsed('description', search_query)
search = search.query_raw(es_query)
if search_bigram is not None:
f &= F(description_bigrams=search_bigram)
filter_data.append({
'display': _('Bigram'),
'name': 'bigram',
'options': [{
'count': 'all',
'name': search_bigram,
'display': search_bigram,
'value': search_bigram,
'checked': True
}]
})
search = search.filter(f).order_by('-created')
# If the user asked for a feed, give him/her a feed!
if output_format == 'atom':
return generate_atom_feed(request, search)
elif output_format == 'json':
return generate_json_feed(request, search)
# Search results and pagination
if page < 1:
page = 1
page_count = 20
start = page_count * (page - 1)
end = start + page_count
search_count = search.count()
opinion_page = search[start:end]
# Navigation facet data
facets = search.facet(
'happy', 'platform', 'locale', 'product', 'version',
filtered=bool(search._process_filters(f.filters)))
# This loop does two things. First it maps 'T' -> True and 'F' ->
# False. This is probably something EU should be doing for
# us. Second, it restructures the data into a more convenient
# form.
counts = {
'happy': {},
'platform': {},
'locale': {},
'product': {},
'version': {}
}
for param, terms in facets.facet_counts().items():
for term in terms:
name = term['term']
if name == 'T':
name = True
elif name == 'F':
name = False
counts[param][name] = term['count']
def empty_to_unknown(text):
return _('Unknown') if text == u'' else text
filter_data.extend([
counts_to_options(
counts['happy'].items(),
name='happy',
display=_('Sentiment'),
display_map={True: _('Happy'), False: _('Sad')},
value_map={True: 1, False: 0},
checked=search_happy),
counts_to_options(
counts['product'].items(),
name='product',
display=_('Product'),
display_map=empty_to_unknown,
checked=search_product)
])
# Only show the version if we're showing a specific
# product.
if search_product:
filter_data.append(
counts_to_options(
counts['version'].items(),
name='version',
display=_('Version'),
display_map=empty_to_unknown,
checked=search_version)
)
filter_data.extend(
[
counts_to_options(
counts['platform'].items(),
name='platform',
display=_('Platform'),
display_map=empty_to_unknown,
checked=search_platform),
counts_to_options(
counts['locale'].items(),
name='locale',
display=_('Locale'),
checked=search_locale,
display_map=locale_name),
]
)
# Histogram data
happy_data = []
sad_data = []
happy_f = f & F(happy=True)
sad_f = f & F(happy=False)
histograms = search.facet_raw(
happy={
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': search._process_filters(happy_f.filters)
},
sad={
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': search._process_filters(sad_f.filters)
},
).facet_counts()
# p['time'] is number of milliseconds since the epoch. Which is
# convenient, because that is what the front end wants.
happy_data = dict((p['time'], p['count']) for p in histograms['happy'])
sad_data = dict((p['time'], p['count']) for p in histograms['sad'])
zero_fill(search_date_start, search_date_end, [happy_data, sad_data])
histogram = [
{'label': _('Happy'), 'name': 'happy',
'data': sorted(happy_data.items())},
{'label': _('Sad'), 'name': 'sad',
'data': sorted(sad_data.items())},
]
return render(request, template, {
'opinions': opinion_page,
'opinion_count': search_count,
'filter_data': filter_data,
'histogram': histogram,
'page': page,
'prev_page': page - 1 if start > 0 else None,
'next_page': page + 1 if end < search_count else None,
'current_search': current_search,
'selected': selected,
'atom_url': generate_dashboard_url(request),
})
```
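For reference, these are the querystring parameters the dashboard view reads (collected from the `request.GET` lookups above); the values shown are only illustrative.
```python
# Illustrative dashboard querystring parameters (example values only).
example_params = {
    'happy': '1',               # '1' -> happy, '0' -> sad
    'product': 'Firefox',
    'version': '30.0',          # only used together with a product filter
    'platform': 'Windows 7',
    'locale': 'en-US',
    'q': 'crash',               # free-text search against the description
    'date_start': '2013-05-01',
    'date_end': '2013-05-08',
    'bigram': 'crash youtub',
    'page': '2',
    'format': 'json',           # or 'atom'; omit for the HTML dashboard
}
```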
#### File: fjord/base/util.py
```python
from functools import wraps
import datetime
import time
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponseRedirect
from django.utils.dateparse import parse_date
from django.utils.feedgenerator import Atom1Feed
from funfactory.urlresolvers import reverse
from product_details import product_details
from rest_framework.throttling import AnonRateThrottle
from statsd import statsd
def translate_country_name(current_language, country_code, country_name,
country_name_l10n):
"""Translates country name from product details or gettext
It might seem a bit weird we're not doing the _lazy gettext
translation here, but if we did, then we'd be translating a
variable value rather than a string and then it wouldn't get
picked up by the extraction script.
:arg current_language: the language of the user viewing the page
:arg country_code: the iso 3166 two-letter country code
:arg country_name: the country name
:arg country_name_l10n: the country name wrapped in a lazy gettext call
:returns: translated country name
"""
# FIXME: this is a lousy way to alleviate the problem where we
# have a "locale" and we really need a "language".
language_fix = {
'es': 'es-ES',
}
current_language = language_fix.get(current_language, current_language)
# If the country name has been translated, then use that
if unicode(country_name) != unicode(country_name_l10n):
return country_name_l10n
current_language = current_language.split('-')
current_language[0] = current_language[0].lower()
if len(current_language) > 1:
current_language[1] = current_language[1].upper()
current_language = '-'.join(current_language)
country_code = country_code.lower()
try:
countries = product_details.get_regions(current_language)
except IOError:
return country_name
return countries.get(country_code, country_name)
def smart_truncate(content, length=100, suffix='...'):
"""Truncate text at space before length bound.
:arg content: string to truncate
:arg length: length to truncate at
:arg suffix: text to append to truncated content
:returns: string
Example:
>>> smart_truncate('abcde fghij', length=8)
'abcde...'
>>> smart_truncate('abcde fghij', length=100)
'abcde fghij'
"""
if len(content) <= length:
return content
else:
return content[:length].rsplit(' ', 1)[0] + suffix
def smart_str(s, fallback=u''):
"""Returns the string or the fallback if it's not a string"""
if isinstance(s, basestring):
return s
return fallback
def smart_int(s, fallback=0):
"""Convert a string to int, with fallback for invalid strings or types."""
try:
return int(float(s))
except (ValueError, TypeError, OverflowError):
return fallback
def smart_date(s, fallback=None):
"""Convert a string to a datetime.date with a fallback for invalid input.
:arg s: The string to convert to a date.
:arg fallback: Value to use in case of an error. Default: ``None``.
"""
if isinstance(s, datetime.date):
return s
try:
dt = parse_date(s)
# The strftime functions require a year >= 1900, so if this
# has a year before that, then we treat it as an invalid date so
# later processing doesn't get hosed.
if dt and dt.year >= 1900:
return dt
except (ValueError, TypeError):
pass
return fallback
def smart_bool(s, fallback=False):
"""Convert a string that has a semantic boolean value to a real boolean.
Note that this is not the same as ``s`` being "truthy". The string
``'False'`` will be returned as False, even though it is Truthy, and non-
boolean values like ``'apple'`` would return the fallback parameter, since
it doesn't represent a boolean value.
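Example:
>>> smart_bool('y')
True
>>> smart_bool('FALSE')
False
>>> smart_bool('apple', fallback=None) is None
True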
"""
try:
s = s.lower()
if s in ['true', 't', 'yes', 'y', '1']:
return True
elif s in ['false', 'f', 'no', 'n', '0']:
return False
except AttributeError:
pass
return fallback
def epoch_milliseconds(d):
"""Convert a datetime to a number of milliseconds since the epoch."""
return time.mktime(d.timetuple()) * 1000
class FakeLogger(object):
"""Fake logger that we can pretend is a Python Logger
Why? Well, because Django has logging settings that prevent me
from setting up a logger here that uses the stdout that the Django
BaseCommand has. At some point p while fiddling with it, I
figured, 'screw it--I'll just write my own' and did.
The minor ramification is that this isn't a complete
implementation so if it's missing stuff, we'll have to add it.
"""
def __init__(self, stdout):
self.stdout = stdout
def _out(self, level, msg, *args):
msg = msg % args
self.stdout.write('%s %-8s: %s\n' % (
time.strftime('%H:%M:%S'), level, msg))
def info(self, msg, *args):
self._out('INFO', msg, *args)
def error(self, msg, *args):
self._out('ERROR', msg, *args)
class Atom1FeedWithRelatedLinks(Atom1Feed):
"""Atom1Feed with related links
This adds a "link_related" item as::
<link rel="related">url</link>
"""
def add_item_elements(self, handler, item):
super(Atom1FeedWithRelatedLinks, self).add_item_elements(handler, item)
if item.get('link_related'):
handler.addQuickElement(
'link',
attrs={'href': item['link_related'], 'rel': 'related'})
class MeasuredAnonRateThrottle(AnonRateThrottle):
"""On throttle failure, does a statsd call"""
def throttle_failure(self):
statsd.incr('api.throttle.failure')
def check_new_user(fun):
@wraps(fun)
def _wrapped_view(request, *args, **kwargs):
# Do this here to avoid circular imports
from fjord.base.models import Profile
try:
request.user.profile
except AttributeError:
pass
except Profile.DoesNotExist:
url = reverse('new-user-view') + '?next=' + request.path
return HttpResponseRedirect(url)
return fun(request, *args, **kwargs)
return _wrapped_view
analyzer_required = permission_required(
'analytics.can_view_dashboard',
raise_exception=True)
```
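A few illustrative calls for the parsing helpers above; the input values are invented, but the results follow directly from the code.
```python
# Illustrative behaviour of the smart_* parsing helpers (example values only).
import datetime

from fjord.base.util import smart_int, smart_date

smart_int('42.7')                      # -> 42 (parsed via float, then truncated)
smart_int('not a number')              # -> 0 (the default fallback)
smart_int(None, fallback=-1)           # -> -1 (TypeError is swallowed)

smart_date('2013-05-01')               # -> datetime.date(2013, 5, 1)
smart_date('1850-01-01')               # -> None (years before 1900 are rejected)
smart_date(datetime.date(2013, 5, 1))  # -> passed through unchanged
```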
#### File: feedback/migrations/0009_backfill_again.py
```python
import os
from south.v2 import DataMigration
from django.conf import settings
# Does a second pass on the data migration to catch the responses that
# were being created when the migration was happening and thus didn't
# get updated.
class Migration(DataMigration):
def forwards(self, orm):
working_set = orm.Response.objects.filter(product='')
if not getattr(settings, 'TEST'):
print os.path.basename(__file__), '{0} responses to update'.format(len(working_set))
for resp in working_set:
# This replicates the logic in Response.infer_product.
if resp.platform == u'Unknown':
resp.product = u'Unknown'
resp.channel = u'Unknown'
resp.version = u'Unknown'
else:
if resp.platform == u'FirefoxOS':
resp.product = u'Firefox OS'
elif resp.platform == u'Android':
resp.product = u'Firefox for Android'
else:
resp.product = u'Firefox'
resp.channel = u'stable'
resp.version = resp.browser_version
resp.save()
def backwards(self, orm):
pass
models = {
'feedback.response': {
'Meta': {'ordering': "['-created']", 'object_name': 'Response'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'channel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'translated_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'feedback.responseemail': {
'Meta': {'object_name': 'ResponseEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedback.Response']"})
}
}
complete_apps = ['feedback']
symmetrical = True
```
#### File: feedback/migrations/0018_strip_whitespace.py
```python
import os
from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
affected_count = 0
working_set = orm.Response.objects.all()
for resp in working_set:
resp_stripped = resp.description.strip()
if resp.description != resp_stripped:
resp.description = resp_stripped
resp.save()
affected_count += 1
if not getattr(settings, 'TEST'):
print os.path.basename(__file__), 'Stripped {0} descriptions'.format(affected_count)
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'feedback.response': {
'Meta': {'ordering': "['-created']", 'object_name': 'Response'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'channel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'translated_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'feedback.responseemail': {
'Meta': {'object_name': 'ResponseEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedback.Response']"})
}
}
complete_apps = ['feedback']
symmetrical = True
```
#### File: feedback/migrations/0020_add_initial_products.py
```python
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
Product = orm['feedback.Product']
p = Product(
enabled=True,
notes=u'',
display_name=u'Firefox',
db_name=u'Firefox',
slug=u'firefox',
on_dashboard=True
)
p.save()
p = Product(
enabled=True,
notes=u'',
display_name=u'Firefox for Android',
db_name=u'Firefox for Android',
slug=u'android',
on_dashboard=True
)
p.save()
p = Product(
enabled=True,
notes=u'',
display_name=u'Firefox OS',
db_name=u'Firefox OS',
slug=u'fxos',
on_dashboard=True
)
p.save()
p = Product(
enabled=True,
notes=u'',
display_name=u'Firefox Metro',
db_name=u'Firefox Metro',
slug=u'metrofirefox',
on_dashboard=False
)
p.save()
def backwards(self, orm):
Product = orm['feedback.Product']
Product.objects.all().delete()
models = {
u'feedback.product': {
'Meta': {'object_name': 'Product'},
'db_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'feedback.response': {
'Meta': {'ordering': "['-created']", 'object_name': 'Response'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'channel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'translated_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
u'feedback.responseemail': {
'Meta': {'object_name': 'ResponseEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
}
}
complete_apps = ['feedback']
symmetrical = True
```
#### File: fjord/feedback/models.py
```python
from datetime import datetime
import urlparse
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import models
from elasticutils.contrib.django import Indexable
from rest_framework import serializers
from tower import ugettext_lazy as _
from product_details import product_details
from fjord.base.domain import get_domain
from fjord.base.models import ModelBase
from fjord.base.util import smart_truncate
from fjord.feedback.config import CODE_TO_COUNTRY
from fjord.feedback.utils import compute_grams
from fjord.search.index import (
register_mapping_type, FjordMappingType,
boolean_type, date_type, integer_type, keyword_type, terms_type,
text_type)
from fjord.search.tasks import register_live_index
from fjord.translations.models import get_translation_system_choices
from fjord.translations.tasks import register_auto_translation
from fjord.translations.utils import compose_key
# This defines the number of characters the description can have. We
# do this in code rather than in the db since it makes it easier to
# tweak the value.
TRUNCATE_LENGTH = 10000
class Product(ModelBase):
"""Represents a product we capture feedback for"""
# Whether or not this product is enabled
enabled = models.BooleanField(default=True)
# Used internally for notes to make it easier to manage products
notes = models.CharField(max_length=255, blank=True, default=u'')
# This is the name we display everywhere
display_name = models.CharField(max_length=20)
# We're not using foreign keys, so when we save something to the
# database, we use this name
db_name = models.CharField(max_length=20)
# This is the slug used in the feedback product urls; we don't use
# the SlugField because we don't require slugs be unique
slug = models.CharField(max_length=20)
# Whether or not this product shows up on the dashboard
on_dashboard = models.BooleanField(default=True)
# System slated for automatic translation, or null if none;
# See translation app for details.
translation_system = models.CharField(
choices=get_translation_system_choices(),
null=True,
blank=True,
max_length=20,
)
@classmethod
def get_product_map(cls):
"""Returns map of product slug -> db_name"""
products = cls.objects.values_list('slug', 'db_name')
return dict(prod for prod in products)
@register_auto_translation
@register_live_index
class Response(ModelBase):
"""Basic feedback response
This consists of a bunch of information some of which is inferred
and some of which comes from the source.
Some fields are "sacrosanct" and should never be edited after the
response was created:
* happy
* url
* description
* user_agent
* manufacturer
* device
* created
"""
# This is the product/channel.
# e.g. "firefox.desktop.stable", "firefox.mobile.aurora", etc.
prodchan = models.CharField(max_length=255)
# Data coming from the user
happy = models.BooleanField(default=True)
url = models.URLField(blank=True)
description = models.TextField(blank=True)
# Translation into English of the description
translated_description = models.TextField(blank=True)
# Data inferred from urls or explicitly stated by the thing saving
# the data (webform, client of the api, etc)
product = models.CharField(max_length=30, blank=True)
channel = models.CharField(max_length=30, blank=True)
version = models.CharField(max_length=30, blank=True)
locale = models.CharField(max_length=8, blank=True)
country = models.CharField(max_length=4, blank=True, null=True,
default=u'')
manufacturer = models.CharField(max_length=255, blank=True)
device = models.CharField(max_length=255, blank=True)
# User agent and inferred data from the user agent
user_agent = models.CharField(max_length=255, blank=True)
browser = models.CharField(max_length=30, blank=True)
browser_version = models.CharField(max_length=30, blank=True)
platform = models.CharField(max_length=30, blank=True)
source = models.CharField(max_length=100, blank=True, null=True,
default=u'')
campaign = models.CharField(max_length=100, blank=True, null=True,
default=u'')
created = models.DateTimeField(default=datetime.now)
class Meta:
ordering = ['-created']
def __unicode__(self):
return u'(%s) %s' % (self.sentiment, self.truncated_description)
def __repr__(self):
return self.__unicode__().encode('ascii', 'ignore')
def generate_translation_jobs(self):
"""Returns a list of tuples, one for each translation job
If the locale of this response is English, then we just copy over
the description and we're done.
If the product of this response isn't set up for auto-translation,
then we're done.
If we already have a response with this text that's
translated, we copy the most recent translation over.
Otherwise we generate a list of jobs to be done.
.. Note::
This happens in a celery task, so feel free to do what you need
to do here.
"""
# If the text is in English, we copy it over and we're
# done. We do this regardless of whether auto-translation is
# enabled or not for this product.
if self.locale == 'en-US':
self.translated_description = self.description
self.save()
return []
try:
prod = Product.objects.get(db_name=self.product)
system = prod.translation_system
except Product.DoesNotExist:
# If the product doesn't exist, then I don't know what's
# going on, but we shouldn't create any translation jobs
return []
if not system:
# If this product isn't set up for translation, don't
# translate it.
return []
try:
# See if this text has been translated already--if so, use
# the most recent translation.
existing_obj = (
Response.objects
.filter(description=self.description)
.exclude(translated_description__isnull=True)
.exclude(translated_description=u'')
.latest('id'))
self.translated_description = existing_obj.translated_description
self.save()
return []
except Response.DoesNotExist:
pass
return [
# key, system, src language, src field, dst language, dst field
(compose_key(self), system, self.locale, 'description',
u'en-US', 'translated_description')
]
@classmethod
def get_export_keys(cls, confidential=False):
"""Returns set of keys that are interesting for export
Some parts of the Response aren't very interesting. This lets
us explicitly state what is available for export.
Note: This returns the name of *properties* of Response which
aren't all database fields. Some of them are finessed.
:arg confidential: Whether or not to include confidential data
"""
keys = [
'id',
'created',
'sentiment',
'description',
'translated_description',
'product',
'channel',
'version',
'locale_name',
'manufacturer',
'device',
'platform',
]
if confidential:
keys.extend([
'url',
'country_name',
'user_email',
])
return keys
def save(self, *args, **kwargs):
self.description = self.description.strip()[:TRUNCATE_LENGTH]
super(Response, self).save(*args, **kwargs)
@property
def url_domain(self):
"""Returns the domain part of a url"""
return get_domain(self.url)
@property
def user_email(self):
"""Associated email address or u''"""
if self.responseemail_set.count() > 0:
return self.responseemail_set.all()[0].email
return u''
@property
def sentiment(self):
if self.happy:
return _(u'Happy')
return _(u'Sad')
@property
def truncated_description(self):
"""Shorten feedback for list display etc."""
return smart_truncate(self.description, length=70)
@property
def locale_name(self, native=False):
"""Convert a locale code into a human readable locale name"""
locale = self.locale
if locale in product_details.languages:
display_locale = 'native' if native else 'English'
return product_details.languages[locale][display_locale]
return locale
@property
def country_name(self, native=False):
"""Convert a country code into a human readable country name"""
country = self.country
if country in CODE_TO_COUNTRY:
display_locale = 'native' if native else 'English'
return CODE_TO_COUNTRY[country][display_locale]
return country
@classmethod
def get_mapping_type(cls):
return ResponseMappingType
@classmethod
def infer_product(cls, platform):
if platform == u'Firefox OS':
return u'Firefox OS'
elif platform == u'Android':
return u'Firefox for Android'
elif platform in (u'', u'Unknown'):
return u''
return u'Firefox'
@register_mapping_type
class ResponseMappingType(FjordMappingType, Indexable):
@classmethod
def get_model(cls):
return Response
@classmethod
def get_mapping(cls):
return {
'id': integer_type(),
'prodchan': keyword_type(),
'happy': boolean_type(),
'url': keyword_type(),
'url_domain': keyword_type(),
'has_email': boolean_type(),
'description': text_type(),
'description_bigrams': keyword_type(),
'description_terms': terms_type(),
'user_agent': keyword_type(),
'product': keyword_type(),
'channel': keyword_type(),
'version': keyword_type(),
'browser': keyword_type(),
'browser_version': keyword_type(),
'platform': keyword_type(),
'locale': keyword_type(),
'country': keyword_type(),
'device': keyword_type(),
'manufacturer': keyword_type(),
'created': date_type()
}
@classmethod
def extract_document(cls, obj_id, obj=None):
if obj is None:
obj = cls.get_model().objects.get(pk=obj_id)
def empty_to_unknown(text):
return u'Unknown' if text == u'' else text
doc = {
'id': obj.id,
'prodchan': obj.prodchan,
'happy': obj.happy,
'url': obj.url,
'url_domain': obj.url_domain,
'has_email': bool(obj.user_email),
'description': obj.description,
'description_terms': obj.description,
'user_agent': obj.user_agent,
'product': obj.product,
'channel': obj.channel,
'version': obj.version,
'browser': obj.browser,
'browser_version': obj.browser_version,
'platform': obj.platform,
'locale': obj.locale,
'country': obj.country,
'device': obj.device,
'manufacturer': obj.manufacturer,
'created': obj.created,
}
# We only compute bigrams for english because the analysis
# uses English stopwords, stemmers, ...
if obj.locale.startswith(u'en') and obj.description:
bigrams = compute_grams(obj.description)
doc['description_bigrams'] = bigrams
return doc
@property
def truncated_description(self):
"""Shorten feedback for dashboard view."""
return smart_truncate(self.description, length=500)
@classmethod
def get_products(cls):
"""Returns a list of all products
This is cached.
"""
key = 'feedback:response_products1'
products = cache.get(key)
if products is not None:
return products
facet = cls.search().facet('product').facet_counts()
products = [prod['term'] for prod in facet['product']]
cache.add(key, products)
return products
@classmethod
def get_indexable(cls):
return super(ResponseMappingType, cls).get_indexable().reverse()
class ResponseEmail(ModelBase):
"""Holds email addresses related to Responses."""
opinion = models.ForeignKey(Response)
email = models.EmailField()
class NoNullsCharField(serializers.CharField):
"""Further restricts CharField so it doesn't accept nulls
DRF lets CharFields take nulls which is not what I want. This
raises a ValidationError if the value is a null.
"""
def from_native(self, value):
if value is None:
raise ValidationError('Value cannot be null')
return super(NoNullsCharField, self).from_native(value)
class ResponseSerializer(serializers.Serializer):
"""This handles incoming feedback
This handles responses as well as the additional data for response
emails.
"""
happy = serializers.BooleanField(required=True)
url = serializers.URLField(required=False, default=u'')
description = serializers.CharField(required=True)
# Note: API clients don't provide a user_agent, so we skip that and
# browser since those don't make sense.
# product, channel, version, locale, platform
product = NoNullsCharField(max_length=20, required=True)
channel = NoNullsCharField(max_length=30, required=False, default=u'')
version = NoNullsCharField(max_length=30, required=False, default=u'')
locale = NoNullsCharField(max_length=8, required=False, default=u'')
platform = NoNullsCharField(max_length=30, required=False, default=u'')
country = NoNullsCharField(max_length=4, required=False, default=u'')
# device information
manufacturer = NoNullsCharField(required=False, default=u'')
device = NoNullsCharField(required=False, default=u'')
# user's email address
email = serializers.EmailField(required=False)
def validate_product(self, attrs, source):
"""Validates the product against Product model"""
value = attrs[source]
# This looks goofy, but it makes it more likely we have a
# cache hit.
products = Product.objects.values_list('display_name', flat=True)
if value not in products:
raise serializers.ValidationError(
'{0} is not a valid product'.format(value))
return attrs
def restore_object(self, attrs, instance=None):
# Note: instance should never be anything except None here
# since we only accept POST and not PUT/PATCH.
# prodchan is composed of product + channel. This is a little
# goofy, but we can fix it later if we bump into issues with
# the contents.
prodchan = u'.'.join([
attrs['product'].lower().replace(' ', '') or 'unknown',
attrs['channel'].lower().replace(' ', '') or 'unknown'])
opinion = Response(
prodchan=prodchan,
happy=attrs['happy'],
url=attrs['url'].strip(),
description=attrs['description'].strip(),
user_agent=u'api', # Hard-coded
product=attrs['product'].strip(),
channel=attrs['channel'].strip(),
version=attrs['version'].strip(),
platform=attrs['platform'].strip(),
locale=attrs['locale'].strip(),
manufacturer=attrs['manufacturer'].strip(),
device=attrs['device'].strip(),
country=attrs['country'].strip()
)
# If there is an email address, stash it on this instance so
# we can save it later in .save() and so it gets returned
# correctly in the response. This doesn't otherwise affect the
# Response model instance.
opinion.email = attrs.get('email', '').strip()
return opinion
def save_object(self, obj, **kwargs):
obj.save(**kwargs)
if obj.email:
opinion_email = ResponseEmail(
email=obj.email,
opinion=obj
)
opinion_email.save(**kwargs)
return obj
```
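To illustrate the serializer flow above (`validate_product` → `restore_object` → `save_object`), here is a minimal sketch in the DRF 2.x style this code targets. The field values are invented and it assumes a Product row with display name "Firefox" exists.
```python
# Hypothetical sketch of exercising ResponseSerializer directly (DRF 2.x API).
from fjord.feedback.models import ResponseSerializer

data = {
    'happy': True,
    'description': u'Fast and stable on my laptop.',
    'product': u'Firefox',           # must match a Product display_name
    'channel': u'stable',
    'version': u'30.0',
    'locale': u'en-US',
    'email': u'[email protected]',  # optional; stored as a ResponseEmail
}

serializer = ResponseSerializer(data=data)
if serializer.is_valid():
    # restore_object() built the Response; save() persists it via save_object(),
    # including the optional ResponseEmail row.
    response = serializer.save()
else:
    errors = serializer.errors       # e.g. {'product': [u'... is not a valid product']}
```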
#### File: feedback/tests/test_models.py
```python
from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.feedback.models import Product, Response
from fjord.feedback.tests import response
from fjord.feedback.utils import compute_grams
from fjord.search.tests import ElasticTestCase
class TestResponseModel(TestCase):
def test_description_truncate_on_save(self):
# Extra 10 characters get lopped off on save.
resp = response(description=('a' * 10010), save=True)
eq_(resp.description, 'a' * 10000)
def test_description_strip_on_save(self):
# Nix leading and trailing whitespace.
resp = response(description=u' \n\tou812\t\n ', save=True)
eq_(resp.description, u'ou812')
def test_url_domain(self):
# Test a "normal domain"
resp = response(url=u'http://foo.example.com.br/blah')
eq_(resp.url_domain, u'example.com.br')
assert isinstance(resp.url_domain, unicode)
# Test a unicode domain
resp = response(
url=u'http://\u30c9\u30e9\u30af\u30a810.jp/dq10_skillpoint.html',
save=True)
eq_(resp.url_domain, u'\u30c9\u30e9\u30af\u30a810.jp')
assert isinstance(resp.url_domain, unicode)
class TestAutoTranslation(TestCase):
def setUp(self):
# Wipe out translation system for all products.
# FIXME - might be better to save the state and restore it in tearDown
# rather than stomp in both cases. But stomping works for now.
Product.objects.update(translation_system=u'')
super(TestAutoTranslation, self).setUp()
def tearDown(self):
# Wipe out translation system for all products.
Product.objects.update(translation_system=u'')
super(TestAutoTranslation, self).tearDown()
def test_auto_translation(self):
prod = Product.uncached.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
resp = response(
locale=u'es',
product=u'firefox',
description=u'hola',
save=True
)
# Fetch it from the db again
resp = Response.uncached.get(id=resp.id)
eq_(resp.translated_description, u'\xabHOLA\xbb')
class TestGenerateTranslationJobs(TestCase):
def setUp(self):
# Wipe out translation system for all products.
# FIXME - might be better to save the state and restore it in tearDown
# rather than stomp in both cases. But stomping works for now.
Product.objects.update(translation_system=u'')
super(TestGenerateTranslationJobs, self).setUp()
def tearDown(self):
# Wipe out translation system for all products.
Product.objects.update(translation_system=u'')
super(TestGenerateTranslationJobs, self).tearDown()
def test_english_no_translation(self):
"""English descriptions should get copied over"""
resp = response(
locale=u'en-US',
description=u'hello',
translated_description=u'',
save=True
)
# No new jobs should be generated
eq_(len(resp.generate_translation_jobs()), 0)
# Re-fetch from the db and make sure the description was copied over
resp = Response.uncached.get(id=resp.id)
eq_(resp.description, resp.translated_description)
def test_english_with_dennis(self):
"""English descriptions should get copied over"""
resp = response(
locale=u'en-US',
product=u'firefox',
description=u'hello',
translated_description=u'',
save=True
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.uncached.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# No new jobs should be generated
eq_(len(resp.generate_translation_jobs()), 0)
# Re-fetch from the db and make sure the description was copied over
resp = Response.uncached.get(id=resp.id)
eq_(resp.description, resp.translated_description)
def test_spanish_no_translation(self):
"""Spanish should not get translated"""
resp = response(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u'',
save=True
)
# No jobs should be translated
eq_(len(resp.generate_translation_jobs()), 0)
# Nothing should be translated
eq_(resp.translated_description, u'')
def test_spanish_with_dennis(self):
"""Spanish should get translated"""
resp = response(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u'',
save=True
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.uncached.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# One job should be generated
jobs = resp.generate_translation_jobs()
eq_(len(jobs), 1)
job = jobs[0]
eq_(job[1:], (u'dennis', u'es', u'description',
u'en-US', 'translated_description'))
eq_(resp.translated_description, u'')
def test_spanish_with_dennis_and_existing_translations(self):
"""Response should pick up existing translation"""
existing_resp = response(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u'DUDE!',
save=True
)
resp = response(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u'',
save=True
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.uncached.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# No jobs should be translated
eq_(len(resp.generate_translation_jobs()), 0)
eq_(resp.translated_description, existing_resp.translated_description)
class TestComputeGrams(ElasticTestCase):
def test_empty(self):
eq_(compute_grams(u''), [])
def test_parsing(self):
# stop words are removed
eq_(compute_grams(u'i me him her'), [])
# capital letters don't matter
eq_(compute_grams(u'I ME HIM HER'), [])
# punctuation nixed
eq_(compute_grams(u'i, me, him, her'), [])
def test_bigrams(self):
# Note: Tokens look weird after being analyzed probably due to
# the stemmer. We could write a bunch of code to "undo" some
# of the excessive stemming, but it's probably an exercise in
# futility. Ergo the tests look a little odd. e.g. "youtub"
# One word a bigram does not make
eq_(compute_grams(u'youtube'), [])
# Two words is the minimum number to create a bigram
eq_(sorted(compute_grams(u'youtube crash')),
['crash youtub'])
# Three words creates two bigrams
eq_(sorted(compute_grams(u'youtube crash flash')),
['crash flash', 'crash youtub'])
# Four words creates three bigrams
eq_(sorted(compute_grams(u'youtube crash flash bridge')),
['bridg flash', 'crash flash', 'crash youtub'])
# Nix duplicate bigrams
eq_(sorted(compute_grams(u'youtube crash youtube flash')),
['crash youtub', 'flash youtub'])
```
#### File: fjord/feedback/views.py
```python
from functools import wraps
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils import translation
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.http import require_POST
from funfactory.urlresolvers import reverse
from mobility.decorators import mobile_template
from rest_framework import generics
from fjord.base.browsers import UNKNOWN
from fjord.base.util import smart_str, translate_country_name
from fjord.feedback import config
from fjord.feedback import models
from fjord.feedback.forms import ResponseForm
from fjord.feedback.utils import actual_ip_plus_desc, clean_url, ratelimit
def happy_redirect(request):
# TODO: Remove this when the addon gets fixed and is pointing to
# the correct urls.
return HttpResponseRedirect(reverse('feedback') + '#happy')
def sad_redirect(request):
# TODO: Remove this when the addon gets fixed and is pointing to
# the correct urls.
return HttpResponseRedirect(reverse('feedback') + '#sad')
@mobile_template('feedback/{mobile/}download_firefox.html')
def download_firefox(request, template):
return render(request, template)
@mobile_template('feedback/{mobile/}thanks.html')
def thanks(request, template):
return render(request, template)
def requires_firefox(func):
"""Redirects to "download firefox" page if not Firefox.
If it isn't a Firefox browser, then we don't want to deal with it.
This is a temporary solution. See bug #848568.
"""
@wraps(func)
def _requires_firefox(request, *args, **kwargs):
# Note: This is sort of a lie. What's going on here is that
# parse_ua only parses Firefox-y browsers. So if it's UNKNOWN
# at this point, then it's not Firefox-y. If parse_ua ever
# changes, then this will cease to be true.
if request.BROWSER.browser == UNKNOWN:
return HttpResponseRedirect(reverse('download-firefox'))
return func(request, *args, **kwargs)
return _requires_firefox
@ratelimit(rulename='doublesubmit_1pm', keyfun=actual_ip_plus_desc, rate='1/m')
@ratelimit(rulename='100ph', rate='100/h')
def _handle_feedback_post(request, locale=None, product=None,
version=None, channel=None):
if getattr(request, 'limited', False):
# If we're throttled, then return the thanks page, but don't
# add the response to the db.
return HttpResponseRedirect(reverse('thanks'))
# Get the form and run is_valid() so it goes through the
# validation and cleaning machinery. We don't really care if it's
# valid, though, since we will take what we got and do the best we
# can with it. Error validation is now in JS.
form = ResponseForm(request.POST)
form.is_valid()
data = form.cleaned_data
description = data.get('description', u'').strip()
if not description:
# If there's no description, then there's nothing to do here,
# so thank the user and move on.
return HttpResponseRedirect(reverse('thanks'))
# Do some data validation of product, channel and version
# coming from the url.
product = models.Product.get_product_map().get(smart_str(product), u'')
# FIXME - validate these better
channel = smart_str(channel).lower()
version = smart_str(version)
# src, then source, then utm_source
source = request.GET.get('src', u'')
if not source:
source = request.GET.get('utm_source', u'')
campaign = request.GET.get('utm_campaign', u'')
# Most platforms aren't different enough between versions to care.
# Windows is.
platform = request.BROWSER.platform
if platform == 'Windows':
platform += ' ' + request.BROWSER.platform_version
opinion = models.Response(
# Data coming from the user
happy=data['happy'],
url=clean_url(data.get('url', u'')),
description=data['description'].strip(),
# Inferred data from user agent
prodchan=_get_prodchan(request, product, channel),
user_agent=request.META.get('HTTP_USER_AGENT', ''),
browser=request.BROWSER.browser,
browser_version=request.BROWSER.browser_version,
platform=platform,
# Pulled from the form data or the url
locale=data.get('locale', locale),
# Data from mobile devices which is probably only
# applicable to mobile devices
manufacturer=data.get('manufacturer', ''),
device=data.get('device', ''),
)
if source:
opinion.source = source[:100]
if campaign:
opinion.campaign = campaign[:100]
if product:
# If we picked up the product from the url, we use url
# bits for everything.
product = product or u''
version = version or u''
channel = channel or u''
elif opinion.browser != UNKNOWN:
# If we didn't pick up a product from the url, then we
# infer as much as we can from the user agent.
product = data.get(
'product', models.Response.infer_product(platform))
version = data.get(
'version', request.BROWSER.browser_version)
# Assume everything we don't know about is stable channel.
channel = u'stable'
else:
product = channel = version = u''
opinion.product = product or u''
opinion.version = version or u''
opinion.channel = channel or u''
opinion.save()
# If there was an email address, save that separately.
if data.get('email_ok') and data.get('email'):
e = models.ResponseEmail(email=data['email'], opinion=opinion)
e.save()
return HttpResponseRedirect(reverse('thanks'))
def _get_prodchan(request, product=None, channel=None):
# FIXME - redo this to handle product/channel
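# Example values this produces, derived from the branches below:
#   Firefox on Windows -> 'firefox.desktop.stable'
#   Firefox on Android -> 'firefox.android.stable'
#   non-Firefox browser on an unrecognized platform -> 'unknown.unknown.stable'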
meta = request.BROWSER
product = ''
platform = ''
channel = 'stable'
if meta.browser == 'Firefox':
product = 'firefox'
else:
product = 'unknown'
if meta.platform == 'Android':
platform = 'android'
elif meta.platform == 'Firefox OS':
platform = 'fxos'
elif product == 'firefox':
platform = 'desktop'
else:
platform = 'unknown'
return '{0}.{1}.{2}'.format(product, platform, channel)
@csrf_protect
def generic_feedback(request, locale=None, product=None, version=None,
channel=None):
"""Generic feedback form for desktop and mobile"""
form = ResponseForm()
if request.method == 'POST':
return _handle_feedback_post(request, locale, product,
version, channel)
return render(request, 'feedback/generic_feedback.html', {
'form': form,
})
@requires_firefox
@csrf_exempt
def firefox_os_stable_feedback(request, locale=None, product=None,
version=None, channel=None):
# Localized country names are in region files in product
# details. We try really hard to use localized country names, so
# we use gettext and if that's not available, use whatever is in
# product details.
countries = [
(code, translate_country_name(translation.get_language(),
code, name, name_l10n))
for code, name, name_l10n in config.FIREFOX_OS_COUNTRIES
]
return render(request, 'feedback/mobile/fxos_feedback.html', {
'countries': countries,
'devices': config.FIREFOX_OS_DEVICES,
})
@csrf_exempt
@require_POST
def android_about_feedback(request, locale=None, product=None,
version=None, channel=None):
"""A view specifically for Firefox for Android.
Firefox for Android has a feedback form built in that generates
POSTS directly to Input, and is always sad or ideas. Since Input no
longer supports idea feedbacks, everything is Sad.
FIXME - measure usage of this and nix it when we can. See bug
#964292.
"""
# Firefox for Android only sends up sad and idea responses, but it
# uses the old `_type` variable from old Input. Tweak the data to do
# what FfA means, not what it says.
# Make `request.POST` mutable.
request.POST = request.POST.copy()
# For _type, 1 is happy, 2 is sad, 3 is idea. We convert that so
# that _type = 1 -> happy = 1 and everything else -> happy = 0.
if request.POST.get('_type') == '1':
happy = 1
else:
happy = 0
request.POST['happy'] = happy
# Note: product, version and channel are always None in this view
# since this is to handle backwards-compatibility. So we don't
# bother passing them along.
# We always return Thanks! now and ignore errors.
return _handle_feedback_post(request, locale)
@csrf_exempt
@never_cache
def feedback_router(request, product=None, version=None, channel=None,
*args, **kwargs):
"""Determine a view to use, and call it.
If product is given, reference `product_routes` to look up a view.
If `product` is not passed, or isn't found in `product_routes`,
assume the user is either a stable desktop Firefox or a stable
mobile Firefox based on the parsed UA, and serve them the
appropriate page. This is to handle the old formname way of doing
things. At some point, we should measure usage of the old
formnames and deprecate them.
This also handles backwards-compatability with the old Firefox for
Android form which can't have a CSRF token.
Note: We never want to cache this view.
"""
if '_type' in request.POST:
# Checks to see if `_type` is in the POST data and if so this
# is coming from Firefox for Android which doesn't know
# anything about csrf tokens. If that's the case, we send it
to a view specifically for FfA. Otherwise we pass it to one
# of the normal views, which enforces CSRF. Also, nix the
# product just in case we're crossing the streams and
# confusing new-style product urls with old-style backwards
# compatability for the Android form.
#
# FIXME: Remove this harebrained monstrosity when we don't need to
# support the method that Firefox for Android currently uses to
# post feedback, which worked with the old input.mozilla.org.
view = android_about_feedback
product = None
else:
if product:
# If they passed in a product and we don't know about it, stop
# here.
if product not in models.Product.get_product_map():
return render(request, 'feedback/unknownproduct.html', {
'product': product
})
if product == 'fxos' or request.BROWSER.browser == 'Firefox OS':
# Firefox OS gets shunted to a different form which has
# different Firefox OS specific questions.
view = firefox_os_stable_feedback
else:
view = generic_feedback
return view(request, request.locale, product, version, channel,
*args, **kwargs)
def cyoa(request):
template = 'feedback/cyoa.html'
products = models.Product.objects.all()
return render(request, template, {
'products': products
})
class PostFeedbackAPI(generics.CreateAPIView):
serializer_class = models.ResponseSerializer
```
#### File: management/commands/esstatus.py
```python
from django.core.management.base import BaseCommand
from fjord.base.util import FakeLogger
from fjord.search.index import es_status_cmd
class Command(BaseCommand):
help = 'Shows elastic search index status.'
def handle(self, *args, **options):
es_status_cmd(log=FakeLogger(self.stdout))
```
#### File: fjord/translations/models.py
```python
from dennis.translator import Translator
_translation_systems = {}
def get_translation_systems():
"""Returns translation systems map
"""
return _translation_systems
def get_translation_system_choices():
"""Returns a tuple of (value, display-name) tuples for Choices field
This inserts a "no choice" choice at the beginning, too, the value of
which is the empty string.
"""
choices = [(key, key) for key in _translation_systems.keys()]
choices.insert(0, (u'', u'None'))
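    # With the 'fake' and 'dennis' systems defined below registered, this
    # yields something like ((u'', u'None'), ('fake', 'fake'), ('dennis', 'dennis'))
    # (the ordering of the registered systems is not guaranteed).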
return tuple(choices)
class TranslationSystemMeta(type):
"""Metaclass to register TranslationSystem subclasses"""
def __new__(cls, name, bases, attrs):
new_cls = super(TranslationSystemMeta, cls).__new__(
cls, name, bases, attrs)
if new_cls.name:
_translation_systems[new_cls.name] = new_cls
return new_cls
class TranslationSystem(object):
"""Translation system base class
All translation system plugins should subclass this. They should
additionally do the following:
1. set the name property to something unique
2. implement translate method
See FakeTranslator and DennisTranslator for sample
implementations.
"""
__metaclass__ = TranslationSystemMeta
# Name of this translation system
name = ''
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
"""Implement this to translation fields on an instance
This translates in-place.
If this is an asynchronous system, then this can either push
the text to be translated now or queue the text to be pushed
later in a batch of things to be translated.
"""
raise NotImplementedError()
def push_translations(self):
"""Implement this to do any work required to push translations
This is for asynchronous systems that take a batch of translations,
perform some work, and then return results some time later.
Print any status text to stdout.
"""
raise NotImplementedError()
def pull_translations(self):
"""Implement this to do any work required to pull translations
This is for asynchronous systems that take a batch of translations,
perform some work, and then return results some time later.
Print any status text to stdout.
"""
raise NotImplementedError()
# ---------------------------------------------------------
# Fake translation system
# ---------------------------------------------------------
class FakeTranslator(TranslationSystem):
"""Translates by uppercasing text"""
name = 'fake'
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
setattr(instance, dst_field, getattr(instance, src_field).upper())
instance.save()
def pull_translations(self):
# This is a no-op for testing purposes.
pass
def push_translations(self):
# This is a no-op for testing purposes.
pass
# ---------------------------------------------------------
# Dennis translation system
# ---------------------------------------------------------
class DennisTranslator(TranslationSystem):
"""Translates using shouty and anglequote"""
name = 'dennis'
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
text = getattr(instance, src_field)
if text:
pipeline = ['shouty', 'anglequote']
translated = Translator([], pipeline).translate_string(text)
setattr(instance, dst_field, translated)
instance.save()
```
#### File: fjord/translations/tasks.py
```python
from django.db.models.signals import post_save
from celery import task
from .utils import (
compose_key,
decompose_key,
translate
)
@task()
def translate_task(instance_key):
"""Celery task to kick off translation
:arg instance_key: The key for the instance we're translating
"""
instance = decompose_key(instance_key)
jobs = instance.generate_translation_jobs()
if not jobs:
return
# Handle each job
for key, system, src_lang, src_field, dst_lang, dst_field in jobs:
instance = decompose_key(key)
translate(instance, system, src_lang, src_field, dst_lang, dst_field)
def translate_handler(sender, instance=None, created=False, **kwargs):
"""Handles possible translation
This asks the instance to generate translation jobs. If there
are translation jobs to do, then this throws them into a celery
task.
"""
if not created or instance is None:
return
translate_task.delay(compose_key(instance))
def register_auto_translation(model_cls):
"""Decorator that Registers model class for automatic translation
The model class has to have a ``generate_translation_jobs`` method
that takes an instance and generates a list of translation jobs
that need to be performed.
A translation job is a tuple in the form::
(key, system, src_lang, src_field, dst_lang, dst_field)
The key is some string that uniquely identifies the instance so
that we can save the data back to the instance later.
"""
uid = str(model_cls) + 'translation'
post_save.connect(translate_handler, model_cls, dispatch_uid=uid)
return model_cls
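# Illustrative sketch (not part of this module) of a model opting in to
# automatic translation; the class and field names below are hypothetical.
#
# @register_auto_translation
# class Response(models.Model):
#     description = models.TextField()
#     translated_description = models.TextField(blank=True)
#
#     def generate_translation_jobs(self):
#         return [
#             (compose_key(self), 'fake',
#              self.locale, 'description', u'en', 'translated_description')
#         ]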
```
#### File: translations/tests/test__utils.py
```python
from unittest import TestCase
from nose.tools import eq_
from . import fakeinstance
from ..utils import compose_key, decompose_key, translate
_foo_cache = {}
class FakeModelManager(object):
def get(self, **kwargs):
return _foo_cache[kwargs['id']]
class FakeModel(object):
def __init__(self, id_):
self.id = id_
_foo_cache[id_] = self
objects = FakeModelManager()
class TestKeys(TestCase):
def tearDown(self):
_foo_cache.clear()
def test_compose_key(self):
foo = FakeModel(15)
eq_(compose_key(foo), 'fjord.translations.tests.test__utils:FakeModel:15')
def test_decompose_key(self):
foo = FakeModel(15)
key = 'fjord.translations.tests.test__utils:FakeModel:15'
eq_(decompose_key(key), foo)
class TestTranslate(TestCase):
def test_translate_fake(self):
obj = fakeinstance(
fields={'desc': 'trans_desc'},
translate_with=lambda x: 'fake',
desc=u'This is a test string'
)
eq_(getattr(obj, 'trans_desc', None), None)
translate(obj, 'fake', 'br', 'desc', 'en-US', 'trans_desc')
eq_(getattr(obj, 'trans_desc', None), u'THIS IS A TEST STRING')
def test_translate_dennis(self):
obj = fakeinstance(
fields={'desc': 'trans_desc'},
translate_with=lambda x: 'dennis',
desc=u'This is a test string'
)
eq_(getattr(obj, 'trans_desc', None), None)
translate(obj, 'dennis', 'br', 'desc', 'en-US', 'trans_desc')
eq_(getattr(obj, 'trans_desc', None),
u'\xabTHIS IS A TEST STRING\xbb')
``` |
{
"source": "joshuashort/doodba-copier-template",
"score": 2
} |
#### File: joshuashort/doodba-copier-template/migrations.py
```python
import shutil
from pathlib import Path
from invoke import task
@task
def from_doodba_scaffolding_to_copier(c):
print("Removing remaining garbage from doodba-scaffolding.")
shutil.rmtree(Path(".vscode", "doodba"), ignore_errors=True)
garbage = (
Path(".travis.yml"),
Path(".vscode", "doodbasetup.py"),
Path("odoo", "custom", "src", "private", ".empty"),
)
for path in garbage:
try:
path.unlink()
except FileNotFoundError:
pass
# When using Copier >= 3.0.5, this file didn't get properly migrated
editorconfig_file = Path(".editorconfig")
editorconfig_contents = editorconfig_file.read_text()
editorconfig_contents = editorconfig_contents.replace(
"[*.yml]", "[*.{code-snippets,code-workspace,json,md,yaml,yml}{,.jinja}]", 1
)
editorconfig_file.write_text(editorconfig_contents)
@task
def remove_odoo_auto_folder(c):
"""This folder makes no more sense for us.
The `invoke develop` task now handles its creation, which is done with
host user UID and GID to avoid problems.
There's no need to have it in our code tree anymore.
"""
shutil.rmtree(Path("odoo", "auto"), ignore_errors=True)
```
#### File: doodba-copier-template/tests/test_nitpicking.py
```python
import json
from pathlib import Path
from textwrap import dedent
import pytest
import yaml
from copier.main import copy
from plumbum import local
from plumbum.cmd import diff, git, invoke, pre_commit
WHITESPACE_PREFIXED_LICENSES = (
"AGPL-3.0-or-later",
"Apache-2.0",
"LGPL-3.0-or-later",
)
@pytest.mark.parametrize("project_license", WHITESPACE_PREFIXED_LICENSES)
def test_license_whitespace_prefix(
tmp_path: Path, cloned_template: Path, project_license
):
dst = tmp_path / "dst"
copy(
str(cloned_template),
str(dst),
vcs_ref="test",
force=True,
data={"project_license": project_license},
)
assert (dst / "LICENSE").read_text().startswith(" ")
def test_no_vscode_in_private(tmp_path: Path):
"""Make sure .vscode folders are git-ignored in private folder."""
copy(".", str(tmp_path), vcs_ref="HEAD", force=True)
with local.cwd(tmp_path):
git("add", ".")
git("commit", "--no-verify", "-am", "hello world")
vscode = tmp_path / "odoo" / "custom" / "src" / "private" / ".vscode"
vscode.mkdir()
(vscode / "something").touch()
assert not git("status", "--porcelain")
def test_mqt_configs_synced(
tmp_path: Path, cloned_template: Path, supported_odoo_version: float
):
"""Make sure configs from MQT are in sync."""
copy(
str(cloned_template),
str(tmp_path),
vcs_ref="test",
force=True,
data={"odoo_version": supported_odoo_version},
)
mqt = Path("vendor", "maintainer-quality-tools", "sample_files", "pre-commit-13.0")
good_diffs = Path("tests", "samples", "mqt-diffs")
for conf in (".pylintrc", ".pylintrc-mandatory"):
good = (good_diffs / f"{conf}.diff").read_text()
tested = diff(tmp_path / conf, mqt / conf, retcode=1)
assert good == tested
def test_gitlab_badges(tmp_path: Path):
"""Gitlab badges are properly formatted in README."""
copy(
".",
str(tmp_path),
vcs_ref="HEAD",
force=True,
data={"gitlab_url": "https://gitlab.example.com/Tecnativa/my-badged-odoo"},
)
expected_badges = dedent(
"""
[](https://gitlab.example.com/Tecnativa/my-badged-odoo/commits/13.0)
[](https://gitlab.example.com/Tecnativa/my-badged-odoo/commits/13.0)
"""
)
assert expected_badges.strip() in (tmp_path / "README.md").read_text()
def test_alt_domains_rules(tmp_path: Path, cloned_template: Path):
"""Make sure alt domains redirections are good for Traefik."""
copy(
str(cloned_template),
str(tmp_path),
vcs_ref="HEAD",
force=True,
data={
"domain_prod": "www.example.com",
"domain_prod_alternatives": [
"old.example.com",
"example.com",
"example.org",
"www.example.org",
],
},
)
with local.cwd(tmp_path):
git("add", "prod.yaml")
pre_commit("run", "-a", retcode=1)
expected = Path("tests", "samples", "alt-domains", "prod.yaml").read_text()
generated = (tmp_path / "prod.yaml").read_text()
generated_scalar = yaml.safe_load(generated)
# Any of these characters in a traefik label is an error almost for sure
error_chars = ("\n", "'", '"')
for service in generated_scalar["services"].values():
for key, value in service.get("labels", {}).items():
if not key.startswith("traefik."):
continue
for char in error_chars:
assert char not in key
assert char not in str(value)
assert generated == expected
def test_cidr_whitelist_rules(tmp_path: Path, cloned_template: Path):
"""Make sure CIDR whitelist redirections are good for Traefik."""
copy(
str(cloned_template),
str(tmp_path),
vcs_ref="HEAD",
force=True,
data={"cidr_whitelist": ["192.168.3.11/24", "456.456.456.456"]},
)
with local.cwd(tmp_path):
git("add", "prod.yaml", "test.yaml")
pre_commit("run", "-a", retcode=1)
expected = Path("tests", "samples", "cidr-whitelist")
assert (tmp_path / "prod.yaml").read_text() == (expected / "prod.yaml").read_text()
assert (tmp_path / "test.yaml").read_text() == (expected / "test.yaml").read_text()
def test_code_workspace_file(tmp_path: Path, cloned_template: Path):
"""The file is generated as expected."""
copy(
str(cloned_template), str(tmp_path), vcs_ref="HEAD", force=True,
)
assert (tmp_path / f"doodba.{tmp_path.name}.code-workspace").is_file()
(tmp_path / f"doodba.{tmp_path.name}.code-workspace").rename(
tmp_path / "doodba.other1.code-workspace"
)
with local.cwd(tmp_path):
invoke("write-code-workspace-file")
assert (tmp_path / "doodba.other1.code-workspace").is_file()
assert not (tmp_path / f"doodba.{tmp_path.name}.code-workspace").is_file()
# Do a stupid and dirty git clone to check it's sorted fine
git("clone", cloned_template, Path("odoo", "custom", "src", "zzz"))
invoke("write-code-workspace-file", "-c", "doodba.other2.code-workspace")
assert not (tmp_path / f"doodba.{tmp_path.name}.code-workspace").is_file()
assert (tmp_path / "doodba.other1.code-workspace").is_file()
assert (tmp_path / "doodba.other2.code-workspace").is_file()
with (tmp_path / "doodba.other2.code-workspace").open() as fp:
workspace_definition = json.load(fp)
assert workspace_definition == {
"folders": [
{"path": "odoo/custom/src/zzz"},
{"path": "odoo/custom/src/private"},
{"path": "."},
]
}
def test_dotdocker_ignore_content(tmp_path: Path, cloned_template: Path):
"""Everything inside .docker must be ignored."""
copy(
str(cloned_template), str(tmp_path), vcs_ref="HEAD", force=True,
)
with local.cwd(tmp_path):
git("add", ".")
git("commit", "-am", "hello", retcode=1)
git("commit", "-am", "hello")
(tmp_path / ".docker" / "some-file").touch()
assert not git("status", "--porcelain")
def test_template_update_badge(tmp_path: Path, cloned_template: Path):
"""Test that the template update badge is properly formatted."""
tag = "v99999.0.0-99999-bye-bye"
with local.cwd(cloned_template):
git("tag", "--delete", "test")
git("tag", "--force", tag)
copy(str(cloned_template), str(tmp_path), vcs_ref=tag, force=True)
expected = "[](https://github.com/Tecnativa/doodba-copier-template/tree/v99999.0.0-99999-bye-bye)"
assert expected in (tmp_path / "README.md").read_text()
def test_pre_commit_config(
tmp_path: Path, cloned_template: Path, supported_odoo_version: float
):
"""Test that .pre-commit-config.yaml has some specific settings fine."""
copy(
str(cloned_template),
str(tmp_path),
vcs_ref="HEAD",
force=True,
data={"odoo_version": supported_odoo_version},
)
pre_commit_config = yaml.safe_load(
(tmp_path / ".pre-commit-config.yaml").read_text()
)
is_py3 = supported_odoo_version >= 11
found = 0
should_find = 1
for repo in pre_commit_config["repos"]:
if repo["repo"] == "https://github.com/pre-commit/pre-commit-hooks":
found += 1
if is_py3:
assert {"id": "debug-statements"} in repo["hooks"]
assert {"id": "fix-encoding-pragma", "args": ["--remove"]} in repo[
"hooks"
]
else:
assert {"id": "debug-statements"} not in repo["hooks"]
assert {"id": "fix-encoding-pragma", "args": ["--remove"]} not in repo[
"hooks"
]
assert {"id": "fix-encoding-pragma"} in repo["hooks"]
assert found == should_find
def test_no_python_write_bytecode_in_devel(
tmp_path: Path, cloned_template: Path, supported_odoo_version: float
):
copy(
str(cloned_template),
str(tmp_path),
vcs_ref="HEAD",
force=True,
data={"odoo_version": supported_odoo_version},
)
devel = yaml.safe_load((tmp_path / "devel.yaml").read_text())
assert devel["services"]["odoo"]["environment"]["PYTHONDONTWRITEBYTECODE"] == 1
```
#### File: doodba-copier-template/tests/test_tasks_downstream.py
```python
import os
from pathlib import Path
import pytest
from copier import copy
from plumbum import ProcessExecutionError, local
from plumbum.cmd import docker_compose, invoke
def _install_status(module, dbname="devel"):
return docker_compose(
"run",
"--rm",
"-e",
"LOG_LEVEL=WARNING",
"-e",
f"PGDATABASE={dbname}",
"odoo",
"psql",
"-tc",
f"select state from ir_module_module where name='{module}'",
).strip()
@pytest.mark.skipif(
os.environ.get("DOCKER_TEST") != "1", reason="Missing DOCKER_TEST=1 env variable"
)
def test_resetdb(tmp_path: Path, cloned_template: Path, supported_odoo_version: float):
"""Test the dropdb task.
On this test flow, other downsream tasks are also tested:
- img-build
- git-aggregate
- stop --purge
"""
try:
with local.cwd(tmp_path):
copy(
src_path=str(cloned_template),
vcs_ref="HEAD",
force=True,
data={"odoo_version": supported_odoo_version},
)
# Imagine the user is in the src subfolder for these tasks
with local.cwd(tmp_path / "odoo" / "custom" / "src"):
invoke("img-build")
invoke("git-aggregate")
# No ir_module_module table exists yet
with pytest.raises(ProcessExecutionError):
_install_status("base")
# Imagine the user is in the odoo subrepo for these tasks
with local.cwd(tmp_path / "odoo" / "custom" / "src" / "odoo"):
# This should install just "base
stdout = invoke("resetdb")
assert "Creating database cache" in stdout
assert "from template devel" in stdout
assert _install_status("base") == "installed"
assert _install_status("purchase") == "uninstalled"
assert _install_status("sale") == "uninstalled"
# Install "purchase"
stdout = invoke("resetdb", "-m", "purchase")
assert "Creating database cache" in stdout
assert "from template devel" in stdout
assert _install_status("base") == "installed"
assert _install_status("purchase") == "installed"
assert _install_status("sale") == "uninstalled"
# Install "sale" in a separate database
stdout = invoke("resetdb", "-m", "sale", "-d", "sale_only")
assert "Creating database cache" in stdout
assert "from template sale_only" in stdout
assert _install_status("base") == "installed"
assert _install_status("purchase") == "installed"
assert _install_status("sale") == "uninstalled"
assert _install_status("base", "sale_only") == "installed"
assert _install_status("purchase", "sale_only") == "uninstalled"
assert _install_status("sale", "sale_only") == "installed"
# Install "sale" in main database
stdout = invoke("resetdb", "-m", "sale")
assert "Creating database devel from template cache" in stdout
assert "Found matching database template" in stdout
assert _install_status("base") == "installed"
assert _install_status("purchase") == "uninstalled"
assert _install_status("sale") == "installed"
finally:
# Imagine the user is in the odoo subrepo for this command
with local.cwd(tmp_path / "odoo" / "custom" / "src" / "odoo"):
invoke("stop", "--purge")
``` |
{
"source": "joshuashzha/snewpy",
"score": 2
} |
#### File: snewpy/tests/test_xforms.py
```python
from snewpy.flavor_transformation \
import MassHierarchy, MixingParameters, \
NoTransformation, AdiabaticMSW, NonAdiabaticMSW, \
TwoFlavorDecoherence, ThreeFlavorDecoherence, NeutrinoDecay
from astropy import units as u
from astropy import constants as c
import numpy as np
from numpy import sin, cos, exp, abs
# Dummy time and energy arrays, with proper dimensions.
t = np.arange(10) * u.s
E = np.linspace(1,100,21) * u.MeV
# Dummy mixing angles.
theta12 = 33 * u.deg
theta13 = 9 * u.deg
theta23 = 49 * u.deg
# Dummy neutrino decay parameters; see arXiv:1910.01127.
mass3 = 0.5 * u.eV/c.c**2
lifetime = 1 * u.day
distance = 10 * u.kpc
def test_noxform():
# No transformations.
xform = NoTransformation()
assert(xform.prob_ee(t, E) == 1)
assert(xform.prob_ex(t, E) == 0)
assert(xform.prob_xx(t, E) == 1)
assert(xform.prob_xe(t, E) == 0)
assert(xform.prob_eebar(t, E) == 1)
assert(xform.prob_exbar(t, E) == 0)
assert(xform.prob_xxbar(t, E) == 1)
assert(xform.prob_xebar(t, E) == 0)
def test_adiabaticmsw_nmo():
# Adiabatic MSW: normal mass ordering; override default mixing angles.
xform = AdiabaticMSW(mix_angles=(theta12, theta13, theta23), mh=MassHierarchy.NORMAL)
assert(xform.prob_ee(t, E) == sin(theta13)**2)
assert(xform.prob_ex(t, E) == 1. - sin(theta13)**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + sin(theta13)**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - sin(theta13)**2))
assert(xform.prob_eebar(t, E) == (cos(theta12)*cos(theta13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(theta12)*cos(theta13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(theta12)*cos(theta13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(theta12)*cos(theta13))**2))
# Test interface using default mixing angles defined in the submodule.
mixpars = MixingParameters(MassHierarchy.NORMAL)
th12, th13, th23 = mixpars.get_mixing_angles()
xform = AdiabaticMSW()
assert(xform.prob_ee(t, E) == sin(th13)**2)
assert(xform.prob_ex(t, E) == 1. - sin(th13)**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + sin(th13)**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - sin(th13)**2))
assert(xform.prob_eebar(t, E) == (cos(th12)*cos(th13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(th12)*cos(th13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(th12)*cos(th13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(th12)*cos(th13))**2))
def test_adiabaticmsw_imo():
# Adiabatic MSW: inverted mass ordering; override default mixing angles.
xform = AdiabaticMSW(mix_angles=(theta12, theta13, theta23), mh=MassHierarchy.INVERTED)
assert(xform.prob_ee(t, E) == (sin(theta12)*cos(theta13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(theta12)*cos(theta13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(theta12)*cos(theta13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(theta12)*cos(theta13))**2))
assert(xform.prob_eebar(t, E) == sin(theta13)**2)
assert(xform.prob_exbar(t, E) == 1. - sin(theta13)**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + sin(theta13)**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - sin(theta13)**2))
# Test interface using default mixing angles defined in the submodule.
mixpars = MixingParameters(MassHierarchy.INVERTED)
th12, th13, th23 = mixpars.get_mixing_angles()
xform = AdiabaticMSW(mh=MassHierarchy.INVERTED)
assert(xform.prob_ee(t, E) == (sin(th12)*cos(th13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(th12)*cos(th13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(th12)*cos(th13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(th12)*cos(th13))**2))
assert(xform.prob_eebar(t, E) == sin(th13)**2)
assert(xform.prob_exbar(t, E) == 1. - sin(th13)**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + sin(th13)**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - sin(th13)**2))
def test_nonadiabaticmsw_nmo():
    # Non-adiabatic MSW: normal mass ordering; override the default mixing angles.
xform = NonAdiabaticMSW(mix_angles=(theta12, theta13, theta23), mh=MassHierarchy.NORMAL)
assert(xform.prob_ee(t, E) == (sin(theta12)*cos(theta13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(theta12)*cos(theta13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(theta12)*cos(theta13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(theta12)*cos(theta13))**2))
assert(xform.prob_eebar(t, E) == (cos(theta12)*cos(theta13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(theta12)*cos(theta13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(theta12)*cos(theta13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(theta12)*cos(theta13))**2))
# Test interface using default mixing angles defined in the submodule.
mixpars = MixingParameters(MassHierarchy.NORMAL)
th12, th13, th23 = mixpars.get_mixing_angles()
xform = NonAdiabaticMSW()
assert(xform.prob_ee(t, E) == (sin(th12)*cos(th13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(th12)*cos(th13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(th12)*cos(th13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(th12)*cos(th13))**2))
assert(xform.prob_eebar(t, E) == (cos(th12)*cos(th13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(th12)*cos(th13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(th12)*cos(th13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(th12)*cos(th13))**2))
def test_nonadiabaticmsw_imo():
    # Non-adiabatic MSW: inverted mass ordering; override default mixing angles.
    xform = NonAdiabaticMSW(mix_angles=(theta12, theta13, theta23), mh=MassHierarchy.INVERTED)
assert(xform.prob_ee(t, E) == (sin(theta12)*cos(theta13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(theta12)*cos(theta13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(theta12)*cos(theta13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(theta12)*cos(theta13))**2))
assert(xform.prob_eebar(t, E) == (cos(theta12)*cos(theta13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(theta12)*cos(theta13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(theta12)*cos(theta13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(theta12)*cos(theta13))**2))
# Test interface using default mixing angles defined in the submodule.
mixpars = MixingParameters(MassHierarchy.INVERTED)
th12, th13, th23 = mixpars.get_mixing_angles()
xform = NonAdiabaticMSW(mh=MassHierarchy.INVERTED)
assert(xform.prob_ee(t, E) == (sin(th12)*cos(th13))**2)
assert(xform.prob_ex(t, E) == 1. - (sin(th12)*cos(th13))**2)
assert(xform.prob_xx(t, E) == 0.5*(1. + (sin(th12)*cos(th13))**2))
assert(xform.prob_xe(t, E) == 0.5*(1. - (sin(th12)*cos(th13))**2))
assert(xform.prob_eebar(t, E) == (cos(th12)*cos(th13))**2)
assert(xform.prob_exbar(t, E) == 1. - (cos(th12)*cos(th13))**2)
assert(xform.prob_xxbar(t, E) == 0.5*(1. + (cos(th12)*cos(th13))**2))
assert(xform.prob_xebar(t, E) == 0.5*(1. - (cos(th12)*cos(th13))**2))
def test_2fd():
# Two-flavor decoherence.
xform = TwoFlavorDecoherence()
assert(xform.prob_ee(t, E) == 0.5)
assert(xform.prob_ex(t, E) == 0.5)
assert(xform.prob_xx(t, E) == 0.75)
assert(xform.prob_xe(t, E) == 0.25)
assert(xform.prob_eebar(t, E) == 0.5)
assert(xform.prob_exbar(t, E) == 0.5)
assert(xform.prob_xxbar(t, E) == 0.75)
assert(xform.prob_xebar(t, E) == 0.25)
def test_3fd():
# Three-flavor decoherence.
xform = ThreeFlavorDecoherence()
assert(xform.prob_ee(t, E) == 1./3)
assert(abs(xform.prob_ex(t, E) - 2./3) < 1e-12)
assert(abs(xform.prob_xx(t, E) - 2./3) < 1e-12)
assert(abs(xform.prob_xe(t, E) - 1./3) < 1e-12)
assert(xform.prob_eebar(t, E) == 1./3)
assert(abs(xform.prob_exbar(t, E) - 2./3) < 1e-12)
assert(abs(xform.prob_xxbar(t, E) - 2./3) < 1e-12)
assert(abs(xform.prob_xebar(t, E) - 1./3) < 1e-12)
def test_nudecay_nmo():
# Neutrino decay with NMO, overriding the default mixing angles.
xform = NeutrinoDecay(mix_angles=(theta12, theta13, theta23), mass=mass3, tau=lifetime, dist=distance, mh=MassHierarchy.NORMAL)
# Test computation of the decay length.
_E = 10*u.MeV
assert(xform.gamma(_E) == mass3*c.c / (_E*lifetime))
De1 = (cos(theta12) * cos(theta13))**2
De2 = (sin(theta12) * cos(theta13))**2
De3 = sin(theta13)**2
# Check transition probabilities.
prob_ee = np.asarray([De1*(1.-exp(-xform.gamma(_E)*distance)) + De3*exp(-xform.gamma(_E)*distance) for _E in E])
assert(np.array_equal(xform.prob_ee(t, E), prob_ee))
assert(xform.prob_ex(t, E) == De1 + De3)
assert(xform.prob_xx(t, E) == 1 - 0.5*(De1 + De3))
assert(np.array_equal(xform.prob_xe(t, E), 0.5*(1 - prob_ee)))
prob_exbar = np.asarray([De1*(1.-exp(-xform.gamma(_E)*distance)) + De2 + De3*exp(-xform.gamma(_E)*distance) for _E in E])
assert(xform.prob_eebar(t, E) == De3)
assert(np.array_equal(xform.prob_exbar(t, E), prob_exbar))
assert(np.array_equal(xform.prob_xxbar(t, E), 1. - 0.5*prob_exbar))
assert(xform.prob_xebar(t, E) == 0.5*(1. - De3))
def test_nudecay_nmo_default_mixing():
# Test interface using default mixing angles defined in the submodule.
xform = NeutrinoDecay(mass=mass3, tau=lifetime, dist=distance)
# Check transition probabilities (normal hierarchy is default).
mixpars = MixingParameters()
th12, th13, th23 = mixpars.get_mixing_angles()
De1 = (cos(th12) * cos(th13))**2
De2 = (sin(th12) * cos(th13))**2
De3 = sin(th13)**2
prob_ee = np.asarray([De1*(1.-exp(-xform.gamma(_E)*distance)) + De3*exp(-xform.gamma(_E)*distance) for _E in E])
assert(np.array_equal(xform.prob_ee(t, E), prob_ee))
assert(xform.prob_ex(t, E) == De1 + De3)
assert(xform.prob_xx(t, E) == 1 - 0.5*(De1 + De3))
assert(np.array_equal(xform.prob_xe(t, E), 0.5*(1 - prob_ee)))
prob_exbar = np.asarray([De1*(1.-exp(-xform.gamma(_E)*distance)) + De2 + De3*exp(-xform.gamma(_E)*distance) for _E in E])
assert(xform.prob_eebar(t, E) == De3)
assert(np.array_equal(xform.prob_exbar(t, E), prob_exbar))
assert(np.array_equal(xform.prob_xxbar(t, E), 1. - 0.5*prob_exbar))
assert(xform.prob_xebar(t, E) == 0.5*(1. - De3))
def test_nudecay_imo():
# Neutrino decay with IMO, overriding the default mixing angles.
xform = NeutrinoDecay(mix_angles=(theta12, theta13, theta23), mass=mass3, tau=lifetime, dist=distance, mh=MassHierarchy.INVERTED)
De1 = (cos(theta12) * cos(theta13))**2
De2 = (sin(theta12) * cos(theta13))**2
De3 = sin(theta13)**2
# Check transition probabilities.
prob_ee = np.asarray([De2*exp(-xform.gamma(_E)*distance) +
De3*(1.-exp(-xform.gamma(_E)*distance)) for _E in E])
assert(np.array_equal(xform.prob_ee(t, E), prob_ee))
assert(xform.prob_ex(t, E) == De1 + De2)
assert(xform.prob_xx(t, E) == 1 - 0.5*(De1 + De2))
assert(np.array_equal(xform.prob_xe(t, E), 0.5*(1 - prob_ee)))
prob_exbar = np.asarray([De1 + De2*np.exp(-xform.gamma(_E)*distance) +
De3*(1-np.exp(-xform.gamma(_E)*distance)) for _E in E])
assert(xform.prob_eebar(t, E) == De3)
assert(np.array_equal(xform.prob_exbar(t, E), prob_exbar))
assert(np.array_equal(xform.prob_xxbar(t, E), 1. - 0.5*prob_exbar))
assert(xform.prob_xebar(t, E) == 0.5*(1. - De3))
def test_nudecay_imo_default_mixing():
# Test interface using default mixing angles defined in the submodule.
xform = NeutrinoDecay(mass=mass3, tau=lifetime, dist=distance, mh=MassHierarchy.INVERTED)
# Check transition probabilities.
mixpars = MixingParameters(MassHierarchy.INVERTED)
th12, th13, th23 = mixpars.get_mixing_angles()
De1 = (cos(th12) * cos(th13))**2
De2 = (sin(th12) * cos(th13))**2
De3 = sin(th13)**2
prob_ee = np.asarray([De2*exp(-xform.gamma(_E)*distance) +
De3*(1.-exp(-xform.gamma(_E)*distance)) for _E in E])
assert(np.array_equal(xform.prob_ee(t, E), prob_ee))
assert(xform.prob_ex(t, E) == De1 + De2)
assert(xform.prob_xx(t, E) == 1 - 0.5*(De1 + De2))
assert(np.array_equal(xform.prob_xe(t, E), 0.5*(1 - prob_ee)))
prob_exbar = np.asarray([De1 + De2*np.exp(-xform.gamma(_E)*distance) +
De3*(1-np.exp(-xform.gamma(_E)*distance)) for _E in E])
assert(xform.prob_eebar(t, E) == De3)
assert(np.array_equal(xform.prob_exbar(t, E), prob_exbar))
assert(np.array_equal(xform.prob_xxbar(t, E), 1. - 0.5*prob_exbar))
assert(xform.prob_xebar(t, E) == 0.5*(1. - De3))
``` |
{
"source": "JoshuaSimon/Machine-Learning",
"score": 4
} |
#### File: Machine-Learning/Search Algorithms/bfs_dfs.py
```python
from data import init_routes, create_child_node_list
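# Note: `init_routes` and `create_child_node_list` live in a separate `data`
# module that is not shown here. From their use below, `init_routes()` is
# assumed to return a mapping of city -> reachable neighbour cities, and
# `create_child_node_list(nodes, data)` is assumed to return the set of all
# child nodes of the given nodes.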
def bfs(target, node_list, data):
for node in node_list:
if node == target:
print("Solution found.")
return
new_nodes = create_child_node_list(node_list, data)
print("Layer:", new_nodes)
if new_nodes:
bfs(target, new_nodes, data)
else:
print("There is no solution.")
def dfs(target, node, data, seen_nodes=None):
    # Avoid the mutable default argument pitfall: a shared default list
    # would leak visited nodes between separate dfs() calls.
    if seen_nodes is None:
        seen_nodes = []
seen_nodes.append(node)
if node == target:
print("Solution found.")
return True
new_nodes = create_child_node_list([node], data)
new_nodes = list(new_nodes - set(seen_nodes))
print("Layer:", new_nodes)
while new_nodes:
result = dfs(target, new_nodes[0], data, seen_nodes)
if result:
print("Solution found.")
return True
new_nodes = new_nodes[1:]
print("There is no solution.")
return False
if __name__ == "__main__":
# Get the test data.
routes = init_routes()
# The city you want to start and end at.
start = "Linz"
target = "Ulm"
bfs(target, [start], routes)
dfs(target, start, routes)
```
#### File: Machine-Learning/Simple Perceptron Model/perceptron_example.py
```python
import matplotlib.pyplot as plt
import numpy as np
import unittest
class Perceptron_Model_Test(unittest.TestCase):
def test_prediction_1(self):
x_1 = np.array([6,7,8,9,8,8,9,9])
x_2 = np.array([1,3,2,0,4,6,2,5])
x_class = np.array([0,0,0,0,1,1,1,1])
training_inputs = []
for x1, x2 in zip(x_1, x_2):
training_inputs.append(np.array([x1,x2]))
# Initialize and train the model.
perceptron = Perceptron(2, 1000)
perceptron.train(training_inputs, x_class)
inputs = np.array([7.5, 0.7])
prediction = perceptron.predict(inputs)
self.assertEqual(prediction, 0, 'Should be class 0.')
inputs = np.array([6, 6])
prediction = perceptron.predict(inputs)
self.assertEqual(prediction, 1, 'Should be class 1.')
inputs = np.array([0, 0])
prediction = perceptron.predict(inputs)
self.assertEqual(prediction, 0, 'Should be class 0.')
inputs = np.array([-5.5, -10.7])
prediction = perceptron.predict(inputs)
self.assertEqual(prediction, 0, 'Should be class 0.')
class Perceptron:
def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):
self.threshold = threshold
self.learning_rate = learning_rate
self.weights = np.zeros(no_of_inputs + 1)
def predict(self, inputs):
summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
if summation > 0:
activation = 1
else:
activation = 0
return activation
def train(self, training_inputs, labels):
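        # Classic perceptron learning rule: after each prediction, nudge the
        # weights by learning_rate * (label - prediction) * inputs and the
        # bias term (weights[0]) by learning_rate * (label - prediction).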
for _ in range(self.threshold):
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
self.weights[1:] += self.learning_rate * (label - prediction) * inputs
self.weights[0] += self.learning_rate * (label - prediction)
def get_weights(self):
return self.weights
if __name__ == "__main__":
#unittest.main()
# Example data.
x_1 = np.array([6,7,8,9,8,8,9,9])
x_2 = np.array([1,3,2,0,4,6,2,5])
x_class = np.array([0,0,0,0,1,1,1,1])
training_inputs = []
for x1, x2 in zip(x_1, x_2):
training_inputs.append(np.array([x1,x2]))
# Initialize and train the model.
perceptron = Perceptron(2, 1000)
perceptron.train(training_inputs, x_class)
# Calculated final weights and linear separator.
# Hyperplane for linear separation: w0 + w1 * x1 + w2 * x2 = 0
# ==> x2 = (-w0 - w1 * x1) / w2
weights = perceptron.get_weights()
bias = weights[0]
weights = weights[1:]
slope = -weights[0] / weights[1]
intercept = -bias / weights[1]
    # Plot the data to verify that the data points
    # are linearly separable.
plt.plot(x_1[0:4], x_2[0:4], 'o', label='Class 0')
plt.plot(x_1[4:], x_2[4:], 'o', label='Class 1')
plt.axline((0, intercept), slope=slope, linestyle='--', color='r', label='Separator')
plt.xlim(5, 10)
plt.ylim(-1, 10)
plt.legend()
plt.show()
``` |
{
"source": "JoshuaSimon/N-Body-Problem",
"score": 4
} |
#### File: N-Body-Problem/Python/old_N_Body_Problem.py
```python
from math import sqrt
import matplotlib.pyplot as plt
import operator
from functools import reduce
from itertools import cycle
import random
class Vector():
""" n-dimensional Vector """
def __init__(self, *components):
self._components = list(components)
def __str__(self):
return f"Vector{self._components}"
__repr__ = __str__
def two_vector_elementwise(self, other, func):
if len(self) != len(other):
raise ValueError("Dimensions of vectors are different")
return Vector(*[func(s, o) for (s, o) in zip(self._components, other._components)])
def elementwise(self, func):
return Vector(*[func(x) for x in self._components])
def __sub__(self, other):
return self.two_vector_elementwise(other, operator.sub)
def __add__(self, other):
return self.two_vector_elementwise(other, operator.add)
def __mul__(self, other):
if type(other) is self.__class__:
raise NotImplementedError("Vector multiplication isn't implemented")
else:
return self.elementwise(lambda x: x * other)
@property
def norm(self):
return sqrt(sum(x**2 for x in self._components))
    def __abs__(self):
        return self.norm
def __getitem__(self, index):
return self._components[index]
def __setitem__(self, index, value):
self._components[index] = value
def __len__(self):
return len(self._components)
dim = __len__
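    # Quick illustration (not part of the original script):
    #   Vector(1, 2) + Vector(3, 4)  -> Vector[4, 6]
    #   Vector(1, 2) * 2.0           -> Vector[2.0, 4.0]
    #   Vector(3, 4).norm            -> 5.0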
def euler (delta_t, i, v_i, R, m, G):
""" Euler method to solve ODEs """
def new_r(component):
return R[i][-1][component] + v_i[-1][component] * delta_t
def new_v(component):
return v_i[-1][component] + a[component] * delta_t
a = a_nd(R, G, m, i)
v_i_new = Vector(*[new_v(component) for component in range(len(v_i[0]))])
r_new = Vector(*[new_r(component) for component in range(len(R[0][0]))])
return v_i_new, r_new
def euler_cormer (delta_t, i, v_i, R, m, G):
""" Euler-Cormer method to solve ODEs """
def new_r(component):
return R[i][-1][component] + 0.5 * (v_i[-1][component] + v_i_new[component])* delta_t
def new_v(component):
return v_i[-1][component] + a[component] * delta_t
a = a_nd(R, G, m, i)
v_i_new = Vector(*[new_v(component) for component in range(len(v_i[0]))])
r_new = Vector(*[new_r(component) for component in range(len(R[0][0]))])
return v_i_new, r_new
def verlet (t, delta_t, i, v_i, R, m, G):
""" Verlet algorithm """
def new_r(component):
if t == 0:
r_help = R[i][-1][component] - v_i[-1][component] * delta_t + a[component]/2 * delta_t**2
return 2 * R[i][-1][component] - r_help + a[component] * delta_t**2
else:
return 2 * R[i][-1][component] - R[i][-2][component] + a[component] * delta_t**2
def new_v(component):
if t == 0:
r_help = R[i][-1][component] - v_i[-1][component] * delta_t + a[component]/2 * delta_t**2
return (r_new[component] - r_help) / (2 * delta_t)
else:
return (r_new[component] - R[i][-2][component]) / (2 * delta_t)
a = a_nd(R, G, m, i)
r_new = Vector(*[new_r(component) for component in range(len(R[0][0]))])
v_i_new = Vector(*[new_v(component) for component in range(len(v_i[0]))])
return v_i_new, r_new
def rk4 (delta_t, i, v_i, R, m, G):
""" Forth-order Runge Kutta method """
def a_rk(R, G, m, i, weight, r_tilde):
""" Special acceleration for Runge Kutta method """
a_new = []
for j in range(len(R)):
if i == j: continue
r_i = R[i][-1]
r_j = R[j][-1]
r_ij = r_j - r_i
r_ij[0] = r_ij[0] + weight * r_tilde[0]
r_ij[1] = r_ij[1] + weight * r_tilde[1]
a_i = r_ij.elementwise(lambda x_n: G * m[j] * x_n / r_ij.norm**3)
a_new.append(a_i)
a = reduce(lambda v1, v2: v1 + v2, a_new)
return a
def v_tilde1(component):
return a_1[component] * delta_t
def r_tilde1(component):
return v_i[-1][component] * delta_t
def v_tilde2(component):
return a_2[component] * delta_t
def r_tilde2(component):
return (v_i[-1][component] + 0.5 * v_tilde1_new[component]) * delta_t
def v_tilde3(component):
return a_3[component] * delta_t
def r_tilde3(component):
return (v_i[-1][component] + 0.5 * v_tilde2_new[component]) * delta_t
def v_tilde4(component):
return a_4[component] * delta_t
def r_tilde4(component):
return (v_i[-1][component] + 0.5 * v_tilde3_new[component]) * delta_t
def new_v(component):
return v_i[-1][component] + 1/6 * v_tilde1_new[component] \
+ 1/3 * v_tilde2_new[component] \
+ 1/3 * v_tilde3_new[component] \
+ 1/6 * v_tilde4_new[component]
def new_r(component):
return R[i][-1][component] + 1/6 * r_tilde1_new[component] \
+ 1/3 * r_tilde2_new[component] \
+ 1/3 * r_tilde3_new[component] \
+ 1/6 * r_tilde4_new[component]
a_1 = a_nd(R, G, m, i)
v_tilde1_new = Vector(*[v_tilde1(component) for component in range(len(v_i[0]))])
r_tilde1_new = Vector(*[r_tilde1(component) for component in range(len(v_i[0]))])
a_2 = a_rk(R, G, m, i, 0.5, r_tilde1_new)
v_tilde2_new = Vector(*[v_tilde2(component) for component in range(len(v_i[0]))])
r_tilde2_new = Vector(*[r_tilde2(component) for component in range(len(v_i[0]))])
a_3 = a_rk(R, G, m, i, 0.5, r_tilde2_new)
v_tilde3_new = Vector(*[v_tilde3(component) for component in range(len(v_i[0]))])
r_tilde3_new = Vector(*[r_tilde3(component) for component in range(len(v_i[0]))])
a_4 = a_rk(R, G, m, i, 1, r_tilde3_new)
v_tilde4_new = Vector(*[v_tilde4(component) for component in range(len(v_i[0]))])
r_tilde4_new = Vector(*[r_tilde4(component) for component in range(len(v_i[0]))])
v_new = Vector(*[new_v(component) for component in range(len(v_i[0]))])
r_new = Vector(*[new_r(component) for component in range(len(v_i[0]))])
return v_new, r_new
def a_nd(R, G, m, i):
""" Acceleration of next timestep for 1 body in a system of n bodies
Acceleration as x and y components
Params:
R: Vector of vector of position tuples of elements
G: Gravitational constant
m: Vector of masses
"""
a_new = []
for j in range(len(R)):
if i == j: continue
r_i = R[i][-1]
r_j = R[j][-1]
r_ij = r_j - r_i
a_i = r_ij.elementwise(lambda x_n: G * m[j] * x_n / r_ij.norm**3)
a_new.append(a_i)
a = reduce(lambda v1, v2: v1 + v2, a_new)
return a
def prod(lst):
return reduce(lambda a,b: a * b, lst)
# 1 Input Data
# ---------------
# Number of bodys
n = 3
# Maximum integration time
t_max = 250.0
# Time step length
delta_t = 0.100
# Mass
m = [
0.999,
0.0005,
0.0005,
]
M = sum(m)
# my = prod(m) / M # only for two body problem
# Initial position r and velocity v of each body
r1_start = Vector(0, 0)
v1_start = Vector(0.25, 0)
r2_start = Vector(1, 0)
v2_start = Vector(0, 1)
r3_start = Vector(2, 0)
v3_start = Vector(0, 0.7)
r_start = [[r1_start], [r2_start], [r3_start]]
v_start = [[v1_start], [v2_start], [v3_start]]
# Gravity
G = 1.0
# 2 Calculation
# -------------
R = r_start
V = v_start
# Loop over time steps (start at 0, end at t_max, step = delta_t)
for t in range(0, int(t_max//delta_t)):
for i in range(n):
# v_i_new, r_i_new = euler(delta_t, i, V[i], R, m, G)
# v_i_new, r_i_new = euler_cormer(delta_t, i, V[i], R, m, G)
v_i_new, r_i_new = verlet(t, delta_t, i, V[i], R, m, G)
#v_i_new, r_i_new = rk4(delta_t, i, V[i], R, m, G)
R[i].append(r_i_new)
V[i].append(v_i_new)
plt.axis([-5, 50, -5, 5])
colors = ["blue", "green", "red", "yellow", "purple", "black", "cyan"]
random.shuffle(colors)
# adds the related color to each coordinate pair
a = (((coords, color) for (coords, color) in zip(body, cycle([color]))) for (body, color) in zip(R, cycle(colors)))
# zip coordinate pairs for each timestep together
b = iter(zip(*a))
previous_timestep = next(b)
for timestep in b:
plt.cla()
plt.axis([-5, 20, -5, 5])
for body in zip(previous_timestep, timestep):
(old_coords, _), (coords, body_color) = body
plt.plot(*zip(old_coords), "o", color=body_color)
plt.pause(0.0001)
previous_timestep = timestep
plt.show()
``` |
{
"source": "joshuasimon-taulia/spectacles",
"score": 2
} |
#### File: spectacles/tests/conftest.py
```python
from typing import Iterable
import os
import vcr
import pytest
from spectacles.client import LookerClient
from spectacles.exceptions import SqlError
from spectacles.lookml import Project, Model, Explore, Dimension
from tests.utils import load_resource
@pytest.fixture(scope="session")
def vcr_config():
return {"filter_headers": ["Authorization"]}
@pytest.fixture(scope="session")
def looker_client(record_mode) -> Iterable[LookerClient]:
with vcr.use_cassette(
"tests/cassettes/init_client.yaml",
filter_post_data_parameters=["client_id", "client_secret"],
filter_headers=["Authorization"],
record_mode=record_mode,
):
client = LookerClient(
base_url="https://spectacles.looker.com",
client_id=os.environ.get("LOOKER_CLIENT_ID", ""),
client_secret=os.environ.get("LOOKER_CLIENT_SECRET", ""),
)
client.update_workspace(project="eye_exam", workspace="production")
yield client
@pytest.fixture
def dimension():
return Dimension(
name="age",
model_name="eye_exam",
explore_name="users",
type="number",
sql='${TABLE}."AGE"',
url="/projects/eye_exam/files/views%2Fusers.view.lkml?line=6",
)
@pytest.fixture
def explore():
return Explore(name="users", model_name="eye_exam")
@pytest.fixture
def model():
return Model(name="eye_exam", project_name="eye_exam", explores=[])
@pytest.fixture
def project():
return Project(name="eye_exam", models=[])
@pytest.fixture
def sql_error():
return SqlError(
dimension="users.age",
explore="users",
model="eye_exam",
sql="SELECT age FROM users WHERE 1=2 LIMIT 1",
message="An error occurred.",
explore_url="https://spectacles.looker.com/x/qCJsodAZ2Y22QZLbmD0Gvy",
)
@pytest.fixture
def schema():
return load_resource("validation_schema.json")
``` |
{
"source": "JoshuaSimon/University-Projects",
"score": 3
} |
#### File: University-Projects/Cluster Analysis/k_means.py
```python
import numpy as np
from copy import deepcopy
from sklearn.cluster import KMeans
class KMeansClustering:
def __init__(self, num_culsters) -> None:
self.num_culsters = num_culsters
self.labels_ = None
self.cluster_centers_ = None
def fit(self, data):
# Number of training data.
self.num_observations = data.shape[0]
# Number of features in the data.
self.num_features = data.shape[1]
# Generate random centers for initialization.
mean = np.mean(data, axis = 0)
std = np.std(data, axis = 0)
centers = np.random.randn(self.num_culsters, self.num_features) * std + mean
# Setting up shape for containers.
centers_old = np.zeros(centers.shape)
centers_new = deepcopy(centers)
clusters = np.zeros(self.num_observations)
distances = np.zeros((self.num_observations, self.num_culsters))
error = np.linalg.norm(centers_new - centers_old)
# When, after an update, the estimate of that center stays the same, exit loop.
while error != 0:
# Measure the distance to every center.
for i in range(self.num_culsters):
distances[:,i] = np.linalg.norm(data - centers[i], axis=1)
# Assign all training data to closest center.
clusters = np.argmin(distances, axis = 1)
centers_old = deepcopy(centers_new)
# Calculate mean for every cluster and update the center.
for i in range(self.num_culsters):
centers_new[i] = np.mean(data[clusters == i], axis=0)
error = np.linalg.norm(centers_new - centers_old)
self.cluster_centers_ = centers_new
self.labels_ = clusters
if __name__ == "__main__":
regions = ["Munster", "Bielefeld", "Duisburg", "Bonn", "Rhein-Main", "Dusseldorf"]
data = np.array([
[1524.8, 265.4, 272, 79, 24, 223.5],
[1596.9, 323.6, 285, 87, 23, 333.9],
[2299.7, 610.3, 241, 45, 9, 632.1],
[864.1, 303.9, 220, 53, 11, 484.7],
[2669.9, 645.5, 202, 61, 15, 438.6],
[2985.2, 571.2, 226, 45, 16, 1103.9]
])
names = ["<NAME>", "Own implementaion of KMeans"]
models = [
KMeans(n_clusters=2, random_state=42),
KMeansClustering(num_culsters=2)
]
for name, model in zip(names, models):
print(f"Model: {name}")
model.fit(data)
for region, label in zip(regions, model.labels_):
print(f"Region: {region.ljust(15)} Label: {label}")
print("\n")
``` |
{
"source": "JoshuaSimon/Vorausberechnung-Studierende-in-Bayern",
"score": 4
} |
#### File: JoshuaSimon/Vorausberechnung-Studierende-in-Bayern/normalize.py
```python
import numpy as np
from math import sqrt
from sklearn.preprocessing import minmax_scale
def normalize_l2 (x):
""" Rerturns a normalized copy of
an array x. Uses L2 vector norm
for normalization.
"""
l2 = sqrt(sum(i**2 for i in x))
return [i/l2 for i in x]
def de_normalize_l2 (x_norm, x):
""" Transforms a normalized vector
back to its un-normalized form.
"""
l2 = sqrt(sum(i**2 for i in x))
return [i*l2 for i in x_norm]
def max_scaling(x):
""" Scaling the values of an array x
with respect to the maximum value of x.
"""
return [i/max(x) for i in x]
def normalize_minmax(x, min_r, max_r):
""" Normalizing and scaling given data
in an array x to the range of min_r
to max_r.
"""
x_s = [(i - min(x))/(max(x) - min(x)) for i in x]
return [i * (max_r - min_r) + min_r for i in x_s]
def de_normalize_minmax(x_scale, x, min_r, max_r):
""" Transforms a min-max normalized and
scaled vector back to its un-normalized
form.
"""
x_t = [((i - min_r)/(max_r - min_r)) for i in x_scale]
x_inv = [(i*(max(x) - min(x)) + min(x)) for i in x_t]
return x_inv
# Test data
x = [2000, 2001, 2003]
# Examples:
# L2 normalization
print(normalize_l2(x))
print(de_normalize_l2(normalize_l2(x), x))
# Max scaling
print(max_scaling(x))
# Min-Max scaling
print(normalize_minmax(x, 0, 10))
print(de_normalize_minmax(normalize_minmax(x, 0, 10), x, 0, 10))
# sklearn data scaling
print(minmax_scale(x, feature_range=(0, 1), axis=0, copy=False))
``` |
{
"source": "joshua-s-jensen/random_fun_projects",
"score": 3
} |
#### File: atari_reinforcement_learning/old/preprocess.py
```python
import numpy as np
def preprocess_obs(obs):
# crop & resize
img = obs[1:176:2, ::2]
# greyscale
img = img.mean(axis=2)
# increase contrast
color = np.array([210, 164, 74]).mean()
img[img==color] = 0
    # shift and scale pixel values (the result lies roughly in [-2, 0))
img = ((img - 128) / 128) - 1
# reshape
img = img.reshape(88,80)
return img
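# Illustrative usage (a sketch, not part of the original module); assumes an
# Atari RGB frame of shape (210, 160, 3), e.g. from gym's Pong environment:
#
# import gym
# env = gym.make("Pong-v0")
# obs = env.reset()
# state = preprocess_obs(obs)   # -> (88, 80) float array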
``` |
{
"source": "joshuaskelly/1000-blades",
"score": 3
} |
#### File: joshuaskelly/1000-blades/generate.py
```python
import errno
import glob
import json
import os
import random
import numpy
import tracery
from tracery.modifiers import base_english
from PIL import Image
from extended_english import extended_english
def calculate_image_possibilities():
"""Computes the total possible combinations for sword pieces
Returns:
The total number of unique combinations of sword pieces
"""
# Reordering the color ramps in the palette yields 3! combinations
palette_reorder_possibilities = 6
return len(palettes) * palette_reorder_possibilities * len(grips) * len(pommels) * len(crossguards) * len(blades)
def print_possibility_space():
"""Displays the total combinations of various proc gen items."""
print("Possibility space:")
print(" {} unique sword images".format(calculate_image_possibilities()))
def generate_sword_image():
"""Generates a sword image from pieces
Returns:
A PIL Image object.
"""
# Chose a random set of pieces
palette = Image.open(random.choice(palettes), 'r')
grip = Image.open(random.choice(grips), 'r')
pommel = Image.open(random.choice(pommels), 'r')
crossguard = Image.open(random.choice(crossguards), 'r')
blade = Image.open(random.choice(blades), 'r')
# Small chance to reorder palette
primary_palette = palette.palette.palette[0:15]
secondary_palette = palette.palette.palette[15:30]
accent_palette = palette.palette.palette[30:45]
transparency = palette.palette.palette[45:]
p = primary_palette + secondary_palette + accent_palette + transparency
if random.random() > 0.95:
reordered_palettes = [
primary_palette + accent_palette + secondary_palette + transparency,
secondary_palette + primary_palette + accent_palette + transparency,
secondary_palette + accent_palette + primary_palette + transparency,
accent_palette + primary_palette + secondary_palette + transparency,
accent_palette + secondary_palette + primary_palette + transparency
]
p = random.choice(reordered_palettes)
# Apply palette
for image in (grip, pommel, crossguard, blade):
image.putpalette(p)
composite = Image.new('RGBA', (32, 32))
# Paste with mask needs to be RGBA data. Convert() is used to accomplish this.
composite.paste(grip)
composite.paste(pommel, (0, 0), pommel.convert())
composite.paste(blade, (0, 0), blade.convert())
composite.paste(crossguard, (0, 0), crossguard.convert())
return composite
damage_type_color = {
'MAGIC': {"x": 0.6172, "y": 0.0937, "z": 0.7695},
'FIRE': {"x": 1.0, "y": 0.0, "z": 0.0},
'ICE': {"x": 0.0, "y": 0.0, "z": 1.0},
'LIGHTNING': {"x": 1.0, "y": 1.0, "z": 1.0},
'POISON': {"x": 0.1529, "y": 1.0, "z": 0.3333},
'HEALING': {"x": 0.0, "y": 1.0, "z": 1.0},
'PARALYZE': {"x": 0.9294, "y": 0.7882, "z": 0.1921},
'VAMPIRE': {"x": 0.13, "y": 0.1, "z": 0.15}
}
def generate_sword_data(index):
"""Generates sword JSON data
Returns:
Sword data as dict
"""
with open('./json/sword.json') as file:
sword_data = json.loads(file.read())
with open('./json/names.json') as file:
name_rules = json.loads(file.read())
name_grammar = tracery.Grammar(name_rules)
name_grammar.add_modifiers(base_english)
name_grammar.add_modifiers(extended_english)
sword_data['name'] = f'Blade {index + 1}:\n{name_grammar.flatten("#root#")}'
sword_data['tex'] = index
sword_data['brokenTex'] = index
sword_data['spriteAtlas'] = 'blades'
sword_data['baseDamage'] = int(numpy.random.normal(10, 4))
sword_data['randDamage'] = int(numpy.random.normal(10, 4))
sword_data['durability'] = int(numpy.random.normal(100, 40))
sword_data['knockback'] = numpy.random.normal(0.15, 0.025)
sword_data['reach'] = numpy.random.normal(0.5, 0.125) + 0.25
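    # Heavier-hitting blades swing slower: as baseDamage + randDamage
    # approaches 44, speed falls towards its floor of 0.25.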
sword_data['speed'] = ((1 - (sword_data['baseDamage'] + sword_data['randDamage']) / 44) * 2.0) + 0.25
sword_data['damageType'] = numpy.random.choice(
[
'PHYSICAL',
'MAGIC',
'FIRE',
'ICE',
'LIGHTNING',
'POISON',
'HEALING',
'PARALYZE',
'VAMPIRE'
],
p=[
0.5,
0.1,
0.1,
0.1,
0.1,
0.04,
0.0,
0.03,
0.03
]
)
sword_data['shader'] = {
'PHYSICAL': None,
'MAGIC': 'magic-item-purple',
'FIRE': 'magic-item-red',
'ICE': 'magic-item',
'LIGHTNING': 'magic-item-white',
'POISON': 'magic-item-green',
'HEALING': 'magic-item',
'PARALYZE': 'magic-item',
'VAMPIRE': 'magic-item-red'
}[sword_data['damageType']]
sword_data['attackAnimation'] = numpy.random.choice(
[
'swordAttack',
'swordAttackSlow',
'daggerAttack',
'maceAttack'
],
p=[
0.4,
0.2,
0.35,
0.05
]
)
sword_data['attackStrongAnimation'] = numpy.random.choice(
[
'swordAttackStrong',
'thrustAttack',
'daggerAttackStrong',
'maceAttackStrong'
],
p=[
0.4,
0.2,
0.35,
0.05
]
)
sword_data['chargeAnimation'] = numpy.random.choice(
[
'swordCharge',
'thrustCharge',
'daggerCharge',
'maceCharge'
],
p=[
0.35,
0.2,
0.35,
0.1
]
)
# Add a light?
if (sword_data['damageType'] != 'PHYSICAL' and random.random() < 0.125):
with open('./json/light.json') as file:
light_data = json.loads(file.read())
light_data['lightColor'] = damage_type_color[sword_data['damageType']]
sword_data['attached'].append(light_data)
return sword_data
def create_directory_structure():
"""Generates the output mod directory structure
Raises:
If fails to create directory
"""
def ensure_directory(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
ensure_directory('./out/textures')
ensure_directory('./out/data')
if __name__ == "__main__":
print("Generating blades...")
# Create mod directory structure
create_directory_structure()
# Set the random seed to have deterministic results.
random.seed('teddybear')
numpy.random.seed(8888888)
# Load up individual pieces
palettes = [os.path.normpath(g) for g in glob.glob('./images/palettes/*.png')]
grips = [os.path.normpath(g) for g in glob.glob('./images/grips/*.png')]
pommels = [os.path.normpath(g) for g in glob.glob('./images/pommels/*.png')]
crossguards = [os.path.normpath(g) for g in glob.glob('./images/crossguards/*.png')]
blades = [os.path.normpath(g) for g in glob.glob('./images/blades/*.png')]
print_possibility_space()
sheet_size = 32 * 16, 32 * 64
sprite_sheet = Image.new('RGBA', sheet_size)
with open('./json/items.json') as file:
sword_definitions = json.loads(file.read())
# Build the sprite sheet
for y in range(0, sheet_size[1], 32):
for x in range(0, sheet_size[0], 32):
index = y // 32 * sheet_size[0] // 32 + x // 32
image = generate_sword_image()
sprite_sheet.paste(image, (x, y))
s = generate_sword_data(index)
sword_definitions['unique'].append(s)
# Save the sprite sheet to file
sprite_sheet.save('./out/textures/blades.png')
# Save the item definitions to file
with open('./out/data/items.dat', 'w') as file:
file.write(json.dumps(sword_definitions, indent=4))
# Save the sprite sheet definition
with open('./out/data/spritesheets.dat', 'w') as file, open('./json/spritesheets.json') as json_data:
data = json.loads(json_data.read())
data[0]['columns'] = sheet_size[0] / 32
file.write(json.dumps(data, indent=4))
print("Done!")
``` |
{
"source": "joshuaskelly/game-tools",
"score": 3
} |
#### File: vgio/_core/__init__.py
```python
import io
import os
import shutil
import stat
try:
import threading
except ImportError:
import dummy_threading as threading
from types import SimpleNamespace
__all__ = ['ReadWriteFile', 'ArchiveInfo', 'ArchiveFile']
class ReadWriteFile:
"""ReadWriteFile serves as base class for serializing/deserialing
binary data.
Attributes:
fp: The handle to the open file. Will be None if file closed.
mode: File mode. Is one of 'r', 'w', or 'a'.
"""
def __init__(self):
"""Initializes a ReadWriteFile object. Derving classes must call this."""
self.fp = None
self.mode = None
self._did_modify = False
@classmethod
def open(cls, file, mode='r'):
"""Open a ReadWriteFile object where file can be a path to a file (a
string), or a file-like object.
The mode parameter should be ‘r’ to read an existing file, ‘w’ to
truncate and write a new file, or ‘a’ to append to an existing file.
open() is also a context manager and supports the with statement::
with ReadWriteFile.open('file.ext') as file:
file.save('file2.ext')
Args:
file: Either the path to the file, a file-like object, or bytes.
            mode: An optional string that indicates which mode to open the file in.
Returns:
An ReadWriteFile object constructed from the information read from
the file-like object.
Raises:
ValueError: If an invalid file mode is given.
TypeError: If attempting to write to a bytes object.
OSError: If the file argument is not a file-like object.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError(f"invalid mode: '{mode}'")
filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]
if isinstance(file, str):
file = io.open(file, filemode)
elif isinstance(file, bytes):
if mode != 'r':
raise TypeError('Unable to write to bytes object')
file = io.BytesIO(file)
elif not hasattr(file, 'read'):
raise OSError('Bad file descriptor')
# Read
if mode == 'r':
read_file = cls._read_file(file, mode)
read_file.fp = file
read_file.mode = 'r'
return read_file
# Write
elif mode == 'w':
write_file = cls()
write_file.fp = file
write_file.mode = 'w'
write_file._did_modify = True
return write_file
# Append
else:
append_file = cls._read_file(file, mode)
append_file.fp = file
append_file.mode = 'a'
append_file._did_modify = True
return append_file
@staticmethod
def _read_file(file, mode):
raise NotImplementedError
@staticmethod
def _write_file(file, object_):
raise NotImplementedError
def save(self, file):
"""Writes data to file.
Args:
file: Either the path to the file, or a file-like object.
Raises:
OSError: If file argument is not a file-like object.
"""
should_close = False
if isinstance(file, str):
file = io.open(file, 'r+b')
should_close = True
elif not hasattr(file, 'write'):
raise OSError('Bad file descriptor')
self.__class__._write_file(file, self)
if should_close:
file.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def close(self):
"""Closes the file pointer if possible. If mode is 'w' or 'a', the file
will be written to.
"""
if self.fp:
if self.mode in ('w', 'a') and self._did_modify:
self.fp.seek(0)
self.__class__._write_file(self.fp, self)
self.fp.truncate()
file_object = self.fp
self.fp = None
file_object.close()
class ArchiveInfo:
"""ArchiveInfo objects store information about a single entry in the
ArchiveFile archive. Instances of the ArchiveInfo class are returned by the getinfo() and
infolist() methods of ArchiveFile objects.
Attributes:
filename: Name of file.
file_offset: Offset of file in bytes.
file_size: Size of the file in bytes.
"""
__slots__ = (
'filename',
'file_offset',
'file_size'
)
def __init__(self, filename, file_offset=0, file_size=0):
raise NotImplementedError
@classmethod
def from_file(cls, filename):
"""Construct an ArchiveInfo instance for a file on the filesystem, in
preparation for adding it to an archive file. filename should be the
path to a file or directory on the filesystem.
Args:
filename: A path to a file or directory.
Returns:
An ArchiveInfo object.
"""
raise NotImplementedError
class _SharedFile:
def __init__(self, file, position, size, close, lock, writing):
self._file = file
self._position = position
self._start = position
self._end = position + size
self._close = close
self._lock = lock
self._writing = writing
def seek(self, offset, whence=0):
with self._lock:
if self._writing():
raise ValueError(
"Can't reposition in the archive file while there is an "
"open writing handle on it. Close the writing handle "
"before trying to read."
)
self._file.seek(offset, whence)
self._position = self._file.tell()
def read(self, n=-1):
with self._lock:
self._file.seek(self._position)
if n < 0 or n > self._end:
n = self._end - self._position
data = self._file.read(n)
self._position = self._file.tell()
return data
def tell(self):
return self._file.tell()
def close(self):
if self._file is not None:
file_object = self._file
self._file = None
self._close(file_object)
class ArchiveExtFile(io.BufferedIOBase):
"""A file-like object for reading an entry.
It is returned by ArchiveFile.open()
"""
MAX_N = 1 << 31 - 1
MIN_READ_SIZE = 4096
MAX_SEEK_READ = 1 << 24
def __init__(self, file_object, mode, archive_info, close_file_object=False):
self._file_object = file_object
self._close_file_object = close_file_object
self._bytes_left = archive_info.file_size
self._original_size = archive_info.file_size
self._original_start = file_object.tell()
self._eof = False
self._readbuffer = b''
self._offset = 0
self._size = archive_info.file_size
self.mode = mode
self.name = archive_info.filename
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument n is omitted, None, or negative, data will be read
until EOF.
"""
if n is None or n < 0:
buffer = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buffer += self._read_internal(self.MAX_N)
return buffer
end = n + self._offset
if end < len(self._readbuffer):
buffer = self._readbuffer[self._offset:end]
self._offset = end
return buffer
n = end - len(self._readbuffer)
buffer = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read_internal(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buffer += data[:n]
break
buffer += data
n -= len(data)
return buffer
def _read_internal(self, n):
"""Read up to n bytes with at most one read() system call"""
if self._eof or n <= 0:
return b''
# Read from file.
n = max(n, self.MIN_READ_SIZE)
data = self._file_object.read(n)
if not data:
raise EOFError
data = data[:self._bytes_left]
self._bytes_left -= len(data)
if self._bytes_left <= 0:
self._eof = True
return data
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def seek(self, offset, whence=0):
current_position = self.tell()
if whence == 0:
new_position = offset
elif whence == 1:
new_position = current_position + offset
elif whence == 2:
new_position = self._original_size + offset
else:
raise ValueError("whence must be os.SEEK_SET (0), os.SEEK_CUR (1), or os.SEEK_END (2)")
if new_position > self._original_size:
new_position = self._original_size
if new_position < 0:
new_position = 0
read_offset = new_position - current_position
buff_offset = read_offset + self._offset
if buff_offset >= 0 and buff_offset < len(self._readbuffer):
self._offset = buff_offset
read_offset = 0
elif read_offset < 0:
self._file_object.seek(self._original_start)
self._bytes_left = self._original_size
self._readbuffer = b''
self._offset = 0
self._eof = False
read_offset = new_position
while read_offset > 0:
read_length = min(self.MAX_SEEK_READ, read_offset)
self.read(read_length)
read_offset -= read_length
def tell(self):
filepos = self._original_size - self._bytes_left - len(self._readbuffer) + self._offset
return filepos
def close(self):
try:
if self._close_file_object:
self._file_object.close()
finally:
super().close()
class _ArchiveWriteFile(io.BufferedIOBase):
def __init__(self, archive_file, archive_info):
self._archive_info = archive_info
self._archive_file = archive_file
self._file_size = 0
self._start_of_file = self._fileobj.tell()
@property
def _fileobj(self):
return self._archive_file.fp
def writable(self):
return True
def write(self, data):
number_of_bytes = len(data)
relative_offset = self.tell()
self._file_size = max(self._file_size, relative_offset + number_of_bytes)
self._fileobj.write(data)
return number_of_bytes
def peek(self, n=1):
return self._fileobj.peek(n)
def seek(self, offset, whence=0):
self._fileobj.seek(offset + self._start_of_file, whence)
def tell(self):
return self._fileobj.tell() - self._start_of_file
def close(self):
super().close()
self._archive_info.file_size = self._file_size
self._archive_file.end_of_data = self._fileobj.tell()
self._archive_file._writing = False
self._archive_file.file_list.append(self._archive_info)
self._archive_file.NameToInfo[self._archive_info.filename] = self._archive_info
class ArchiveFile:
"""ArchiveFile serves as base class for working with binary archive data.
Attributes:
file: Either the path to the file, or a file-like object. If it is a path,
the file will be opened and closed by ArchiveFile.
mode: File mode. Is one of 'r', 'w', or 'a'.
"""
fp = None
_windows_illegal_name_trans_table = None
class factory:
ArchiveExtFile = ArchiveExtFile
ArchiveInfo = ArchiveInfo
ArchiveWriteFile = _ArchiveWriteFile
SharedFile = _SharedFile
def __init__(self, file, mode='r'):
if mode not in ('r', 'w', 'a'):
raise RuntimeError("ArchiveFile requires mode 'r', 'w', or 'a'")
self.NameToInfo = {}
self.file_list = []
self.mode = mode
self._did_modify = False
self._file_reference_count = 1
self._lock = threading.RLock()
self._writing = False
filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]
if isinstance(file, str):
self.filename = file
self.fp = io.open(file, filemode)
self._file_passed = 0
else:
self.fp = file
self.filename = getattr(file, 'name', None)
self._file_passed = 1
try:
if mode == 'r':
self._read_file(mode)
elif mode == 'w':
self._did_modify = True
self._write_directory()
self.end_of_data = self.fp.tell()
elif mode == 'a':
self._read_file(mode)
self.fp.seek(self.end_of_data)
else:
raise ValueError("Mode must be 'r', 'w', or 'a'")
except Exception as e:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise e
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _read_file(self, mode='r'):
raise NotImplementedError
def _write_directory(self):
raise NotImplementedError
def namelist(self):
"""Return a list of archive members by name.
Returns:
A sequence of filenames.
"""
return [data.filename for data in self.file_list]
def infolist(self):
"""Return a list containing an ArchiveInfo object for each member of the
archive. The objects are in the same order as their entries in the
actual archive file on disk if an existing archive was opened.
Returns:
A sequence of ArchiveInfo objects.
"""
return self.file_list
def getinfo(self, name):
"""Return a ArchiveInfo object with information about the archive member
name. Calling getinfo() for a name not currently contained in the
archive will raise a KeyError.
Args:
            name: ArchiveInfo name.
Returns:
An ArchiveInfo object.
Raises:
KeyError: If no archive item exists for the given name.
"""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError('There is no item named %r in the archive file' % name)
return info
def read(self, name):
"""Return the bytes of the file name in the archive. name is the name of
        the file in the archive, or an ArchiveInfo object. The archive must be
open for read or append.
Args:
name: ArchiveInfo name.
Returns:
File as bytes.
"""
info = self.getinfo(name)
with self.open(name, 'r') as fp:
return fp.read(info.file_size)
def open(self, name, mode='r'):
"""Access a member of the archive as a binary file-like object. name can
be either the name of a file within the archive or an ArchiveInfo object.
The mode parameter, if included, must be 'r' (the default) or 'w'.
open() is also a context manager and supports the with statement::
with ArchiveFile('archive.file') as archive_file:
with archive_file.open('entry') as entry_file:
print(entry_file.read())
Args:
name: Name or ArchiveInfo object.
mode: File mode to open object.
Returns:
A binary file-like object.
Raises:
ValueError: If mode isn't 'r' or 'w'.
RuntimeError: If file was already closed.
"""
if mode not in ('r', 'w'):
raise ValueError("open() requires mode 'r' or 'w'")
if not self.fp:
raise RuntimeError('Attempt to read archive that was already closed')
if isinstance(name, self.factory.ArchiveInfo):
info = name
elif mode == 'w':
info = self.factory.ArchiveInfo(name)
info.file_offset = self.fp.tell()
else:
info = self.getinfo(name)
if mode == 'w':
return self._open_to_write(info)
self._file_reference_count += 1
shared_file = self.factory.SharedFile(
self.fp,
info.file_offset,
info.file_size,
self._fpclose,
self._lock,
lambda: self._writing
)
try:
return self.factory.ArchiveExtFile(shared_file, mode, info, True)
except:
shared_file.close()
raise
def _open_to_write(self, archive_info):
if self._writing:
raise ValueError("Can't write to the archive file while there is "
"another write handle open on it. Close the first"
" handle before opening another.")
self._write_check(archive_info)
self._did_modify = True
self._writing = True
return self.factory.ArchiveWriteFile(self, archive_info)
def extract(self, member, path=None):
"""Extract a member from the archive to the current working directory;
        member must be its full name or an ArchiveInfo object. Its file
information is extracted as accurately as possible. path specifies a
different directory to extract to. member can be a filename or an
ArchiveInfo object.
Args:
            member: Either the name of the member to extract or an ArchiveInfo
instance.
path: The directory to extract to. The current working directory
will be used if None.
Returns:
Path to extracted file.
"""
if not isinstance(member, self.factory.ArchiveInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path)
def extractall(self, path=None, members=None):
"""Extract all members from the archive to the current working
directory. path specifies a different directory to extract to. members
is optional and must be a subset of the list returned by namelist().
Args:
path: The directory to extract to. The current working directory
will be used if None.
members: The names of the members to extract. This must be a subset
of the list returned by namelist(). All members will be
extracted if None.
"""
if members is None:
members = self.namelist()
for archiveinfo in members:
self.extract(archiveinfo, path)
@classmethod
def _sanitize_windows_name(cls, archive_name, path_separator):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
archive_name = archive_name.translate(table)
# Remove trailing dots
archive_name = (x.rstrip('.') for x in archive_name.split(path_separator))
# Rejoin, removing empty parts.
archive_name = path_separator.join(x for x in archive_name if x)
return archive_name
def _extract_member(self, member, target_path):
"""Extract the ArchiveInfo object 'member' to a physical file on the path
target_path.
"""
# Build the destination pathname, replacing forward slashes to
# platform specific separators.
archive_name = member.filename.replace('/', os.path.sep)
if os.path.altsep:
archive_name = archive_name.replace(os.path.altsep, os.path.sep)
# Interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
archive_name = os.path.splitdrive(archive_name)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
archive_name = os.path.sep.join(x for x in archive_name.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# Filter illegal characters on Windows
archive_name = self._sanitize_windows_name(archive_name, os.path.sep)
target_path = os.path.join(target_path, archive_name)
target_path = os.path.normpath(target_path)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(target_path)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(target_path):
os.mkdir(target_path)
return target_path
with self.open(member) as source, open(target_path, "wb") as target:
shutil.copyfileobj(source, target)
return target_path
def _write_check(self, archive_info):
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError('Attempting to write to archive that was already closed')
def write(self, filename, arcname=None):
"""Write the file named filename to the archive, giving it the archive
name arcname (by default, this will be the same as filename, but without
a drive letter and with leading path separators removed). The archive
must be open with mode 'w', or 'a'.
Args:
            filename: The path to the file to add to the archive.
arcname: Optional. Name of the info object. If omitted filename
will be used.
"""
if not self.fp:
raise ValueError('Attempting to write to archive that was'
' already closed')
if self._writing:
raise ValueError("Can't write to archive while an open writing"
" handle exists")
info = self.factory.ArchiveInfo.from_file(filename)
info.file_offset = self.fp.tell()
if arcname:
info.filename = arcname
if filename[-1] == '/':
raise RuntimeError('ArchiveFile expects a file, got a directory')
else:
with open(filename, 'rb') as src, self.open(info, 'w') as dest:
shutil.copyfileobj(src, dest, 8*1024)
def writestr(self, info_or_arcname, data):
"""Write a file into the archive. The contents is data, which may be
        either a string or a bytes instance; if it is a string, it is encoded as
        ASCII first. info_or_arcname is either the file name it will be given in
        the archive, or an ArchiveInfo instance. If it's an instance, at least
the filename must be given. The archive must be opened with mode 'w'
or 'a'.
Args:
            info_or_arcname: Either the archive name to use or an ArchiveInfo
                instance.
data: Data to be written. Either a string or bytes.
"""
if not self.fp:
raise ValueError('Attempting to write to archive that was'
' already closed')
if self._writing:
raise ValueError("Can't write to archive while an open writing"
" handle exists")
if not isinstance(info_or_arcname, self.factory.ArchiveInfo):
info = self.factory.ArchiveInfo(info_or_arcname)
else:
info = info_or_arcname
info.file_offset = self.fp.tell()
if not info.file_size:
info.file_size = len(data)
should_close = False
if isinstance(data, str):
data = data.encode('ascii')
if isinstance(data, bytes):
data = io.BytesIO(data)
should_close = True
if not hasattr(data, 'read'):
raise TypeError('Invalid data type. ArchiveFile.writestr expects a string or bytes')
with data as src, self.open(info, 'w') as dest:
shutil.copyfileobj(src, dest, 8*1024)
if should_close:
data.close()
def close(self):
"""Close the archive file. You must call close() before exiting your
program or essential records will not be written.
Raises:
ValueError: If open writing handles exist.
"""
if self.fp is None:
return
if self._writing:
raise ValueError("Can't close archive file while there is an open"
"writing handle on it. Close the writing handle"
"before closing the archive.")
try:
if self.mode in ('w', 'x', 'a') and self._did_modify and hasattr(self, 'end_of_data'):
with self._lock:
self.fp.seek(self.end_of_data)
self._write_directory()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _fpclose(self, fp):
assert self._file_reference_count > 0
self._file_reference_count -= 1
if not self._file_reference_count and not self._file_passed:
fp.close()
```
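To show how the two abstract hooks fit together, here is a minimal sketch of a `ReadWriteFile` subclass for a hypothetical single-integer format; the `Counter` class and its layout are invented for illustration and are not part of vgio.
```python
# Minimal sketch (not part of vgio): a ReadWriteFile subclass for a
# hypothetical format storing one little-endian 32-bit integer.
import io
import struct

from vgio._core import ReadWriteFile


class Counter(ReadWriteFile):
    def __init__(self):
        super().__init__()
        self.value = 0

    @staticmethod
    def _read_file(file, mode):
        counter = Counter()
        counter.value = struct.unpack('<i', file.read(4))[0]
        return counter

    @staticmethod
    def _write_file(file, counter):
        file.write(struct.pack('<i', counter.value))


# Round trip through an in-memory buffer.
buff = io.BytesIO()
c0 = Counter()
c0.value = 42
c0.save(buff)

buff.seek(0)
c1 = Counter.open(buff)
assert c1.value == 42
```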
#### File: vgio/devildaggers/hxshader.py
```python
import struct
from vgio._core import ReadWriteFile
class Header:
"""Class for representing a HxShader file header
Attributes:
name_size: Length of shader name.
vertex_shader_size: Length of the vertex shader.
frag_shader_size: Length of the fragment shader.
"""
format = '<3i'
size = struct.calcsize(format)
__slots__ = (
'name_size',
'vertex_shader_size',
'frag_shader_size'
)
def __init__(self,
name_size,
vertex_shader_size,
frag_shader_size):
self.name_size = name_size
self.vertex_shader_size = vertex_shader_size
self.frag_shader_size = frag_shader_size
@classmethod
def write(cls, file, header):
header_data = struct.pack(cls.format,
header.name_size,
header.vertex_shader_size,
header.frag_shader_size)
file.write(header_data)
@classmethod
def read(cls, file):
header_data = file.read(cls.size)
header_struct = struct.unpack(cls.format, header_data)
return Header(*header_struct)
class HxShader(ReadWriteFile):
"""Class for working with HxShaders
Attributes:
name: Shader name.
vertex_shader: Vertex shader code.
fragment_shader: Fragment shader code.
"""
def __init__(self):
"""Constructs an HxShader object."""
super().__init__()
self.name = ''
self.vertex_shader = ''
self.fragment_shader = ''
@classmethod
def _read_file(cls, file, mode):
hs = HxShader()
hs.fp = file
hs.mode = mode
def read_string(size):
string_format = f'<{size}s'
string_data = struct.unpack(string_format, file.read(struct.calcsize(string_format)))[0]
return string_data.decode('ascii')
header = Header.read(file)
hs.name = read_string(header.name_size)
hs.vertex_shader = read_string(header.vertex_shader_size)
hs.fragment_shader = read_string(header.frag_shader_size)
return hs
@classmethod
def _write_file(cls, file, shader):
header = Header(
len(shader.name),
len(shader.vertex_shader),
len(shader.fragment_shader)
)
Header.write(file, header)
file.write(shader.name.encode())
file.write(shader.vertex_shader.encode())
file.write(shader.fragment_shader.encode())
```
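A small usage sketch for the class above; the shader strings and the in-memory buffer are placeholders chosen only to demonstrate the save/open round trip.
```python
# Illustrative round trip for HxShader using an in-memory buffer.
import io

from vgio.devildaggers.hxshader import HxShader

shader = HxShader()
shader.name = 'basic'
shader.vertex_shader = 'void main() {}'
shader.fragment_shader = 'void main() {}'

buff = io.BytesIO()
shader.save(buff)

buff.seek(0)
loaded = HxShader.open(buff)
assert loaded.name == 'basic'
assert loaded.fragment_shader == 'void main() {}'
```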
#### File: vgio/devildaggers/hxtexture.py
```python
import struct
from vgio._core import ReadWriteFile
class Header:
"""Class for representing a HxTexture file header.
Attributes:
texture_format: Texture format. Should be 0x4011
width: Width of the texture at mip level 0.
height: Height of the texture at mip level 0.
mip_level_count: Number of mip levels.
"""
format = '<h2iB'
size = struct.calcsize(format)
__slots__ = (
'texture_format',
'width',
'height',
'mip_level_count'
)
def __init__(self,
texture_format,
width,
height,
mip_level_count):
self.texture_format = texture_format
self.width = width
self.height = height
self.mip_level_count = mip_level_count
@classmethod
def write(cls, file, header):
header_data = struct.pack(
cls.format,
header.texture_format,
header.width,
header.height,
header.mip_level_count
)
file.write(header_data)
@classmethod
def read(cls, file):
header_data = file.read(cls.size)
header_struct = struct.unpack(cls.format, header_data)
return Header(*header_struct)
class HxTexture(ReadWriteFile):
"""Class for working with HxTextures.
Attributes:
texture_format: Likely a texture format?
width: Texture width at mip level 0.
height: Texture height at mip level 0.
mip_level_count: Number of mip levels
pixels: An unstructured sequence of interleaved RGBA data as bytes.
"""
def __init__(self):
"""Constructs an HxTexture object."""
super().__init__()
self.texture_format = 0x4011
self.width = 0
self.height = 0
self.mip_level_count = 0
self.pixels = None
@classmethod
def _read_file(cls, file, mode):
ht = HxTexture()
ht.fp = file
ht.mode = mode
header = Header.read(file)
ht.texture_format = header.texture_format
ht.width = header.width
ht.height = header.height
ht.mip_level_count = header.mip_level_count
ht.pixels = file.read()
return ht
@classmethod
def _write_file(cls, file, hxtex):
header = Header(
hxtex.texture_format,
hxtex.width,
hxtex.height,
hxtex.mip_level_count
)
Header.write(file, header)
file.write(hxtex.pixels)
```
#### File: devildaggers/tests/test_hxtexture.py
```python
import io
import unittest
from vgio.devildaggers import hxtexture
class TestHxtextureReadWrite(unittest.TestCase):
def setUp(self):
self.buff = io.BytesIO()
def test_header(self):
texture_format = 0x4011
width = 64
height = 64
mip_level_count = 8
expected = hxtexture.Header(
texture_format,
width,
height,
mip_level_count
)
hxtexture.Header.write(self.buff, expected)
self.buff.seek(0)
actual = hxtexture.Header.read(self.buff)
self.assertEqual(expected.texture_format, actual.texture_format, 'Format values should be equal')
self.assertEqual(expected.width, actual.width, 'Width values should be equal')
self.assertEqual(expected.height, actual.height, 'Height values should be equal')
self.assertEqual(expected.mip_level_count, actual.mip_level_count, 'Mip_level_count values should be equal')
self.assertEqual(self.buff.read(), b'', 'Buffer should be fully consumed')
def test_hxtexture(self):
h0 = hxtexture.HxTexture()
h0.width = 4
h0.height = 4
h0.mip_level_count = 1
h0.pixels = b'\x7f\x7f\x7f\xff' * 16
h0.save(self.buff)
self.buff.seek(0)
h1 = hxtexture.HxTexture.open(self.buff)
self.assertEqual(h0.texture_format, h1.texture_format, 'Texture formats should be equal')
self.assertEqual(h0.width, h1.width, 'Widths should be equal')
self.assertEqual(h0.height, h1.height, 'Heights should be equal')
self.assertEqual(h0.mip_level_count, h1.mip_level_count, 'Mip level counts should be equal')
self.assertEqual(h0.pixels, h1.pixels, 'Pixels should be equal')
self.assertEqual(self.buff.read(), b'', 'Buffer should be fully consumed')
if __name__ == '__main__':
unittest.main()
```
#### File: vgio/quake2/dm2.py
```python
from vgio._core import ReadWriteFile
from . import protocol
__all__ = ['Dm2']
class Dm2(ReadWriteFile):
"""Class for working with Dm2 files
Example:
Basic usage::
from vgio.quake2.dm2 import Dm2
d = Dm2.open(file)
Attributes:
message_blocks: A sequence of MessageBlock objects
"""
def __init__(self):
super().__init__()
self.message_blocks = []
@staticmethod
def _read_file(file, mode):
dm2 = Dm2()
dm2.mode = mode
dm2.fp = file
# Message Blocks
while file.peek(4)[:4] != b'':
message_block = protocol.MessageBlock.read(file)
dm2.message_blocks.append(message_block)
return dm2
@staticmethod
def _write_file(file, dm2):
for message_block in dm2.message_blocks:
protocol.MessageBlock.write(file, message_block)
```
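A short usage sketch for Dm2; `demo1.dm2` is a placeholder path, and the attributes available on each message block depend on vgio.quake2.protocol.
```python
# Hedged usage sketch: open a demo and count its message blocks.
# 'demo1.dm2' is a placeholder path.
from vgio.quake2.dm2 import Dm2

with Dm2.open('demo1.dm2') as demo:
    print(f'{len(demo.message_blocks)} message blocks')
```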
#### File: quake2/tests/test_protocol.py
```python
import io
import unittest
from vgio.quake2.tests.basecase import TestCase
from vgio.quake2 import protocol
significant_digits = 5
angle_resolution = 360 / 256
class TestProtocolReadWrite(TestCase):
def test_bad_message(self):
protocol.Bad.write(self.buff)
self.buff.seek(0)
protocol.Bad.read(self.buff)
def test_muzzle_flash(self):
entity = 0
weapon = 4
mf0 = protocol.MuzzleFlash(entity, weapon)
protocol.MuzzleFlash.write(self.buff, mf0)
self.buff.seek(0)
mf1 = protocol.MuzzleFlash.read(self.buff)
self.assertEqual(mf0.entity, mf1.entity, 'Entity should be equal')
self.assertEqual(mf0.weapon, mf1.weapon, 'Weapon should be equal')
def test_muzzle_flash2(self):
entity = 128
flash_number = 64
mf0 = protocol.MuzzleFlash2(entity, flash_number)
protocol.MuzzleFlash2.write(self.buff, mf0)
self.buff.seek(0)
mf1 = protocol.MuzzleFlash2.read(self.buff)
self.assertEqual(mf0.entity, mf1.entity, 'Entity should be equal')
self.assertEqual(mf0.flash_number, mf1.flash_number, 'Flash number should be equal')
def test_temp_entity_particles(self):
type = protocol.TE_BLOOD
position = 0, 0, 0
direction = 16
te0 = protocol.TempEntity(type,
position,
direction)
protocol.TempEntity.write(self.buff, te0)
self.buff.seek(0)
te1 = protocol.TempEntity.read(self.buff)
self.assertEqual(te0.position, te1.position, 'Position should be equal')
self.assertEqual(te0.direction, te1.direction, 'Direction should be equal')
def test_temp_entity_splashes(self):
type = protocol.TE_SPLASH
count = 8
position = -16, 255, 0
direction = 16
te0 = protocol.TempEntity(type,
count,
position,
direction)
protocol.TempEntity.write(self.buff, te0)
self.buff.seek(0)
te1 = protocol.TempEntity.read(self.buff)
self.assertEqual(te0.count, te1.count, 'Count should be equal')
self.assertEqual(te0.position, te1.position, 'Position should be equal')
self.assertEqual(te0.direction, te1.direction, 'Direction should be equal')
def test_temp_entity_blue_hyperblaster(self):
type = protocol.TE_BLUEHYPERBLASTER
position = 0, 0, 0
direction = -0.5, 0.0, 0.75
te0 = protocol.TempEntity(type,
position,
direction)
protocol.TempEntity.write(self.buff, te0)
self.buff.seek(0)
te1 = protocol.TempEntity.read(self.buff)
self.assertEqual(te0.position, te1.position, 'Position should be equal')
self.assertEqual(te0.direction, te1.direction, 'Direction should be equal')
def test_temp_entity_trails(self):
type = protocol.TE_RAILTRAIL
position = 0, 0, 0
position2 = 16, -128, 255
te0 = protocol.TempEntity(type,
position,
position2)
protocol.TempEntity.write(self.buff, te0)
self.buff.seek(0)
te1 = protocol.TempEntity.read(self.buff)
self.assertEqual(te0.position, te1.position, 'Position should be equal')
self.assertEqual(te0.position2, te1.position2, 'Position2 should be equal')
def test_temp_entity_explosions(self):
type = protocol.TE_EXPLOSION1
position = 0, 0, 0
te0 = protocol.TempEntity(type, position)
protocol.TempEntity.write(self.buff, te0)
self.buff.seek(0)
te1 = protocol.TempEntity.read(self.buff)
self.assertEqual(te0.position, te1.position, 'Position should be equal')
def test_temp_entity_raise(self):
with self.assertRaises(TypeError, msg='Should raise if __init__ called with too few args'):
protocol.TempEntity(protocol.TE_BLOOD)
with self.assertRaises(TypeError, msg='Should raise if __init__ called with too many args'):
position = 0, 0, 0
bad_arg = 16
protocol.TempEntity(protocol.TE_EXPLOSION1,
position,
bad_arg)
def test_layout(self):
text = 'xv 32 yv 8 picn help'
l0 = protocol.Layout(text)
protocol.Layout.write(self.buff, l0)
self.buff.seek(0)
l1 = protocol.Layout.read(self.buff)
self.assertEqual(l0.text, l1.text, 'Text should be equal')
def test_inventory(self):
inventory = [i << 7 for i in range(256)]
i0 = protocol.Inventory(inventory)
protocol.Inventory.write(self.buff, i0)
self.buff.seek(0)
i1 = protocol.Inventory.read(self.buff)
self.assertEqual(tuple(i0.inventory), tuple(i1.inventory), 'Inventories should be equal')
def test_nop_message(self):
protocol.Nop.write(self.buff)
self.buff.seek(0)
protocol.Nop.read(self.buff)
def test_disconnect_message(self):
protocol.Disconnect.write(self.buff)
self.buff.seek(0)
protocol.Disconnect.read(self.buff)
def test_reconnect_message(self):
protocol.Reconnect.write(self.buff)
self.buff.seek(0)
protocol.Reconnect.read(self.buff)
def test_sound_no_optional_args(self):
flags = 0
sound_number = 64
s0 = protocol.Sound(flags, sound_number)
protocol.Sound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.Sound.read(self.buff)
self.assertEqual(s0.flags, s1.flags, 'Flags should be equal')
self.assertEqual(s0.sound_number, s1.sound_number, 'Sound_number should be equal')
self.assertEqual(s0.volume, s1.volume, 'Volume should be equal')
self.assertEqual(s0.attenuation, s1.attenuation, 'Attenuation should be equal')
self.assertEqual(s0.offset, s1.offset, 'Offset should be equal')
self.assertEqual(s0.channel, s1.channel, 'Channel should be equal')
self.assertEqual(s0.entity, s1.entity, 'Entity should be equal')
self.assertEqual(s0.position, s1.position, 'Position should be equal')
def test_sound_all_optional_args(self):
flags = 31
sound_number = 0
volume = 127 / 255
attenuation = 2
offset = 0
channel = 7
entity = 127
position = 128, -128, 0
s0 = protocol.Sound(flags,
sound_number,
volume,
attenuation,
offset,
channel,
entity,
position)
protocol.Sound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.Sound.read(self.buff)
self.assertEqual(s0.flags, s1.flags, 'Flags should be equal')
self.assertEqual(s0.sound_number, s1.sound_number, 'Sound_number should be equal')
self.assertAlmostEqual(s0.volume, s1.volume, significant_digits, 'Volume should be equal')
self.assertEqual(s0.attenuation, s1.attenuation, 'Attenuation should be equal')
self.assertEqual(s0.offset, s1.offset, 'Offset should be equal')
self.assertEqual(s0.channel, s1.channel, 'Channel should be equal')
self.assertEqual(s0.entity, s1.entity, 'Entity should be equal')
self.assertEqual(s0.position, s1.position, 'Position should be equal')
def test_print(self):
level = 3
text = 'This is a test message!'
p0 = protocol.Print(level, text)
protocol.Print.write(self.buff, p0)
self.buff.seek(0)
p1 = protocol.Print.read(self.buff)
self.assertEqual(p0.level, p1.level, 'Level should be the same')
self.assertEqual(p0.text, p1.text, 'Text should be the same')
def test_stuff_text(self):
text = '+forward'
t0 = protocol.StuffText(text)
protocol.StuffText.write(self.buff, t0)
self.buff.seek(0)
t1 = protocol.StuffText.read(self.buff)
self.assertEqual(t0.text, t1.text, 'Text should be equal')
def test_server_data(self):
protocol_version = 25
server_count = 1
attract_loop = 0
game_directory = ''
player_number = 1
map_name = 'base1.bsp'
sd0 = protocol.ServerData(protocol_version,
server_count,
attract_loop,
game_directory,
player_number,
map_name)
protocol.ServerData.write(self.buff, sd0)
self.buff.seek(0)
sd1 = protocol.ServerData.read(self.buff)
self.assertEqual(sd0.protocol_version, sd1.protocol_version, 'Protocol_version should be equal')
self.assertEqual(sd0.server_count, sd1.server_count, 'Server_count should be equal')
self.assertEqual(sd0.attract_loop, sd1.attract_loop, 'Attract_loop should be equal')
self.assertEqual(sd0.game_directory, sd1.game_directory, 'Game_directory should be equal')
self.assertEqual(sd0.player_number, sd1.player_number, 'Player_number should be equal')
self.assertEqual(sd0.map_name, sd1.map_name, 'Map_name should be equal')
def test_config_string(self):
index = 0
text = 'Test text'
cs0 = protocol.ConfigString(index, text)
protocol.ConfigString.write(self.buff, cs0)
self.buff.seek(0)
cs1 = protocol.ConfigString.read(self.buff)
self.assertEqual(cs0.index, cs1.index, 'Index should be equal')
self.assertEqual(cs0.text, cs1.text, 'Text should be equal')
def test_spawn_baseline_no_optional_data(self):
number = 16
origin_x = 0.0
origin_y = 22.5
angle_y = -45.0
angle_z = 12.125
frame = 4
event = 1
sb0 = protocol.SpawnBaseline(number=number,
origin_x=origin_x,
origin_y=origin_y,
angles_y=angle_y,
angles_z=angle_z,
frame=frame,
event=event)
protocol.SpawnBaseline.write(self.buff, sb0)
self.buff.seek(0)
sb1 = protocol.SpawnBaseline.read(self.buff)
self.assertEqual(sb0.number, sb1.number, 'Number should be equal')
self.assertEqual(sb0.origin, sb1.origin, 'Origins should be equal')
angle_error_y = abs(sb0.angles[1] - sb1.angles[1])
self.assertLessEqual(angle_error_y, angle_resolution, 'Angles should be within %s degrees of error' % angle_resolution)
angle_error_z = abs(sb0.angles[2] - sb1.angles[2])
self.assertLessEqual(angle_error_z, angle_resolution, 'Angles should be within %s degrees of error' % angle_resolution)
self.assertEqual(sb0.frame, sb1.frame, 'frame should be equal')
self.assertEqual(sb0.event, sb1.event, 'event should be equal')
def test_spawn_baseline_morebits1_optional_data(self):
number = 300
origin_z = 12.5
angle_x = 6.25
model_index = 1
render_fx = 4
effects = 2
sb0 = protocol.SpawnBaseline(number=number,
origin_z=origin_z,
angles_x=angle_x,
model_index=model_index,
render_fx=render_fx,
effects=effects)
protocol.SpawnBaseline.write(self.buff, sb0)
self.buff.seek(0)
sb1 = protocol.SpawnBaseline.read(self.buff)
self.assertEqual(sb0.number, sb1.number, 'Number should be equal')
self.assertEqual(sb0.origin, sb1.origin, 'Origins should be equal')
angle_error_x = abs(sb0.angles[0] - sb1.angles[0])
self.assertLessEqual(angle_error_x, angle_resolution, 'Angles should be within %s degrees of error' % angle_resolution)
self.assertEqual(sb0.model_index, sb1.model_index, 'Model indices should be equal')
self.assertEqual(sb0.render_fx, sb1.render_fx, 'Render FX values should be equal')
self.assertEqual(sb0.effects, sb1.effects, 'Effect values should be equal')
def test_spawn_baseline_morebits2_optional_data(self):
skin = 16
frame = 276
render_fx = 315
model_index_2 = 1
model_index_3 = 2
model_index_4 = 4
sb0 = protocol.SpawnBaseline(skin_number=skin,
frame=frame,
render_fx=render_fx,
model_index_2=model_index_2,
model_index_3=model_index_3,
model_index_4=model_index_4)
protocol.SpawnBaseline.write(self.buff, sb0)
self.buff.seek(0)
sb1 = protocol.SpawnBaseline.read(self.buff)
self.assertEqual(sb0.skin_number, sb1.skin_number, 'Skin_number should be equal')
self.assertEqual(sb0.frame, sb1.frame, 'Frame should be equal')
self.assertEqual(sb0.render_fx, sb1.render_fx, 'Render_fx should be equal')
self.assertEqual(sb0.model_index_2, sb1.model_index_2, 'Model_index_2 should be equal')
self.assertEqual(sb0.model_index_3, sb1.model_index_3, 'Model_index_3 should be equal')
self.assertEqual(sb0.model_index_4, sb1.model_index_4, 'Model_index_4 should be equal')
def test_spawn_baseline_morebits3_optional_data(self):
old_origin_x, old_origin_y, old_origin_z = 0.0, 12.25, -265
skin = 315
sound = 1
solid = 9250
sb0 = protocol.SpawnBaseline(old_origin_x=old_origin_x,
old_origin_y=old_origin_y,
old_origin_z=old_origin_z,
skin_number=skin,
sound=sound,
solid=solid)
protocol.SpawnBaseline.write(self.buff, sb0)
self.buff.seek(0)
sb1 = protocol.SpawnBaseline.read(self.buff)
self.assertEqual(sb0.old_origin[0], sb1.old_origin[0], 'Old_origin_x should be equal')
self.assertEqual(sb0.old_origin[1], sb1.old_origin[1], 'Old_origin_y should be equal')
self.assertEqual(sb0.old_origin[2], sb1.old_origin[2], 'Old_origin_z should be equal')
self.assertEqual(sb0.skin_number, sb1.skin_number, 'Skin should be equal')
self.assertEqual(sb0.sound, sb1.sound, 'Sound should be equal')
self.assertEqual(sb0.solid, sb1.solid, 'Solid should be equal')
def test_centerprint(self):
text = "Crouch here"
cp0 = protocol.CenterPrint(text)
protocol.CenterPrint.write(self.buff, cp0)
self.buff.seek(0)
cp1 = protocol.CenterPrint.read(self.buff)
self.assertEqual(cp0.text, cp1.text, 'Text should be the same')
def test_frame(self):
server_frame = 100
delta_frame = 2
areas = [1, 2, 3, 4]
f0 = protocol.Frame(server_frame,
delta_frame,
areas)
protocol.Frame.write(self.buff, f0)
self.buff.seek(0)
f1 = protocol.Frame.read(self.buff)
self.assertEqual(f0.server_frame, f1.server_frame, 'Server frame should be equal')
self.assertEqual(f0.delta_frame, f1.delta_frame, 'Delta frame should be equal')
self.assertEqual(f0.areas, f1.areas, 'Areas should be equal')
if __name__ == '__main__':
unittest.main()
```
#### File: vgio/quake/protocol.py
```python
import io
import struct
__all__ = ['Bad', 'Nop', 'Disconnect', 'UpdateStat', 'Version', 'SetView',
'Sound', 'Time', 'Print', 'StuffText', 'SetAngle', 'ServerInfo',
'LightStyle', 'UpdateName', 'UpdateFrags', 'ClientData',
'StopSound', 'UpdateColors', 'Particle', 'Damage', 'SpawnStatic',
'SpawnBinary', 'SpawnBaseline', 'TempEntity', 'SetPause',
'SignOnNum', 'CenterPrint', 'KilledMonster', 'FoundSecret',
'SpawnStaticSound', 'Intermission', 'Finale', 'CdTrack',
'SellScreen', 'CutScene', 'UpdateEntity', 'MessageBlock']
class _IO:
"""Simple namespace for protocol IO"""
@staticmethod
def _read(fmt, file):
return struct.unpack(fmt, file.read(struct.calcsize(fmt)))[0]
class read:
"""Read IO namespace"""
@staticmethod
def char(file):
return int(_IO._read('<b', file))
@staticmethod
def byte(file):
return int(_IO._read('<B', file))
@staticmethod
def short(file):
return int(_IO._read('<h', file))
@staticmethod
def long(file):
return int(_IO._read('<l', file))
@staticmethod
def float(file):
return float(_IO._read('<f', file))
@staticmethod
def coord(file):
return _IO.read.short(file) * 0.125
@staticmethod
def position(file):
return _IO.read.coord(file), _IO.read.coord(file), _IO.read.coord(file)
@staticmethod
def angle(file):
return _IO.read.char(file) * 360 / 256
@staticmethod
def angles(file):
return _IO.read.angle(file), _IO.read.angle(file), _IO.read.angle(file)
@staticmethod
def string(file, terminal_byte=b'\x00'):
string = b''
char = _IO._read('<s', file)
while char != terminal_byte:
string += char
char = _IO._read('<s', file)
return string.decode('ascii')
@staticmethod
def _write(fmt, file, value):
data = struct.pack(fmt, value)
file.write(data)
class write:
"""Write IO namespace"""
@staticmethod
def char(file, value):
_IO._write('<b', file, int(value))
@staticmethod
def byte(file, value):
_IO._write('<B', file, int(value))
@staticmethod
def short(file, value):
_IO._write('<h', file, int(value))
@staticmethod
def long(file, value):
_IO._write('<l', file, int(value))
@staticmethod
def float(file, value):
_IO._write('<f', file, float(value))
@staticmethod
def coord(file, value):
_IO.write.short(file, value / 0.125)
@staticmethod
def position(file, values):
_IO.write.coord(file, values[0]), _IO.write.coord(file, values[1]), _IO.write.coord(file, values[2])
@staticmethod
def angle(file, value):
_IO.write.char(file, int(value * 256 / 360))
@staticmethod
def angles(file, values):
_IO.write.angle(file, values[0]), _IO.write.angle(file, values[1]), _IO.write.angle(file, values[2])
@staticmethod
def string(file, value, terminal_byte=b'\x00'):
value = value[:2048]
size = len(value)
format = '<%is' % (size + 1)
v = value.encode('ascii') + terminal_byte
data = struct.pack(format, v)
file.write(data)
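# Illustrative note, not part of the original module: coords are 13.3 fixed
# point shorts and angles are single bytes, so precision is limited. For
# example, writing the coord 123.5 packs short(123.5 / 0.125) == 988 and reads
# back as 988 * 0.125 == 123.5, while writing the angle 90.0 packs
# char(int(90 * 256 / 360)) == 64 and reads back as 64 * 360 / 256 == 90.0.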
class BadMessage(Exception):
pass
SVC_BAD = 0
SVC_NOP = 1
SVC_DISCONNECT = 2
SVC_UPDATESTAT = 3
SVC_VERSION = 4
SVC_SETVIEW = 5
SVC_SOUND = 6
SVC_TIME = 7
SVC_PRINT = 8
SVC_STUFFTEXT = 9
SVC_SETANGLE = 10
SVC_SERVERINFO = 11
SVC_LIGHTSTYLE = 12
SVC_UPDATENAME = 13
SVC_UPDATEFRAGS = 14
SVC_CLIENTDATA = 15
SVC_STOPSOUND = 16
SVC_UPDATECOLORS = 17
SVC_PARTICLE = 18
SVC_DAMAGE = 19
SVC_SPAWNSTATIC = 20
SVC_SPAWNBINARY = 21
SVC_SPAWNBASELINE = 22
SVC_TEMP_ENTITY = 23
SVC_SETPAUSE = 24
SVC_SIGNONNUM = 25
SVC_CENTERPRINT = 26
SVC_KILLEDMONSTER = 27
SVC_FOUNDSECRET = 28
SVC_SPAWNSTATICSOUND = 29
SVC_INTERMISSION = 30
SVC_FINALE = 31
SVC_CDTRACK = 32
SVC_SELLSCREEN = 33
SVC_CUTSCENE = 34
class Bad:
"""Class for representing a Bad message
This is an error message and should not appear.
"""
__slots__ = ()
@staticmethod
def write(file, bad=None):
_IO.write.byte(file, SVC_BAD)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_BAD
return Bad()
class Nop:
"""Class for representing a Nop message"""
__slots__ = ()
@staticmethod
def write(file, nop=None):
_IO.write.byte(file, SVC_NOP)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_NOP
return Nop()
class Disconnect:
"""Class for representing a Disconnect message
    Disconnect from the server and end the game. Typically this is the last
message of a demo.
"""
__slots__ = ()
@staticmethod
def write(file, disconnect=None):
_IO.write.byte(file, SVC_DISCONNECT)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_DISCONNECT
return Disconnect()
class UpdateStat:
"""Class for representing UpdateStat messages
Updates a player state value.
Attributes:
index: The index to update in the player state array.
value: The new value to set.
"""
__slots__ = (
'index',
'value'
)
def __init__(self):
self.index = None
self.value = None
@staticmethod
def write(file, update_stat):
_IO.write.byte(file, SVC_UPDATESTAT)
_IO.write.byte(file, update_stat.index)
_IO.write.long(file, update_stat.value)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATESTAT
update_stat = UpdateStat()
update_stat.index = _IO.read.byte(file)
update_stat.value = _IO.read.long(file)
return update_stat
class Version:
"""Class for representing Version messages
Attributes:
protocol_version: Protocol version of the server. Quake uses 15.
"""
__slots__ = (
'protocol_version'
)
def __init__(self):
self.protocol_version = None
@staticmethod
def write(file, version):
_IO.write.byte(file, SVC_VERSION)
_IO.write.long(file, version.protocol_version)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_VERSION
version = Version()
version.protocol_version = _IO.read.long(file)
return version
class SetView:
"""Class for representing SetView messages
Sets the camera position to the given entity.
Attributes:
entity: The entity number
"""
__slots__ = (
'entity'
)
def __init__(self):
self.entity = None
@staticmethod
def write(file, set_view):
_IO.write.byte(file, SVC_SETVIEW)
_IO.write.short(file, set_view.entity)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SETVIEW
set_view = SetView()
set_view.entity = _IO.read.short(file)
return set_view
SND_VOLUME = 0b0001
SND_ATTENUATION = 0b0010
SND_LOOPING = 0b0100
class Sound:
"""Class for representing Sound messages
Plays a sound on a channel at a position.
Attributes:
entity: The entity that caused the sound.
bit_mask: A bit field indicating what data is sent.
volume: Optional. The sound volume or None.
attenuation: Optional. The sound attenuation or None.
channel: The sound channel, maximum of eight.
sound_number: The number of the sound in the sound table.
origin: The position of the sound.
"""
__slots__ = (
'entity',
'bit_mask',
'volume',
'attenuation',
'channel',
'sound_number',
'origin'
)
def __init__(self):
self.entity = None
self.bit_mask = 0b0000
self.volume = 255
self.attenuation = 1.0
self.channel = None
self.sound_number = None
self.origin = None, None, None
@staticmethod
def write(file, sound):
_IO.write.byte(file, SVC_SOUND)
_IO.write.byte(file, sound.bit_mask)
if sound.bit_mask & SND_VOLUME:
_IO.write.byte(file, sound.volume)
if sound.bit_mask & SND_ATTENUATION:
_IO.write.byte(file, sound.attenuation * 64)
channel = sound.entity << 3
channel |= sound.channel
_IO.write.short(file, channel)
_IO.write.byte(file, sound.sound_number)
_IO.write.position(file, sound.origin)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SOUND
sound = Sound()
sound.bit_mask = _IO.read.byte(file)
if sound.bit_mask & SND_VOLUME:
sound.volume = _IO.read.byte(file)
if sound.bit_mask & SND_ATTENUATION:
sound.attenuation = _IO.read.byte(file) / 64
sound.channel = _IO.read.short(file)
sound.entity = sound.channel >> 3
sound.channel &= 7
sound.sound_number = _IO.read.byte(file)
sound.origin = _IO.read.position(file)
return sound
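# Illustrative note, not part of the original module: entity and channel share
# one short. For entity 5 on channel 2, write() packs (5 << 3) | 2 == 42, and
# read() recovers entity == 42 >> 3 == 5 and channel == 42 & 7 == 2.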
class Time:
"""Class for representing Time messages
A time stamp that should appear in each block of messages.
Attributes:
        time: The amount of elapsed time (in seconds) since the start of the
game.
"""
__slots__ = (
'time'
)
def __init__(self):
self.time = None
@staticmethod
def write(file, time):
_IO.write.byte(file, SVC_TIME)
_IO.write.float(file, time.time)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_TIME
time = Time()
time.time = _IO.read.float(file)
return time
class Print:
"""Class for representing Print messages
Prints text in the top left corner of the screen and console.
Attributes:
text: The text to be shown.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, _print):
_IO.write.byte(file, SVC_PRINT)
_IO.write.string(file, _print.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_PRINT
_print = Print()
_print.text = _IO.read.string(file)
return _print
class StuffText:
"""Class for representing StuffText messages
Text sent to the client console and ran.
Attributes:
text: The text to send to the client console.
Note: This string is terminated with the newline character.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, stuff_text):
_IO.write.byte(file, SVC_STUFFTEXT)
_IO.write.string(file, stuff_text.text, b'\n')
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_STUFFTEXT
stuff_text = StuffText()
stuff_text.text = _IO.read.string(file, b'\n')
return stuff_text
class SetAngle:
"""Class for representing SetAngle messages
Sets the camera's orientation.
Attributes:
angles: The new angles for the camera.
"""
__slots__ = (
'angles'
)
def __init__(self):
self.angles = None
@staticmethod
def write(file, set_angle):
_IO.write.byte(file, SVC_SETANGLE)
_IO.write.angles(file, set_angle.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SETANGLE
set_angle = SetAngle()
set_angle.angles = _IO.read.angles(file)
return set_angle
class ServerInfo:
"""Class for representing ServerInfo messages
    Handles the loading of assets. Usually the first message sent after a level
change.
Attributes:
protocol_version: Protocol version of the server. Quake uses 15.
max_clients: Number of clients.
multi: Multiplayer flag. Set to 0 for single-player and 1 for
multiplayer.
map_name: The name of the level.
        models: The model table as a sequence of strings.
sounds: The sound table as a sequence of strings.
"""
__slots__ = (
'protocol_version',
'max_clients',
'multi',
'map_name',
'models',
'sounds'
)
def __init__(self):
self.protocol_version = 15
self.max_clients = 0
self.multi = 0
self.map_name = ''
self.models = []
self.sounds = []
@staticmethod
def write(file, server_data):
_IO.write.byte(file, SVC_SERVERINFO)
_IO.write.long(file, server_data.protocol_version)
_IO.write.byte(file, server_data.max_clients)
_IO.write.byte(file, server_data.multi)
_IO.write.string(file, server_data.map_name)
for model in server_data.models:
_IO.write.string(file, model)
_IO.write.byte(file, 0)
for sound in server_data.sounds:
_IO.write.string(file, sound)
_IO.write.byte(file, 0)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SERVERINFO
server_data = ServerInfo()
server_data.protocol_version = _IO.read.long(file)
server_data.max_clients = _IO.read.byte(file)
server_data.multi = _IO.read.byte(file)
server_data.map_name = _IO.read.string(file)
model = _IO.read.string(file)
while model:
server_data.models.append(model)
model = _IO.read.string(file)
server_data.models = tuple(server_data.models)
sound = _IO.read.string(file)
while sound:
server_data.sounds.append(sound)
sound = _IO.read.string(file)
server_data.sounds = tuple(server_data.sounds)
return server_data
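# Illustrative note, not part of the original module: the model and sound
# tables are written as consecutive null-terminated strings followed by one
# extra 0 byte, which read() sees as an empty string and uses to stop looping.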
class LightStyle:
"""Class for representing a LightStyle message
Defines the style of a light. Usually happens shortly after level change.
Attributes:
style: The light style number.
string: A string of arbitrary length representing the brightness of
the light. The brightness is mapped to the characters 'a' to 'z',
with 'a' being black and 'z' being pure white.
Example:
# Flickering light
        light_style = LightStyle()
        light_style.style = 0
        light_style.string = 'aaazaazaaaaaz'
"""
__slots__ = (
'style',
'string'
)
def __init__(self):
self.style = None
self.string = None
@staticmethod
def write(file, light_style):
_IO.write.byte(file, SVC_LIGHTSTYLE)
_IO.write.byte(file, light_style.style)
_IO.write.string(file, light_style.string)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_LIGHTSTYLE
light_style = LightStyle()
light_style.style = _IO.read.byte(file)
light_style.string = _IO.read.string(file)
return light_style
class UpdateName:
"""Class for representing UpdateName messages
Sets the player's name.
Attributes:
player: The player number to update.
name: The new name as a string.
"""
__slots__ = (
'player',
'name'
)
def __init__(self):
self.player = None
self.name = None
@staticmethod
def write(file, update_name):
_IO.write.byte(file, SVC_UPDATENAME)
_IO.write.byte(file, update_name.player)
_IO.write.string(file, update_name.name)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATENAME
update_name = UpdateName()
update_name.player = _IO.read.byte(file)
update_name.name = _IO.read.string(file)
return update_name
class UpdateFrags:
"""Class for representing UpdateFrags messages
Sets the player's frag count.
Attributes:
player: The player to update.
frags: The new frag count.
"""
__slots__ = (
'player',
'frags'
)
def __init__(self):
self.player = None
self.frags = None
@staticmethod
def write(file, update_frags):
_IO.write.byte(file, SVC_UPDATEFRAGS)
_IO.write.byte(file, update_frags.player)
_IO.write.short(file, update_frags.frags)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATEFRAGS
update_frags = UpdateFrags()
update_frags.player = _IO.read.byte(file)
update_frags.frags = _IO.read.short(file)
return update_frags
# Client Data bit mask
SU_VIEWHEIGHT = 0b0000000000000001
SU_IDEALPITCH = 0b0000000000000010
SU_PUNCH1 = 0b0000000000000100
SU_PUNCH2 = 0b0000000000001000
SU_PUNCH3 = 0b0000000000010000
SU_VELOCITY1 = 0b0000000000100000
SU_VELOCITY2 = 0b0000000001000000
SU_VELOCITY3 = 0b0000000010000000
SU_ITEMS = 0b0000001000000000
SU_ONGROUND = 0b0000010000000000
SU_INWATER = 0b0000100000000000
SU_WEAPONFRAME = 0b0001000000000000
SU_ARMOR = 0b0010000000000000
SU_WEAPON = 0b0100000000000000
class ClientData:
"""Class for representing ClientData messages
Server information about this client.
Attributes:
bit_mask: A bit field indicating what data is sent.
view_height: Optional. The view offset from the origin along the z-axis.
ideal_pitch: Optional. The calculated angle for looking up/down slopes.
punch_angle: Optional. A triple representing camera shake.
velocity: Optional. Player velocity.
item_bit_mask: A bit field for player inventory.
on_ground: Flag indicating if player is on the ground.
in_water: Flag indicating if player is in a water volume.
weapon_frame: Optional. The animation frame of the weapon.
armor: Optional. The current armor value.
weapon: Optional. The model number in the model table.
health: The current health value.
        active_ammo: The ammo count for the active weapon.
ammo: The current ammo counts as a quadruple.
active_weapon: The actively held weapon.
"""
__slots__ = (
'bit_mask',
'view_height',
'ideal_pitch',
'punch_angle',
'velocity',
'item_bit_mask',
'on_ground',
'in_water',
'weapon_frame',
'armor',
'weapon',
'health',
'active_ammo',
'ammo',
'active_weapon'
)
def __init__(self):
self.bit_mask = 0b0000000000000000
self.view_height = 22
self.ideal_pitch = 0
self.punch_angle = 0, 0, 0
self.velocity = 0, 0, 0
self.item_bit_mask = 0b0000
self.on_ground = False
self.in_water = False
self.weapon_frame = 0
self.armor = 0
self.weapon = None
self.health = None
self.active_ammo = None
self.ammo = None
self.active_weapon = None
@staticmethod
def write(file, client_data):
_IO.write.byte(file, SVC_CLIENTDATA)
if client_data.on_ground:
client_data.bit_mask |= SU_ONGROUND
if client_data.in_water:
client_data.bit_mask |= SU_INWATER
_IO.write.short(file, client_data.bit_mask)
if client_data.bit_mask & SU_VIEWHEIGHT:
_IO.write.char(file, client_data.view_height)
if client_data.bit_mask & SU_IDEALPITCH:
_IO.write.char(file, client_data.ideal_pitch)
if client_data.bit_mask & SU_PUNCH1:
pa = client_data.punch_angle
_IO.write.angle(file, pa[0])
if client_data.bit_mask & SU_VELOCITY1:
ve = client_data.velocity
_IO.write.char(file, ve[0] // 16)
if client_data.bit_mask & SU_PUNCH2:
pa = client_data.punch_angle
_IO.write.angle(file, pa[1])
if client_data.bit_mask & SU_VELOCITY2:
ve = client_data.velocity
_IO.write.char(file, ve[1] // 16)
if client_data.bit_mask & SU_PUNCH3:
pa = client_data.punch_angle
_IO.write.angle(file, pa[2])
if client_data.bit_mask & SU_VELOCITY3:
ve = client_data.velocity
_IO.write.char(file, ve[2] // 16)
_IO.write.long(file, client_data.item_bit_mask)
if client_data.bit_mask & SU_WEAPONFRAME:
_IO.write.byte(file, client_data.weapon_frame)
if client_data.bit_mask & SU_ARMOR:
_IO.write.byte(file, client_data.armor)
if client_data.bit_mask & SU_WEAPON:
_IO.write.byte(file, client_data.weapon)
_IO.write.short(file, client_data.health)
_IO.write.byte(file, client_data.active_ammo)
_IO.write.byte(file, client_data.ammo[0])
_IO.write.byte(file, client_data.ammo[1])
_IO.write.byte(file, client_data.ammo[2])
_IO.write.byte(file, client_data.ammo[3])
_IO.write.byte(file, client_data.active_weapon)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CLIENTDATA
client_data = ClientData()
client_data.bit_mask = _IO.read.short(file)
client_data.on_ground = client_data.bit_mask & SU_ONGROUND != 0
client_data.in_water = client_data.bit_mask & SU_INWATER != 0
if client_data.bit_mask & SU_VIEWHEIGHT:
client_data.view_height = _IO.read.char(file)
if client_data.bit_mask & SU_IDEALPITCH:
client_data.ideal_pitch = _IO.read.char(file)
if client_data.bit_mask & SU_PUNCH1:
pa = client_data.punch_angle
client_data.punch_angle = _IO.read.angle(file), pa[1], pa[2]
if client_data.bit_mask & SU_VELOCITY1:
ve = client_data.velocity
client_data.velocity = _IO.read.char(file) * 16, ve[1], ve[2]
if client_data.bit_mask & SU_PUNCH2:
pa = client_data.punch_angle
client_data.punch_angle = pa[0], _IO.read.angle(file), pa[2]
if client_data.bit_mask & SU_VELOCITY2:
ve = client_data.velocity
client_data.velocity = ve[0], _IO.read.char(file) * 16, ve[2]
if client_data.bit_mask & SU_PUNCH3:
pa = client_data.punch_angle
client_data.punch_angle = pa[0], pa[1], _IO.read.angle(file)
if client_data.bit_mask & SU_VELOCITY3:
ve = client_data.velocity
client_data.velocity = ve[0], ve[1], _IO.read.char(file) * 16
client_data.item_bit_mask = _IO.read.long(file)
if client_data.bit_mask & SU_WEAPONFRAME:
client_data.weapon_frame = _IO.read.byte(file)
if client_data.bit_mask & SU_ARMOR:
client_data.armor = _IO.read.byte(file)
if client_data.bit_mask & SU_WEAPON:
client_data.weapon = _IO.read.byte(file)
client_data.health = _IO.read.short(file)
client_data.active_ammo = _IO.read.byte(file)
client_data.ammo = _IO.read.byte(file), _IO.read.byte(file), _IO.read.byte(file), _IO.read.byte(file)
client_data.active_weapon = _IO.read.byte(file)
return client_data
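# Illustrative note, not part of the original module: only fields whose SU_*
# bit is set are serialized. With bit_mask == SU_VIEWHEIGHT | SU_ARMOR, write()
# emits the message id, the 16-bit mask, view_height, the always-present
# item_bit_mask, armor, and then the fixed tail of health, active_ammo, the
# four ammo counts, and active_weapon; every other optional field is skipped.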
class StopSound:
"""Class for representing StopSound messages
Stops a playing sound.
Attributes:
channel: The channel on which the sound is playing.
entity: The entity that caused the sound.
"""
__slots__ = (
'channel',
'entity'
)
def __init__(self):
        self.channel = None
        self.entity = None
@staticmethod
def write(file, stop_sound):
_IO.write.byte(file, SVC_STOPSOUND)
data = stop_sound.entity << 3 | (stop_sound.channel & 0x07)
_IO.write.short(file, data)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_STOPSOUND
stop_sound = StopSound()
data = _IO.read.short(file)
stop_sound.channel = data & 0x07
stop_sound.entity = data >> 3
return stop_sound
class UpdateColors:
"""Class for representing UpdateColors messages
Sets the player's colors.
Attributes:
player: The player to update.
colors: The combined shirt/pant color.
"""
__slots__ = (
'player',
'colors'
)
def __init__(self):
self.player = None
self.colors = None
@staticmethod
def write(file, update_colors):
_IO.write.byte(file, SVC_UPDATECOLORS)
_IO.write.byte(file, update_colors.player)
_IO.write.byte(file, update_colors.colors)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATECOLORS
update_colors = UpdateColors()
update_colors.player = _IO.read.byte(file)
update_colors.colors = _IO.read.byte(file)
return update_colors
class Particle:
"""Class for representing Particle messages
Creates particle effects
Attributes:
origin: The origin position of the particles.
direction: The velocity of the particles represented as a triple.
count: The number of particles.
color: The color index of the particle.
"""
__slots__ = (
'origin',
'direction',
'count',
'color'
)
def __init__(self):
self.origin = None
self.direction = None
self.count = None
self.color = None
@staticmethod
def write(file, particle):
_IO.write.byte(file, SVC_PARTICLE)
_IO.write.position(file, particle.origin)
_IO.write.char(file, particle.direction[0] * 16)
_IO.write.char(file, particle.direction[1] * 16)
_IO.write.char(file, particle.direction[2] * 16)
_IO.write.byte(file, particle.count)
_IO.write.byte(file, particle.color)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_PARTICLE
particle = Particle()
particle.origin = _IO.read.position(file)
particle.direction = _IO.read.char(file) / 16, _IO.read.char(file) / 16, _IO.read.char(file) / 16,
particle.count = _IO.read.byte(file)
particle.color = _IO.read.byte(file)
return particle
class Damage:
"""Class for representing Damage messages
Damage information
Attributes:
armor: The damage amount to be deducted from player armor.
blood: The damage amount to be deducted from player health.
origin: The position of the entity that inflicted the damage.
"""
__slots__ = (
'armor',
'blood',
'origin'
)
def __init__(self):
self.armor = None
self.blood = None
self.origin = None
@staticmethod
def write(file, damage):
_IO.write.byte(file, SVC_DAMAGE)
_IO.write.byte(file, damage.armor)
_IO.write.byte(file, damage.blood)
_IO.write.position(file, damage.origin)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_DAMAGE
damage = Damage()
damage.armor = _IO.read.byte(file)
damage.blood = _IO.read.byte(file)
damage.origin = _IO.read.position(file)
return damage
class SpawnStatic:
"""Class for representing SpawnStatic messages
Creates a static entity
Attributes:
model_index: The model number in the model table.
frame: The frame number of the model.
color_map: The color map used to display the model.
skin: The skin number of the model.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'model_index',
'frame',
'color_map',
'skin',
'origin',
'angles'
)
def __init__(self):
self.model_index = None
self.frame = None
self.color_map = None
self.skin = None
self.origin = None
self.angles = None
@staticmethod
def write(file, spawn_static):
_IO.write.byte(file, SVC_SPAWNSTATIC)
_IO.write.byte(file, spawn_static.model_index)
_IO.write.byte(file, spawn_static.frame)
_IO.write.byte(file, spawn_static.color_map)
_IO.write.byte(file, spawn_static.skin)
_IO.write.position(file, spawn_static.origin)
_IO.write.angles(file, spawn_static.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNSTATIC
spawn_static = SpawnStatic()
spawn_static.model_index = _IO.read.byte(file)
spawn_static.frame = _IO.read.byte(file)
spawn_static.color_map = _IO.read.byte(file)
spawn_static.skin = _IO.read.byte(file)
spawn_static.origin = _IO.read.position(file)
spawn_static.angles = _IO.read.angles(file)
return spawn_static
class SpawnBinary:
"""Class for representing SpawnBinary messages
This is a deprecated message.
"""
__slots__ = ()
@staticmethod
def write(file):
raise BadMessage('SpawnBinary message obsolete')
@staticmethod
def read(file):
raise BadMessage('SpawnBinary message obsolete')
class SpawnBaseline:
"""Class for representing SpawnBaseline messages
Creates a dynamic entity
Attributes:
entity: The number of the entity.
model_index: The number of the model in the model table.
frame: The frame number of the model.
color_map: The color map used to display the model.
skin: The skin number of the model.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'entity',
'model_index',
'frame',
'color_map',
'skin',
'origin',
'angles'
)
def __init__(self):
self.entity = None
self.model_index = None
self.frame = None
self.color_map = None
self.skin = None
self.origin = None
self.angles = None
@staticmethod
def write(file, spawn_baseline):
_IO.write.byte(file, SVC_SPAWNBASELINE)
_IO.write.short(file, spawn_baseline.entity)
_IO.write.byte(file, spawn_baseline.model_index)
_IO.write.byte(file, spawn_baseline.frame)
_IO.write.byte(file, spawn_baseline.color_map)
_IO.write.byte(file, spawn_baseline.skin)
_IO.write.position(file, spawn_baseline.origin)
_IO.write.angles(file, spawn_baseline.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNBASELINE
spawn_baseline = SpawnBaseline()
spawn_baseline.entity = _IO.read.short(file)
spawn_baseline.model_index = _IO.read.byte(file)
spawn_baseline.frame = _IO.read.byte(file)
spawn_baseline.color_map = _IO.read.byte(file)
spawn_baseline.skin = _IO.read.byte(file)
spawn_baseline.origin = _IO.read.position(file)
spawn_baseline.angles = _IO.read.angles(file)
return spawn_baseline
TE_SPIKE = 0
TE_SUPERSPIKE = 1
TE_GUNSHOT = 2
TE_EXPLOSION = 3
TE_TAREXPLOSION = 4
TE_LIGHTNING1 = 5
TE_LIGHTNING2 = 6
TE_WIZSPIKE = 7
TE_KNIGHTSPIKE = 8
TE_LIGHTNING3 = 9
TE_LAVASPLASH = 10
TE_TELEPORT = 11
TE_EXPLOSION2 = 12
TE_BEAM = 13
class TempEntity:
"""Class for representing TempEntity messages
Creates a temporary entity. The attributes of the message depend on the
type of entity being created.
Attributes:
type: The type of the temporary entity.
"""
def __init__(self):
self.type = None
@staticmethod
def write(file, temp_entity):
_IO.write.byte(file, SVC_TEMP_ENTITY)
_IO.write.byte(file, temp_entity.type)
if temp_entity.type == TE_WIZSPIKE or \
temp_entity.type == TE_KNIGHTSPIKE or \
temp_entity.type == TE_SPIKE or \
temp_entity.type == TE_SUPERSPIKE or \
temp_entity.type == TE_GUNSHOT or \
temp_entity.type == TE_EXPLOSION or \
temp_entity.type == TE_TAREXPLOSION or \
temp_entity.type == TE_LAVASPLASH or \
temp_entity.type == TE_TELEPORT:
_IO.write.position(file, temp_entity.origin)
elif temp_entity.type == TE_LIGHTNING1 or \
temp_entity.type == TE_LIGHTNING2 or \
temp_entity.type == TE_LIGHTNING3 or \
temp_entity.type == TE_BEAM:
_IO.write.short(file, temp_entity.entity)
_IO.write.position(file, temp_entity.start)
_IO.write.position(file, temp_entity.end)
elif temp_entity.type == TE_EXPLOSION2:
_IO.write.position(file, temp_entity.origin)
_IO.write.byte(file, temp_entity.color_start)
_IO.write.byte(file, temp_entity.color_length)
else:
raise BadMessage('Invalid Temporary Entity type: %r' % temp_entity.type)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_TEMP_ENTITY
temp_entity = TempEntity()
temp_entity.type = _IO.read.byte(file)
if temp_entity.type == TE_WIZSPIKE or \
temp_entity.type == TE_KNIGHTSPIKE or \
temp_entity.type == TE_SPIKE or \
temp_entity.type == TE_SUPERSPIKE or \
temp_entity.type == TE_GUNSHOT or \
temp_entity.type == TE_EXPLOSION or \
temp_entity.type == TE_TAREXPLOSION or \
temp_entity.type == TE_LAVASPLASH or \
temp_entity.type == TE_TELEPORT:
temp_entity.origin = _IO.read.position(file)
elif temp_entity.type == TE_LIGHTNING1 or \
temp_entity.type == TE_LIGHTNING2 or \
temp_entity.type == TE_LIGHTNING3 or \
temp_entity.type == TE_BEAM:
temp_entity.entity = _IO.read.short(file)
temp_entity.start = _IO.read.position(file)
temp_entity.end = _IO.read.position(file)
elif temp_entity.type == TE_EXPLOSION2:
temp_entity.origin = _IO.read.position(file)
temp_entity.color_start = _IO.read.byte(file)
temp_entity.color_length = _IO.read.byte(file)
else:
raise BadMessage(f'Invalid Temporary Entity type: {temp_entity.type}')
return temp_entity
class SetPause:
"""Class for representing SetPause messages
Sets the pause state
Attributes:
paused: The pause state. 1 for paused, 0 otherwise.
"""
    __slots__ = (
        'paused',
    )
def __init__(self):
self.paused = None
@staticmethod
def write(file, set_pause):
_IO.write.byte(file, SVC_SETPAUSE)
_IO.write.byte(file, set_pause.paused)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SETPAUSE
set_pause = SetPause()
set_pause.paused = _IO.read.byte(file)
return set_pause
class SignOnNum:
"""Class for representing SignOnNum messages
This message represents the client state.
Attributes:
sign_on: The client state.
"""
    __slots__ = (
        'sign_on',
    )
def __init__(self):
self.sign_on = None
@staticmethod
def write(file, sign_on_num):
_IO.write.byte(file, SVC_SIGNONNUM)
_IO.write.byte(file, sign_on_num.sign_on)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SIGNONNUM
sign_on_num = SignOnNum()
sign_on_num.sign_on = _IO.read.byte(file)
return sign_on_num
class CenterPrint:
"""Class for representing CenterPrint messages
Prints text in the center of the screen.
Attributes:
text: The text to be shown.
"""
    __slots__ = (
        'text',
    )
def __init__(self):
self.text = None
@staticmethod
def write(file, center_print):
_IO.write.byte(file, SVC_CENTERPRINT)
_IO.write.string(file, center_print.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CENTERPRINT
center_print = CenterPrint()
center_print.text = _IO.read.string(file)
return center_print
class KilledMonster:
"""Class for representing KilledMonster messages
Indicates the death of a monster.
"""
__slots__ = ()
@staticmethod
def write(file, killed_monster=None):
_IO.write.byte(file, SVC_KILLEDMONSTER)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_KILLEDMONSTER
return KilledMonster()
class FoundSecret:
"""Class for representing FoundSecret messages
Indicates a secret has been found.
"""
__slots__ = ()
@staticmethod
def write(file, found_secret=None):
_IO.write.byte(file, SVC_FOUNDSECRET)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_FOUNDSECRET
return FoundSecret()
class SpawnStaticSound:
"""Class for representing SpawnStaticSound messages
Creates a static sound
Attributes:
origin: The position of the sound.
sound_number: The sound number in the sound table.
volume: The sound volume.
attenuation: The sound attenuation.
"""
__slots__ = (
'origin',
'sound_number',
'volume',
'attenuation'
)
def __init__(self):
self.origin = None
self.sound_number = None
self.volume = None
self.attenuation = None
@staticmethod
def write(file, spawn_static_sound):
_IO.write.byte(file, SVC_SPAWNSTATICSOUND)
_IO.write.position(file, spawn_static_sound.origin)
_IO.write.byte(file, spawn_static_sound.sound_number)
_IO.write.byte(file, spawn_static_sound.volume * 256)
_IO.write.byte(file, spawn_static_sound.attenuation * 64)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNSTATICSOUND
spawn_static_sound = SpawnStaticSound()
spawn_static_sound.origin = _IO.read.position(file)
spawn_static_sound.sound_number = _IO.read.byte(file)
spawn_static_sound.volume = _IO.read.byte(file) / 256
spawn_static_sound.attenuation = _IO.read.byte(file) / 64
return spawn_static_sound
class Intermission:
"""Class for representing Intermission messages
Displays the level end screen.
"""
__slots__ = ()
@staticmethod
def write(file, intermission=None):
_IO.write.byte(file, SVC_INTERMISSION)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_INTERMISSION
return Intermission()
class Finale:
"""Class for representing Finale messages
Displays the episode end screen.
Attributes:
text: The text to show.
"""
    __slots__ = (
        'text',
    )
def __init__(self):
self.text = None
@staticmethod
def write(file, finale):
_IO.write.byte(file, SVC_FINALE)
_IO.write.string(file, finale.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_FINALE
finale = Finale()
finale.text = _IO.read.string(file)
return finale
class CdTrack:
"""Class for representing CdTrack messages
    Selects the CD track.
Attributes:
from_track: The start track.
to_track: The end track.
"""
__slots__ = (
'from_track',
'to_track'
)
def __init__(self):
self.from_track = None
self.to_track = None
@staticmethod
def write(file, cd_track):
_IO.write.byte(file, SVC_CDTRACK)
_IO.write.byte(file, cd_track.from_track)
_IO.write.byte(file, cd_track.to_track)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CDTRACK
cd_track = CdTrack()
cd_track.from_track = _IO.read.byte(file)
cd_track.to_track = _IO.read.byte(file)
return cd_track
class SellScreen:
"""Class for representing SellScreen messages
Displays the help and sell screen.
"""
__slots__ = ()
@staticmethod
def write(file, sell_screen=None):
_IO.write.byte(file, SVC_SELLSCREEN)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SELLSCREEN
return SellScreen()
class CutScene:
"""Class for representing CutScene messages
Displays end screen and text.
Attributes:
text: The text to be shown.
"""
    __slots__ = (
        'text',
    )
def __init__(self):
self.text = None
@staticmethod
def write(file, cut_scene):
_IO.write.byte(file, SVC_CUTSCENE)
_IO.write.string(file, cut_scene.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CUTSCENE
cut_scene = CutScene()
cut_scene.text = _IO.read.string(file)
return cut_scene
_messages = [Bad, Nop, Disconnect, UpdateStat, Version, SetView, Sound,
Time, Print, StuffText, SetAngle, ServerInfo, LightStyle,
UpdateName, UpdateFrags, ClientData, StopSound, UpdateColors,
Particle, Damage, SpawnStatic, SpawnBinary, SpawnBaseline,
TempEntity, SetPause, SignOnNum, CenterPrint, KilledMonster,
FoundSecret, SpawnStaticSound, Intermission, Finale, CdTrack,
SellScreen, CutScene]
U_MOREBITS = 0b0000000000000001
U_ORIGIN1 = 0b0000000000000010
U_ORIGIN2 = 0b0000000000000100
U_ORIGIN3 = 0b0000000000001000
U_ANGLE2 = 0b0000000000010000
U_NOLERP = 0b0000000000100000
U_FRAME = 0b0000000001000000
U_SIGNAL = 0b0000000010000000
U_ANGLE1 = 0b0000000100000000
U_ANGLE3 = 0b0000001000000000
U_MODEL = 0b0000010000000000
U_COLORMAP = 0b0000100000000000
U_SKIN = 0b0001000000000000
U_EFFECTS = 0b0010000000000000
U_LONGENTITY = 0b0100000000000000
class UpdateEntity:
"""Class for representing UpdateEntity messages
Updates an entity.
Attributes:
bit_mask: A bit field indicating what data is sent.
entity: The number of the entity.
model_index: The number of the model in the model table.
frame: The frame number of the model.
        colormap: The color map used to display the model.
skin: The skin number of the model.
effects: A bit field indicating special effects.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'bit_mask',
'entity',
'model_index',
'frame',
'colormap',
'skin',
'effects',
'origin',
'angles'
)
def __init__(self):
self.bit_mask = 0b0000000000000000
self.entity = None
self.model_index = None
self.frame = None
self.colormap = None
self.skin = None
self.effects = None
self.origin = None, None, None
self.angles = None, None, None
@staticmethod
def write(file, update_entity):
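        # The high bit (U_SIGNAL) marks this byte as a fast entity update rather
        # than a regular SVC_* message id; read() masks it off again below.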
_IO.write.byte(file, update_entity.bit_mask & 0xFF | 0x80)
if update_entity.bit_mask & U_MOREBITS:
_IO.write.byte(file, update_entity.bit_mask >> 8 & 0xFF)
if update_entity.bit_mask & U_LONGENTITY:
_IO.write.short(file, update_entity.entity)
else:
_IO.write.byte(file, update_entity.entity)
if update_entity.bit_mask & U_MODEL:
_IO.write.byte(file, update_entity.model_index)
if update_entity.bit_mask & U_FRAME:
_IO.write.byte(file, update_entity.frame)
if update_entity.bit_mask & U_COLORMAP:
_IO.write.byte(file, update_entity.colormap)
if update_entity.bit_mask & U_SKIN:
_IO.write.byte(file, update_entity.skin)
if update_entity.bit_mask & U_EFFECTS:
_IO.write.byte(file, update_entity.effects)
if update_entity.bit_mask & U_ORIGIN1:
_IO.write.coord(file, update_entity.origin[0])
if update_entity.bit_mask & U_ANGLE1:
_IO.write.angle(file, update_entity.angles[0])
if update_entity.bit_mask & U_ORIGIN2:
_IO.write.coord(file, update_entity.origin[1])
if update_entity.bit_mask & U_ANGLE2:
_IO.write.angle(file, update_entity.angles[1])
if update_entity.bit_mask & U_ORIGIN3:
_IO.write.coord(file, update_entity.origin[2])
if update_entity.bit_mask & U_ANGLE3:
_IO.write.angle(file, update_entity.angles[2])
@staticmethod
def read(file):
update_entity = UpdateEntity()
b = _IO.read.byte(file)
update_entity.bit_mask = b & 0x7F
if update_entity.bit_mask & U_MOREBITS:
update_entity.bit_mask |= _IO.read.byte(file) << 8
if update_entity.bit_mask & U_LONGENTITY:
update_entity.entity = _IO.read.short(file)
else:
update_entity.entity = _IO.read.byte(file)
if update_entity.bit_mask & U_MODEL:
update_entity.model_index = _IO.read.byte(file)
if update_entity.bit_mask & U_FRAME:
update_entity.frame = _IO.read.byte(file)
if update_entity.bit_mask & U_COLORMAP:
update_entity.colormap = _IO.read.byte(file)
if update_entity.bit_mask & U_SKIN:
update_entity.skin = _IO.read.byte(file)
if update_entity.bit_mask & U_EFFECTS:
update_entity.effects = _IO.read.byte(file)
if update_entity.bit_mask & U_ORIGIN1:
update_entity.origin = _IO.read.coord(file), update_entity.origin[1], update_entity.origin[2]
if update_entity.bit_mask & U_ANGLE1:
update_entity.angles = _IO.read.angle(file), update_entity.angles[1], update_entity.angles[2]
if update_entity.bit_mask & U_ORIGIN2:
update_entity.origin = update_entity.origin[0], _IO.read.coord(file), update_entity.origin[2]
if update_entity.bit_mask & U_ANGLE2:
update_entity.angles = update_entity.angles[0], _IO.read.angle(file), update_entity.angles[2]
if update_entity.bit_mask & U_ORIGIN3:
update_entity.origin = update_entity.origin[0], update_entity.origin[1], _IO.read.coord(file)
if update_entity.bit_mask & U_ANGLE3:
update_entity.angles = update_entity.angles[0], update_entity.angles[1], _IO.read.angle(file)
return update_entity
class MessageBlock:
"""Class for representing a message block
Attributes:
view_angles: The client view angles.
messages: A sequence of messages.
"""
__slots__ = (
'view_angles',
'messages'
)
def __init__(self):
self.view_angles = None
self.messages = []
@staticmethod
def write(file, message_block):
start_of_block = file.tell()
_IO.write.long(file, 0)
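        # The long above is a placeholder for the block size; it is backpatched
        # below once all messages have been written and their length is known.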
_IO.write.float(file, message_block.view_angles[0])
_IO.write.float(file, message_block.view_angles[1])
_IO.write.float(file, message_block.view_angles[2])
start_of_messages = file.tell()
for message in message_block.messages:
message.__class__.write(file, message)
end_of_messages = file.tell()
block_size = end_of_messages - start_of_messages
file.seek(start_of_block)
_IO.write.long(file, block_size)
        file.seek(end_of_messages)
@staticmethod
def read(file):
message_block = MessageBlock()
blocksize = _IO.read.long(file)
message_block.view_angles = _IO.read.float(file), _IO.read.float(file), _IO.read.float(file)
message_block_data = file.read(blocksize)
buff = io.BufferedReader(io.BytesIO(message_block_data))
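        # peek() looks at the next message id byte without consuming it, so an
        # empty result means the block has been fully parsed.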
message_id = buff.peek(1)[:1]
while message_id != b'':
message_id = struct.unpack('<B', message_id)[0]
if message_id < 128:
message = _messages[message_id].read(buff)
else:
message = UpdateEntity.read(buff)
if message:
message_block.messages.append(message)
message_id = buff.peek(1)[:1]
buff.close()
return message_block
```
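The snippet below is a small usage sketch (not part of the module above) showing how the `U_*` flag bits drive what `UpdateEntity.write`/`read` actually serialize: only the fields whose bits are set in `bit_mask` hit the wire, and the read side rebuilds the same values. It assumes the package is importable as `vgio.quake.protocol`, exactly as the tests below do; the entity number, origin, and frame are arbitrary example values.

```python
import io

from vgio.quake import protocol

# Only origin and frame are sent, so only their flag bits are set.
update = protocol.UpdateEntity()
update.bit_mask |= protocol.U_ORIGIN1 | protocol.U_ORIGIN2 | protocol.U_ORIGIN3 | protocol.U_FRAME
update.entity = 12
update.origin = 0.0, 64.0, -128.0
update.frame = 2

buff = io.BytesIO()
protocol.UpdateEntity.write(buff, update)
buff.seek(0)

result = protocol.UpdateEntity.read(buff)
assert result.origin == (0.0, 64.0, -128.0)
assert result.frame == 2
```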
#### File: quake/tests/test_protocol.py
```python
import unittest
from vgio.quake.tests.basecase import TestCase
from vgio.quake import protocol
class TestProtocolReadWrite(TestCase):
def test_bad_message(self):
protocol.Bad.write(self.buff)
self.buff.seek(0)
protocol.Bad.read(self.buff)
def test_nop_message(self):
protocol.Nop.write(self.buff)
self.buff.seek(0)
protocol.Nop.read(self.buff)
def test_disconnect_message(self):
protocol.Disconnect.write(self.buff)
self.buff.seek(0)
protocol.Disconnect.read(self.buff)
def test_update_stat_message(self):
u0 = protocol.UpdateStat()
u0.index = 0
u0.value = 75
protocol.UpdateStat.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateStat.read(self.buff)
self.assertEqual(u0.index, u1.index,
'Update stat indexes should be equal')
self.assertEqual(u0.value, u1.value,
'Update stat values should be equal')
def test_version_message(self):
v0 = protocol.Version()
v0.protocol_version = 15
protocol.Version.write(self.buff, v0)
self.buff.seek(0)
v1 = protocol.Version.read(self.buff)
self.assertEqual(v0.protocol_version, v1.protocol_version,
'Protocol versions should be equal')
def test_set_view_message(self):
s0 = protocol.SetView()
s0.entity = 16
protocol.SetView.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SetView.read(self.buff)
self.assertEqual(s0.entity, s1.entity, 'Entities should be equal')
def test_sound_message(self):
# No optional arguments
s0 = protocol.Sound()
s0.entity = 16
s0.channel = 2
s0.sound_number = 4
s0.origin = -512, 256, 2048
protocol.Sound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.Sound.read(self.buff)
self.assertEqual(s0.entity, s1.entity, 'Entity should be equal')
self.assertEqual(s0.channel, s1.channel, 'Channel should be equal')
self.assertEqual(s0.sound_number, s1.sound_number,
'Sound number should be equal')
self.assertEqual(s0.origin, s1.origin, 'Origin should be equal')
self.clear_buffer()
# Both optional arguments
s0 = protocol.Sound()
s0.entity = 16
s0.channel = 2
s0.sound_number = 4
s0.origin = -512, 256, 2048
s0.attenuation = 0.5
s0.volume = 64
s0.bit_mask |= protocol.SND_ATTENUATION | protocol.SND_VOLUME
protocol.Sound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.Sound.read(self.buff)
self.assertEqual(s0.entity, s1.entity, 'Entities should be equal')
self.assertEqual(s0.channel, s1.channel, 'Channels should be equal')
self.assertEqual(s0.sound_number, s1.sound_number,
'Sound numbers should be equal')
self.assertEqual(s0.origin, s1.origin, 'Origins should be equal')
self.assertEqual(s0.attenuation, s1.attenuation,
'Attenuations should be equal')
self.assertEqual(s0.volume, s1.volume, 'Volumes should be equal')
protocol.Sound.write(self.buff, s0)
self.buff.seek(0)
def test_time_message(self):
t0 = protocol.Time()
t0.time = 4.125
protocol.Time.write(self.buff, t0)
self.buff.seek(0)
t1 = protocol.Time.read(self.buff)
        self.assertEqual(t0.time, t1.time, 'Time values should be equal')
def test_print_message(self):
p0 = protocol.Print()
p0.text = "This hall selects EASY skill"
protocol.Print.write(self.buff, p0)
self.buff.seek(0)
p1 = protocol.Print.read(self.buff)
self.assertEqual(p0.text, p1.text, 'Text values should be equal')
def test_stuff_text_message(self):
s0 = protocol.StuffText()
s0.text = "This hall selects NORMAL skill"
protocol.StuffText.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.StuffText.read(self.buff)
self.assertEqual(s0.text, s1.text, 'Text values should be equal')
def test_set_angle_message(self):
s0 = protocol.SetAngle()
s0.angles = 0, -90, 22.5
protocol.SetAngle.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SetAngle.read(self.buff)
self.assertEqual(s0.angles, s1.angles, 'Angles should be equal')
def test_server_info_message(self):
s0 = protocol.ServerInfo()
s0.protocol_version = 15
s0.max_clients = 1
s0.multi = 0
s0.map_name = 'the Necropolis'
s0.models = 'maps/e1m3.bsp', 'progs/player.mdl'
s0.sounds = 'weapons/ric1.wav', 'weapons/ric2.wav'
protocol.ServerInfo.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.ServerInfo.read(self.buff)
self.assertEqual(s0.protocol_version, s1.protocol_version,
'Protocol versions should be equal')
self.assertEqual(s0.max_clients, s1.max_clients,
'Max clients should be equal')
self.assertEqual(s0.multi, s1.multi, 'Multi values should be equal')
self.assertEqual(s0.map_name, s1.map_name, 'Map names Should be equal')
self.assertEqual(s0.models, s1.models, 'Models should be equal')
self.assertEqual(s0.sounds, s1.sounds, 'Sounds should be equal')
def test_light_style_message(self):
l0 = protocol.LightStyle()
l0.style = 15
l0.string = 'azazaaazzz'
protocol.LightStyle.write(self.buff, l0)
self.buff.seek(0)
l1 = protocol.LightStyle.read(self.buff)
self.assertEqual(l0.style, l1.style, 'Styles should be equal')
self.assertEqual(l0.string, l1.string, 'Strings should be equal')
def test_update_name_message(self):
u0 = protocol.UpdateName()
u0.player = 0
u0.name = "Player"
protocol.UpdateName.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateName.read(self.buff)
self.assertEqual(u0.player, u1.player, 'Player values should be equal')
self.assertEqual(u0.name, u1.name, 'Names should be equal')
def test_update_frags_message(self):
u0 = protocol.UpdateFrags()
u0.player = 1
u0.frags = 100
protocol.UpdateFrags.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateFrags.read(self.buff)
self.assertEqual(u0.player, u1.player, 'Player values should be equal')
self.assertEqual(u0.frags, u1.frags, 'Frags should be equal')
def test_client_data_message(self):
c0 = protocol.ClientData()
c0.on_ground = True
c0.in_water = False
c0.health = 75
c0.active_ammo = 1
c0.ammo = 25, 0, 0, 0
c0.active_weapon = 16
protocol.ClientData.write(self.buff, c0)
self.buff.seek(0)
c1 = protocol.ClientData.read(self.buff)
self.assertEqual(c0.on_ground, c1.on_ground,
'On ground flags should be equal')
self.assertEqual(c0.in_water, c1.in_water,
'In water flags should be equal')
self.assertEqual(c0.health, c1.health, 'Health values should be equal')
self.assertEqual(c0.active_ammo, c1.active_ammo,
'Active ammo values should be equal')
self.assertEqual(c0.ammo, c1.ammo, 'Ammo counts should be equal')
self.assertEqual(c0.active_weapon, c1.active_weapon,
'Active weapons should be equal')
self.clear_buffer()
c0 = protocol.ClientData()
c0.bit_mask = 0b0111111111111111
c0.view_height = 18
c0.ideal_pitch = 45
c0.punch_angle = -22.5, 0, 90
c0.velocity = 0, 16, -32
c0.item_bit_mask = 0b01111111111111111111111111111111
c0.on_ground = True
c0.in_water = True
c0.weapon_frame = 8
c0.armor = 2
c0.weapon = 32
c0.health = 99
c0.active_ammo = 1
c0.ammo = 25, 0, 0, 0
c0.active_weapon = 16
protocol.ClientData.write(self.buff, c0)
self.buff.seek(0)
c1 = protocol.ClientData.read(self.buff)
self.assertEqual(c0.bit_mask, c1.bit_mask, 'Bit masks should be equal')
self.assertEqual(c0.view_height, c1.view_height,
'View heights should be equal')
self.assertEqual(c0.ideal_pitch, c1.ideal_pitch,
'Ideal pitches should be equal')
self.assertEqual(c0.punch_angle, c1.punch_angle,
'Punch angles should be equal')
self.assertEqual(c0.velocity, c1.velocity,
'Velocities should be equal')
self.assertEqual(c0.item_bit_mask, c1.item_bit_mask,
'Item bit masks should be equal')
self.assertEqual(c0.on_ground, c1.on_ground,
'On ground flags should be equal')
self.assertEqual(c0.in_water, c1.in_water,
'In water flags should be equal')
self.assertEqual(c0.weapon_frame, c1.weapon_frame,
'Weapon frames should be equal')
self.assertEqual(c0.armor, c1.armor, 'Armor values should be equal')
self.assertEqual(c0.weapon, c1.weapon, 'Weapon values should be equal')
self.assertEqual(c0.health, c1.health, 'Health values should be equal')
self.assertEqual(c0.active_ammo, c1.active_ammo,
'Active ammo values should be equal')
self.assertEqual(c0.ammo, c1.ammo, 'Ammo values should be equal')
self.assertEqual(c0.active_weapon, c1.active_weapon,
'Active weapon values should be equal')
def test_stop_sound_message(self):
s0 = protocol.StopSound()
s0.channel = 2
s0.entity = 64
protocol.StopSound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.StopSound.read(self.buff)
self.assertEqual(s0.channel, s1.channel, 'Channels should be equal')
self.assertEqual(s0.entity, s1.entity, 'Entities should be equal')
def test_update_colors_message(self):
u0 = protocol.UpdateColors()
u0.player = 1
u0.colors = 0b00010001
protocol.UpdateColors.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateColors.read(self.buff)
self.assertEqual(u0.player, u1.player, 'Player values should be equal')
self.assertEqual(u0.colors, u1.colors, 'Colors values should be equal')
def test_particle_message(self):
p0 = protocol.Particle()
p0.origin = 0, 16, -1024
p0.direction = 0, 1, 2
p0.count = 8
p0.color = 73
protocol.Particle.write(self.buff, p0)
self.buff.seek(0)
p1 = protocol.Particle.read(self.buff)
self.assertEqual(p0.origin, p1.origin, 'Origin should be equal')
self.assertEqual(p0.direction, p1.direction,
'Direction should be equal')
self.assertEqual(p0.count, p1.count, 'Count should be equal')
self.assertEqual(p0.color, p1.color, 'Color should be equal')
def test_damage_message(self):
d0 = protocol.Damage()
d0.armor = 8
d0.blood = 4
d0.origin = 0, 16, -512
protocol.Damage.write(self.buff, d0)
self.buff.seek(0)
d1 = protocol.Damage.read(self.buff)
self.assertEqual(d0.armor, d1.armor, 'Armor values should be equal')
self.assertEqual(d0.blood, d1.blood, 'Blood values should be equal')
self.assertEqual(d0.origin, d1.origin, 'Origins should be equal')
def test_spawn_static_message(self):
s0 = protocol.SpawnStatic()
s0.model_index = 127
s0.frame = 8
s0.color_map = 1
s0.skin = 2
s0.origin = 0, -32, 1600
s0.angles = 22.5, 0, -45
protocol.SpawnStatic.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SpawnStatic.read(self.buff)
self.assertEqual(s0.model_index, s1.model_index,
'Model indices should be equal')
self.assertEqual(s0.frame, s1.frame, 'Frames should be equal')
self.assertEqual(s0.color_map, s1.color_map,
'Color maps should be equal')
self.assertEqual(s0.skin, s1.skin, 'Skins should be equal')
self.assertEqual(s0.origin, s1.origin, 'Origins should be equal')
self.assertEqual(s0.angles, s1.angles, 'Angles should be equal')
def test_spawn_binary_message(self):
with self.assertRaises(protocol.BadMessage):
protocol.SpawnBinary.write(self.buff)
with self.assertRaises(protocol.BadMessage):
protocol.SpawnBinary.read(self.buff)
def test_spawn_baseline_message(self):
s0 = protocol.SpawnBaseline()
s0.entity = 10
s0.model_index = 127
s0.frame = 8
s0.color_map = 1
s0.skin = 2
s0.origin = 0, -32, 1600
s0.angles = 22.5, 0, -45
protocol.SpawnBaseline.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SpawnBaseline.read(self.buff)
self.assertEqual(s0.entity, s1.entity, 'Entities should be equal')
self.assertEqual(s0.model_index, s1.model_index,
'Model indices should be equal')
self.assertEqual(s0.frame, s1.frame, 'Frames should be equal')
self.assertEqual(s0.color_map, s1.color_map,
'Color maps should be equal')
self.assertEqual(s0.skin, s1.skin, 'Skins should be equal')
self.assertEqual(s0.origin, s1.origin, 'Origins should be equal')
self.assertEqual(s0.angles, s1.angles, 'Angles should be equal')
def test_temp_entity_message(self):
t0 = protocol.TempEntity()
t0.type = protocol.TE_WIZSPIKE
t0.origin = 0, 128, -768
protocol.TempEntity.write(self.buff, t0)
self.buff.seek(0)
t1 = protocol.TempEntity.read(self.buff)
self.assertEqual(t0.type, t1.type, 'Types should be equal')
self.assertEqual(t0.origin, t1.origin, 'Origins should be equal')
self.clear_buffer()
t0 = protocol.TempEntity()
t0.type = protocol.TE_LIGHTNING1
t0.entity = 8
t0.start = 0, 0, 0
t0.end = 16, -96, 2048
protocol.TempEntity.write(self.buff, t0)
self.buff.seek(0)
t1 = protocol.TempEntity.read(self.buff)
self.assertEqual(t0.type, t1.type, 'Types should be equal')
self.assertEqual(t0.entity, t1.entity, 'Entity values should be equal')
self.assertEqual(t0.start, t1.start, 'Start vectors should be equal')
self.assertEqual(t0.end, t1.end, 'End vectors should be equal')
self.clear_buffer()
t0 = protocol.TempEntity()
t0.type = protocol.TE_EXPLOSION2
t0.origin = 0, 128, -768
t0.color_start = 0
t0.color_length = 16
protocol.TempEntity.write(self.buff, t0)
self.buff.seek(0)
t1 = protocol.TempEntity.read(self.buff)
self.assertEqual(t0.type, t1.type, 'Types should be equal')
self.assertEqual(t0.origin, t1.origin, 'Origins should be equal')
self.assertEqual(t0.color_start, t1.color_start,
'Color start values should be equal')
self.assertEqual(t0.color_length, t1.color_length,
'Color length values should be equal')
self.clear_buffer()
with self.assertRaises(protocol.BadMessage):
t0 = protocol.TempEntity()
t0.type = 14
protocol.TempEntity.write(self.buff, t0)
self.clear_buffer()
with self.assertRaises(protocol.BadMessage):
self.buff.write(b'\x17\x0e')
self.buff.seek(0)
protocol.TempEntity.read(self.buff)
def test_set_pause_message(self):
s0 = protocol.SetPause()
s0.paused = 1
protocol.SetPause.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SetPause.read(self.buff)
self.assertEqual(s0.paused, s1.paused, 'Paused values should be equal')
def test_sign_on_num_message(self):
s0 = protocol.SignOnNum()
s0.sign_on = 1
protocol.SignOnNum.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SignOnNum.read(self.buff)
self.assertEqual(s0.sign_on, s1.sign_on,
'Sign on values should be equal')
def test_center_print_message(self):
c0 = protocol.CenterPrint()
c0.text = 'This hall selects HARD skill'
protocol.CenterPrint.write(self.buff, c0)
self.buff.seek(0)
c1 = protocol.CenterPrint.read(self.buff)
self.assertEqual(c0.text, c1.text, 'Text values should be equal')
def test_killed_monster_message(self):
protocol.KilledMonster.write(self.buff)
self.buff.seek(0)
protocol.KilledMonster.read(self.buff)
def test_found_secret_message(self):
protocol.FoundSecret.write(self.buff)
self.buff.seek(0)
protocol.FoundSecret.read(self.buff)
def test_spawn_static_sound_message(self):
s0 = protocol.SpawnStaticSound()
s0.origin = 0, -32, 1096
s0.sound_number = 2
s0.volume = 0.5
s0.attenuation = 0.25
protocol.SpawnStaticSound.write(self.buff, s0)
self.buff.seek(0)
s1 = protocol.SpawnStaticSound.read(self.buff)
self.assertEqual(s0.origin, s1.origin, 'Origins should be equal')
self.assertEqual(s0.sound_number, s1.sound_number,
'Sound numbers should be equal')
self.assertEqual(s0.volume, s1.volume, 'Volume values should be equal')
self.assertEqual(s0.attenuation, s1.attenuation,
'Attenuation values should be equal')
def test_intermission_message(self):
protocol.Intermission.write(self.buff)
self.buff.seek(0)
protocol.Intermission.read(self.buff)
def test_finale_message(self):
f0 = protocol.Finale()
f0.text = 'Game Over'
protocol.Finale.write(self.buff, f0)
self.buff.seek(0)
f1 = protocol.Finale.read(self.buff)
        self.assertEqual(f0.text, f1.text, 'Text values should be equal')
def test_cd_track_message(self):
c0 = protocol.CdTrack()
c0.from_track = 2
c0.to_track = 3
protocol.CdTrack.write(self.buff, c0)
self.buff.seek(0)
c1 = protocol.CdTrack.read(self.buff)
self.assertEqual(c0.from_track, c1.from_track,
'From track values should be equal')
self.assertEqual(c0.to_track, c1.to_track, 'To track should be equal')
def test_sell_screen_message(self):
protocol.SellScreen.write(self.buff)
self.buff.seek(0)
protocol.SellScreen.read(self.buff)
def test_cut_scene_message(self):
c0 = protocol.CutScene()
c0.text = 'Cut scene'
protocol.CutScene.write(self.buff, c0)
self.buff.seek(0)
c1 = protocol.CutScene.read(self.buff)
self.assertEqual(c0.text, c1.text, 'Text values should be equal')
def test_update_entity_message(self):
# Quick update
u0 = protocol.UpdateEntity()
u0.bit_mask |= protocol.U_ORIGIN1 | protocol.U_ORIGIN2 | protocol.U_ORIGIN3 | protocol.U_ANGLE2 | protocol.U_FRAME
u0.entity = 4
        u0.origin = 128.5, 250, -980
u0.angles = None, 90, None
u0.frame = 1
protocol.UpdateEntity.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateEntity.read(self.buff)
self.assertEqual(u0.bit_mask, u1.bit_mask, 'Bit masks should be equal')
self.assertEqual(u0.entity, u1.entity, 'Entities should be equal')
self.assertEqual(u0.origin, u1.origin, 'Origins should be equal')
self.assertEqual(u0.angles, u1.angles, 'Angles should be equal')
self.assertEqual(u0.frame, u1.frame, 'Frames should be equal')
self.clear_buffer()
# Full update
u0 = protocol.UpdateEntity()
u0.bit_mask |= 0x7F7F
u0.entity = 4
u0.model_index = 8
u0.frame = 0
u0.colormap = 1
u0.skin = 2
u0.effects = 3
u0.origin = 128.5, 250, -980
u0.angles = 22.5, 0, -90
protocol.UpdateEntity.write(self.buff, u0)
self.buff.seek(0)
u1 = protocol.UpdateEntity.read(self.buff)
self.assertEqual(u0.bit_mask, u1.bit_mask, 'Bit masks should be equal')
self.assertEqual(u0.entity, u1.entity, 'Entities should be equal')
self.assertEqual(u0.model_index, u1.model_index,
'Models should be equal')
self.assertEqual(u0.frame, u1.frame, 'Frames should be equal')
self.assertEqual(u0.colormap, u1.colormap, 'Colormaps should be equal')
self.assertEqual(u0.skin, u1.skin, 'Skins should be equal')
self.assertEqual(u0.effects, u1.effects, 'Effects should be equal')
self.assertEqual(u0.origin, u1.origin, 'Origins should be equal')
self.assertEqual(u0.angles, u1.angles, 'Angles should be equal')
if __name__ == '__main__':
unittest.main()
```
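One path the tests above do not exercise directly is `MessageBlock`, which frames a sequence of messages behind a backpatched size and the client view angles. A minimal round-trip sketch, under the same import assumptions as the tests:

```python
import io

from vgio.quake import protocol

block = protocol.MessageBlock()
block.view_angles = 0.0, 90.0, 0.0

# Any message class works here; Time is the smallest payload.
time_message = protocol.Time()
time_message.time = 1.5
block.messages.append(time_message)

buff = io.BytesIO()
protocol.MessageBlock.write(buff, block)
buff.seek(0)

result = protocol.MessageBlock.read(buff)
assert result.view_angles == (0.0, 90.0, 0.0)
assert result.messages[0].time == 1.5
```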
#### File: quake/tests/test_wad.py
```python
import io
import unittest
from vgio.quake.tests.basecase import TestCase
from vgio.quake import wad
class TestWadReadWrite(TestCase):
def test_check_file_type(self):
self.assertFalse(wad.is_wadfile('./test_data/test.bsp'))
self.assertFalse(wad.is_wadfile('./test_data/test.lmp'))
self.assertFalse(wad.is_wadfile('./test_data/test.map'))
self.assertFalse(wad.is_wadfile('./test_data/test.mdl'))
self.assertFalse(wad.is_wadfile('./test_data/test.pak'))
self.assertFalse(wad.is_wadfile('./test_data/test.spr'))
self.assertTrue(wad.is_wadfile('./test_data/test.wad'))
def test_read(self):
wad_file = wad.WadFile('./test_data/test.wad', 'r')
self.assertFalse(wad_file.fp.closed, 'File should be open')
info = wad_file.getinfo('test')
self.assertIsNotNone(info, 'FileInfo should not be None')
self.assertEqual(info.filename, 'test')
self.assertEqual(info.file_size, 5480, 'FileInfo size of test file should be 5480')
self.assertEqual(info.file_offset, 12, 'FileInfo offset of test file should be 12')
self.assertEqual(info.type, wad.LumpType.MIPTEX, 'FileInfo type of test file should be MIPTEX')
file = wad_file.open('test')
self.assertIsNotNone(file, 'File should not be None')
file.close()
fp = wad_file.fp
wad_file.close()
self.assertTrue(fp.closed, 'File should be closed')
self.assertIsNone(wad_file.fp, 'File pointer should be cleaned up')
def test_read_file(self):
wad_file = wad.WadFile('./test_data/test.wad', 'r')
self.assertFalse(wad_file.fp.closed, 'File should be open')
info = wad_file.infolist()[0]
read_file = wad_file.open(info)
self.assertEqual(read_file.mode, 'r', 'File mode should be "r"')
wad_file.close()
def test_read_file_raises_on_write(self):
with self.assertRaises(ValueError):
with wad.WadFile('./test_data/test.wad', 'r') as wad_file:
wad_file.open('new_file', 'w')
def test_write(self):
wad_file = wad.WadFile(self.buff, 'w')
self.assertFalse(wad_file.fp.closed, 'File should be open')
wad_file.write('./test_data/test.mdl')
wad_file.write('./test_data/test.bsp', 'e1m1.bsp')
self.assertTrue('test.mdl' in wad_file.namelist(), 'Mdl file should be in Wad file')
self.assertTrue('e1m1.bsp' in wad_file.namelist(), 'Bsp file should be in Wad file')
fp = wad_file.fp
wad_file.close()
self.assertFalse(fp.closed, 'File should be open')
self.assertIsNone(wad_file.fp, 'File pointer should be cleaned up')
self.buff.close()
def test_write_string(self):
w0 = wad.WadFile(self.buff, 'w')
w0.writestr('test.cfg', b'bind ALT +strafe')
w0.writestr(wad.WadInfo('readme.txt'), 'test')
info = wad.WadInfo('bytes')
info.file_size = len(b'bytes')
info.type = wad.LumpType.LUMP
w0.writestr(info, io.BytesIO(b'bytes'))
w0.close()
self.buff.seek(0)
w1 = wad.WadFile(self.buff, 'r')
self.assertTrue('test.cfg' in w1.namelist(), 'Cfg file should be in Wad file')
self.assertTrue('readme.txt' in w1.namelist(), 'Txt file should be in Wad file')
self.assertTrue('bytes' in w1.namelist(), 'Bytes should be in Wad file')
self.assertEqual(w1.read('test.cfg'), b'bind ALT +strafe', 'Cfg file content should not change')
self.assertEqual(w1.read('readme.txt').decode('ascii'), 'test', 'Txt file content should not change')
self.assertEqual(w1.read('bytes'), b'bytes', 'Bytes content should not change')
w1.close()
self.buff.close()
def test_write_file(self):
wad_file = wad.WadFile(self.buff, 'w')
self.assertFalse(wad_file.fp.closed, 'File should be open')
from vgio.quake.bsp.bsp29 import Bsp
bsp_file = Bsp.open('./test_data/test.bsp')
bsp_file.close()
# Write with an ArchiveWriteFile object
write_file = wad_file.open('test.bsp', 'w')
self.assertTrue(wad_file._writing, 'Wad file should be flagged as being written to')
bsp_file.save(write_file)
write_file.close()
self.assertFalse(wad_file._writing, 'Wad file should no longer be flagged as being written to')
self.assertTrue('test.bsp' in wad_file.namelist(), 'Written file should be in Wad file')
fp = wad_file.fp
wad_file.close()
self.assertFalse(fp.closed, 'File should be open')
self.assertIsNone(wad_file.fp, 'File pointer should be cleaned up')
info = wad_file.infolist()[0]
self.assertEqual('test.bsp', info.filename, 'Filename should be "test.bsp"')
self.assertEqual(12, info.file_offset, 'Offset should be 12')
        self.assertEqual(58422, info.file_size, 'File size should be 58422')
def test_append(self):
f = open('./test_data/test.wad', 'rb')
buff = io.BytesIO(f.read(-1))
f.close()
wad_file = wad.WadFile(buff, 'a')
wad_file.write('./test_data/test.bsp')
wad_file.close()
buff.seek(0)
wad_file = wad.WadFile(buff, 'r')
self.assertTrue('test.bsp' in wad_file.namelist(), 'Appended file should be in Wad file')
fp = wad_file.fp
wad_file.close()
self.assertFalse(buff.closed, 'Wad file should not close passed file-like object')
buff.close()
def test_context_manager(self):
with wad.WadFile('./test_data/test.wad', 'r') as wad_file:
self.assertFalse(wad_file.fp.closed, 'File should be open')
self.assertEqual(wad_file.mode, 'r', 'File mode should be \'r\'')
fp = wad_file.fp
wad_file._did_modify = False
self.assertTrue(fp.closed, 'File should be closed')
self.assertIsNone(wad_file.fp, 'File pointer should be cleaned up')
    def test_empty_wad_file(self):
with wad.WadFile(self.buff, 'w'):
pass
self.buff.seek(0)
with wad.WadFile(self.buff, 'r') as wad_file:
            self.assertEqual(len(wad_file.namelist()), 0, 'Wad file should have no entries')
self.assertEqual(wad_file.end_of_data, 12, 'Directory should start immediately after header')
def test_zero_byte_file(self):
with wad.WadFile(self.buff, 'w') as wad_file:
wad_file.writestr('zero.txt', b'')
self.buff.seek(0)
with wad.WadFile(self.buff) as wad_file:
info = wad_file.getinfo('zero.txt')
self.assertEqual(info.file_offset, 12, 'File Info offset of test file should be 12')
self.assertEqual(info.file_size, 0, 'File Info size of test file should be 0')
data = wad_file.read('zero.txt')
self.assertEqual(len(data), 0, 'Length of bytes read should be zero.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshuaskelly/hrot-cli-tools",
"score": 3
} |
#### File: hcli/unpak/cli.py
```python
import argparse
import os
import sys
from tabulate import tabulate
from vgio.hrot import pak
import hcli
from hcli.common import Parser, ResolvePathAction
def main():
parser = Parser(
prog='unpak',
description='Default action is to extract files to xdir.',
epilog='example: unpak PAK0.PAK -d ./out => extract all files to ./out'
)
parser.add_argument(
'file',
metavar='file.pak',
action=ResolvePathAction
)
parser.add_argument(
'-l', '--list',
action='store_true',
help='list files'
)
parser.add_argument(
'-d',
metavar='xdir',
dest='dest',
default=os.getcwd(),
action=ResolvePathAction,
help='extract files into xdir'
)
parser.add_argument(
'-q',
dest='quiet',
action='store_true',
help='quiet mode'
)
parser.add_argument(
'-v', '--version',
dest='version',
action='version',
help=argparse.SUPPRESS,
version=f'{parser.prog} version {hcli.__version__}'
)
args = parser.parse_args()
if not pak.is_pakfile(args.file):
print(f'{parser.prog}: cannot find or open {args.file}', file=sys.stderr)
sys.exit(1)
if args.list:
with pak.PakFile(args.file) as pak_file:
info_list = sorted(pak_file.infolist(), key=lambda i: i.filename)
headers = ['Length', 'Name']
table = [[i.file_size, i.filename] for i in info_list]
length = sum([i.file_size for i in info_list])
count = len(info_list)
            table.append([length, f'{count} file{"" if count == 1 else "s"}'])
separator = []
for i in range(len(headers)):
t = max(len(str(length)), len(headers[i]) + 2)
separator.append('-' * t)
table.insert(-1, separator)
print(f'Archive: {os.path.basename(args.file)}')
print(tabulate(table, headers=headers))
sys.exit(0)
with pak.PakFile(args.file) as pak_file:
info_list = pak_file.infolist()
for item in sorted(info_list, key=lambda i: i.filename):
filename = item.filename
fullpath = os.path.join(args.dest, filename)
if not args.quiet:
print(f' extracting: {fullpath}')
try:
pak_file.extract(filename, args.dest)
except:
print(f'{parser.prog}: error: {sys.exc_info()[0]}', file=sys.stderr)
sys.exit(0)
if __name__ == '__main__':
main()
``` |
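For reference, the listing branch above boils down to a few lines of direct `vgio.hrot.pak` usage. This is a sketch, not part of the tool: it assumes a `PAK0.PAK` file exists in the working directory and uses only the `PakFile`/`infolist` calls the CLI already relies on.

```python
from vgio.hrot import pak

# Print size and name for each entry, sorted by filename like the CLI does.
with pak.PakFile('PAK0.PAK') as pak_file:
    for info in sorted(pak_file.infolist(), key=lambda i: i.filename):
        print(f'{info.file_size:>8}  {info.filename}')
```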
{
"source": "joshuaskelly/io_mesh_md2",
"score": 3
} |
#### File: io_mesh_md2/io_mesh_md2/perfmon.py
```python
import sys
import time
from contextlib import contextmanager
class PerformanceMonitor:
"""Simple class for timing addon performance. Adapted from the official
Blender FBX addon.
Example:
pmon = PerformanceMonitor('Demo')
pmon.push_scope('Starting')
with pmon.scope():
for i in range(4):
pmon.progress('Doing work:', i, 4)
pmon.pop_scope('Finished')
"""
def __init__(self, identifier=''):
self.level = -1
self.reference_time = []
self.identifier = identifier
def push_scope(self, message=''):
self.level += 1
self.reference_time.append(time.process_time())
self.log(message)
def pop_scope(self, message=''):
if not self.reference_time:
self.log(message)
return
reference_time = self.reference_time[self.level]
delta = time.process_time() - reference_time if reference_time else 0
print(f'{" " * (self.level)}Done ({delta} sec)\n')
self.log(message)
del self.reference_time[self.level]
self.level -= 1
@contextmanager
def scope(self, message=''):
self.push_scope(message)
yield
self.pop_scope()
def progress(self, message='', current=None, maximum=None):
        p = ''
        if current is not None:
            p = f' {current + 1}'
            if maximum is not None:
                p = f' {current + 1} of {maximum}'
                if current + 1 == maximum:
                    p += '\n'
sys.stdout.write(f'\r{" " * self.level}[{self.identifier}] {message}{p}')
sys.stdout.flush()
def log(self, message=''):
if message:
print(f'{" " * self.level}[{self.identifier}] {message}')
def __del__(self):
while self.level >= 0:
self.pop_scope()
``` |
{
"source": "joshuaskelly/Toast",
"score": 2
} |
#### File: examples/emitter/emitter_performance_test.py
```python
from toast.scene_graph import Component, Scene
from toast.sprite import Sprite
from toast.animation import Animation
from toast.image_sheet import ImageSheet
from toast.resource_loader import ResourceLoader
from toast.emitter import Emitter
from toast.math.vector import Vector2D
from toast.gradient import Gradient
from toast.timer import Timer
import random
import pygame
from examples.demo_game import DemoGame
class EndGameAfter(Component):
def __init__(self, milliseconds=0):
super(EndGameAfter, self).__init__()
self.__life_timer = Timer(milliseconds)
def update(self, milliseconds=0):
super(EndGameAfter, self).update(milliseconds)
if self.__life_timer.is_time_up():
pygame.event.post(pygame.event.Event(pygame.locals.QUIT))
class Particle(Sprite):
def __init__(self, image, lifetime):
super(Particle, self).__init__(image)
self.lifetime = Timer(int(lifetime))
        self.__velocity = Vector2D.from_angle(random.uniform(80.0, 100.0)) * -1.65
sheet = ImageSheet(ResourceLoader.load('data//puffs.png'), (32, 32))
puff = [(sheet[0], int(lifetime * 0.1)),
(sheet[1], int(lifetime * 0.15)),
(sheet[2], int(lifetime * 0.3)),
(sheet[3], int(lifetime * 2.0))]
self.animation = Animation('puff', puff)
self.add(self.animation)
def update(self, milliseconds=0):
super(Particle, self).update(milliseconds)
self.position += self.__velocity * (milliseconds / 1000.0) * 60
if self.lifetime.is_time_up():
self.lifetime.reset()
self.remove()
class EmitterPerformanceTest(Scene):
def __init__(self):
super(EmitterPerformanceTest, self).__init__()
bg = Gradient.createVerticalGradient((20, 15), (255,255,255), (228, 139, 165), (111,86,117))
bg = pygame.transform.scale(bg, (320, 240))
self.add(Sprite(bg, (160, 120)))
num_emitters = 8
for i in range(num_emitters):
e = Emitter(Particle, (ImageSheet(ResourceLoader.load('data//puffs.png'), (32, 32))[0], 1000), 40, self.onCreate)
e.position = 40 + (i * (256 / (num_emitters - 1))), 216
self.add(e)
self.add(EndGameAfter(1000 * 30))
def onCreate(self, emitter, particle):
particle.position = Vector2D(emitter.position)
particle.position += (random.random() - 0.5) * 2.0 * 8, (random.random() - 0.5) * 2.0 * 16
particle.animation.play('puff', 0)
if (random.random() < 0.3):
particle.lifetime = Timer(random.randint(1000, 1800))
game = DemoGame((640, 480), EmitterPerformanceTest)
game.run()
```
#### File: examples/quadtree/quadtree_demo_pick.py
```python
import pygame
import random
from toast.quadtree import QuadTree
from toast.scene_graph import GameObject, Scene
from toast.camera import Camera
from toast.event_manager import EventManager
from examples.demo_game import DemoGame
class QuadTreeVisualizer(GameObject):
def __init__(self, quadtree):
super(QuadTreeVisualizer, self).__init__()
self.quadtree = quadtree
def render(self, surface, offset=(0,0)):
self.render_quadtree(surface, self.quadtree)
def render_quadtree(self, surface, quadtree):
pygame.draw.rect(surface, (255,0,0), quadtree.quadrant, 1)
        if quadtree.bucket:
for item in quadtree.bucket:
item.render(surface)
if quadtree.northwest_tree is not None:
self.render_quadtree(surface, quadtree.northwest_tree)
if quadtree.northeast_tree is not None:
self.render_quadtree(surface, quadtree.northeast_tree)
if quadtree.southwest_tree is not None:
self.render_quadtree(surface, quadtree.southwest_tree)
if quadtree.southeast_tree is not None:
self.render_quadtree(surface, quadtree.southeast_tree)
class RectComponent(GameObject):
def __init__(self, left, top, width, height):
super(RectComponent, self).__init__()
self.left = left
self.top = top
self.width = width
self.height = height
self.__index = 0
self.color = 255, 255, 255
def __iter__(self):
return self
def next(self):
if self.__index >= 4:
self.__index = 0
raise StopIteration
self.__index += 1
return self[self.__index - 1]
def __getitem__(self, index):
if index == 0:
return self.left
if index == 1:
return self.top
if index == 2:
return self.width
if index == 3:
return self.height
def render(self, surface, offset=(0,0)):
rect = self.left, self.top, self.width, self.height
pygame.draw.rect(surface, self.color, rect, 1)
class NewScene(Scene):
def __init__(self):
super(NewScene, self).__init__()
EventManager.subscribe(self, 'onMouseMotion')
EventManager.subscribe(self, 'onMouseDown')
Camera.current_camera.viewport = 512, 512
Camera.current_camera.position = 256, 256
Camera.current_camera.clear_color = 0, 0, 0
w = h = 2**9
self.region = (0,0,w,h)
self.last_hits = []
self.items = []
self.use_quadtree_collision = True
for _ in range(250):
x = random.randint(0, 480)
y = random.randint(0, 480)
w = random.randint(4, 32)
h = random.randint(4, 32)
r = RectComponent(x,y,w,h)
self.items.append(r)
self.quadtree_visualizer = QuadTreeVisualizer(QuadTree(self.items, self.region))
self.add(self.quadtree_visualizer)
def update(self, milliseconds=0):
super(NewScene, self).update(milliseconds)
self.quadtree_visualizer.quadtree = QuadTree(self.items, self.region)
def onMouseDown(self, event):
self.use_quadtree_collision = not self.use_quadtree_collision
if self.use_quadtree_collision:
print('Using QuadTree Collision Detection')
else:
print('Using PyGame Rect Collision')
def onMouseMotion(self, event):
p = DemoGame.camera_to_world(event.pos)
if self.use_quadtree_collision:
r = (p[0], p[1], 8, 8)
current_hits = self.quadtree_visualizer.quadtree.hit(r)
else:
r = pygame.Rect(p[0], p[1], 8, 8)
current_hits = []
            indexes = r.collidelistall([pygame.Rect(item[0], item[1], item[2], item[3]) for item in self.items])
for index in indexes:
current_hits.append(self.items[index])
for rect in self.last_hits:
rect.color = 255, 255, 255
for rect in current_hits:
rect.color = 0, 255, 0
self.last_hits = current_hits
game = DemoGame((512, 512), NewScene)
game.run()
```
#### File: examples/scene/scene_test.py
```python
from toast.scene_graph import Scene
from examples.demo_game import DemoGame
class NewScene(Scene):
def __init__(self):
super(NewScene, self).__init__()
game = DemoGame((640, 480), NewScene)
game.run()
```
#### File: toast/decorators/__init__.py
```python
class call_if(object):
def __init__(self, cond):
self.condition = cond
def __call__(self, func):
def inner(*args, **kwargs):
if getattr(args[0], self.condition):
return func(*args, **kwargs)
else:
return None
return inner
```
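A short usage sketch (assumed, not from the repository): `call_if` looks up the named attribute on the bound instance at call time and only invokes the wrapped method when that attribute is truthy, returning `None` otherwise.

```python
from toast.decorators import call_if


class Player(object):
    def __init__(self):
        self.is_alive = True

    # update() only runs while self.is_alive is truthy.
    @call_if('is_alive')
    def update(self, milliseconds=0):
        return 'updated'


player = Player()
assert player.update(16) == 'updated'

player.is_alive = False
assert player.update(16) is None
```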
#### File: Toast/toast/emitter.py
```python
from toast.game_object_pool import GameObjectPool
from toast.math.vector import Vector2D
class Emitter(GameObjectPool):
def __init__(self, particle_class_name, default_args=(), frequency=0, on_particle_create=None):
super(Emitter, self).__init__(particle_class_name, default_args)
self.__frequency = frequency
self.__counter = 0
self.__on_particle_create = on_particle_create
self.position = Vector2D(0, 0)
self.is_emitting = True
def update(self, milliseconds=0):
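        # Accumulate elapsed milliseconds and emit one particle from the pool
        # each time the configured frequency interval elapses.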
super(Emitter, self).update(milliseconds)
if self.is_emitting:
self.__counter += milliseconds
if self.__counter >= self.__frequency:
self.__counter = 0
particle = self.getNextAvailable()
if self.__on_particle_create is not None:
self.__on_particle_create(self, particle)
```
#### File: Toast/toast/image_sheet.py
```python
import pygame
blank_pixel = pygame.Color(0, 255, 255, 255)
empty_pixel = pygame.Color(0, 0, 0, 0)
class ImageSheet(object):
"""
" * ImageSheet
" *
"""
    def __init__(self, surface, dimension, keys=None):
"""Class Constructor
surface: A surface to be partitioned into sub-images.
dimension: A tuple of the form (width, height).
keys: A list of string identifiers for each sub-image.
If none is provided, defaults to filename + index.
"""
# Set the surface.
self.__image_sheet = surface
self.__dimension = dimension
# Create a dictionary to hold the sub-images.
self.__image_dict = {}
self.__empty_dict = {}
        if keys is not None:
self.__frame_list = keys
else:
self.__frame_list = []
# Determine number of steps needed
height = self.__image_sheet.get_height() // self.__dimension[1]
width = self.__image_sheet.get_width() // self.__dimension[0]
# Build the dictionary
for y in range(height):
for x in range(width):
i = x * dimension[0]
j = y * dimension[1]
frame_ID = ""
index = ((y * width) + x)
try:
frame_ID = self.__frame_list[index]
                except IndexError:
frame_ID = 'FRAME_' + str(index)
self.__frame_list.append(frame_ID)
self.__image_dict[frame_ID] = \
self.__image_sheet.subsurface((i, j, self.__dimension[0], self.__dimension[1])).copy()
bounding_rect = self.__image_dict[frame_ID].get_bounding_rect()
self.__empty_dict[frame_ID] = bounding_rect.width == 0 or bounding_rect.height == 0
def __getitem__(self, key):
try:
return self.__image_dict[key]
        except KeyError:
return self.__image_dict[self.__frame_list[key]]
def is_blank(self, frame):
try:
return self.__empty_dict[frame]
        except KeyError:
return self.__empty_dict[self.__frame_list[frame]]
def get_dimension(self):
return self.__dimension
```
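A minimal usage sketch (the surface and keys here are assumed example values): the sheet is cut into `dimension`-sized cells left to right, top to bottom, and each cell can be fetched either by its string key or by its index.

```python
import pygame

from toast.image_sheet import ImageSheet

# A 64x32 surface cut into two 32x32 frames named 'idle' and 'walk'.
sheet_surface = pygame.Surface((64, 32))
sheet = ImageSheet(sheet_surface, (32, 32), keys=['idle', 'walk'])

assert sheet['idle'].get_size() == (32, 32)
assert sheet[1].get_size() == (32, 32)
assert sheet.get_dimension() == (32, 32)
```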
#### File: Toast/toast/sprite.py
```python
import pygame
from toast.scene_graph import GameObject
from toast.fast_transform import Transform
from toast.math.vector import Vector2D
class Sprite(GameObject):
def __init__(self, image_or_animation, position=(0,0)):
super(Sprite, self).__init__()
self.add(Transform())
self.transform.position = position
self.flip_x = False
self.flip_y = False
self.__image = None
self.__animation = None
self.__visibility = True
if hasattr(image_or_animation, 'add_animation'):
self.__animation = image_or_animation
self.add(image_or_animation)
self.image = self.__animation.image
else:
self.image = image_or_animation
@property
def animation(self):
return self.__animation
@animation.setter
def animation(self, new_anim):
self.__animation = new_anim
@property
def transform(self):
return self.get_component('Transform')
def change_transform_type(self, new_type):
x, y = self.transform.position
self.remove(self.transform)
t = new_type()
t.position = x, y
self.add(t)
@property
def position(self):
return self.transform.position
@position.setter
def position(self, value):
self.transform.position = value
@property
def visible(self):
return self.__visibility
@visible.setter
def visible(self, value):
self.__visibility = value
@property
def image(self):
return self.__image
@image.setter
def image(self, image):
self.__image = image
def render(self, surface, offset=(0,0)):
# If not visible, don't draw
if not self.visible:
return
image = self.image
w, h = image.get_size()
# Handle scaling if needed
if self.transform.scale != (1, 1):
sx, sy = self.transform.scale
sw = abs(sx) * w
sh = abs(sy) * h
image = pygame.transform.scale(image, (int(sw), int(sh)))
image = pygame.transform.flip(image, self.flip_x, self.flip_y)
# Handle rotation if needed
if self.transform.rotation:
image = pygame.transform.rotate(image, int(-self.transform.rotation))
# Calculate center
hw, hh = image.get_size()
hw = hw / 2
hh = hh / 2
pos = Vector2D(int(self.transform.position[0]), int(self.transform.position[1]))
# Draw image to surface
surface.blit(image, pos - (hw, hh) - offset)
# Draw children
for child in [c for c in self.children if hasattr(c, 'render')]:
child.render(surface, offset)
```
#### File: toast/text_effects/shaky_text.py
```python
from toast.text_effects import wrapper
import random
class ShakyText(wrapper.Wrapper):
def __init__(self, internal):
wrapper.Wrapper.__init__(self, internal)
self.scale = 0.1
random.seed()
def update(self, time = 16):
self.internal._update_chars(time)
self.char_list = self.internal.char_list
for (_, rect) in self.char_list:
rect.top += rect.height * random.random() * self.scale
rect.left += rect.width * random.random() * self.scale
```
#### File: toast/text_effects/wavy_text.py
```python
import math
from toast.text_effects import wrapper
class WavyText(wrapper.Wrapper):
def __init__(self, internal):
wrapper.Wrapper.__init__(self, internal)
self.amplitude = self.internal.char_list[0][1].height / 2
self.frequency = 1
self.phaseStep = 1
def update(self, time=16):
"""A simple harmonic motion function.
:param time: The amount of time lapsed since the last call to update.
"""
self.internal._update_chars(time)
self.char_list = self.internal.char_list
phase = 0
for (_, rect) in self.char_list:
rect.top += self.displacement(self.amplitude, self.frequency, self.internal.time / 1000.0, phase)
phase -= self.phaseStep
def displacement(self, amplitude, frequency, time, phase):
"""A simple harmonic motion function.
:returns: Vertical displacement
"""
return amplitude * math.cos((2 * math.pi * frequency * time) + phase)
```
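The `displacement` helper above is plain simple harmonic motion. The standalone sketch below (no toast imports required) evaluates the same formula for a few phase steps to show the vertical offsets a row of characters would receive.
```python
import math

def displacement(amplitude, frequency, time, phase):
    # Same formula as WavyText.displacement: A * cos(2*pi*f*t + phase)
    return amplitude * math.cos((2 * math.pi * frequency * time) + phase)

# Five characters, one phase step apart, sampled a quarter of a second into the wave.
for char_index in range(5):
    offset = displacement(8, 1, 0.25, -char_index)
    print(char_index, round(offset, 2))
```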
#### File: Toast/toast/transform.py
```python
from toast.math.vector import Vector2D
from toast.math.matrix import MatrixHelper
from toast.scene_graph import Component
class Transform(Component):
def __init__(self):
super(Transform, self).__init__()
self.__local_matrix = None
self.global_matrix = None
self.__position = Vector2D.Zero()
self.__rotation = 0
self.__scale = Vector2D(1.0, 1.0)
@property
def matrix(self):
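        # Built lazily: the local matrix composes translate * rotate * scale,
        # so scale applies first, then rotation, then translation.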
        if self.__local_matrix is None:
t = MatrixHelper.translation_matrix(int(self.__position[0]), int(self.__position[1]))
r = MatrixHelper.rotation_matrix(self.__rotation)
s = MatrixHelper.scale_matrix(self.__scale[0], self.__scale[1])
self.__local_matrix = t * r * s
        if self.global_matrix is None:
if hasattr(self.game_object.parent, 'transform'):
p = self.game_object.parent.transform.matrix
self.global_matrix = p * self.__local_matrix
else:
return self.__local_matrix
return self.global_matrix
def mark_dirty(self):
if not self.game_object:
return
self.global_matrix = None
self.__local_matrix = None
for child_transform in [c.transform for c in self.game_object.children]:
child_transform.mark_dirty()
@property
def position(self):
return Vector2D(self.matrix[0][2], self.matrix[1][2])
@position.setter
def position(self, other):
self.__position.x = other[0]
self.__position.y = other[1]
self.mark_dirty()
@property
def rotation(self):
a = self.matrix * (1, 0)
b = self.position
return (a - b).angle
@rotation.setter
def rotation(self, rotation):
self.__rotation = rotation * 0.0174532925
self.mark_dirty()
@property
def scale(self):
sx = (self.matrix * (1, 0)) - self.position
sy = (self.matrix * (0, 1)) - self.position
return Vector2D(sx.magnitude, sy.magnitude)
@scale.setter
def scale(self, scale):
self.__scale = scale
self.mark_dirty()
@property
def forward(self):
f = Vector2D.from_angle(self.rotation)
return f
@property
def right(self):
r = Vector2D.from_angle(self.rotation - 90.0)
r[1] = -r[1]
return r
@property
def offset(self):
return self.__offset
def look_at(self, pos):
angle = (pos - self.position).angle
self.rotation = -angle
```
#### File: Toast/toast/util.py
```python
from xml.etree import ElementTree
class XMLDict(dict):
""" A helper class that facilitates working with xml documents in a
read-only manner. Mostly lifted from:
http://code.activestate.com/recipes/573463-converting-xml-to-dictionary-and-back/
>>> doc = dict_from_xml('file.xml')
>>> doc.root.children[0]
>>> doc['root']['children'][0]
"""
def __init__(self, initial_dict=None):
if initial_dict is None:
initial_dict = {}
dict.__init__(self, initial_dict)
def __getattr__(self, item):
return self.__getitem__(item)
    def __setattr__(self, item, value):
self.__setitem__(item, value)
def __str__(self):
        if '_text' in self:
return self.__getitem__('_text')
else:
return ''
def __convert(node):
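    # Recursively turn an Element into nested XMLDicts: repeated child tags are
    # collected into a list, and leaf nodes with only text collapse to strings.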
node_dict = XMLDict()
if len(node.items()) > 0:
node_dict.update(dict(node.items()))
for child in node:
new_node_dict = __convert(child)
if child.tag in node_dict:
if type(node_dict[child.tag]) is type([]):
node_dict[child.tag].append(new_node_dict)
else:
node_dict[child.tag] = [node_dict[child.tag], new_node_dict]
else:
node_dict[child.tag] = new_node_dict
if node.text is None:
text = ''
else:
text = node.text.strip()
if len(node_dict) > 0:
if len(text) > 0:
node_dict['_text'] = text
else:
node_dict = text
return node_dict
def dict_from_xml(root):
""" Builds and returns an XMLDict representing the given xml document.
>>> doc = dict_from_xml('file.xml')
>>> resolution = doc.config.display.resolution
"""
    if isinstance(root, str):
        root = ElementTree.parse(root).getroot()
    elif not isinstance(root, ElementTree.Element):
        raise TypeError('root must be a file path or an ElementTree.Element')
return XMLDict({root.tag : __convert(root)})
``` |
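A hedged usage sketch for `dict_from_xml`, building the element in memory so no file on disk is needed; the XML content and the `toast.util` import path are assumptions based on the file layout above.
```python
from xml.etree import ElementTree
from toast.util import dict_from_xml

root = ElementTree.fromstring(
    '<config><display><resolution>800x600</resolution></display></config>')

doc = dict_from_xml(root)
print(doc.config.display.resolution)            # attribute access -> '800x600'
print(doc['config']['display']['resolution'])   # plain dict access works too
```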
{
"source": "JoshuaSkelly/TroubleInCloudLand",
"score": 3
} |
#### File: TroubleInCloudLand/core/actor.py
```python
import pygame
from core import animation
from utils import vector
from utils.settings import *
class Actor(pygame.sprite.Sprite):
"""The Generic Actor Class"""
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.actor_type = ACTOR_NONE
self.can_collide = False
self.active = False
self.hitrect = pygame.Rect(0, 0, 0, 0)
self.hitrect_offset_x = 0
self.hitrect_offset_y = 0
self.object_collided_with = self
self.bound_style = None
self.animation_list = animation.Animation()
self.image = None
self.position = vector.Vector2d(0, 0)
self.bounds = 0, 0, 0, 0
def actor_update(self):
pass
def update(self):
try:
self.animation_list.update()
self.image = self.animation_list.image
except:
pass
self.position += self.velocity
self.check_bounds()
self.rect.center = (self.position.x, self.position.y)
self.hitrect.center = (self.position.x + self.hitrect_offset_x, self.position.y + self.hitrect_offset_y)
self.actor_update()
def check_collision(self, group_checked):
for object_checked in group_checked:
if self.hitrect.colliderect(object_checked.hitrect):
if self.active and object_checked.active:
self.object_collided_with = object_checked
object_checked.object_collided_with = self
self.collide()
object_checked.collide()
def collide(self):
pass
def check_bounds(self):
current_x = self.position.x
current_y = self.position.y
if current_x < self.bounds[LEFT] or current_x > self.bounds[RIGHT] or current_y < self.bounds[TOP] or current_y > self.bounds[BOTTOM]:
self.out_of_bounds()
def die(self):
self.kill()
del self
def out_of_bounds(self):
if self.bound_style == BOUND_STYLE_CLAMP:
if self.position.x < self.bounds[LEFT]:
self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
elif self.position.x > self.bounds[RIGHT]:
self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
if self.position.y < self.bounds[TOP]:
self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
elif self.position.y > self.bounds[BOTTOM]:
self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
elif self.bound_style == BOUND_STYLE_WRAP:
if self.position.x < self.bounds[LEFT]:
self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
elif self.position.x > self.bounds[RIGHT]:
                self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
            if self.position.y < self.bounds[TOP]:
                self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
            elif self.position.y > self.bounds[BOTTOM]:
                self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
elif self.bound_style == BOUND_STYLE_REFLECT:
if self.position.x < self.bounds[LEFT]:
self.position = vector.Vector2d(self.bounds[LEFT], self.position.y)
self.velocity *= -1.0, 1.0
elif self.position.x > self.bounds[RIGHT]:
self.position = vector.Vector2d(self.bounds[RIGHT], self.position.y)
self.velocity *= -1.0, 1.0
if self.position.y < self.bounds[TOP]:
self.position = vector.Vector2d(self.position.x, self.bounds[TOP])
self.velocity *= 1.0, -1.0
elif self.position.y > self.bounds[BOTTOM]:
self.position = vector.Vector2d(self.position.x, self.bounds[BOTTOM])
self.velocity *= 1.0, -1.0
elif self.bound_style == BOUND_STYLE_KILL:
self.kill()
elif self.bound_style == BOUND_STYLE_CUSTOM:
self.custom_bounds()
def custom_bounds(self):
pass
```
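A minimal, hedged sketch of an `Actor` subclass built only from the code above: it skips image loading and animation, and just drifts a blank surface clamped to the play area. The class name and numbers are invented for illustration, and it assumes the `core` and `utils` packages are importable.
```python
import pygame
from core import actor
from utils import vector
from utils.settings import *

class Drifter(actor.Actor):
    """Illustrative subclass: a blank 16x16 square drifting to the right."""
    def __init__(self):
        actor.Actor.__init__(self)
        self.image = pygame.Surface((16, 16))
        self.rect = self.image.get_rect()
        self.position = vector.Vector2d(100.0, 100.0)
        self.velocity = vector.Vector2d(2.0, 0.0)    # Actor.update() adds this every frame
        self.bound_style = BOUND_STYLE_CLAMP
        self.bounds = 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT
```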
#### File: TroubleInCloudLand/core/animation.py
```python
from utils import utility
class Animation(object):
def __init__(self):
self.sequence_dict = {}
self.current_sequence = 0
self.frame_dict = {}
self.current_frame = 0
self.is_playing = True
self.image = None
self.parent = None
def __repr__(self):
return '<Animation({0} Sequences): {1}>'.format(self.length, self.keys)
def __len__(self):
return len(self.sequence_dict)
@property
def length(self):
return len(self.sequence_dict)
@property
def keys(self):
return self.sequence_dict.keys()
def set_parent(self, parent):
self.parent = parent
self.parent.image = self.sequence_dict[self.current_sequence][self.current_frame]
def update(self):
if self.is_playing:
self.current_frame += 1
if self.current_frame > len(self.sequence_dict[self.current_sequence]) - 1:
self.current_frame = 0
self.image = self.sequence_dict[self.current_sequence][self.current_frame]
def play(self, sequence_id, frame_id=0):
self.is_playing = True
if self.current_sequence != sequence_id:
self.current_sequence = sequence_id
self.current_frame = frame_id
def stop(self, sequence_id=None, frame_id=0):
self.is_playing = False
if sequence_id:
self.current_sequence = sequence_id
if frame_id:
self.current_frame = frame_id
def build_animation(self, sequence_id, frames):
self.sequence_dict[sequence_id] = []
if not self.current_sequence:
self.current_sequence = sequence_id
try:
for frame in frames:
try:
self.sequence_dict[sequence_id].append(self.frame_dict[frame])
except:
self.frame_dict[frame] = (utility.load_image(frame))
self.sequence_dict[sequence_id].append(self.frame_dict[frame])
except:
try:
self.sequence_dict[sequence_id].append(self.frame_dict[frames])
except:
self.frame_dict[frames] = (utility.load_image(frames))
self.sequence_dict[sequence_id].append(self.frame_dict[frames])
self.image = self.sequence_dict[self.current_sequence][self.current_frame]
```
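A small, hedged sketch of driving the `Animation` class above. The frame cache is pre-seeded with plain surfaces so `build_animation` never calls `utility.load_image`, meaning no art files are required; the frame names are made up.
```python
import pygame
from core.animation import Animation

anim = Animation()
# Pre-seed the frame cache so build_animation reuses these surfaces
# instead of loading 'walk0'/'walk1' from disk.
anim.frame_dict['walk0'] = pygame.Surface((16, 16))
anim.frame_dict['walk1'] = pygame.Surface((16, 16))

anim.build_animation('Walk', ['walk0', 'walk1'])
anim.play('Walk')
anim.update()                 # advances current_frame and refreshes anim.image
print(anim.current_frame)     # -> 1
```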
#### File: TroubleInCloudLand/core/game.py
```python
from core import player, world
from scenes import credits, scene, tutorial
from ui import icon, menu, text
from utils import prettyprint, utility, vector
from utils.utility import *
pause_menu_dictionary = {
RESUME_GAME: ['Resume','Continue Playing'],
OPTION_MENU: ['Options','Change Sound and Video Options'],
EXIT_GAME: ['Exit','Exit to the Main Menu']
}
class Game(object):
def __init__(self, screen, world_to_start, music_list):
self.screen = screen
pygame.mouse.set_visible(False)
self.done = False
self.world_done = False
self.high_score = 0
self.bullet_group = pygame.sprite.Group()
self.player_group = pygame.sprite.Group()
self.powerup_group = pygame.sprite.Group()
self.boss_group = pygame.sprite.Group()
self.enemy_group = pygame.sprite.Group()
self.text_group = pygame.sprite.Group()
self.effects_group = pygame.sprite.Group()
self.mouse_last_move = MOUSE_DEFAULT_POSITION
self.group_list = [
self.powerup_group,
self.enemy_group,
self.boss_group,
self.text_group,
self.effects_group
]
self.score_board = text.Text(FONT_PATH, 36, FONT_COLOR)
self.temp_life_board = text.Text(FONT_PATH, 36, FONT_COLOR)
self.temp_life_board.position = vector.Vector2d(48, 40)
self.life_board = self.temp_life_board
self.life_icon = icon.Icon('life')
self.player = player.Player(self.bullet_group, self.effects_group, self.life_board, self.score_board)
self.player_group.add(self.player)
self.text_group.add(self.score_board)
self.text_group.add(self.temp_life_board)
self.text_group.add(self.life_icon)
self.music_list = music_list
self.timer = pygame.time.Clock()
# Get rid of the first mouse delta
pygame.mouse.get_rel()
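        # Each level row below reads [stage_spawned, actor_type, max_spawn_rate, default_spawn];
        # a spawn rate of -1 means no ongoing spawning for that actor
        # (see the level-list notes in core/world.py).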
world1_level0 = [
[0, ACTOR_MOONO, 45, 0],
[1, ACTOR_MOONO, 120, 0],
[2, ACTOR_MOONO, 240, 0],
[3, ACTOR_BAAKE, -1 ,1],
[4, ACTOR_BOSS_TUT, -1, 1],
[4, ACTOR_MOONO, 35, 0]
]
world1_level1 = [
[0, ACTOR_MOONO, 40, 0],
[1, ACTOR_MOONO, 85, 0],
[2, ACTOR_MOONO, 110, 0],
[3, ACTOR_BAAKE, -1 ,2],
[4, ACTOR_BOSS_TUT, -1, 1],
[4, ACTOR_MOONO, 30, 0]
]
world1_level2 = [
[0, ACTOR_MOONO, 30, 0],
[1, ACTOR_BAAKE, -1, 1],
[0, ACTOR_MOONO, 70, 0],
[2, ACTOR_BAAKE, -1, 1],
[0, ACTOR_MOONO, 130, 0],
[3, ACTOR_BAAKE, -1, 1],
[0, ACTOR_MOONO, 300, 0],
[4, ACTOR_BOSS_TUT, -1, 1],
[4, ACTOR_MOONO, 25, 0]
]
world1_level3 = [
[0, ACTOR_MOONO, 25, 0],
[1, ACTOR_BAAKE, -1, 1],
[1, ACTOR_MOONO, 50, 0],
[2, ACTOR_BAAKE, -1, 2],
[2, ACTOR_MOONO, 110, 0],
[3, ACTOR_BAAKE, -1, 2],
[3, ACTOR_MOONO, 210, 0],
[4, ACTOR_BOSS_TUT, -1, 1],
[4, ACTOR_MOONO, 20, 0]
]
world2_level0 = [
[0, ACTOR_MOONO, 45, 0],
[0, ACTOR_HAOYA, 65, 0],
[1, ACTOR_BAAKE, -1, 1],
[1, ACTOR_MOONO, 70, 0],
[2, ACTOR_HAOYA, 75, 0],
[3, ACTOR_MOONO, 85, 0],
[4, ACTOR_BAAKE_BOSS, -1, 1],
[4, ACTOR_HAOYA, 30, 0]
]
world2_level1 = [
[0, ACTOR_BAAKE, -1, 2],
[0, ACTOR_BATTO, 150 ,0],
[0, ACTOR_MOONO, 55, 0],
[1, ACTOR_HAOYA, 60, 0],
[2, ACTOR_MOONO, 100 ,0],
[3, ACTOR_BAAKE, -1, 1],
[3, ACTOR_BATTO, 280, 0],
[4, ACTOR_BAAKE_BOSS, -1, 1],
[4, ACTOR_BATTO, 70, 0]
]
world2_level2 = [
[0, ACTOR_ROKUBI, 60, 0],
[0, ACTOR_MOONO, 50, 0],
[0, ACTOR_BAAKE, -1, 2],
[1, ACTOR_BAAKE, -1, 1],
[1, ACTOR_BATTO, 160, 0],
[2, ACTOR_HAOYA, 60, 0],
[3, ACTOR_MOONO, 80, 0],
[4, ACTOR_BAAKE_BOSS, -1, 1],
[4, ACTOR_ROKUBI, 30, 0]
]
world2_level3 = [
[0, ACTOR_HAOYA, 60, 0],
[0, ACTOR_BATTO, 170, 0],
[0, ACTOR_ROKUBI, 75, 0],
[0, ACTOR_BAAKE, -1, 1],
[1, ACTOR_MOONO, 70, 0],
[1, ACTOR_BAAKE, -1, 1],
[2, ACTOR_BAAKE, -1, 1],
[2, ACTOR_ROKUBI, 180, 1],
[3, ACTOR_MOONO, 200, 0],
[4, ACTOR_BAAKE_BOSS, -1, 1],
[4, ACTOR_HAOYA, 100, 0],
[4, ACTOR_BATTO, 240, 0],
[4, ACTOR_ROKUBI, 90, 0],
[4, ACTOR_BAAKE, -1, 1]
]
world3_level0 = [
[0, ACTOR_HAKTA, 35, 0],
[0, ACTOR_HAOYA, 65, 0],
[1, ACTOR_BOKKO, -1, 1],
[2, ACTOR_BOKKO, -1, 1],
[2, ACTOR_HAKTA, 75, 0],
[3, ACTOR_BOKKO, -1, 1],
[4, ACTOR_MOONO_BOSS, -1, 1],
[4, ACTOR_HAKTA, 30, 0]
]
world3_level1 = [
[0, ACTOR_RAAYU, 45, 0],
[0, ACTOR_HAKTA, 50, 0],
[1, ACTOR_BOKKO, -1, 1],
[2, ACTOR_RAAYU, 60, 0],
[3, ACTOR_BOKKO, -1, 1],
[3, ACTOR_ROKUBI, 80, 0],
[4, ACTOR_MOONO_BOSS, -1, 1],
[4, ACTOR_RAAYU, 25, 0]
]
world3_level2 = [
[0, ACTOR_PAAJO, 95, 0],
[0, ACTOR_HAKTA, 40, 0],
[1, ACTOR_BOKKO, -1, 2],
[2, ACTOR_RAAYU, 80, 0],
[3, ACTOR_BOKKO, -1, 1],
[4, ACTOR_MOONO_BOSS, -1, 1],
[4, ACTOR_PAAJO, 70, 0]
]
world3_level3 = [
[0, ACTOR_HAKTA, 55, 0],
[0, ACTOR_RAAYU, 75, 0],
[0, ACTOR_PAAJO, 160, 0],
[1, ACTOR_BOKKO, -1, 2],
[1, ACTOR_ROKUBI, 50, 0],
[2, ACTOR_HAOYA, 120, 0],
[3, ACTOR_BOKKO, -1, 1],
[4, ACTOR_MOONO_BOSS, -1, 1],
[4, ACTOR_HAKTA, 60, 0],
[4, ACTOR_RAAYU, 50, 0],
[4, ACTOR_PAAJO, 110, 0],
[4, ACTOR_BOKKO, -1, 1]
]
tutorial_world = ['Tutorial', self.player, self.group_list]
temp_world_1 = ['Cloudopolis', self.player, self.group_list, [world1_level0, world1_level1, world1_level2, world1_level3]]
temp_world_2 = ['Nightmaria', self.player, self.group_list, [world2_level0, world2_level1, world2_level2, world2_level3]]
temp_world_3 = ['Opulent Dream', self.player, self.group_list, [world3_level0, world3_level1, world3_level2, world3_level3]]
self.world_list = [
tutorial_world,
temp_world_1,
temp_world_2,
temp_world_3
]
self.world_number = world_to_start
if self.world_number == 0:
self.current_world = tutorial.Tutorial(self.world_list[self.world_number])
else:
self.current_world = world.World(self.world_list[self.world_number], self.music_list[self.world_number])
self.current_world.load()
if self.world_number == 0:
self.new_scene = scene.TutorialScene()
self.player.lives = 99
self.life_board.set_text('x' + str(self.player.lives))
elif self.world_number == 1:
self.new_scene = scene.ForestScene()
elif self.world_number == 2:
self.new_scene = scene.RockyScene()
elif self.world_number == 3:
self.new_scene = scene.PinkScene()
def run(self):
while not self.done:
if self.world_done:
if self.world_number < MAX_WORLD:
self.world_beat()
# Resetting player lives so that it isn't in their best
# interest to play easier worlds just to have extra lives.
self.player.lives = 3
self.player.life_board.set_text('x' + str(self.player.lives))
self.player.score = 0
self.player.next_bonus = 50000
# Loading the new world
self.world_number += 1
if self.world_number == 0:
self.new_scene = scene.TutorialScene()
elif self.world_number == 1:
self.new_scene = scene.ForestScene()
elif self.world_number == 2:
self.new_scene = scene.RockyScene()
elif self.world_number == 3:
self.new_scene = scene.PinkScene()
if self.world_number > settings_list[WORLD_UNLOCKED]:
settings_list[WORLD_UNLOCKED] = self.world_number
utility.play_music(self.music_list[self.world_number], True)
self.current_world = world.World(self.world_list[self.world_number], self.music_list[self.world_number])
self.current_world.load()
self.world_done = False
else:
self.game_beat()
self.check_collision()
self.update()
self.draw()
self.handle_events()
pygame.mouse.set_pos(MOUSE_DEFAULT_POSITION)
pygame.mouse.get_rel()
self.mouse_last_move = pygame.mouse.get_pos()
self.timer.tick(FRAMES_PER_SECOND)
if self.player.dead:
high_score = read_high_scores()
if self.player.score < high_score[self.world_number]:
end_game_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You would need to score ' + str(high_score[self.world_number] - self.player.score) + ' more to beat it!'],
NEXT_WORLD: ['Exit', 'Return To The Menu']
}
elif self.player.score == high_score[self.world_number]:
end_game_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Tied the High Score!'],
NEXT_WORLD: ['Exit', 'Return To The Menu']
}
else:
end_game_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Beat the High Score!'],
NEXT_WORLD: ['Exit', 'Return To The Menu']
}
high_score[self.world_number] = self.player.score
write_high_scores(high_score)
utility.dim(128, FILL_COLOR)
end_game_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
self.screen.convert(),
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Game Over', 128, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
end_game_dictionary)
end_game_menu.show()
self.done = True
utility.fade_music()
utility.play_music(self.music_list[MENU_MUSIC], True)
def handle_events(self):
for event in pygame.event.get():
if (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) or (event.type == pygame.MOUSEBUTTONDOWN and event.button == 3) or (event.type == pygame.ACTIVEEVENT and event.gain == 0):
utility.dim(128, FILL_COLOR)
# Grab a copy of the screen to show behind the menu
screen_grab = self.screen.copy()
pause_menu_running = True
while pause_menu_running:
pause_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
screen_grab,
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Pause', 128, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
pause_menu_dictionary)
menu_result = pause_menu.show()
if menu_result == OPTION_MENU:
option_result = True
last_highlighted = 0
while option_result:
option_menu_dictionary = {
SOUND_MENU: ['Sound Options', 'Change Sound Options'],
DISPLAY_MENU: ['Video Options', 'Change Video Options'],
CHANGE_SENSITIVITY: ['Mouse Sensitivity: ' + prettyprint.mouse_sensitivity(settings_list[SENSITIVITY]), 'Change Mouse Sensitivity'],
EXIT_OPTIONS: ['Back', 'Go Back to the Main Menu']
}
sensitivity_menu_dictionary = {
0: ['Very Low', 'Change Sensitivity to Very Low'],
1: ['Low', 'Change Sensitivity to Low'],
2: ['Normal', 'Change Sensitivity to Normal'],
3: ['High', 'Change Sensitivity to High'],
4: ['Very High', 'Change Sensitivity to Very High']
}
sound_menu_dictionary = {
TOGGLE_SFX: ['Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'],
TOGGLE_MUSIC: ['Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'],
EXIT_OPTIONS: ['Back', 'Go Back to the Option Menu']
}
display_menu_dictionary = {
TOGGLE_PARTICLES: ['Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'],
TOGGLE_FULLSCREEN: ['Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'],
EXIT_OPTIONS: ['Back', 'Go Back to the Main Menu']
}
option_result = menu.Menu(self.screen,
self.music_list[self.world_number],
screen_grab,
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Options', 96,SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
option_menu_dictionary,
last_highlighted).show()
if option_result == SOUND_MENU:
sound_result = True
last_highlighted = 0
while sound_result:
sound_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
screen_grab,
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Sound Options', 96,SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
sound_menu_dictionary,
last_highlighted)
sound_result = sound_menu.show()
if sound_result == TOGGLE_SFX:
settings_list[SFX] = not settings_list[SFX]
last_highlighted = 0
elif sound_result == TOGGLE_MUSIC:
settings_list[MUSIC] = not settings_list[MUSIC]
if not settings_list[MUSIC]:
pygame.mixer.Channel(MUSIC_CHANNEL).stop()
last_highlighted = 1
elif sound_result == EXIT_OPTIONS:
sound_result = False
sound_menu_dictionary = {
TOGGLE_SFX: ['Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'],
TOGGLE_MUSIC: ['Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'],
EXIT_OPTIONS: ['Back', 'Go Back to the Option Menu']
}
if option_result == DISPLAY_MENU:
display_result = True
last_highlighted = 0
while display_result:
display_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
screen_grab,
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Video Options', 96,SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
display_menu_dictionary,
last_highlighted)
display_result = display_menu.show()
if display_result == TOGGLE_PARTICLES:
settings_list[PARTICLES] = not settings_list[PARTICLES]
last_highlighted = 0
elif display_result == TOGGLE_FULLSCREEN:
settings_list[SETTING_FULLSCREEN] = not settings_list[SETTING_FULLSCREEN]
last_highlighted = 1
pygame.mixer.quit()
pygame.mixer.init()
if settings_list[SETTING_FULLSCREEN]:
utility.set_fullscreen()
else:
utility.set_fullscreen(False)
pygame.mouse.set_visible(False)
elif display_result == EXIT_OPTIONS:
display_result = False
display_menu_dictionary = {
TOGGLE_PARTICLES: ['Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'],
TOGGLE_FULLSCREEN: ['Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'],
EXIT_OPTIONS: ['Back', 'Go Back to the Main Menu']
}
elif option_result == EXIT_OPTIONS:
option_result = False
elif option_result == CHANGE_SENSITIVITY:
sensitivity_result = True
last_highlighted = 0
while sensitivity_result:
sensitivity_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
screen_grab,
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Mouse Sensitivity', 96,SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
sensitivity_menu_dictionary,
last_highlighted)
                                    sensitivity_result = sensitivity_menu.show()
                                    mouse_sensitivities = [0.5, 0.75, 1, 1.25, 1.5]
                                    if sensitivity_result is not False:
                                        settings_list[SENSITIVITY] = mouse_sensitivities[sensitivity_result]
                                    sensitivity_result = False
elif menu_result == RESUME_GAME or menu_result == False:
pause_menu_running = False
pygame.mouse.get_rel()
elif menu_result == EXIT_GAME:
utility.fade_music()
utility.play_music(self.music_list[MENU_MUSIC], True)
self.done = True
pause_menu_running = False
elif event.type == pygame.MOUSEMOTION and self.player.lives:
mouse_input = [pygame.mouse.get_pos()[0] - 512.0, pygame.mouse.get_pos()[1] - 384.0]
if mouse_input[0] != 0 and mouse_input[1] != 0:
self.player.fire()
self.player.velocity = (self.player.velocity + mouse_input) / 1.5 * settings_list[SENSITIVITY]
def draw(self):
self.screen.fill(FILL_COLOR)
self.new_scene.draw(self.screen)
self.effects_group.draw(self.screen)
self.player_group.draw(self.screen)
self.bullet_group.draw(self.screen)
self.powerup_group.draw(self.screen)
self.enemy_group.draw(self.screen)
self.boss_group.draw(self.screen)
self.text_group.draw(self.screen)
pygame.display.flip()
def update(self):
self.world_done = self.current_world.update()
self.enemy_group.update()
self.player_group.update()
self.bullet_group.update()
self.powerup_group.update()
self.boss_group.update()
self.text_group.update()
self.effects_group.update()
def check_collision(self):
if self.player.active:
self.player.check_collision(self.powerup_group)
self.player.check_collision(self.enemy_group)
self.player.check_collision(self.boss_group)
for boss in self.boss_group:
if boss.active:
boss.check_collision(self.bullet_group)
for enemy in self.enemy_group:
if enemy.active:
enemy.check_collision(self.powerup_group)
enemy.check_collision(self.bullet_group)
def world_beat(self):
high_score = read_high_scores()
if self.player.score < high_score[self.world_number]:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You would need to score ' + str(high_score[self.world_number] - self.player.score) + ' more to beat it!'],
NEXT_WORLD: ['Continue', 'On to the Next World!']
}
elif self.player.score == high_score[self.world_number]:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Tied the High Score!'],
NEXT_WORLD: ['Continue', 'On to the Next World!']
}
else:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Beat the High Score!'],
NEXT_WORLD: ['Continue', 'On to the Next World!']
}
high_score[self.world_number] = self.player.score
write_high_scores(high_score)
utility.dim(128, FILL_COLOR)
# Show world defeated menu
menu.Menu(self.screen,
self.music_list[self.world_number],
self.screen.convert(),
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['World Defeated!', 64, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
world_end_dictionary).show()
utility.fade_music()
def game_beat(self):
high_score = read_high_scores()
if self.player.score < high_score[self.world_number]:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You would need to score ' + str(high_score[self.world_number] - self.player.score) + ' more to beat it!'],
NEXT_WORLD: ['Credits', 'On to the Credits!']
}
elif self.player.score == high_score[self.world_number]:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Tied the High Score!'],
NEXT_WORLD: ['Credits', 'On to the Credits!']
}
else:
world_end_dictionary = {
HIGH_SCORE: ['High Score For This World: ' + str(high_score[self.world_number]), 'You Beat the High Score!'],
NEXT_WORLD: ['Credits', 'On to the Credits!']
}
high_score[self.world_number] = self.player.score
write_high_scores(high_score)
utility.dim(128, FILL_COLOR)
world_end_menu = menu.Menu(self.screen,
self.music_list[self.world_number],
self.screen.convert(),
[0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT],
['Congratulations!', 64, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4],
world_end_dictionary)
world_end_menu.show()
utility.fade_music()
credits.Credits(self.screen, self.music_list[MENU_MUSIC])
self.done = True
```
#### File: TroubleInCloudLand/core/world.py
```python
import pygame
import enemies
from ui import text, infobubble
from utils import utility, vector
from utils.settings import *
def load_data():
World.bonus_tally_sound = utility.load_sound('bonusTally')
World.boss_fight_music = utility.load_sound('bossMusic')
World.get_ready_sound = utility.load_sound('getReady')
"""
Not a dictionary but rather a list of lists.
World.levelList =
{level0 = [[stageSpawned, actor-type, maxSpawn_rate, defaultSpawn],[...],...]
level1[[...],[...]...}
stageSpawned == in which stage of the level
does the actor start spawning
actorType == the actor type that is going to
be added to the spawn list for
this stage.
maxSpawn_rate == the least number of frames
possible between spawning.
Set to -1 to not allow any
new spawning.
defaultSpawn == how many of this actor type
spawn at the beginning of that
stage.
"""
WORLD_NAME = 0
PLAYER = 1
GROUP_LIST = 2
LEVEL_LIST = 3
class World(object):
def __init__(self, world_tuple, music):
self.world_name = world_tuple[WORLD_NAME]
self.player = world_tuple[PLAYER]
self.music = music
self.group_list = world_tuple[GROUP_LIST]
self.powerup_group = self.group_list[POWERUP_GROUP]
self.enemy_group = self.group_list[ENEMY_GROUP]
self.boss_group = self.group_list[BOSS_GROUP]
self.text_group = self.group_list[TEXT_GROUP]
self.effects_group = self.group_list[EFFECTS_GROUP]
self.stage_score = 0
self.level_list = world_tuple[LEVEL_LIST]
self.level = 0
self.stage = 0
self.done = False
self.enemy_list = []
self.defeat_stage = None
# ROKUBI VARIABLES
self.rokubi_group = pygame.sprite.Group()
# BOSS FIGHT VARIABLES
self.pause_spawning = 0
self.boss_fight = False
self.force_drop = 0
self.bonus_text = None
self.bonus_amount = None
self.bonus = -1
self.after_bonus_pause = 0
def load(self):
self.load_level()
self.player.increment_score_no_text(0)
def load_level(self):
if self.done:
return
utility.fade_music()
utility.play_music(self.music, True)
self.stage = 0
self.pause_spawning = 3 * FRAMES_PER_SECOND
self.player.bullet_bonus = 0
self.player.reflect_bonus = 0
self.powerup_group.empty()
self.enemy_group.empty()
self.effects_group.empty()
# Display Level text
display_name = text.Text(FONT_PATH, 64, FONT_COLOR, self.world_name, 90)
display_name.set_alignment(CENTER_MIDDLE)
display_name.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
self.group_list[TEXT_GROUP].add(display_name)
display_level = text.Text(FONT_PATH, 32, FONT_COLOR, 'Level ' + str(self.level + 1), 90)
display_level.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT * (2.0 / 3.0)))
display_level.set_alignment(CENTER_MIDDLE)
self.group_list[TEXT_GROUP].add(display_level)
# Reset all information for the new level
self.enemy_list = []
self.load_stage()
utility.play_sound(self.get_ready_sound, OW_CHANNEL)
temp_image = text.TextSurface(FONT_PATH, 36, FONT_COLOR, 'Get Ready...').image
help_bubble = infobubble.InfoBubble(temp_image, self.player, 2 * FRAMES_PER_SECOND)
help_bubble.offset = vector.Vector2d(0.0, -100.0)
self.effects_group.add(help_bubble)
def load_boss(self):
self.enemy_list = []
for enemy in self.level_list[self.level]:
if enemy[STAGE_SPAWNED] == self.stage:
while enemy[DEFAULT_SPAWN]:
enemy[DEFAULT_SPAWN] -= 1
self.create_actor(enemy[ACTOR_TYPE])
if enemy[STAGE_SPAWNED] == self.stage:
# Time until spawn, actor type, spawn rate
self.enemy_list.append([0, enemy[ACTOR_TYPE], enemy[SPAWN_RATE]])
def give_bonus(self):
increment_bonus = self.player.lives * 50
if self.bonus == -1:
self.boss_fight = False
self.bonus = 0
self.bonus_text = text.Text(FONT_PATH, 64, FONT_COLOR, 'Bonus Points!')
self.bonus_text.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 - 50))
self.bonus_text.set_alignment(CENTER_MIDDLE)
self.text_group.add(self.bonus_text)
self.bonus_amount = text.Text(FONT_PATH, 48, FONT_COLOR)
self.bonus_amount.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 + 50))
self.bonus_amount.set_alignment(CENTER_MIDDLE)
self.text_group.add(self.bonus_amount)
if self.bonus < self.player.lives * 5000 - increment_bonus:
self.bonus += increment_bonus
self.bonus_amount.set_text(self.bonus)
else:
self.bonus_amount.set_text(self.player.lives * 5000)
if self.level < MAX_LEVEL:
self.bonus_text.set_timer(FRAMES_PER_SECOND)
self.bonus_amount.set_timer(FRAMES_PER_SECOND)
self.bonus = -1
self.after_bonus_pause = 1.1 * FRAMES_PER_SECOND
if self.level < MAX_LEVEL:
self.level += 1
self.boss_fight = False
utility.fade_music()
utility.play_music(self.music, True)
utility.play_sound(self.bonus_tally_sound, BAAKE_CHANNEL)
self.player.increment_score_no_text(increment_bonus)
self.pause_spawning = 1.5 * FRAMES_PER_SECOND
def boss_text(self):
utility.fade_music()
utility.play_music(self.boss_fight_music, True)
# Display boss text
display_stage = text.Text(FONT_PATH, 64, FONT_COLOR, 'Boss Fight!', 90)
display_stage.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
display_stage.set_alignment(CENTER_MIDDLE)
self.text_group.add(display_stage)
def load_stage(self):
# Get player's current score
self.defeat_stage = DEFEAT_STAGE
# Display stage text
if self.stage != 0:
display_stage = text.Text(FONT_PATH, 32, FONT_COLOR, 'Stage ' + str(self.stage + 1), 90)
display_stage.position = vector.Vector2d(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
display_stage.set_alignment(CENTER_MIDDLE)
self.group_list[TEXT_GROUP].add(display_stage)
# Enemies spawned here will appear during a level's warm up
for enemy in self.level_list[self.level]:
if enemy[STAGE_SPAWNED] == self.stage:
while enemy[DEFAULT_SPAWN]:
enemy[DEFAULT_SPAWN] -= 1
self.create_actor(enemy[ACTOR_TYPE])
if enemy[STAGE_SPAWNED] == self.stage:
# Time until spawn, actor type, spawn rate
self.enemy_list.append([0, enemy[ACTOR_TYPE], enemy[SPAWN_RATE]])
def warmup(self):
self.pause_spawning -= 1
if not self.pause_spawning:
if self.boss_fight:
self.load_boss()
return True
return False
def update(self):
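        # Per-frame driver: handle the end-of-level bonus pause, the warm-up timer,
        # the stage countdown and switch to the boss fight, then tick every entry
        # in enemy_list toward its next spawn.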
if not self.boss_fight:
utility.play_music(self.music)
if self.after_bonus_pause >= 1:
self.after_bonus_pause -= 1
if self.after_bonus_pause < 1:
if self.level >= MAX_LEVEL:
self.bonus_text.kill()
self.bonus_amount.kill()
self.done = True
else:
self.load_level()
if self.done:
return self.done
if self.bonus != -1:
self.give_bonus()
if self.pause_spawning:
live = self.warmup()
if not live:
return
if not self.boss_fight:
self.defeat_stage -= 1
if not self.defeat_stage:
if self.stage < MAX_STAGE:
self.stage += 1
self.load_stage()
else:
for enemy in self.enemy_group:
if enemy.actor_type == ACTOR_TYPE_BAAKE:
enemy.leave_screen = True
self.boss_fight = True
self.stage += 1
self.pause_spawning = 3 * FRAMES_PER_SECOND
self.boss_text()
for enemy in self.enemy_list:
if enemy[SPAWN_RATE] != -1:
if not enemy[TIME_TO_SPAWN]:
self.create_actor(enemy[ACTOR_TYPE])
enemy[TIME_TO_SPAWN] = enemy[SPAWN_RATE]
enemy[TIME_TO_SPAWN] -= 1
def create_actor(self, actor_type):
if actor_type == ACTOR_MOONO:
new_moono = enemies.moono.Moono(self.player, self.group_list)
if self.boss_fight:
self.force_drop += 1
new_moono.boss_fight = True
if self.force_drop > 10:
new_moono.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_moono)
elif actor_type == ACTOR_ROKUBI:
spawn = False
if len(self.rokubi_group) <= 5 and not self.boss_fight:
spawn = True
elif len(self.rokubi_group) <= 10 and self.boss_fight:
spawn = True
if spawn:
new_rokubi = enemies.rokubi.Rokubi(self.player, self.group_list)
self.rokubi_group.add(new_rokubi)
if self.boss_fight:
self.force_drop += 1
new_rokubi.boss_fight = True
if self.force_drop > 10:
new_rokubi.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_rokubi)
elif actor_type == ACTOR_HAOYA:
new_haoya = enemies.haoya.Haoya(self.player, self.group_list)
if self.boss_fight:
self.force_drop += 1
new_haoya.boss_fight = True
if self.force_drop > 10:
new_haoya.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_haoya)
elif actor_type == ACTOR_BATTO:
batto_spawn = 5
last_batto = None
while batto_spawn:
new_batto = enemies.batto.Batto(self.group_list, last_batto)
batto_spawn -= 1
last_batto = new_batto
if self.boss_fight:
self.force_drop += 1
new_batto.boss_fight = True
if self.force_drop > 19:
new_batto.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_batto)
elif actor_type == ACTOR_YUREI:
self.enemy_group.add(enemies.yurei.Yurei(self.group_list))
elif actor_type == ACTOR_HAKTA:
new_hakta = enemies.hakta.Hakta(self.player, self.group_list)
if self.boss_fight:
self.force_drop += 1
new_hakta.boss_fight = True
if self.force_drop > 10:
new_hakta.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_hakta)
elif actor_type == ACTOR_RAAYU:
new_raayu = enemies.raayu.Raayu(self.player, self.group_list)
if self.boss_fight:
self.force_drop += 1
new_raayu.boss_fight = True
if self.force_drop > 10:
new_raayu.drop_item = True
self.force_drop = 0
self.enemy_group.add(new_raayu)
elif actor_type == ACTOR_PAAJO:
paajo_spawn = 5
paajo_group = []
while paajo_spawn:
new_paajo = enemies.paajo.Paajo(self.group_list, paajo_spawn)
paajo_group.append(new_paajo)
paajo_spawn -= 1
if self.boss_fight:
self.force_drop += 1
new_paajo.boss_fight = True
if self.force_drop > 19:
new_paajo.drop_item = True
self.force_drop = 0
for member in paajo_group:
member.set_group(paajo_group)
self.enemy_group.add(paajo_group)
elif actor_type == ACTOR_BAAKE:
self.enemy_group.add(enemies.baake.Baake())
elif actor_type == ACTOR_BOKKO:
self.enemy_group.add(enemies.bokko.Bokko())
elif actor_type == ACTOR_BOSS_TUT:
self.boss_group.add(
enemies.boss.BossTut(self, self.player, self.group_list))
elif actor_type == ACTOR_BAAKE_BOSS:
self.boss_group.add(
enemies.boss.BaakeBoss(self, self.player, self.group_list))
elif actor_type == ACTOR_MOONO_BOSS:
self.boss_group.add(
enemies.boss.MoonoBoss(self, self.player, self.group_list))
```
#### File: TroubleInCloudLand/enemies/baake.py
```python
import copy
from core import actor
from core.actor import *
from utils import aitools, utility
def load_data():
Baake.bullet_sound = utility.load_sound('baakeHit')
Baake.master_animation_list.build_animation('Idle', ['baake'])
class Baake(actor.Actor):
master_animation_list = animation.Animation()
def __init__(self):
# COMMON VARIABLES
actor.Actor.__init__(self)
self.actor_type = ACTOR_TYPE_BAAKE
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.bound_style = BOUND_STYLE_REFLECT
self.bounds = 32, 32, SCREEN_WIDTH - 32, SCREEN_HEIGHT - 32
self.can_collide = True
self.hitrect = pygame.Rect(0,0,108,88)
self.position = vector.Vector2d(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
self.velocity = vector.Vector2d(5.0, 0.0)
# UNIQUE VARIABLES
self.speed = 5.0
self.change_direction = 0
# BOSS FIGHT
self.leave_screen = False
aitools.spawn_on_screen(self)
def actor_update(self):
if not self.leave_screen:
self.process_ai()
else:
self.bound_style = BOUND_STYLE_KILL
if not self.active:
self.active = True
def process_ai(self):
if not self.change_direction:
self.change_direction = 2 * FRAMES_PER_SECOND
aitools.cardinal_direction(self)
self.change_direction -= 1
def collide(self):
if self.object_collided_with.actor_type == ACTOR_BULLET:
utility.play_sound(Baake.bullet_sound, BAAKE_CHANNEL)
elif self.object_collided_with.actor_type == ACTOR_PLAYER:
if self.object_collided_with.position.x < self.position.x - 64:
self.object_collided_with.position = vector.Vector2d((self.position.x - 94), self.object_collided_with.position.y)
if self.object_collided_with.velocity:
self.object_collided_with.velocity *= -1.0, 1.0
elif self.object_collided_with.position.x > self.position.x + 64:
self.object_collided_with.position = vector.Vector2d((self.position.x + 94), self.object_collided_with.position.y)
if self.object_collided_with.velocity:
self.object_collided_with.velocity *= -1.0, 1.0
if self.object_collided_with.position.y < self.position.y - 32:
self.object_collided_with.position = vector.Vector2d(self.object_collided_with.position.x, self.position.y - 76)
if self.object_collided_with.velocity:
self.object_collided_with.velocity *= 1.0, -1.0
elif self.object_collided_with.position.y > self.position.y + 32:
self.object_collided_with.position = vector.Vector2d(self.object_collided_with.position.x, self.position.y + 108)
if self.object_collided_with.velocity:
self.object_collided_with.velocity *= 1.0, -1.0
```
#### File: TroubleInCloudLand/enemies/batto.py
```python
import copy
import random
from core import enemy
from core.actor import *
from utils import aitools, utility
def load_data():
Batto.last_spawn = 0, 0
Batto.death_sound = utility.load_sound('pop')
Batto.master_animation_list.build_animation('Idle', ['batto'])
class Batto(enemy.Enemy):
death_sound = None
master_animation_list = animation.Animation()
def __init__(self, group_list, leader=None):
# COMMON VARIABLES
enemy.Enemy.__init__(self)
self.actor_type = ACTOR_TYPE_ENEMY
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.bound_style = BOUND_STYLE_CUSTOM
self.bounds = -32, -32, (SCREEN_WIDTH + 32), (SCREEN_HEIGHT + 32)
self.can_collide = True
self.hitrect = pygame.Rect(0,0,80,66)
self.position = vector.Vector2d.zero
self.velocity = vector.Vector2d.zero
# UNIQUE VARIABLES
self.speed = 10
self.change_direction = 0
self.target = 0, 0
self.powerup_group = group_list[POWERUP_GROUP]
self.text_group = group_list[TEXT_GROUP]
self.effects_group = group_list[EFFECTS_GROUP]
self.health = 1
self.boss_fight = False
self.drop_item = False
# EXIT GAME VARIABLES
self.on_screen = 0
self.dead = False
if not leader:
self.leader = self
aitools.spawn_off_screen(self, 128)
Batto.last_spawn = self.position
else:
self.leader = leader
aitools.spawn_at_point(self, Batto.last_spawn)
def actor_update(self):
if self.active:
if self.health <= 0:
self.die()
self.on_screen += 1
self.process_ai()
if not self.leader.active:
self.leader = self
else:
self.active = True
def process_ai(self):
if self.leader.dead:
pass
elif self.leader.active and self.leader != self:
temp_velocity = vector.Vector2d(self.leader.velocity[0], self.leader.velocity[1])
target_point = self.leader.position - (temp_velocity.make_normal()) * vector.Vector2d(150, 150)
aitools.go_to_point(self, target_point)
else:
if not self.change_direction:
self.target = vector.Vector2d((random.random() * (SCREEN_WIDTH + 200)) - 100, (random.random() * (SCREEN_HEIGHT + 200)) - 100)
self.change_direction = 30
self.change_direction -= 1
aitools.arc_to_point(self, self.target)
def custom_bounds(self):
if self.on_screen > FRAMES_PER_SECOND:
self.active = False
self.dead = True
self.on_screen = 0
if self.dead:
self.kill()
def collide(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.hurt(1)
```
#### File: TroubleInCloudLand/enemies/haoya.py
```python
import copy
import pygame
from core import animation, enemy, particle
from utils import aitools, utility, vector
from utils.settings import *
def load_data():
Haoya.death_sound = utility.load_sound('pop')
Haoya.master_animation_list.build_animation('Idle', ['haoya'])
class Haoya(enemy.Enemy):
death_sound = None
master_animation_list = animation.Animation()
def __init__(self, target_object, group_list):
enemy.Enemy.__init__(self)
# COMMON VARIABLES
self.actor_type = ACTOR_TYPE_ENEMY
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.bound_style = BOUND_STYLE_CUSTOM
self.bounds = 32, 32, SCREEN_WIDTH - 32, SCREEN_HEIGHT - 32
self.can_collide = True
self.hitrect = pygame.Rect(0, 0, 54, 58)
self.hitrect_offset_y = 6
self.velocity = vector.Vector2d.zero
# UNIQUE VARIABLES
self.speed = 2
self.target = target_object
self.powerup_group = group_list[POWERUP_GROUP]
self.text_group = group_list[TEXT_GROUP]
self.effects_group = group_list[EFFECTS_GROUP]
self.health = 3
self.boss_fight = False
self.drop_item = False
self.death_emitter = particle.ParticleEmitter(vector.Vector2d.zero,
self.effects_group,
['puff'],
0.0, 360.0,
5.0, 0.0,
1.0, 4.0,
-1.0)
self.death_emitter.mount_to(self)
# LEAVE SCREEN VARIABLES
self.life_timer = 10 * FRAMES_PER_SECOND
self.leave_screen = False
aitools.spawn_off_screen(self)
def process_ai(self):
if self.leave_screen:
pass
elif self.active:
# Find the distance to the player
magnitude = self.target.position - self.position
if magnitude.get_magnitude() < 250:
# If the player is within x distance then charge the player.
self.speed = 7
else:
self.speed = 2
aitools.go_to_target(self, self.target)
def actor_update(self):
self.life_timer -= 1
if not self.life_timer:
self.leave_screen = True
self.speed = 7
aitools.go_to_point(self, aitools.point_off_screen())
if self.active and self.health <= 0:
self.active = False
self.die()
if not self.active and self.health:
self.active = True
self.process_ai()
def custom_bounds(self):
if self.leave_screen:
self.kill()
def collide(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.hurt(1)
```
#### File: TroubleInCloudLand/scenes/credits.py
```python
from scenes import scene
from ui import text
from utils import utility
from utils.utility import *
class Credits(object):
def __init__(self, screen, music_list):
self.music_list = music_list
self.screen = screen
self.new_scene = scene.ForestScene()
self.finished = False
self.scroll_rate = -1
self.rolling_credits = True
self.roll_credits()
def roll_credits(self):
credit_group = pygame.sprite.Group()
# Create Text Labels
title_credit = text.Text(FONT_PATH, 48, FONT_COLOR, 'Credits')
title_credit.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT))
big_jony = text.Text(FONT_PATH, 36, FONT_COLOR, '<NAME>')
big_jony.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 100))
jony_credit0 = text.Text(FONT_PATH, 24, FONT_COLOR, 'Game Programming')
jony_credit0.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 150))
jony_credit1 = text.Text(FONT_PATH, 24, FONT_COLOR, 'Sound Design')
jony_credit1.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 200))
jony_credit2 = text.Text(FONT_PATH, 24, FONT_COLOR, 'Voice Acting')
jony_credit2.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 250))
big_josh = text.Text(FONT_PATH, 36, FONT_COLOR, '<NAME>')
big_josh.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 350))
josh_credit0 = text.Text(FONT_PATH, 24, FONT_COLOR, 'Game Programming')
josh_credit0.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 400))
josh_credit1 = text.Text(FONT_PATH, 24, FONT_COLOR, ' Graphic Design')
josh_credit1.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 450))
big_special = text.Text(FONT_PATH, 36, FONT_COLOR, 'Special Thanks To:')
big_special.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 550))
special_credit0 = text.Text(FONT_PATH, 24, FONT_COLOR, 'Python Software Foundation')
special_credit0.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 600))
special_credit1 = text.Text(FONT_PATH, 24, FONT_COLOR, 'PyGame')
special_credit1.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 650))
special_credit2 = text.Text(FONT_PATH, 24, FONT_COLOR, 'ShyFonts Type Foundry')
special_credit2.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 700))
thank_you = text.Text(FONT_PATH, 64, FONT_COLOR, 'Thank You For Playing!')
thank_you.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT + 800))
# Add Labels to Group
credit_group.add(title_credit)
credit_group.add(big_jony)
credit_group.add(jony_credit0)
credit_group.add(jony_credit1)
credit_group.add(jony_credit2)
credit_group.add(big_josh)
credit_group.add(josh_credit0)
credit_group.add(josh_credit1)
credit_group.add(big_special)
credit_group.add(special_credit0)
credit_group.add(special_credit1)
credit_group.add(special_credit2)
credit_group.add(thank_you)
timer = 5 * FRAMES_PER_SECOND
for credit in credit_group:
credit.set_alignment(CENTER_MIDDLE)
while self.rolling_credits:
utility.play_music(self.music_list)
for credit in credit_group:
credit_position = credit.get_position()
credit.set_position((credit_position[0], credit_position[1] + self.scroll_rate))
credit_group.update()
self.new_scene.draw(self.screen)
credit_group.draw(self.screen)
pygame.display.flip()
self.handle_events()
if special_credit2.get_position()[1] < 0:
if self.finished:
self.rolling_credits = False
if thank_you.get_position()[1] < (SCREEN_HEIGHT / 2):
thank_you.set_position((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
def handle_events(self):
for event in pygame.event.get():
if (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) or (event.type == pygame.MOUSEBUTTONDOWN):
self.scroll_rate = -10
self.finished = True
```
#### File: TroubleInCloudLand/utils/prettyprint.py
```python
def able(value):
return 'Enabled' if value else 'Disabled'
def on(value):
return 'On' if value else 'Off'
def mouse_sensitivity(value):
if value == .5:
return 'Very Low'
elif value == .75:
return 'Low'
elif value == 1:
return 'Normal'
elif value == 1.25:
return 'High'
elif value == 1.5:
return 'Very High'
def screen_mode(value):
return 'Fullscreen' if value else 'Windowed'
``` |